recoll-1.26.3/0000755000175000017500000000000013570165410010066 500000000000000recoll-1.26.3/qtgui/0000755000175000017500000000000013570165410011217 500000000000000recoll-1.26.3/qtgui/rclm_view.cpp0000644000175000017500000004203213566424763013651 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "safeunistd.h" #include #include #include "qxtconfirmationmessage.h" #include "log.h" #include "fileudi.h" #include "execmd.h" #include "transcode.h" #include "docseqhist.h" #include "docseqdb.h" #include "internfile.h" #include "rclmain_w.h" #include "rclzg.h" #include "pathut.h" using namespace std; // Start native viewer or preview for input Doc. This is used to allow // using recoll from another app (e.g. Unity Scope) to view embedded // result docs (docs with an ipath). . We act as a proxy to extract // the data and start a viewer. The Url are encoded as // file://path#ipath void RclMain::viewUrl() { if (m_urltoview.isEmpty() || !rcldb) return; QUrl qurl(m_urltoview); LOGDEB("RclMain::viewUrl: Path [" << ((const char *)qurl.path().toLocal8Bit()) << "] fragment [" << ((const char *)qurl.fragment().toLocal8Bit()) << "]\n"); /* In theory, the url might not be for a file managed by the fs indexer so that the make_udi() call here would be wrong(). When/if this happens we'll have to hide this part inside internfile and have some url magic to indicate the appropriate indexer/identification scheme */ string udi; make_udi((const char *)qurl.path().toLocal8Bit(), (const char *)qurl.fragment().toLocal8Bit(), udi); Rcl::Doc doc; Rcl::Doc idxdoc; // idxdoc.idxi == 0 -> works with base index only if (!rcldb->getDoc(udi, idxdoc, doc) || doc.pc == -1) return; // StartNativeViewer needs a db source to call getEnclosing() on. Rcl::Query *query = new Rcl::Query(rcldb.get()); DocSequenceDb *src = new DocSequenceDb( rcldb, std::shared_ptr(query), "", std::shared_ptr(new Rcl::SearchData)); m_source = std::shared_ptr(src); // Start a native viewer if the mimetype has one defined, else a // preview. string apptag; doc.getmeta(Rcl::Doc::keyapptg, &apptag); string viewer = theconfig->getMimeViewerDef(doc.mimetype, apptag, prefs.useDesktopOpen); if (viewer.empty()) { startPreview(doc); } else { hide(); startNativeViewer(doc); // We have a problem here because xdg-open will exit // immediately after starting the command instead of waiting // for it, so we can't wait either and we don't know when we // can exit (deleting the temp file). As a bad workaround we // sleep some time then exit. The alternative would be to just // prevent the temp file deletion completely, leaving it // around forever. Better to let the user save a copy if he // wants I think. sleep(30); fileExit(); } } /* Look for html browser. We make a special effort for html because it's * used for reading help. 
This is only used if the normal approach * (xdg-open etc.) failed */ static bool lookForHtmlBrowser(string &exefile) { vector blist{"opera", "google-chrome", "chromium-browser", "palemoon", "iceweasel", "firefox", "konqueror", "epiphany"}; const char *path = getenv("PATH"); if (path == 0) { path = "/usr/local/bin:/usr/bin:/bin"; } // Look for each browser for (const auto& entry : blist) { if (ExecCmd::which(entry, exefile, path)) return true; } exefile.clear(); return false; } void RclMain::openWith(Rcl::Doc doc, string cmdspec) { LOGDEB("RclMain::openWith: " << cmdspec << "\n"); // Split the command line vector lcmd; if (!stringToStrings(cmdspec, lcmd)) { QMessageBox::warning(0, "Recoll", tr("Bad desktop app spec for %1: [%2]\n" "Please check the desktop file") .arg(QString::fromUtf8(doc.mimetype.c_str())) .arg(QString::fromLocal8Bit(cmdspec.c_str()))); return; } // Look for the command to execute in the exec path and the filters // directory string execname = lcmd.front(); lcmd.erase(lcmd.begin()); string url = doc.url; string fn = fileurltolocalpath(doc.url); // Try to keep the letters used more or less consistent with the reslist // paragraph format. map subs; #ifdef _WIN32 path_backslashize(fn); #endif subs["F"] = fn; subs["f"] = fn; subs["U"] = url_encode(url); subs["u"] = url; execViewer(subs, false, execname, lcmd, cmdspec, doc); } void RclMain::startNativeViewer(Rcl::Doc doc, int pagenum, QString term) { string apptag; doc.getmeta(Rcl::Doc::keyapptg, &apptag); LOGDEB("RclMain::startNativeViewer: mtype [" << doc.mimetype << "] apptag [" << apptag << "] page " << pagenum << " term [" << qs2utf8s(term) << "] url [" << doc.url << "] ipath [" << doc.ipath << "]\n"); // Look for appropriate viewer string cmdplusattr = theconfig->getMimeViewerDef(doc.mimetype, apptag, prefs.useDesktopOpen); if (cmdplusattr.empty()) { QMessageBox::warning(0, "Recoll", tr("No external viewer configured for mime type [") + doc.mimetype.c_str() + "]"); return; } LOGDEB("StartNativeViewer: viewerdef from config: " << cmdplusattr << endl); // Separate command string and viewer attributes (if any) ConfSimple viewerattrs; string cmd; theconfig->valueSplitAttributes(cmdplusattr, cmd, viewerattrs); bool ignoreipath = false; int execwflags = 0; if (viewerattrs.get("ignoreipath", cmdplusattr)) ignoreipath = stringToBool(cmdplusattr); if (viewerattrs.get("maximize", cmdplusattr)) { if (stringToBool(cmdplusattr)) { execwflags |= ExecCmd::EXF_MAXIMIZED; } } // Split the command line vector lcmd; if (!stringToStrings(cmd, lcmd)) { QMessageBox::warning(0, "Recoll", tr("Bad viewer command line for %1: [%2]\n" "Please check the mimeview file") .arg(QString::fromUtf8(doc.mimetype.c_str())) .arg(QString::fromLocal8Bit(cmd.c_str()))); return; } // Look for the command to execute in the exec path and the filters // directory string execpath; if (!ExecCmd::which(lcmd.front(), execpath)) { execpath = theconfig->findFilter(lcmd.front()); // findFilter returns its input param if the filter is not in // the normal places. As we already looked in the path, we // have no use for a simple command name here (as opposed to // mimehandler which will just let execvp do its thing). Erase // execpath so that the user dialog will be started further // down. 
if (!execpath.compare(lcmd.front())) execpath.erase(); // Specialcase text/html because of the help browser need if (execpath.empty() && !doc.mimetype.compare("text/html") && apptag.empty()) { if (lookForHtmlBrowser(execpath)) { lcmd.clear(); lcmd.push_back(execpath); lcmd.push_back("%u"); } } } // Command not found: start the user dialog to help find another one: if (execpath.empty()) { QString mt = QString::fromUtf8(doc.mimetype.c_str()); QString message = tr("The viewer specified in mimeview for %1: %2" " is not found.\nDo you want to start the " " preferences dialog ?") .arg(mt).arg(QString::fromLocal8Bit(lcmd.front().c_str())); switch(QMessageBox::warning(0, "Recoll", message, "Yes", "No", 0, 0, 1)) { case 0: showUIPrefs(); if (uiprefs) uiprefs->showViewAction(mt); break; case 1: break; } // The user will have to click on the link again to try the // new command. return; } // Get rid of the command name. lcmd is now argv[1...n] lcmd.erase(lcmd.begin()); // Process the command arguments to determine if we need to create // a temporary file. // If the command has a %i parameter it will manage the // un-embedding. Else if ipath is not empty, we need a temp file. // This can be overridden with the "ignoreipath" attribute bool groksipath = (cmd.find("%i") != string::npos) || ignoreipath; // We used to try being clever here, but actually, the only case // where we don't need a local file copy of the document (or // parent document) is the case of an HTML page with a non-file // URL (http or https). Trying to guess based on %u or %f is // doomed because we pass %u to xdg-open. bool wantsfile = false; bool wantsparentfile = cmd.find("%F") != string::npos; if (!wantsparentfile && (cmd.find("%f") != string::npos || urlisfileurl(doc.url) || doc.mimetype.compare("text/html"))) { wantsfile = true; } if (wantsparentfile && !urlisfileurl(doc.url)) { QMessageBox::warning(0, "Recoll", tr("Viewer command line for %1 specifies " "parent file but URL is http[s]: unsupported") .arg(QString::fromUtf8(doc.mimetype.c_str()))); return; } if (wantsfile && wantsparentfile) { QMessageBox::warning(0, "Recoll", tr("Viewer command line for %1 specifies both " "file and parent file value: unsupported") .arg(QString::fromUtf8(doc.mimetype.c_str()))); return; } string url = doc.url; string fn = fileurltolocalpath(doc.url); Rcl::Doc pdoc; if (wantsparentfile) { // We want the path for the parent document. For example to // open the chm file, not the internal page. Note that we just // override the other file name in this case. if (!m_source || !m_source->getEnclosing(doc, pdoc)) { QMessageBox::warning(0, "Recoll", tr("Cannot find parent document")); return; } // Override fn with the parent's : fn = fileurltolocalpath(pdoc.url); // If the parent document has an ipath too, we need to create // a temp file even if the command takes an ipath // parameter. We have no viewer which could handle a double // embedding. Will have to change if such a one appears. if (!pdoc.ipath.empty()) { groksipath = false; } } // Can't remember what enterHistory was actually for. Set it to // true always for now bool enterHistory = true; bool istempfile = false; LOGDEB("StartNativeViewer: groksipath " << groksipath << " wantsf " << wantsfile << " wantsparentf " << wantsparentfile << "\n"); // If the command wants a file but this is not a file url, or // there is an ipath that it won't understand, we need a temp file: theconfig->setKeyDir(fn.empty() ? 
"" : path_getfather(fn)); if (((wantsfile || wantsparentfile) && fn.empty()) || (!groksipath && !doc.ipath.empty()) ) { TempFile temp; Rcl::Doc& thedoc = wantsparentfile ? pdoc : doc; if (!FileInterner::idocToFile(temp, string(), theconfig, thedoc)) { QMessageBox::warning(0, "Recoll", tr("Cannot extract document or create " "temporary file")); return; } enterHistory = true; istempfile = true; rememberTempFile(temp); fn = temp.filename(); url = path_pathtofileurl(fn); } // If using an actual file, check that it exists, and if it is // compressed, we may need an uncompressed version if (!fn.empty() && theconfig->mimeViewerNeedsUncomp(doc.mimetype)) { if (!path_readable(fn)) { QMessageBox::warning(0, "Recoll", tr("Can't access file: ") + u8s2qs(fn)); return; } TempFile temp; if (FileInterner::isCompressed(fn, theconfig)) { if (!FileInterner::maybeUncompressToTemp(temp, fn, theconfig, doc)) { QMessageBox::warning(0, "Recoll", tr("Can't uncompress file: ") + QString::fromLocal8Bit(fn.c_str())); return; } } if (temp.ok()) { istempfile = true; rememberTempFile(temp); fn = temp.filename(); url = path_pathtofileurl(fn); } } if (istempfile) { QxtConfirmationMessage confirm( QMessageBox::Warning, "Recoll", tr("Opening a temporary copy. Edits will be lost if you don't save" "
them to a permanent location."), tr("Do not show this warning next time (use GUI preferences " "to restore).")); confirm.setSettingsPath("Recoll/prefs"); confirm.setOverrideSettingsKey("showTempFileWarning"); confirm.exec(); // Pita: need to keep the prefs struct in sync, else the value // will be clobbered on program exit. QSettings settings("Recoll.org", "recoll"); prefs.showTempFileWarning = settings.value("Recoll/prefs/showTempFileWarning").toInt(); } // If we are not called with a page number (which would happen for a call // from the snippets window), see if we can compute a page number anyway. if (pagenum == -1) { pagenum = 1; string lterm; if (m_source) pagenum = m_source->getFirstMatchPage(doc, lterm); if (pagenum == -1) pagenum = 1; else // We get the match term used to compute the page term = QString::fromUtf8(lterm.c_str()); } char cpagenum[20]; sprintf(cpagenum, "%d", pagenum); // Substitute %xx inside arguments string efftime; if (!doc.dmtime.empty() || !doc.fmtime.empty()) { efftime = doc.dmtime.empty() ? doc.fmtime : doc.dmtime; } else { efftime = "0"; } // Try to keep the letters used more or less consistent with the reslist // paragraph format. map subs; subs["D"] = efftime; #ifdef _WIN32 path_backslashize(fn); #endif subs["f"] = fn; subs["F"] = fn; subs["i"] = FileInterner::getLastIpathElt(doc.ipath); subs["M"] = doc.mimetype; subs["p"] = cpagenum; subs["s"] = (const char*)term.toLocal8Bit(); subs["U"] = url_encode(url); subs["u"] = url; // Let %(xx) access all metadata. for (const auto& ent :doc.meta) { subs[ent.first] = ent.second; } execViewer(subs, enterHistory, execpath, lcmd, cmd, doc, execwflags); } void RclMain::execViewer(const map& subs, bool enterHistory, const string& execpath, const vector& _lcmd, const string& cmd, Rcl::Doc doc, int flags) { string ncmd; vector lcmd; for (vector::const_iterator it = _lcmd.begin(); it != _lcmd.end(); it++) { pcSubst(*it, ncmd, subs); LOGDEB("" << *it << "->" << (ncmd) << "\n" ); lcmd.push_back(ncmd); } // Also substitute inside the unsplitted command line and display // in status bar pcSubst(cmd, ncmd, subs); #ifndef _WIN32 ncmd += " &"; #endif QStatusBar *stb = statusBar(); if (stb) { string prcmd; #ifdef _WIN32 prcmd = ncmd; #else string fcharset = theconfig->getDefCharset(true); transcode(ncmd, prcmd, fcharset, "UTF-8"); #endif QString msg = tr("Executing: [") + QString::fromUtf8(prcmd.c_str()) + "]"; stb->showMessage(msg, 10000); } if (enterHistory) historyEnterDoc(rcldb.get(), g_dynconf, doc); // Do the zeitgeist thing zg_send_event(ZGSEND_OPEN, doc); // We keep pushing back and never deleting. This can't be good... ExecCmd *ecmd = new ExecCmd(ExecCmd::EXF_SHOWWINDOW | flags); m_viewers.push_back(ecmd); ecmd->startExec(execpath, lcmd, false, false); } void RclMain::startManual() { startManual(string()); } void RclMain::startManual(const string& index) { string docdir = path_cat(theconfig->getDatadir(), "doc"); // The single page user manual is nicer if we have an index. Else // the webhelp one is nicer if it is present string usermanual = path_cat(docdir, "usermanual.html"); string webhelp = path_cat(docdir, "webhelp"); webhelp = path_cat(webhelp, "index.html"); bool has_wh = path_exists(webhelp); LOGDEB("RclMain::startManual: help index is " << (index.empty() ? "(null)" : index) << "\n"); bool indexempty = index.empty(); #ifdef _WIN32 // On Windows I could not find any way to pass the fragment through // rclstartw (tried to set text/html as exception with rclstartw %u). 
// So always start the webhelp indexempty = true; #endif if (!indexempty) { usermanual += "#"; usermanual += index; } Rcl::Doc doc; if (has_wh && indexempty) { doc.url = path_pathtofileurl(webhelp); } else { doc.url = path_pathtofileurl(usermanual); } doc.mimetype = "text/html"; doc.addmeta(Rcl::Doc::keyapptg, "rclman"); startNativeViewer(doc); } recoll-1.26.3/qtgui/webcache.h0000644000175000017500000000416713533651561013067 00000000000000/* Copyright (C) 2016 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _WEBCACHE_H_INCLUDED_ #define _WEBCACHE_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include "ui_webcache.h" #include class WebcacheModelInternal; class QCloseEvent; class WebcacheModel : public QAbstractTableModel { Q_OBJECT; public: WebcacheModel(QObject *parent = 0); ~WebcacheModel(); // Reimplemented methods virtual int rowCount (const QModelIndex& = QModelIndex()) const; virtual int columnCount(const QModelIndex& = QModelIndex()) const; virtual QVariant headerData (int col, Qt::Orientation orientation, int role = Qt::DisplayRole) const; virtual QVariant data(const QModelIndex& index, int role = Qt::DisplayRole ) const; bool deleteIdx(unsigned int idx); std::string getURL(unsigned int idx); public slots: void setSearchFilter(const QString&); void reload(); private: WebcacheModelInternal *m; }; class RclMain; class WebcacheEdit : public QDialog, public Ui::Webcache { Q_OBJECT; public: WebcacheEdit(RclMain *parent); public slots: void saveColState(); void createPopupMenu(const QPoint&); void deleteSelected(); void copyURL(); protected: void closeEvent(QCloseEvent *); private: WebcacheModel *m_model; RclMain *m_recoll; bool m_modified; }; #endif /* _WEBCACHE_H_INCLUDED_ */ recoll-1.26.3/qtgui/firstidx.ui0000644000175000017500000001253713303776056013352 00000000000000 FirstIdxDialog 0 0 502 503 First indexing setup 0 1 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> Qt::RichText true Indexing configuration false false This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Indexing schedule false This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Start indexing now Qt::Horizontal QDialogButtonBox::Close buttonBox accepted() FirstIdxDialog accept() 248 254 157 274 buttonBox rejected() FirstIdxDialog reject() 316 260 286 274 runidxPB clicked() FirstIdxDialog accept() 215 400 215 228 recoll-1.26.3/qtgui/fragbuts.cpp0000644000175000017500000001446213533651561013475 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "safesysstat.h" #include #include #include #include #include #include #include #include #include #include "fragbuts.h" #include "pathut.h" #include "smallut.h" #include "recoll.h" #include "log.h" #include "readfile.h" #include "copyfile.h" using namespace std; class FragButsParser : public QXmlDefaultHandler { public: FragButsParser(FragButs *_parent, vector& _buttons) : parent(_parent), vlw(new QVBoxLayout(parent)), vl(new QVBoxLayout()), buttons(_buttons), hl(0), bg(0), radio(false) { } bool startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &attributes); bool endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName); bool characters(const QString &str) { currentText += str; return true; } bool error(const QXmlParseException& exception) { fatalError(exception); return false; } bool fatalError(const QXmlParseException& x) { errorMessage = QString("%2 at line %3 column %4") .arg(x.message()) .arg(x.lineNumber()) .arg(x.columnNumber()); return false; } QString errorMessage; private: QWidget *parent; QVBoxLayout *vlw; QVBoxLayout *vl; vector& buttons; // Temporary data while parsing. 
QHBoxLayout *hl; QButtonGroup *bg; QString currentText; QString label; string frag; bool radio; }; bool FragButsParser::startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &/*attributes*/) { currentText = ""; if (qName == "buttons") { radio = false; hl = new QHBoxLayout(); } else if (qName == "radiobuttons") { radio = true; bg = new QButtonGroup(parent); hl = new QHBoxLayout(); } return true; } bool FragButsParser::endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName) { if (qName == "label") { label = currentText; } else if (qName == "frag") { frag = qs2utf8s(currentText); } else if (qName == "fragbut") { string slab = qs2utf8s(label); trimstring(slab, " \t\n\t"); label = QString::fromUtf8(slab.c_str()); QAbstractButton *abut; if (radio) { QRadioButton *but = new QRadioButton(label, parent); bg->addButton(but); if (bg->buttons().length() == 1) but->setChecked(true); abut = but; } else { QCheckBox *but = new QCheckBox(label, parent); abut = but; } abut->setToolTip(currentText); buttons.push_back(FragButs::ButFrag(abut, frag)); hl->addWidget(abut); } else if (qName == "buttons" || qName == "radiobuttons") { vl->addLayout(hl); hl = 0; } else if (qName == "fragbuts") { vlw->addLayout(vl); } return true; } FragButs::FragButs(QWidget* parent) : QWidget(parent), m_reftime(0), m_ok(false) { m_fn = path_cat(theconfig->getConfDir(), "fragbuts.xml"); string data, reason; if (!path_exists(m_fn)) { // config does not exist: try to create it from sample string src = path_cat(theconfig->getDatadir(), "examples"); src = path_cat(src, "fragbuts.xml"); copyfile(src.c_str(), m_fn.c_str(), reason); } if (!file_to_string(m_fn, data, &reason)) { QMessageBox::warning(0, "Recoll", tr("%1 not found.").arg( QString::fromLocal8Bit(m_fn.c_str()))); LOGERR("Fragbuts:: can't read [" << (m_fn) << "]\n" ); return; } FragButsParser parser(this, m_buttons); QXmlSimpleReader reader; reader.setContentHandler(&parser); reader.setErrorHandler(&parser); QXmlInputSource xmlInputSource; xmlInputSource.setData(QString::fromUtf8(data.c_str())); if (!reader.parse(xmlInputSource)) { QMessageBox::warning(0, "Recoll", tr("%1:\n %2") .arg(QString::fromLocal8Bit(m_fn.c_str())) .arg(parser.errorMessage)); return; } for (vector::iterator it = m_buttons.begin(); it != m_buttons.end(); it++) { connect(it->button, SIGNAL(clicked(bool)), this, SLOT(onButtonClicked(bool))); } setWindowTitle(tr("Query Fragments")); isStale(&m_reftime); m_ok = true; } FragButs::~FragButs() { } bool FragButs::isStale(time_t *reftime) { struct stat st; stat(m_fn.c_str(), &st); bool ret = st.st_mtime != m_reftime; if (reftime) *reftime = st.st_mtime; return ret; } void FragButs::onButtonClicked(bool on) { LOGDEB("FragButs::onButtonClicked: [" << (int(on)) << "]\n" ); emit fragmentsChanged(); } void FragButs::getfrags(std::vector& frags) { for (vector::iterator it = m_buttons.begin(); it != m_buttons.end(); it++) { if (it->button->isChecked() && !it->fragment.empty()) { LOGDEB("FragButs: fragment [" << (it->fragment) << "]\n" ); frags.push_back(it->fragment); } } } recoll-1.26.3/qtgui/fragbuts.h0000644000175000017500000000330713533651561013136 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _FRAGBUTS_H_INCLUDED_ #define _FRAGBUTS_H_INCLUDED_ #include #include #include #include class QAbstractButton; /* * Display a series of user-defined buttons which activate query * language fragments to augment the current search */ class FragButs : public QWidget { Q_OBJECT; public: FragButs(QWidget* parent = 0); virtual ~FragButs(); struct ButFrag { QAbstractButton *button; std::string fragment; ButFrag(QAbstractButton *but, const std::string& frag) : button(but), fragment(frag) { } }; void getfrags(std::vector&); bool ok() {return m_ok;} bool isStale(time_t *reftime); private slots: void onButtonClicked(bool); signals: void fragmentsChanged(); private: std::vector m_buttons; std::string m_fn; time_t m_reftime; bool m_ok; }; #endif /* _FRAGBUTS_H_INCLUDED_ */ recoll-1.26.3/qtgui/ptrans.ui0000644000175000017500000000775013303776056013026 00000000000000 EditTransBase 0 0 649 362 Path Translations Setting path translations for false Select one or several file types, then use the controls in the frame below to change how they are processed QFrame::StyledPanel QFrame::Sunken QAbstractItemView::NoEditTriggers QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows true true 2 true false 300 20 false true false Add false Delete Qt::Horizontal 40 20 Cancel Save recoll-1.26.3/qtgui/reslist.h0000644000175000017500000001336513533651561013013 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RESLIST_H_INCLUDED_ #define _RESLIST_H_INCLUDED_ #include "autoconfig.h" #include #include #include "plaintorich.h" #if defined(USING_WEBENGINE) # include # define RESLIST_PARENTCLASS QWebEngineView #elif defined(USING_WEBKIT) # include # define RESLIST_PARENTCLASS QWebView #else # include # define RESLIST_PARENTCLASS QTextBrowser #endif class RclMain; class QtGuiResListPager; class QEvent; namespace Rcl { class Doc; } /** * Display a list of document records. The data can be out of the history * manager or from an index query, both abstracted as a DocSequence. */ class ResList : public RESLIST_PARENTCLASS { Q_OBJECT; friend class QtGuiResListPager; public: ResList(QWidget* parent = 0, const char* name = 0); virtual ~ResList(); // Return document for given docnum. 
We mostly act as an // intermediary to the docseq here, but this has also the // side-effect of making the entry current (visible and // highlighted), and only works if the num is inside the current // page or its immediate neighbours. bool getDoc(int docnum, Rcl::Doc &); bool displayingHistory(); int listId() const {return m_listId;} int pageFirstDocNum(); void setFont(); void setRclMain(RclMain *m, bool ismain); public slots: virtual void setDocSource(std::shared_ptr nsource); virtual void resetList(); // Erase current list virtual void resPageUpOrBack(); // Page up pressed virtual void resPageDownOrNext(); // Page down pressed virtual void resultPageBack(); // Previous page of results virtual void resultPageFirst(); // First page of results virtual void resultPageNext(); // Next (or first) page of results virtual void resultPageFor(int docnum); // Page containing docnum virtual void menuPreview(); virtual void menuSaveToFile(); virtual void menuEdit(); virtual void menuOpenWith(QAction *); virtual void menuCopyFN(); virtual void menuCopyURL(); virtual void menuExpand(); virtual void menuPreviewParent(); virtual void menuOpenParent(); virtual void menuShowSnippets(); virtual void menuShowSubDocs(); virtual void previewExposed(int); virtual void append(const QString &text); virtual void readDocSource(); virtual void highlighted(const QString& link); virtual void createPopupMenu(const QPoint& pos); virtual void showQueryDetails(); signals: void nextPageAvailable(bool); void prevPageAvailable(bool); void docPreviewClicked(int, Rcl::Doc, int); void docSaveToFileClicked(Rcl::Doc); void previewRequested(Rcl::Doc); void showSnippets(Rcl::Doc); void showSubDocs(Rcl::Doc); void editRequested(Rcl::Doc); void openWithRequested(Rcl::Doc, string cmd); void docExpand(Rcl::Doc); void wordSelect(QString); void wordReplace(const QString&, const QString&); void hasResults(int); protected: void keyPressEvent(QKeyEvent *e); void mouseReleaseEvent(QMouseEvent *e); void mouseDoubleClickEvent(QMouseEvent*); public slots: virtual void onLinkClicked(const QUrl &); virtual void onPopupJsDone(const QVariant&); void runJS(const QString& js); void runStoredJS(); protected slots: virtual void languageChange(); private: QtGuiResListPager *m_pager{0}; std::shared_ptr m_source; int m_popDoc{-1}; // Docnum for the popup menu. QPoint m_popPos; int m_curPvDoc{-1};// Docnum for current preview int m_lstClckMod{0}; // Last click modifier. int m_listId{0}; // query Id for matching with preview windows #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) // Webview makes it more difficult to append text incrementally, // so we store the page and display it when done. QString m_text; #else // Translate from textedit paragraph number to relative // docnum. 
Built while we insert text into the qtextedit std::map m_pageParaToReldocnums; virtual int docnumfromparnum(int); virtual std::pair parnumfromdocnum(int); #endif QString m_js; RclMain *m_rclmain{0}; bool m_ismainres{true}; void doCreatePopupMenu(); virtual void displayPage(); static int newListId(); void resetView(); bool scrollIsAtTop(); bool scrollIsAtBottom(); void setupArrows(); }; #ifdef USING_WEBENGINE // Subclass the page to hijack the link clicks class RclWebPage : public QWebEnginePage { Q_OBJECT public: RclWebPage(ResList *parent) : QWebEnginePage((QWidget *)parent), m_reslist(parent) {} protected: virtual bool acceptNavigationRequest( const QUrl& url, NavigationType tp, bool isMainFrame); private: ResList *m_reslist; }; #else // Using Qt Webkit #define RclWebPage QWebPage #endif class PlainToRichQtReslist : public PlainToRich { public: virtual string startMatch(unsigned int idx); virtual string endMatch(); }; #endif /* _RESLIST_H_INCLUDED_ */ recoll-1.26.3/qtgui/preview_load.h0000644000175000017500000000315013533651561013775 00000000000000/* Copyright (C) 2015 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PVW_LOAD_H_INCLUDED_ #define _PVW_LOAD_H_INCLUDED_ #include #include #include "rcldoc.h" #include "pathut.h" #include "rclutil.h" #include "rclconfig.h" #include "internfile.h" /* * A thread to perform the file reading / format conversion work for preview */ class LoadThread : public QThread { Q_OBJECT; public: LoadThread(RclConfig *conf, const Rcl::Doc& idoc, bool pvhtml, QObject *parent = 0); virtual ~LoadThread() { } virtual void run(); public: // The results are returned through public members. int status; Rcl::Doc fdoc; TempFile tmpimg; std::string missing; FileInterner::ErrorPossibleCause explain{FileInterner::InternfileOther}; private: Rcl::Doc m_idoc; bool m_previewHtml; RclConfig m_config; }; #endif /* _PVW_LOAD_H_INCLUDED_ */ recoll-1.26.3/qtgui/restable.ui0000644000175000017500000000360713303776056013315 00000000000000 ResTable 0 0 640 480 0 0 Qt::Vertical 0 2 QAbstractItemView::NoEditTriggers false true true false false false false 0 0 recoll-1.26.3/qtgui/spell_w.cpp0000644000175000017500000003520013533651561013316 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "log.h" #include "recoll.h" #include "spell_w.h" #include "guiutils.h" #include "rcldb.h" #include "searchdata.h" #include "rclquery.h" #include "rclhelp.h" #include "wasatorcl.h" #include "execmd.h" #include "indexer.h" #include "fstreewalk.h" using std::list; using std::multimap; using std::string; inline bool wordlessMode(SpellW::comboboxchoice v) { return (v == SpellW::TYPECMB_STATS || v == SpellW::TYPECMB_FAILED); } void SpellW::init() { m_c2t.clear(); expTypeCMB->addItem(tr("Wildcards")); m_c2t.push_back(TYPECMB_WILD); expTypeCMB->addItem(tr("Regexp")); m_c2t.push_back(TYPECMB_REG); expTypeCMB->addItem(tr("Stem expansion")); m_c2t.push_back(TYPECMB_STEM); expTypeCMB->addItem(tr("Spelling/Phonetic")); m_c2t.push_back(TYPECMB_SPELL); expTypeCMB->addItem(tr("Show index statistics")); m_c2t.push_back(TYPECMB_STATS); expTypeCMB->addItem(tr("List files which could not be indexed (slow)")); m_c2t.push_back(TYPECMB_FAILED); // Stemming language combobox stemLangCMB->clear(); vector langs; if (!getStemLangs(langs)) { QMessageBox::warning(0, "Recoll", tr("error retrieving stemming languages")); } for (vector::const_iterator it = langs.begin(); it != langs.end(); it++) { stemLangCMB->addItem(u8s2qs(*it)); } (void)new HelpClient(this); HelpClient::installMap((const char *)this->objectName().toUtf8(), "RCL.SEARCH.GUI.TERMEXPLORER"); // signals and slots connections connect(baseWordLE, SIGNAL(textChanged(const QString&)), this, SLOT(wordChanged(const QString&))); connect(baseWordLE, SIGNAL(returnPressed()), this, SLOT(doExpand())); connect(expandPB, SIGNAL(clicked()), this, SLOT(doExpand())); connect(dismissPB, SIGNAL(clicked()), this, SLOT(close())); connect(expTypeCMB, SIGNAL(activated(int)), this, SLOT(onModeChanged(int))); resTW->setShowGrid(0); #if (QT_VERSION >= QT_VERSION_CHECK(5, 0, 0)) resTW->horizontalHeader()->setSectionResizeMode(0, QHeaderView::Stretch); #else resTW->horizontalHeader()->setResizeMode(0, QHeaderView::Stretch); #endif resTW->verticalHeader()->setDefaultSectionSize(20); connect(resTW, SIGNAL(cellDoubleClicked(int, int)), this, SLOT(textDoubleClicked(int, int))); resTW->setColumnWidth(0, 200); resTW->setColumnWidth(1, 150); resTW->installEventFilter(this); int idx = cmbIdx((comboboxchoice)prefs.termMatchType); expTypeCMB->setCurrentIndex(idx); onModeChanged(idx); } int SpellW::cmbIdx(comboboxchoice mode) { vector::const_iterator it = std::find(m_c2t.begin(), m_c2t.end(), mode); if (it == m_c2t.end()) it = m_c2t.begin(); return it - m_c2t.begin(); } static const int maxexpand = 10000; /* Expand term according to current mode */ void SpellW::doExpand() { int idx = expTypeCMB->currentIndex(); if (idx < 0 || idx >= int(m_c2t.size())) idx = 0; comboboxchoice mode = m_c2t[idx]; // Can't clear qt4 table widget: resets column headers too resTW->setRowCount(0); if (baseWordLE->text().isEmpty() && !wordlessMode(mode)) return; string reason; if (!maybeOpenDb(reason)) { QMessageBox::critical(0, "Recoll", QString(reason.c_str())); LOGDEB("SpellW::doExpand: db error: " << (reason) << "\n" ); return; } int mt; switch(mode) { case TYPECMB_WILD: mt = Rcl::Db::ET_WILD; break; case TYPECMB_REG: mt = 
Rcl::Db::ET_REGEXP; break; case TYPECMB_STEM: mt = Rcl::Db::ET_STEM; break; default: mt = Rcl::Db::ET_WILD; } if (caseSensCB->isChecked()) { mt |= Rcl::Db::ET_CASESENS; } if (diacSensCB->isChecked()) { mt |= Rcl::Db::ET_DIACSENS; } Rcl::TermMatchResult res; string expr = string((const char *)baseWordLE->text().toUtf8()); Rcl::DbStats dbs; rcldb->dbStats(dbs, false); switch (mode) { case TYPECMB_WILD: default: case TYPECMB_REG: case TYPECMB_STEM: { string l_stemlang = qs2utf8s(stemLangCMB->currentText()); if (!rcldb->termMatch(mt, l_stemlang, expr, res, maxexpand)) { LOGERR("SpellW::doExpand:rcldb::termMatch failed\n" ); return; } statsLBL->setText(tr("Index: %1 documents, average length %2 terms." "%3 results") .arg(dbs.dbdoccount).arg(dbs.dbavgdoclen, 0, 'f', 0) .arg(res.entries.size())); } break; case TYPECMB_SPELL: { LOGDEB("SpellW::doExpand: spelling [" << expr << "]\n" ); vector suggs; if (!rcldb->getSpellingSuggestions(expr, suggs)) { QMessageBox::warning(0, "Recoll", tr("Spell expansion error. ")); } for (const auto& it : suggs) { res.entries.push_back(Rcl::TermMatchEntry(it)); } statsLBL->setText(tr("%1 results").arg(res.entries.size())); } break; case TYPECMB_STATS: { showStats(); return; } break; case TYPECMB_FAILED: { showFailed(); return; } break; } if (res.entries.empty()) { resTW->setItem(0, 0, new QTableWidgetItem(tr("No expansion found"))); } else { int row = 0; if (maxexpand > 0 && int(res.entries.size()) >= maxexpand) { resTW->setRowCount(row + 1); resTW->setSpan(row, 0, 1, 2); resTW->setItem(row++, 0, new QTableWidgetItem( tr("List was truncated alphabetically, " "some frequent "))); resTW->setRowCount(row + 1); resTW->setSpan(row, 0, 1, 2); resTW->setItem(row++, 0, new QTableWidgetItem( tr("terms may be missing. " "Try using a longer root."))); resTW->setRowCount(row + 1); resTW->setItem(row++, 0, new QTableWidgetItem("")); } for (vector::iterator it = res.entries.begin(); it != res.entries.end(); it++) { LOGDEB2("SpellW::expand: " << it->wcf << " [" << it->term << "]\n"); char num[30]; if (it->wcf) sprintf(num, "%d / %d", it->docs, it->wcf); else num[0] = 0; resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(u8s2qs(it->term))); resTW->setItem(row++, 1, new QTableWidgetItem(QString::fromUtf8(num))); } } } void SpellW::showStats() { statsLBL->setText(""); int row = 0; Rcl::DbStats res; if (!rcldb->dbStats(res, false)) { LOGERR("SpellW::doExpand:rcldb::dbStats failed\n" ); return; } resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("Number of documents"))); resTW->setItem(row++, 1, new QTableWidgetItem( QString::number(res.dbdoccount))); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("Average terms per document"))); resTW->setItem(row++, 1, new QTableWidgetItem( QString::number(res.dbavgdoclen, 'f', 0))); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("Smallest document length (terms)"))); resTW->setItem(row++, 1, new QTableWidgetItem( QString::number(res.mindoclen))); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("Longest document length (terms)"))); resTW->setItem(row++, 1, new QTableWidgetItem( QString::number(res.maxdoclen))); if (!theconfig) return; ConfSimple cs(theconfig->getIdxStatusFile().c_str(), 1); DbIxStatus st; cs.get("fn", st.fn); cs.get("docsdone", &st.docsdone); cs.get("filesdone", &st.filesdone); cs.get("fileerrors", &st.fileerrors); cs.get("dbtotdocs", &st.dbtotdocs); cs.get("totfiles", &st.totfiles); resTW->setRowCount(row+1); 
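// Show the counters read above from the index status file: documents
// created/updated, files tested, and files which failed to index.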
resTW->setItem(row, 0, new QTableWidgetItem(tr("Results from last indexing:"))); resTW->setItem(row++, 1, new QTableWidgetItem("")); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr(" Documents created/updated"))); resTW->setItem(row++, 1, new QTableWidgetItem(QString::number(st.docsdone))); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr(" Files tested"))); resTW->setItem(row++, 1, new QTableWidgetItem(QString::number(st.filesdone))); resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr(" Unindexed files"))); resTW->setItem(row++, 1, new QTableWidgetItem(QString::number(st.fileerrors))); baseWordLE->setText(QString::fromLocal8Bit(theconfig->getDbDir().c_str())); int64_t dbkbytes = fsTreeBytes(theconfig->getDbDir()) / 1024; if (dbkbytes < 0) { dbkbytes = 0; } resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("Database directory size"))); resTW->setItem(row++, 1, new QTableWidgetItem( u8s2qs(displayableBytes(dbkbytes*1024)))); vector allmimetypes = theconfig->getAllMimeTypes(); multimap mtbycnt; for (vector::const_iterator it = allmimetypes.begin(); it != allmimetypes.end(); it++) { string reason; string q = string("mime:") + *it; Rcl::SearchData *sd = wasaStringToRcl(theconfig, "", q, reason); std::shared_ptr rq(sd); Rcl::Query query(rcldb.get()); if (!query.setQuery(rq)) { LOGERR("Query setup failed: " << (query.getReason()) << "" ); return; } int cnt = query.getResCnt(); mtbycnt.insert(pair(cnt,*it)); } resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(tr("MIME types:"))); resTW->setItem(row++, 1, new QTableWidgetItem("")); for (multimap::const_reverse_iterator it = mtbycnt.rbegin(); it != mtbycnt.rend(); it++) { resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(QString(" ") + u8s2qs(it->second))); resTW->setItem(row++, 1, new QTableWidgetItem( QString::number(it->first))); } } void SpellW::showFailed() { statsLBL->setText(""); int row = 0; Rcl::DbStats res; if (!rcldb->dbStats(res, true)) { LOGERR("SpellW::doExpand:rcldb::dbStats failed\n" ); return; } for (auto entry : res.failedurls) { resTW->setRowCount(row+1); resTW->setItem(row, 0, new QTableWidgetItem(u8s2qs(entry))); resTW->setItem(row++, 1, new QTableWidgetItem("")); } } void SpellW::wordChanged(const QString &text) { if (text.isEmpty()) { expandPB->setEnabled(false); resTW->setRowCount(0); } else { expandPB->setEnabled(true); } } void SpellW::textDoubleClicked() {} void SpellW::textDoubleClicked(int row, int) { QTableWidgetItem *item = resTW->item(row, 0); if (item) emit(wordSelect(item->text())); } void SpellW::onModeChanged(int idx) { if (idx < 0 || idx > int(m_c2t.size())) return; comboboxchoice mode = m_c2t[idx]; setModeCommon(mode); } void SpellW::setMode(comboboxchoice mode) { expTypeCMB->setCurrentIndex(cmbIdx(mode)); setModeCommon(mode); } void SpellW::setModeCommon(comboboxchoice mode) { if (wordlessMode(m_prevmode)) { baseWordLE->setText(""); } m_prevmode = mode; resTW->setRowCount(0); if (o_index_stripchars) { caseSensCB->setEnabled(false); diacSensCB->setEnabled(false); } else { caseSensCB->setEnabled(true); diacSensCB->setEnabled(true); } if (mode == TYPECMB_STEM) { stemLangCMB->setEnabled(true); diacSensCB->setChecked(false); diacSensCB->setEnabled(false); caseSensCB->setChecked(false); caseSensCB->setEnabled(false); } else { stemLangCMB->setEnabled(false); } if (wordlessMode(mode)) { baseWordLE->setEnabled(false); QStringList labels(tr("Item")); labels.push_back(tr("Value")); 
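// Wordless modes (index statistics, failed files) use generic
// Item / Value column headers instead of Term / Doc. counts.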
resTW->setHorizontalHeaderLabels(labels); diacSensCB->setEnabled(false); caseSensCB->setEnabled(false); doExpand(); } else { baseWordLE->setEnabled(true); QStringList labels(tr("Term")); labels.push_back(tr("Doc. / Tot.")); resTW->setHorizontalHeaderLabels(labels); prefs.termMatchType = mode; } } void SpellW::copy() { QItemSelectionModel * selection = resTW->selectionModel(); QModelIndexList indexes = selection->selectedIndexes(); if(indexes.size() < 1) return; // QModelIndex::operator < sorts first by row, then by column. // this is what we need std::sort(indexes.begin(), indexes.end()); // You need a pair of indexes to find the row changes QModelIndex previous = indexes.first(); indexes.removeFirst(); QString selected_text; QModelIndex current; Q_FOREACH(current, indexes) { QVariant data = resTW->model()->data(previous); QString text = data.toString(); // At this point `text` contains the text in one cell selected_text.append(text); // If you are at the start of the row the row number of the previous index // isn't the same. Text is followed by a row separator, which is a newline. if (current.row() != previous.row()) { selected_text.append(QLatin1Char('\n')); } // Otherwise it's the same row, so append a column separator, which is a tab. else { selected_text.append(QLatin1Char('\t')); } previous = current; } // add last element selected_text.append(resTW->model()->data(current).toString()); selected_text.append(QLatin1Char('\n')); qApp->clipboard()->setText(selected_text, QClipboard::Selection); qApp->clipboard()->setText(selected_text, QClipboard::Clipboard); } bool SpellW::eventFilter(QObject *target, QEvent *event) { if (event->type() != QEvent::KeyPress || (target != resTW && target != resTW->viewport())) return false; QKeyEvent *keyEvent = (QKeyEvent *)event; if(keyEvent->matches(QKeySequence::Copy) ) { copy(); return true; } return false; } recoll-1.26.3/qtgui/viewaction_w.h0000644000175000017500000000304313537747251014022 00000000000000/* Copyright (C) 2006-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _VIEWACTION_W_H_INCLUDED_ #define _VIEWACTION_W_H_INCLUDED_ #include #include "ui_viewaction.h" class QDialog; class QMouseEvent; class QTableWidget; class ViewAction : public QDialog, public Ui::ViewActionBase { Q_OBJECT public: ViewAction(QWidget* parent = 0) : QDialog(parent) { setupUi(this); init(); } ~ViewAction() {} void selectMT(const QString& mt); public slots: virtual void editActions(); virtual void onCurrentItemChanged(QTableWidgetItem *, QTableWidgetItem *); virtual void onUseDesktopCBToggled(int); virtual void onSetExceptCBToggled(int); virtual void onSelSameClicked(); private: virtual void init(); virtual void fillLists(); }; #endif /* _VIEWACTION_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/ssearchb.ui0000644000175000017500000000575513533651561013312 00000000000000 SSearchBase 0 0 593 48 SSearchBase 2 4 4 4 4 3 false Erase search entry Clear Ctrl+S false Start query Search Qt::TabFocus Choose search type. 8 0 155 0 true Show query history :/images/clock.png recoll-1.26.3/qtgui/ptrans_w.h0000644000175000017500000000275513533651561013164 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PTRANS_W_H_INCLUDED_ #define _PTRANS_W_H_INCLUDED_ #include #include #include #include "ui_ptrans.h" class QTableWidgetItem; class EditTrans : public QDialog, public Ui::EditTransBase { Q_OBJECT public: EditTrans(const std::string& dbdir, QWidget* parent = 0) : QDialog(parent) { setupUi(this); init(dbdir); } public slots: virtual void onItemDoubleClicked(QTableWidgetItem *); virtual void on_savePB_clicked(); virtual void on_addPB_clicked(); virtual void on_delPB_clicked(); virtual void on_transTW_itemSelectionChanged(); private: virtual void init(const std::string& dbdir); std::string m_dbdir; }; #endif /* _PTRANS_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/ptrans_w.cpp0000644000175000017500000001002213533651561013501 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include using namespace std; #include #include #include #include #include #include #include "recoll.h" #include "log.h" #include "guiutils.h" #include "conftree.h" #include "ptrans_w.h" void EditTrans::init(const string& dbdir) { m_dbdir = path_canon(dbdir); connect(transTW, SIGNAL(itemDoubleClicked(QTableWidgetItem *)), this, SLOT(onItemDoubleClicked(QTableWidgetItem *))); connect(cancelPB, SIGNAL(clicked()), this, SLOT(close())); QString lab = whatIdxLA->text(); lab.append(QString::fromLocal8Bit(m_dbdir.c_str())); whatIdxLA->setText(lab); QStringList labels(tr("Source path")); labels.push_back(tr("Local path")); transTW->setHorizontalHeaderLabels(labels); ConfSimple *conftrans = theconfig->getPTrans(); if (!conftrans) return; int row = 0; vector opaths = conftrans->getNames(m_dbdir); for (vector::const_iterator it = opaths.begin(); it != opaths.end(); it++) { transTW->setRowCount(row+1); transTW->setItem(row, 0, new QTableWidgetItem( QString::fromLocal8Bit(it->c_str()))); string npath; conftrans->get(*it, npath, m_dbdir); transTW->setItem(row, 1, new QTableWidgetItem( QString::fromLocal8Bit(npath.c_str()))); row++; } resize(QSize(640, 300).expandedTo(minimumSizeHint())); } void EditTrans::onItemDoubleClicked(QTableWidgetItem *item) { transTW->editItem(item); } void EditTrans::on_savePB_clicked() { ConfSimple *conftrans = theconfig->getPTrans(); if (!conftrans) { QMessageBox::warning(0, "Recoll", tr("Config error")); return; } conftrans->holdWrites(true); conftrans->eraseKey(m_dbdir); for (int row = 0; row < transTW->rowCount(); row++) { QTableWidgetItem *item0 = transTW->item(row, 0); string from = path_canon((const char *)item0->text().toLocal8Bit()); QTableWidgetItem *item1 = transTW->item(row, 1); string to = path_canon((const char*)item1->text().toLocal8Bit()); conftrans->set(from, to, m_dbdir); } conftrans->holdWrites(false); // The rcldb does not use the same configuration object, but a // copy. Force a reopen, this is quick. 
    string reason;
    maybeOpenDb(reason, true);
    close();
}

void EditTrans::on_addPB_clicked()
{
    transTW->setRowCount(transTW->rowCount()+1);
    int row = transTW->rowCount()-1;
    transTW->setItem(row, 0, new QTableWidgetItem(tr("Original path")));
    transTW->setItem(row, 1, new QTableWidgetItem(tr("Local path")));
    transTW->editItem(transTW->item(row, 0));
}

void EditTrans::on_delPB_clicked()
{
    QModelIndexList indexes = transTW->selectionModel()->selectedIndexes();
    vector<int> rows;
    for (int i = 0; i < indexes.size(); i++) {
        rows.push_back(indexes.at(i).row());
    }
    sort(rows.begin(), rows.end());
    rows.resize(unique(rows.begin(), rows.end()) - rows.begin());
    for (int i = rows.size()-1; i >= 0; i--) {
        transTW->removeRow(rows[i]);
    }
}

void EditTrans::on_transTW_itemSelectionChanged()
{
    QModelIndexList indexes = transTW->selectionModel()->selectedIndexes();
    if (indexes.size() < 1)
        delPB->setEnabled(0);
    else
        delPB->setEnabled(1);
}
recoll-1.26.3/qtgui/mtpics/
recoll-1.26.3/qtgui/mtpics/aptosid-manual.png (binary PNG image, data omitted)
recoll-1.26.3/qtgui/mtpics/book.png (binary PNG image, data omitted)
recoll-1.26.3/qtgui/mtpics/archive.png (binary PNG image, data omitted)
recoll-1.26.3/qtgui/mtpics/aptosid-manual-copyright.txt
This package was debianized by Kel Modderman on Mon, 9 Apr 2007 13:54:11 +1000.
It was downloaded from http://developer.berlios.de/projects/fullstory/

Files: lib/*
Copyright: © 2006-2010 Trevor Walkley (bluewater)
License: GFDL-1.2+
All content is © 2006-2010 and released under GNU Free Documentation License.
Permission is granted to copy, distribute and/or modify this document under
the terms of the GNU Free Documentation License, Version 1.2 or any later
version published by the Free Software Foundation; with no Invariant Sections,
with no Front-Cover Texts, and with no Back-Cover Texts.

Files: bg/*
Copyright: © 2008-2009 manul
License: GFDL-1.2+
All content is © 2006-2010 and released under GNU Free Documentation License.
Permission is granted to copy, distribute and/or modify this document under
the terms of the GNU Free Documentation License, Version 1.2 or any later
version published by the Free Software Foundation; with no Invariant Sections,
with no Front-Cover Texts, and with no Back-Cover Texts.

Files: da/*
Copyright: © 2006-2009 Rasmus Pørksen
License: GFDL-1.2+
All content is © 2006-2010 and released under GNU Free Documentation License.
Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: de/* Copyright: © 2006-2010 Markus Huber © 2006-2009 Markus Müller © 2006-2009 Philipp Rudolph License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License.. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: el/* Copyright: © 2008-2008 Gregory Gretri (grigris) siduxgr@gmail.com © 2008-2010 Nikolas Poniros (edhunter)edhunter@TBA.com © 2007-2008 mixalis (miles) georgiou mechmg93@gmail.com © 2007-2008 Pavlos (lathspell) fubar.ath@gmail.com © 2007-2008 Lazaros (riddle3)lazarost@gmail.com © 2007-2008 spyros melcher (xouzouris) License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: en/* Copyright: © 2006-2010 Trevor Walkley License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: es/* Copyright: © 2006-2009 Richard Holt © 2009-2010 Luis_P License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: fr/* Copyright: © 2006-2009 Philippe Masson License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: hr/* Copyright: © 2006-2009 Dinko Sabo License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. 
Files: hu/* Copyright: © 2008-2009 mrowl Bagoj Ur © 2008-2009 ruess reuss@chello.hu © 2008-2009 Siposs Zoltan © 2008-2009 honorshark honorshark@gmail.com License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License.Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: it/* Copyright: © 2008-2009 Renato Zanotti zenren@tiscali.it © 2008-2009 speedygeo speedygeo@email.it © 2007-2009 Stefano Tombolini dedo.tombolini@gmail.com © 2008-2010 Alessio Giustini alessio@alessiogiustini.com License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: ja/* Copyright: © 2006-2009 Mutsumu Nomura License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: nl/* Copyright: © 2006-2010 S R Eissens © 2007-2009 Ronald Stam © 2007-2009 HarzG License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: pl/* Copyright: © 2006-2008 Marcin Słotwiński © 2008-2009 Michael R' Tokarczyk © 2009 dongle License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: pt-br/* Copyright: © 2006-2010 Jose Tadeu Barros License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: ro/* Copyright: © 2006-2008 Gabriel Palade © 2009-2010 Dorin Vatavu License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. 
Files: ru/* Copyright: © 2006-2010 Roland Engert © 2006-2007 Dmytro Kychenko © 2006-2007 Mikhail Burov © 2009 kostiagol License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: menu/* Copyright: © 2006-2010 Trevor Walkley License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: debian/* Copyright: © 2007, Kel Modderman License: GPL-2+ The Debian packaging information is licensed under the GNU General Public License, version 2 or later. Files: menu/* Copyright: © 2006-2010 Trevor Walkley License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License.. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. Files: menu/icons/* Copyright: © 2010 Rick Battle © 2008-2010 Bernard Gray © 2008 David Creedy © 2007 David Vignoni © 2007 Johann Ollivier Lapeyre © 2007 Kenneth Wimer © 2007 Nuno Fernades Pinheiro © 2007 Riccardo Iaconelli © 2007 David Miller License: CC-ASA-3.0 | LGPL-2+ | GPL-2+ aptosid-manual.svg also includes elements from The Oxygen Icon Theme. Licensed under the Creative Common Attribution-ShareAlike 3.0 license, as found here: http://creativecommons.org/licenses/by-sa/3.0/ or the GNU Library General Public License (with following clarification). Clarification: The GNU Lesser General Public License or LGPL is written for software libraries in the first place. We expressly want the LGPL to be valid for this artwork library too. KDE Oxygen theme icons is a special kind of software library, it is an artwork library, it's elements can be used in a Graphical User Interface, or GUI. Source code, for this library means: - where they exist, SVG; - otherwise, if applicable, the multi-layered formats xcf or psd, or otherwise png. The LGPL in some sections obliges you to make the files carry notices. With images this is in some cases impossible or hardly useful. With this library a notice is placed at a prominent place in the directory containing the elements. You may follow this practice. The exception in section 6 of the GNU Lesser General Public License covers the use of elements of this art library in a GUI. Files: * Copyright: © 2006-2010 Trevor Walkley License: GFDL-1.2+ All content is © 2006-2010 and released under GNU Free Documentation License. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.2 or any later version published by the Free Software Foundation; with no Invariant Sections, with no Front-Cover Texts, and with no Back-Cover Texts. 
Files: *Coloured sidux Book Icons (type2) Copyright (C) 2007 by spacepenguin, cako and cathbard (http://sidux.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA I suggest a copy be kept in your sources for safe keeping ________________________________________________________________________________ On Debian systems, the complete text of the GNU Free Documentation License, version 2, can be found in `/usr/share/common-licenses/GFDL-1.2'. On Debian systems, the complete text of the GNU General Public License, version 2, can be found in `/usr/share/common-licenses/GPL-2'. On Debian systems, the complete text of the GNU Lesser General Public License, version 2, can be found in `/usr/share/common-licenses/LGPL-2'. The Debian packaging is © 2007--2010, Kel Modderman and is licensed under the GPL, see `/usr/share/common-licenses/GPL'.
recoll-1.26.3/qtgui/mtpics/image.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/video.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/txt.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/pdf.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/emblem-symbolic-link.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/License_sidux.txt
For sidux-book.png: _________________________________________________________________ Coloured Sidux Book Icons (type2) Copyright (C) 2007 by spacepenguin, cako and cathbard (http://sidux.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA I suggest a copy be kept in your sources for safe keeping __________________________________________________________________________
recoll-1.26.3/qtgui/mtpics/sownd.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/bookchap.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/wordprocessing.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/aptosid-book.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/mozilla_doc.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/README
Most icons thanks to KDE crystalsvg
Oxygen (www.oxygen-icons.org) (GPL): archive.png book.png bookchap.png text-x-python.png
The Pidgin project (GPL): pidgin.png
recoll-1.26.3/qtgui/mtpics/pidgin.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/html.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/document.png [binary PNG data omitted]
recoll-1.26.3/qtgui/mtpics/postscript.png [binary PNG data omitted]
recoll-1.26.3/qtgui/preview_plaintorich.cpp [tar header and GPL banner garbled in the source dump; file name inferred from its contents]
// NOTE: the original angle-bracket include arguments were lost in the text
// extraction; the headers below are inferred from what this file uses.
#include <memory>
#include <QSettings>
#include <QThread>
#include <QStringList>

#include "preview_plaintorich.h"
#include "plaintorich.h"
#include "log.h"
#include "guiutils.h"
#include "cancelcheck.h"

using namespace std;

PlainToRichQtPreview::PlainToRichQtPreview()
{
    clear();
}

void PlainToRichQtPreview::clear()
{
    m_curanchor = 1;
    m_lastanchor = 0;
    m_groupanchors.clear();
    m_groupcuranchors.clear();
    QSettings settings("Recoll.org", "recoll");
    m_spacehack = settings.value("anchorSpcHack", 0).toBool();
}

bool PlainToRichQtPreview::haveAnchors()
{
    return m_lastanchor != 0;
}

string PlainToRichQtPreview::header()
{
    // NOTE: the HTML literals in this function were lost when the source was
    // converted to text; the markup below is an approximate reconstruction
    // (a Qt rich-text prologue, optionally opening a preformatted section).
    if (!m_inputhtml) {
        switch (prefs.previewPlainPre) {
        case PrefsPack::PP_BR:
            m_eolbr = true;
            return "<qt><head></head><body>";
        case PrefsPack::PP_PRE:
            m_eolbr = false;
            return "<qt><head></head><body>"
                "<pre>";
        case PrefsPack::PP_PREWRAP:
            m_eolbr = false;
            return "<qt><head></head><body>"
                "<pre style=\"white-space: pre-wrap\">";
        }
    }
    return cstr_null;
}

string PlainToRichQtPreview::startMatch(unsigned int grpidx)
{
    LOGDEB2("startMatch, grpidx " << grpidx << "\n");
    grpidx = m_hdata->index_term_groups[grpidx].grpsugidx;
    LOGDEB2("startMatch, ugrpidx " << grpidx << "\n");
    m_groupanchors[grpidx].push_back(++m_lastanchor);
    m_groupcuranchors[grpidx] = 0;
    // NOTE: the HTML markup originally embedded in the comment and string
    // literals below was lost when the source was converted to text; the
    // tags shown here are an approximate reconstruction.
    // We used to create the region as:
    //     <a name="..."><span ...>term</span></a>
    // For some reason, this caused problems with the display of some
    // Tamil text (qt bug?). Just inserting a space character after
    // the opening <span> section. Also: having <a> before the match
    // term causes the same problem (so not a possible fix).
    string hackspace = m_spacehack? " " : "";
    string startmarker{
        "<span class='rclmatch'>" + hackspace +
            "<a name=\"" + termAnchorName(m_lastanchor) + "\">"
            };
    return startmarker;
}

string  PlainToRichQtPreview::endMatch()
{
    // Markup lost in the text conversion; closes what startMatch() opened
    // (anchor first, then the highlight span).
    return "</a></span>";
}
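// Illustrative note (not from the original source): for a match on "term",
// startMatch()/endMatch() are intended to bracket it as something like
//     <span ...> <a name="TRM12">term</a></span>
// where "TRM12" comes from termAnchorName(), so the preview can later jump
// from one highlighted match to the next by anchor name.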

string  PlainToRichQtPreview::termAnchorName(int i) const
{
    static const char *termAnchorNameBase = "TRM";
    char acname[sizeof(termAnchorNameBase) + 20];
    sprintf(acname, "%s%d", termAnchorNameBase, i);
    return string(acname);
}
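// Example: termAnchorName(12) yields "TRM12", which is the anchor name
// emitted by startMatch() and looked up again by curAnchorName().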

string  PlainToRichQtPreview::startChunk()
{
    // Markup literal lost in the text conversion; reconstructed as restarting
    // a preformatted section at the top of each inserted chunk.
    return "<pre>";
}

int  PlainToRichQtPreview::nextAnchorNum(int grpidx)
{
    LOGDEB2("nextAnchorNum: group " << grpidx << "\n");
    // The map iterator types were lost in the text extraction; auto is equivalent.
    auto curit = m_groupcuranchors.find(grpidx);
    auto vecit = m_groupanchors.find(grpidx);
    if (grpidx == -1 || curit == m_groupcuranchors.end() ||
        vecit == m_groupanchors.end()) {
        if (m_curanchor >= m_lastanchor)
            m_curanchor = 1;
        else
            m_curanchor++;
    } else {
        if (curit->second >= vecit->second.size() -1)
            m_groupcuranchors[grpidx] = 0;
        else 
            m_groupcuranchors[grpidx]++;
        m_curanchor = vecit->second[m_groupcuranchors[grpidx]];
        LOGDEB2("nextAnchorNum: curanchor now " << m_curanchor << "\n");
    }
    return m_curanchor;
}

int  PlainToRichQtPreview::prevAnchorNum(int grpidx)
{
    // The map iterator types were lost in the text extraction; auto is equivalent.
    auto curit = m_groupcuranchors.find(grpidx);
    auto vecit = m_groupanchors.find(grpidx);
    if (grpidx == -1 || curit == m_groupcuranchors.end() ||
        vecit == m_groupanchors.end()) {
        if (m_curanchor <= 1)
            m_curanchor = m_lastanchor;
        else
            m_curanchor--;
    } else {
        if (curit->second <= 0)
            m_groupcuranchors[grpidx] = vecit->second.size() -1;
        else 
            m_groupcuranchors[grpidx]--;
        m_curanchor = vecit->second[m_groupcuranchors[grpidx]];
    }
    return m_curanchor;
}

QString  PlainToRichQtPreview::curAnchorName() const
{
    return QString::fromUtf8(termAnchorName(m_curanchor).c_str());
}
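// Usage sketch (illustrative; the object and widget names are hypothetical,
// not from this file): a preview window typically steps through matches by
// advancing the anchor and scrolling the text widget to it:
//
//     m_plaintorich->nextAnchorNum(grpidx);
//     textEdit->scrollToAnchor(m_plaintorich->curAnchorName());
//
// QTextEdit::scrollToAnchor() positions the view on a named anchor such as
// the "TRMnn" names generated above.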


ToRichThread::ToRichThread(const string &i, const HighlightData& hd,
                           std::shared_ptr<PlainToRichQtPreview> ptr,
                           QStringList& qrichlist,
                           QObject *parent)
    : QThread(parent), m_input(i), m_hdata(hd), m_ptr(ptr), m_output(qrichlist)
{
}

// Insert into editor by chunks so that the top becomes visible
// earlier for big texts. This provokes some artifacts (adds empty line),
// so we can't set it too low.
#define CHUNKL 500*1000
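// (That is 500*1000 = 500000 characters of rich text per inserted chunk.)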

void ToRichThread::run()
{
    list<string> out;
    try {
        m_ptr->plaintorich(m_input, out, m_hdata, CHUNKL);
    } catch (CancelExcept) {
        return;
    }

    // Convert C++ string list to QString list
    for (list<string>::iterator it = out.begin(); 
         it != out.end(); it++) {
        m_output.push_back(QString::fromUtf8(it->c_str(), it->length()));
    }
}
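// Usage sketch (illustrative; names are hypothetical, not from this file):
// the caller owns the QStringList and inserts the chunks once the thread is
// done, so the top of a large document becomes visible early:
//
//     QStringList chunks;
//     ToRichThread thr(text, hdata, qtpreview, chunks, this);
//     connect(&thr, SIGNAL(finished()), this, SLOT(onRichTextReady()));
//     thr.start();
//     // ... later, in onRichTextReady(): for each chunk,
//     //     editor->insertHtml(chunk);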
recoll-1.26.3/qtgui/viewaction_w.cpp0000644000175000017500000001546313537747251014366 00000000000000/* Copyright (C) 2006-2019 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include "viewaction_w.h"

// NOTE: the original angle-bracket include arguments were lost in the text
// extraction; the headers below are inferred from what this file uses.
#include <string>
#include <vector>
#include <set>

#include <QMessageBox>

#include "recoll.h"
#include "log.h"
#include "guiutils.h"

using namespace std;


void ViewAction::init()
{
    selSamePB->setEnabled(false);
    connect(closePB, SIGNAL(clicked()), this, SLOT(close()));
    connect(chgActPB, SIGNAL(clicked()), this, SLOT(editActions()));
    connect(actionsLV,
            SIGNAL(currentItemChanged(QTableWidgetItem *, QTableWidgetItem *)),
            this,
            SLOT(onCurrentItemChanged(QTableWidgetItem *, QTableWidgetItem *)));
    useDesktopCB->setChecked(prefs.useDesktopOpen);
    onUseDesktopCBToggled(prefs.useDesktopOpen);
    connect(useDesktopCB, SIGNAL(stateChanged(int)), 
            this, SLOT(onUseDesktopCBToggled(int)));
    connect(setExceptCB, SIGNAL(stateChanged(int)), 
            this, SLOT(onSetExceptCBToggled(int)));
    connect(selSamePB, SIGNAL(clicked()),
            this, SLOT(onSelSameClicked()));
    resize(QSize(640, 480).expandedTo(minimumSizeHint()));
}
        
void ViewAction::onUseDesktopCBToggled(int onoff)
{
    prefs.useDesktopOpen = onoff != 0;
    fillLists();
    setExceptCB->setEnabled(prefs.useDesktopOpen);
}

void ViewAction::onSetExceptCBToggled(int onoff)
{
    newActionLE->setEnabled(onoff != 0);
}

void ViewAction::fillLists()
{
    currentLBL->clear();
    actionsLV->clear();
    actionsLV->verticalHeader()->setDefaultSectionSize(20); 
    vector<pair<string, string> > defs;
    theconfig->getMimeViewerDefs(defs);
    actionsLV->setRowCount(defs.size());

    set<string> viewerXs;
    if (prefs.useDesktopOpen) {
        viewerXs = theconfig->getMimeViewerAllEx();
    }

    int row = 0;
    for (const auto& def : defs) {
        actionsLV->setItem(row, 0, new QTableWidgetItem(u8s2qs(def.first)));
        if (!prefs.useDesktopOpen ||
            viewerXs.find(def.first) != viewerXs.end()) {
            actionsLV->setItem(row, 1, new QTableWidgetItem(u8s2qs(def.second)));
        } else {
            actionsLV->setItem(
                row, 1, new QTableWidgetItem(tr("Desktop Default")));
        }
        row++;
    }
    QStringList labels(tr("MIME type"));
    labels.push_back(tr("Command"));
    actionsLV->setHorizontalHeaderLabels(labels);
}
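// Note: getMimeViewerDefs() yields (MIME type, command) string pairs; a pair
// such as ("application/pdf", "evince %f") would show up as one row of the
// table filled above (the example values are illustrative only).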

void ViewAction::selectMT(const QString& mt)
{
    actionsLV->clearSelection();
    QList<QTableWidgetItem *> items = 
        actionsLV->findItems(mt, Qt::MatchFixedString|Qt::MatchCaseSensitive);
    for (QList<QTableWidgetItem *>::iterator it = items.begin();
         it != items.end(); it++) {
        (*it)->setSelected(true);
        actionsLV->setCurrentItem(*it, QItemSelectionModel::Columns);
    }
}

void ViewAction::onSelSameClicked()
{
    actionsLV->clearSelection();
    QString value = currentLBL->text();
    if (value.isEmpty())
        return;
    string action = qs2utf8s(value);
    LOGDEB1("ViewAction::onSelSameClicked: value: " << action << endl);

    vector<pair<string, string> > defs;
    theconfig->getMimeViewerDefs(defs);
    for (const auto& def : defs) {
        if (def.second == action) {
            QList<QTableWidgetItem *> items = actionsLV->findItems(
                u8s2qs(def.first), Qt::MatchFixedString|Qt::MatchCaseSensitive);
            for (QList<QTableWidgetItem *>::iterator it = items.begin();
                 it != items.end(); it++) {
                (*it)->setSelected(true);
                actionsLV->item((*it)->row(), 1)->setSelected(true);
            }
        }
    }
}

void ViewAction::onCurrentItemChanged(QTableWidgetItem *item, QTableWidgetItem *)
{
    currentLBL->clear();
    selSamePB->setEnabled(false);
    if (nullptr == item) {
        return;
    }
    QTableWidgetItem *item0 = actionsLV->item(item->row(), 0);
    string mtype = qs2utf8s(item0->text());

    vector<pair<string, string> > defs;
    theconfig->getMimeViewerDefs(defs);
    for (const auto& def : defs) {
        if (def.first == mtype) {
            currentLBL->setText(u8s2qs(def.second));
            selSamePB->setEnabled(true);
            return;
        }
    }
}

void ViewAction::editActions()
{
    QString action0;
    int except0 = -1;

    set<string> viewerXs = theconfig->getMimeViewerAllEx();
    vector<string> mtypes;
    bool dowarnmultiple = true;
    for (int row = 0; row < actionsLV->rowCount(); row++) {
        QTableWidgetItem *item0 = actionsLV->item(row, 0);
        if (!item0->isSelected())
            continue;
        string mtype = qs2utf8s(item0->text());
        mtypes.push_back(mtype);
        QTableWidgetItem *item1 = actionsLV->item(row, 1);
        QString action = item1->text();
        bool except = viewerXs.find(mtype) != viewerXs.end();
        if (action0.isEmpty()) {
            action0 = action;
            except0 = except;
        } else {
            if ((action != action0 || except != except0) && dowarnmultiple) {
                switch (QMessageBox::warning(0, "Recoll",
                                             tr("Changing entries with "
                                                "different current values"),
                                             "Continue",
                                             "Cancel",
                                             0, 0, 1)) {
                case 0: dowarnmultiple = false; break;
                case 1: return;
                }
            }
        }
    }

    if (action0.isEmpty())
        return;
    string sact = qs2utf8s(newActionLE->text());
    if (!sact.empty()) {
        trimstring(sact);
#ifdef _WIN32
        path_slashize(sact);
#endif
    }
    for (const auto& entry : mtypes) {
        auto xit = viewerXs.find(entry);
        if (setExceptCB->isChecked()) {
            if (xit == viewerXs.end()) {
                viewerXs.insert(entry);
            }
        } else {
            if (xit != viewerXs.end()) {
                viewerXs.erase(xit);
            }
        }
        // An empty action will restore the default (erase from
        // topmost conftree)
        theconfig->setMimeViewerDef(entry, sact);
    }

    theconfig->setMimeViewerAllEx(viewerXs);
    fillLists();
}
recoll-1.26.3/qtgui/rclm_saveload.cpp0000644000175000017500000001025013533651561014462 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

/** Saving and restoring named queries */

#include "safesysstat.h"

// NOTE: the original angle-bracket include arguments were lost in the text
// extraction; the headers below are inferred from what this file uses.
#include <QFileDialog>
#include <QMessageBox>
#include <QSettings>

#include "rclmain_w.h"
#include "log.h"
#include "readfile.h"
#include "xmltosd.h"
#include "searchdata.h"
#include "copyfile.h"

using namespace std;
using namespace Rcl;

static QString prevDir()
{
    QSettings settings;
    QString prevdir = 
        settings.value("/Recoll/prefs/lastQuerySaveDir").toString();
    string defpath = path_cat(theconfig->getConfDir(), "saved_queries");
    if (prevdir.isEmpty()) {
        if (!path_exists(defpath)) {
            mkdir(defpath.c_str(), 0700);
        }
        return QString::fromLocal8Bit(defpath.c_str());
    } else {
        return prevdir;
    }
}

void RclMain::saveLastQuery()
{
    string xml;
    if (lastSearchSimple()) {
        xml = sSearch->asXML();
    } else {
        if (g_advshistory) {
            std::shared_ptr<Rcl::SearchData> sd;
            sd = g_advshistory->getnewest();
            if (sd) {
                xml = sd->asXML();
            }
        }
    }
    if (xml.empty()) {
        QMessageBox::information(this, tr("No search"), 
                                 tr("No preserved previous search"));
        return;
    }
    // NOTE: the XML literals on the next lines were lost when the source was
    // converted to text. They wrap the query in an XML declaration plus a
    // root element; the element name used here is a reconstruction, not
    // necessarily the exact one recoll writes.
    xml = string("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") +
        "<rclsavedquery>\n" + xml + "\n</rclsavedquery>\n";

    QFileDialog fileDialog(this, tr("Choose file to save"));
    fileDialog.setNameFilter(tr("Saved Queries (*.rclq)"));
    fileDialog.setDefaultSuffix("rclq");
    fileDialog.setAcceptMode(QFileDialog::AcceptSave);
    fileDialog.setDirectory(prevDir());

    if (!fileDialog.exec())
        return;

    QString s = fileDialog.selectedFiles().first();
    if (s.isEmpty()) {
        return;
    }
    
    string tofile((const char *)s.toLocal8Bit());

    // Work around qt 5.9-11 bug (linux at least): defaultSuffix is
    // not added to saved file name
    string suff = path_suffix(tofile);
    if (suff.compare("rclq")) {
        tofile += ".rclq";
    }

    LOGDEB("RclMain::saveLastQuery: XML: [" << xml << "]\n");
    string reason;
    if (!stringtofile(xml, tofile.c_str(), reason)) {
        QMessageBox::warning(this, tr("Write failed"), 
                                 tr("Could not write to file"));
    }
    return;
}
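// The file written above is what loadSavedQuery() below reads back: it first
// tries to parse the XML as advanced-search data (xmlToSearchData) and falls
// back to a simple-search description (xmlToSSearch) if that fails.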


void RclMain::loadSavedQuery()
{
    QString s = 
        QFileDialog::getOpenFileName(this, "Open saved query", prevDir(), 
                                     tr("Saved Queries (*.rclq)"));
    if (s.isEmpty())
        return;

    string fromfile((const char *)s.toLocal8Bit());
    string xml, reason;
    if (!file_to_string(fromfile, xml, &reason)) {
        QMessageBox::warning(this, tr("Read failed"), 
                             tr("Could not open file: ") + 
                             QString::fromUtf8(reason.c_str()));
        return;
    }

    // Try to parse as advanced search SearchData
    std::shared_ptr<Rcl::SearchData> sd = xmlToSearchData(xml, false);
    if (sd) {
        showAdvSearchDialog();
        asearchform->fromSearch(sd);
        return;
    }
    LOGDEB("loadSavedQuery: Not advanced search. Parsing as simple search\n");
    // Try to parse as Simple Search
    SSearchDef sdef;
    if (xmlToSSearch(xml, sdef)) {
        if (sSearch->fromXML(sdef))
            return;
    }
    QMessageBox::warning(this, tr("Load error"), 
                         tr("Could not load saved query"));
}

recoll-1.26.3/qtgui/restable.h0000644000175000017500000001343213566424763013132 00000000000000/* Copyright (C) 2006 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _RESTABLE_H_INCLUDED_
#define _RESTABLE_H_INCLUDED_
#include "autoconfig.h"

// NOTE: the original angle-bracket include arguments were lost in the text
// extraction; the headers below are inferred from what this header uses.
#include <stdio.h>

#include <memory>
#include <string>
#include <QAbstractTableModel>
#include <QTextBrowser>

#include "ui_restable.h"
#include "docseq.h"
#include "plaintorich.h"

class ResTable;

typedef std::string (FieldGetter)(const std::string& fldname, const Rcl::Doc& doc);

class RecollModel : public QAbstractTableModel {

    Q_OBJECT

public:
    RecollModel(const QStringList fields, ResTable *tb, QObject *parent = 0);

    // Reimplemented methods
    virtual int rowCount (const QModelIndex& = QModelIndex()) const;
    virtual int columnCount(const QModelIndex& = QModelIndex()) const;
    virtual QVariant headerData (int col, Qt::Orientation orientation, 
				 int role = Qt::DisplayRole ) const;
    virtual QVariant data(const QModelIndex& index, 
			   int role = Qt::DisplayRole ) const;
    virtual void saveAsCSV(FILE *fp);
    virtual void sort(int column, Qt::SortOrder order = Qt::AscendingOrder);
    // Specific methods
    virtual void readDocSource();
    virtual void setDocSource(std::shared_ptr<DocSequence> nsource);
    virtual std::shared_ptr<DocSequence> getDocSource() {return m_source;}
    virtual void deleteColumn(int);
    virtual const std::vector<std::string>& getFields() {return m_fields;}
    virtual const std::map<std::string, QString>& getAllFields() 
    { 
	return o_displayableFields;
    }
    virtual void addColumn(int, const std::string&);
    // Some column names are aliases/translations for a base document field
    // (e.g. date, datetime -> mtime; see the sketch after this class). Help deal with this:
    virtual std::string baseField(const std::string&);

    // Make sort() a no-op (used while the table state is being set up programmatically).
    virtual void setIgnoreSort(bool onoff) {m_ignoreSort = onoff;}

    friend class ResTable;

signals:
    void sortDataChanged(DocSeqSortSpec);

private:
    ResTable *m_table{0};
    mutable std::shared_ptr<DocSequence> m_source;
    std::vector<std::string> m_fields;
    std::vector<FieldGetter*> m_getters;
    static std::map<std::string, QString> o_displayableFields;
    bool m_ignoreSort;
    FieldGetter* chooseGetter(const std::string&);
    HighlightData m_hdata;
};
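
// Illustrative sketch (not part of the original source): the kind of alias
// mapping baseField() performs, per the comment above. Only the
// date/datetime -> mtime aliases come from that comment; everything else
// here is an assumption.
#if 0
static std::string baseFieldSketch(const std::string& fld)
{
    // "date" and "datetime" are GUI column names for the index "mtime" field.
    if (fld == "date" || fld == "datetime")
        return "mtime";
    return fld;
}
#endif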

class ResTable;

// Modified textBrowser for the detail area
class ResTableDetailArea : public QTextBrowser {
    Q_OBJECT;

 public:
    ResTableDetailArea(ResTable* parent = 0);
    
 public slots:
    virtual void createPopupMenu(const QPoint& pos);

private:
    ResTable *m_table;
};


class ResTablePager;
class QUrl;
class RclMain;

class ResTable : public QWidget, public Ui::ResTable 
{
    Q_OBJECT

public:
    ResTable(QWidget* parent = 0) 
	: QWidget(parent),
	  m_model(0), m_pager(0), m_detail(0), m_detaildocnum(-1),
	  m_rclmain(0), m_ismainres(true)
    {
	setupUi(this);
	init();
    }
	
    virtual ~ResTable() {}
    virtual RecollModel *getModel() {return m_model;}
    virtual ResTableDetailArea* getDetailArea() {return m_detail;}
    virtual int getDetailDocNumOrTopRow();

    void setRclMain(RclMain *m, bool ismain);

public slots:
    virtual void onTableView_currentChanged(const QModelIndex&);
    virtual void on_tableView_entered(const QModelIndex& index);
    virtual void setDocSource(std::shared_ptr<DocSequence> nsource);
    virtual void saveColState();
    virtual void resetSource();
    virtual void readDocSource(bool resetPos = true);
    virtual void onSortDataChanged(DocSeqSortSpec);
    virtual void createPopupMenu(const QPoint& pos);
    virtual void onDoubleClick(const QModelIndex&);
    virtual void menuPreview();
    virtual void menuSaveToFile();
    virtual void menuSaveSelection();
    virtual void menuEdit();
    virtual void menuEditAndQuit();
    virtual void menuOpenWith(QAction *);
    virtual void menuCopyFN();
    virtual void menuCopyURL();
    virtual void menuExpand();
    virtual void menuPreviewParent();
    virtual void menuOpenParent();
    virtual void menuShowSnippets();
    virtual void menuShowSubDocs();
    virtual void createHeaderPopupMenu(const QPoint&);
    virtual void deleteColumn();
    virtual void addColumn();
    virtual void resetSort(); // Revert to natural (relevance) order
    virtual void saveAsCSV(); 
    virtual void linkWasClicked(const QUrl&);
    virtual void makeRowVisible(int row);
    virtual void takeFocus();

signals:
    void docPreviewClicked(int, Rcl::Doc, int);
    void docSaveToFileClicked(Rcl::Doc);
    void previewRequested(Rcl::Doc);
    void editRequested(Rcl::Doc);
    void openWithRequested(Rcl::Doc, string cmd);
    void headerClicked();
    void docExpand(Rcl::Doc);
    void showSubDocs(Rcl::Doc);
    void showSnippets(Rcl::Doc);
    void detailDocChanged(Rcl::Doc, std::shared_ptr<DocSequence>);
    
    friend class ResTablePager;
    friend class ResTableDetailArea;
protected:
    bool eventFilter(QObject* obj, QEvent* event);
private:
    void init();
    RecollModel   *m_model;
    ResTablePager *m_pager;
    ResTableDetailArea *m_detail;
    int            m_detaildocnum;
    Rcl::Doc       m_detaildoc;
    int            m_popcolumn;
    RclMain *m_rclmain;
    bool     m_ismainres;
};
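
// Illustrative wiring sketch (not part of the original source): how a main
// window might typically set up a ResTable through the interface declared
// above. The owner object and its slot names (onPreviewRequested etc.) are
// hypothetical; the ResTable calls and signals are the ones declared above.
#if 0
    ResTable *restable = new ResTable(parent);
    restable->setRclMain(rclmain, true);
    restable->setDocSource(source);   // source: std::shared_ptr<DocSequence>
    QObject::connect(restable, SIGNAL(previewRequested(Rcl::Doc)),
                     owner, SLOT(onPreviewRequested(Rcl::Doc)));
    QObject::connect(restable, SIGNAL(editRequested(Rcl::Doc)),
                     owner, SLOT(onEditRequested(Rcl::Doc)));
    restable->readDocSource();
#endif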


#endif /* _RESTABLE_H_INCLUDED_ */
recoll-1.26.3/qtgui/snippets.ui0000644000175000017500000001012613303776056013353 00000000000000

 <!-- snippets.ui: Qt Designer XML whose markup was stripped in this text
      extraction; only the literal values survive. Recoverable content: a
      "Snippets" dialog, default geometry 0,0,640x400, whose layout holds
      the main display widget (its properties left no literal text) and a
      find bar frame (presumably "searchFM", QFrame::StyledPanel/Raised)
      containing a text-only close tool button ("X", searchClosePB), a
      "Find:" label, the search entry, and "Next"/"Prev" buttons, plus a
      horizontal QDialogButtonBox with a Close button. Designer connection:
      searchClosePB.clicked() -> searchFM.hide(). -->
recoll-1.26.3/qtgui/rclhelp.cpp0000644000175000017500000000430313533651561013302 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"
#include <time.h>

#include <QKeyEvent>
#include <QWidget>

#include "recoll.h"
#include "rclhelp.h"
#include "log.h"

map<string, string> HelpClient::helpmap;

void HelpClient::installMap(string wname, string section)
{
    helpmap[wname] = section;
}

HelpClient::HelpClient(QObject *parent, const char *)
    : QObject(parent)
{
    parent->installEventFilter(this);
}
    
bool HelpClient::eventFilter(QObject *obj, QEvent *event)
{
    static time_t last_start;
    if (event->type() == QEvent::KeyPress || 
	event->type() == QEvent::ShortcutOverride) {
	//	LOGDEB("HelpClient::eventFilter: "  << ((int)event->type()) << "\n" );
	QKeyEvent *ke = static_cast<QKeyEvent *>(event);
	if (ke->key() == Qt::Key_F1 || ke->key() == Qt::Key_Help) {
	    if (obj->isWidgetType()) {
		QWidget *widget = static_cast<QWidget *>(obj)->focusWidget();
		map<string, string>::iterator it = helpmap.end();
		while (widget) {
		    it = helpmap.find((const char *)widget->objectName().toUtf8());
		    if (it != helpmap.end())
			break;
		    widget = widget->parentWidget();
		}
		if (time(0) - last_start > 5) {
		    last_start = time(0);
		    if (it != helpmap.end()) {
			LOGDEB("HelpClient::eventFilter: "  << (it->first) << "->"  << (it->second) << "\n" );
			startManual(it->second);
		    } else {
			LOGDEB("HelpClient::eventFilter: no help section\n" );
			startManual("");
		    }
		}
	    }
	    return true;
	}
    }
    return false;
}
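
// Illustrative usage sketch (mirrors what the GUI windows in this archive
// do, e.g. Preview::init() in preview_w.cpp): create a HelpClient on a
// top-level widget and register objectName -> manual-section mappings. The
// section name below is the one used by the preview window.
#if 0
    (void)new HelpClient(topLevelWidget);
    HelpClient::installMap((const char *)topLevelWidget->objectName().toUtf8(),
                           "RCL.SEARCH.GUI.PREVIEW");
    // Pressing F1/Help with the focus anywhere inside the widget tree then
    // calls startManual() with the closest registered section, at most once
    // every 5 seconds.
#endif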

recoll-1.26.3/qtgui/respopup.cpp0000644000175000017500000001451513533651561013534 00000000000000/*
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include <QApplication>
#include <QClipboard>
#include <QMenu>
#include <QAction>
#include <QVariant>

#include "log.h"
#include "smallut.h"
#include "recoll.h"
#include "docseq.h"
#include "respopup.h"
#include "appformime.h"

namespace ResultPopup {

QMenu *create(QWidget *me, int opts, std::shared_ptr<DocSequence> source, Rcl::Doc& doc)
{
    QMenu *popup = new QMenu(me);

    LOGDEB("ResultPopup::create: opts "  << (opts) << " haspages "  << (doc.haspages) << " "  << (source ? "Source not null" : "Source is Null") << " "  << (source ? (source->snippetsCapable() ? 
		      "snippetsCapable" : "not snippetsCapable") : "") << "\n" );

    string apptag;
    doc.getmeta(Rcl::Doc::keyapptg, &apptag);

    popup->addAction(QWidget::tr("&Preview"), me, SLOT(menuPreview()));

    if (!theconfig->getMimeViewerDef(doc.mimetype, apptag, 0).empty()) {
	popup->addAction(QWidget::tr("&Open"), me, SLOT(menuEdit()));
    }

    bool needopenwith = true;
    if (!doc.ipath.empty())
        needopenwith = false;
    if (needopenwith) {
        string backend;
        doc.getmeta(Rcl::Doc::keybcknd, &backend);
        if (!backend.empty() && backend.compare("FS"))
            needopenwith = false;
    }
            
    if (needopenwith) {
        vector<DesktopDb::AppDef> aps;
        DesktopDb *ddb = DesktopDb::getDb();
        if (ddb && ddb->appForMime(doc.mimetype, &aps) && 
            !aps.empty()) {
            QMenu *sub = popup->addMenu(QWidget::tr("Open With"));
            if (sub) {
                for (vector<DesktopDb::AppDef>::const_iterator it = aps.begin();
                     it != aps.end(); it++) {
                    QAction *act = new 
                        QAction(QString::fromUtf8(it->name.c_str()), me);
                    QVariant v(QString::fromUtf8(it->command.c_str()));
                    act->setData(v);
                    sub->addAction(act);
                }
                sub->connect(sub, SIGNAL(triggered(QAction *)), me, 
                             SLOT(menuOpenWith(QAction *)));
            }
        }

        // See if there are any desktop files in $RECOLL_CONFDIR/scripts
        // and possibly create a "run script" menu.
        aps.clear();
        ddb = new DesktopDb(path_cat(theconfig->getConfDir(), "scripts"));
        if (ddb && ddb->allApps(&aps) && !aps.empty()) {
            QMenu *sub = popup->addMenu(QWidget::tr("Run Script"));
            if (sub) {
                for (vector<DesktopDb::AppDef>::const_iterator it = aps.begin();
                     it != aps.end(); it++) {
                    QAction *act = new 
                        QAction(QString::fromUtf8(it->name.c_str()), me);
                    QVariant v(QString::fromUtf8(it->command.c_str()));
                    act->setData(v);
                    sub->addAction(act);
                }
                sub->connect(sub, SIGNAL(triggered(QAction *)), me, 
                             SLOT(menuOpenWith(QAction *)));
            }
        }
        delete ddb;
    }

    popup->addAction(QWidget::tr("Copy &File Name"), me, SLOT(menuCopyFN()));
    popup->addAction(QWidget::tr("Copy &URL"), me, SLOT(menuCopyURL()));

    if ((opts&showSaveOne) && (!doc.isFsFile() || !doc.ipath.empty()))
	popup->addAction(QWidget::tr("&Write to File"), me, 
                         SLOT(menuSaveToFile()));

    if ((opts&showSaveSel))
	popup->addAction(QWidget::tr("Save selection to files"), 
			 me, SLOT(menuSaveSelection()));

    Rcl::Doc pdoc;
    if (source && source->getEnclosing(doc, pdoc)) {
	popup->addAction(QWidget::tr("Preview P&arent document/folder"), 
			 me, SLOT(menuPreviewParent()));
    }
    // Open parent is useful even if there is no parent because we open
    // the enclosing folder.
    if (doc.isFsFile())
        popup->addAction(QWidget::tr("&Open Parent document/folder"), 
                         me, SLOT(menuOpenParent()));

    if (opts & showExpand)
	popup->addAction(QWidget::tr("Find &similar documents"), 
			 me, SLOT(menuExpand()));

    if (doc.haspages && source && source->snippetsCapable()) 
	popup->addAction(QWidget::tr("Open &Snippets window"), 
			 me, SLOT(menuShowSnippets()));

    if ((opts & showSubs) && rcldb && rcldb->hasSubDocs(doc)) 
	popup->addAction(QWidget::tr("Show subdocuments / attachments"), 
			 me, SLOT(menuShowSubDocs()));

    return popup;
}
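
// Illustrative usage sketch (not part of the original source): how a result
// widget's context-menu slot typically builds and shows this menu. The
// member names m_source and m_detaildoc are assumptions here (similar
// members exist in the result table class), and the widget must provide the
// menuPreview()/menuEdit()/... slots that create() connects to.
#if 0
void SomeResultWidget::onContextMenu(const QPoint& pos)
{
    int opts = ResultPopup::showSaveOne | ResultPopup::showSaveSel |
        ResultPopup::showExpand | ResultPopup::showSubs;
    QMenu *popup = ResultPopup::create(this, opts, m_source, m_detaildoc);
    if (popup)
        popup->popup(mapToGlobal(pos));
}
#endif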

Rcl::Doc getParent(std::shared_ptr source, Rcl::Doc& doc)
{
    Rcl::Doc pdoc;
    if (!source || !source->getEnclosing(doc, pdoc)) {
	// No parent doc: show enclosing folder with app configured for
	// directories
        pdoc.url = url_parentfolder(doc.url);
	pdoc.meta[Rcl::Doc::keychildurl] = doc.url;
	pdoc.meta[Rcl::Doc::keyapptg] = "parentopen";
	pdoc.mimetype = "inode/directory";
    }
    return pdoc;
}

void copyFN(const Rcl::Doc &doc)
{
    // Our urls currently always begin with "file://" 
    //
    // Problem: setText expects a QString. Passing a (const char*)
    // as we used to do causes an implicit conversion from
    // latin1. File names are binary and the right approach would be no
    // conversion, but it's probably better (less worse...) to
    // make a "best effort" tentative and try to convert from the
    // locale's charset than accept the default conversion.
    QString qfn = QString::fromLocal8Bit(doc.url.c_str()+7);
    QApplication::clipboard()->setText(qfn, QClipboard::Selection);
    QApplication::clipboard()->setText(qfn, QClipboard::Clipboard);
}

void copyURL(const Rcl::Doc &doc)
{
    string url =  url_encode(doc.url, 7);
    QApplication::clipboard()->setText(url.c_str(), 
				       QClipboard::Selection);
    QApplication::clipboard()->setText(url.c_str(), 
				       QClipboard::Clipboard);
}

}

recoll-1.26.3/qtgui/respopup.h0000644000175000017500000000246313533651561013200 00000000000000/* Copyright (C) 2006 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _RESPOPUP_H_INCLUDED_
#define _RESPOPUP_H_INCLUDED_
#include "autoconfig.h"

namespace ResultPopup {
    enum Options {showExpand = 0x1, showSubs = 0x2, isMain = 0x3,
		  showSaveOne = 0x4, showSaveSel = 0x8};
    extern QMenu *create(QWidget *me, int opts,  
			 std::shared_ptr<DocSequence> source,
			 Rcl::Doc& doc);
    extern Rcl::Doc getParent(std::shared_ptr<DocSequence> source,
			      Rcl::Doc& doc);
    extern void copyFN(const Rcl::Doc &doc);
    extern void copyURL(const Rcl::Doc &doc);
};

#endif /* _RESPOPUP_H_INCLUDED_ */
recoll-1.26.3/qtgui/preview_w.cpp0000644000175000017500000010732113566714503013666 00000000000000/* Copyright (C) 2005-2019 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include <stdio.h>
#include <time.h>

#include <QApplication>
#include <QMessageBox>
#include <QProgressDialog>
#include <QEventLoop>
#include <QTimer>
#include <QShortcut>
#include <QKeySequence>
#include <QKeyEvent>
#include <QMouseEvent>
#include <QTextCursor>
#include <QTextDocument>
#include <QScrollBar>
#include <QMenu>
#include <QAction>
#include <QImage>
#include <QUrl>
#include <QPoint>
#include <QStringList>
#include <QComboBox>
#include <QCheckBox>
#include <QPrinter>
#include <QPrintDialog>

#include "log.h"
#include "pathut.h"
#include "internfile.h"
#include "recoll.h"
#include "smallut.h"
#include "chrono.h"
#include "wipedir.h"
#include "cancelcheck.h"
#include "preview_w.h"
#include "guiutils.h"
#include "docseqhist.h"
#include "rclhelp.h"
#include "preview_load.h"
#include "preview_plaintorich.h"
#include "rclmain_w.h"

static const QKeySequence closeKS(Qt::Key_Escape);
static const QKeySequence nextDocInTabKS(Qt::ShiftModifier+Qt::Key_Down);
static const QKeySequence prevDocInTabKS(Qt::ShiftModifier+Qt::Key_Up);
static const QKeySequence closeTabKS(Qt::ControlModifier+Qt::Key_W);
static const QKeySequence printTabKS(Qt::ControlModifier+Qt::Key_P);

// Make an attempt at trimming wildcard exprs at both ends of string
static void trimwildcards(string& elt)
{
    if (elt.empty())
        return;
    string::size_type initsize;
    do {
        initsize = elt.size();
        // Trim wildcard chars. 
        trimstring(elt, " *?");
        // Trim wildcard char classes 
        if (elt.size() && elt.back() == ']') {
            string::size_type offs = elt.find_last_of("[");
            if (offs != string::npos) {
                elt = elt.substr(0, offs);
                if (elt.size() && elt.back() == '[') {
                    elt.erase(elt.end()-1);
                }
            }
        }
        if (elt.size() && elt.front() == '[') {
            string::size_type offs = elt.find_first_of("]");
            if (offs != string::npos) {
                elt.erase(0, offs+1);
            }
        }
    } while (elt.size() && elt.size() != initsize);
}
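
// Examples of what the trimming above produces (illustrative only):
//   "*recoll*"     -> "recoll"   (leading/trailing wildcard characters)
//   "?term "       -> "term"
//   "recoll[0-9]"  -> "recoll"   (trailing character class removed)
//   "[Rr]ecoll"    -> "ecoll"    (leading character class removed)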

void Preview::init()
{
    LOGDEB("Preview::init\n");
    setAttribute(Qt::WA_DeleteOnClose);
    
    // Create the first tab (the tab widget is created with one
    // initial tab for ease of use in designer, we remove it).
    addEditorTab();
    pvTab->removeTab(0);

    for (const auto& ugroup : m_hData.ugroups) {
        QString s;
        for (auto elt : ugroup) {
            trimwildcards(elt);
            if (!elt.empty()) {
                if (!s.isEmpty()) {
                    s.append(" ");
                }
                s.append(u8s2qs(elt));
            }
        }
        s = s.trimmed();
        searchTextCMB->addItem(s);
    }
    searchTextCMB->setCompleter(0);

    if (prefs.pvwidth > 100) {
        resize(prefs.pvwidth, prefs.pvheight);
    } else {
        resize(QSize(640, 480).expandedTo(minimumSizeHint()));
    }

    (void)new HelpClient(this);
    HelpClient::installMap((const char *)objectName().toUtf8(), 
                           "RCL.SEARCH.GUI.PREVIEW");

    // signals and slots connections
    connect(searchTextCMB, SIGNAL(editTextChanged(const QString&)), 
            this, SLOT(searchTextChanged(const QString&)));
    connect(nextPB, SIGNAL(clicked()), this, SLOT(nextPressed()));
    connect(prevPB, SIGNAL(clicked()), this, SLOT(prevPressed()));
    connect(clearPB, SIGNAL(clicked()), searchTextCMB, SLOT(clearEditText()));
    connect(editPB, SIGNAL(clicked()), this, SLOT(emitEditRequested()));
    connect(pvTab, SIGNAL(currentChanged(int)), this, SLOT(currentChanged(int)));
    connect(pvTab, SIGNAL(tabCloseRequested(int)), this, SLOT(closeTab(int)));

    connect(new QShortcut(closeKS, this), SIGNAL (activated()), 
            this, SLOT (close()));
    connect(new QShortcut(nextDocInTabKS, this), SIGNAL (activated()), 
            this, SLOT (emitShowNext()));
    connect(nextInTabPB, SIGNAL (clicked()), this, SLOT (emitShowNext()));
    connect(new QShortcut(prevDocInTabKS, this), SIGNAL (activated()), 
            this, SLOT (emitShowPrev()));
    connect(prevInTabPB, SIGNAL (clicked()), this, SLOT (emitShowPrev()));
    connect(new QShortcut(closeTabKS, this), SIGNAL (activated()), 
            this, SLOT (closeCurrentTab()));
    connect(new QShortcut(printTabKS, this), SIGNAL (activated()), 
            this, SIGNAL (printCurrentPreviewRequest()));

    currentChanged(pvTab->currentIndex());
}

void Preview::emitShowNext()
{
    if (m_loading)
        return;
    PreviewTextEdit *edit = currentEditor();
    if (edit) {
        emit(showNext(this, m_searchId, edit->m_docnum));
    }
}

void Preview::emitShowPrev()
{
    if (m_loading)
        return;
    PreviewTextEdit *edit = currentEditor();
    if (edit) {
        emit(showPrev(this, m_searchId, edit->m_docnum));
    }
}

void Preview::closeEvent(QCloseEvent *e)
{
    LOGDEB("Preview::closeEvent. m_loading " << m_loading << "\n");
    if (m_loading) {
        CancelCheck::instance().setCancel();
        e->ignore();
        return;
    }
    prefs.pvwidth = width();
    prefs.pvheight = height();

    /* Release all temporary files (but maybe none is actually set) */
    for (int i = 0; i < pvTab->count(); i++) {
        PreviewTextEdit *edit = editor(i);
        if (edit) {
            forgetTempFile(edit->m_tmpfilename);
        }
    }
    emit previewExposed(this, m_searchId, -1);
    emit previewClosed(this);
    QWidget::closeEvent(e);
}

extern const char *eventTypeToStr(int tp);

bool Preview::eventFilter(QObject *target, QEvent *event)
{
    if (event->type() != QEvent::KeyPress) {
#if 0
        LOGDEB("Preview::eventFilter(): " << eventTypeToStr(event->type()) <<
               "\n");
        if (event->type() == QEvent::MouseButtonRelease) {
            QMouseEvent *mev = (QMouseEvent *)event;
            LOGDEB("Mouse: GlobalY " << mev->globalY() << " y " << mev->y() <<
                   "\n");
        }
#endif
        return false;
    }

    PreviewTextEdit *edit = currentEditor();
    QKeyEvent *keyEvent = (QKeyEvent *)event;

    if (m_dynSearchActive) {
        if (keyEvent->key() == Qt::Key_F3) {
            LOGDEB2("Preview::eventFilter: got F3\n");
            doSearch(searchTextCMB->currentText(), true, 
                     (keyEvent->modifiers() & Qt::ShiftModifier) != 0);
            return true;
        }
        if (target != searchTextCMB)
            return QApplication::sendEvent(searchTextCMB, event);
    } else {
        if (edit && 
            (target == edit || target == edit->viewport())) {
            if (keyEvent->key() == Qt::Key_Slash ||
                (keyEvent->key() == Qt::Key_F &&
                 (keyEvent->modifiers() & Qt::ControlModifier))) {
                LOGDEB2("Preview::eventFilter: got / or C-F\n");
                searchTextCMB->setFocus();
                m_dynSearchActive = true;
                return true;
            } else if (keyEvent->key() == Qt::Key_Space) {
                LOGDEB2("Preview::eventFilter: got Space\n");
                int value = edit->verticalScrollBar()->value();
                value += edit->verticalScrollBar()->pageStep();
                edit->verticalScrollBar()->setValue(value);
                return true;
            } else if (keyEvent->key() == Qt::Key_Backspace) {
                LOGDEB2("Preview::eventFilter: got Backspace\n");
                int value = edit->verticalScrollBar()->value();
                value -= edit->verticalScrollBar()->pageStep();
                edit->verticalScrollBar()->setValue(value);
                return true;
            }
        }
    }

    return false;
}
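
// Summary of the key handling above (descriptive comment only):
//  - F3 (Shift+F3) while incremental search is active: next (previous) match.
//  - '/' or Ctrl+F in the text area: focus the search entry and activate
//    incremental search.
//  - Space / Backspace in the text area: scroll down / up by one page.
//  - Other keys typed while incremental search is active are forwarded to
//    the search entry.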

void Preview::searchTextChanged(const QString & text)
{
    LOGDEB("Preview::searchTextChanged:(" << qs2utf8s(text) << ") current: ("<<
            qs2utf8s(searchTextCMB->currentText()) << ") currentindex " <<
            searchTextCMB->currentIndex() << "\n");
    if (!searchTextCMB->itemText(searchTextCMB->currentIndex()).compare(text)) {
        // Then we assume that the text was set by selecting in the
        // combobox. There does not seem to be another way to
        // discriminate select and hand edit. Note that the
        // activated() signal is called *after* the editTextChanged()
        // one, so it is useless.
        m_searchTextFromIndex = searchTextCMB->currentIndex();
        doSearch("", false, false);
    } else {
        m_searchTextFromIndex = -1;
        if (text.isEmpty()) {
            m_dynSearchActive = false;
            clearPB->setEnabled(false);
        } else {
            m_dynSearchActive = true;
            clearPB->setEnabled(true);
            doSearch(text, false, false);
        }
    }
}

void Preview::emitSaveDocToFile()
{
    PreviewTextEdit *ce = currentEditor();
    if (ce && !ce->m_dbdoc.url.empty()) {
        emit saveDocToFile(ce->m_dbdoc);
    }
}

void Preview::emitEditRequested()
{
    PreviewTextEdit *ce = currentEditor();
    if (ce && !ce->m_dbdoc.url.empty()) {
        emit editRequested(ce->m_dbdoc);
    }
}

// Perform text search. If next is true, we look for the next match of the
// current search, trying to advance and possibly wrapping around. If next is
// false, the search string has been modified, we search for the new string, 
// starting from the current position
void Preview::doSearch(const QString &_text, bool next, bool reverse, 
                       bool wordOnly)
{
    LOGDEB("Preview::doSearch: text [" << qs2utf8s(_text) << "] idx " <<
           m_searchTextFromIndex << " next " << next << " rev " << reverse <<
           " word " << wordOnly << "\n");
    QString text = _text;

    bool matchCase = casematchCB->isChecked();
    PreviewTextEdit *edit = currentEditor();
    if (edit == 0) {
        // ??
        return;
    }

    if (text.isEmpty() || m_searchTextFromIndex != -1) {
        if (!edit->m_plaintorich->haveAnchors()) {
            LOGDEB("NO ANCHORS\n");
            return;
        }
        // The combobox indices are equal to the search ugroup indices
        // in hldata, that's how we built the list.
        if (reverse) {
            edit->m_plaintorich->prevAnchorNum(m_searchTextFromIndex);
        } else {
            edit->m_plaintorich->nextAnchorNum(m_searchTextFromIndex);
        }
        QString aname = edit->m_plaintorich->curAnchorName();
        LOGDEB("Calling scrollToAnchor(" << qs2utf8s(aname) << ")\n");
        edit->scrollToAnchor(aname);
        // Position the cursor approximately at the anchor (top of
        // viewport) so that searches start from here
        QTextCursor cursor = edit->cursorForPosition(QPoint(0, 0));
        edit->setTextCursor(cursor);
        return;
    }

    // If next is false, the user added characters to the current
    // search string.  We need to reset the cursor position to the
    // start of the previous match, else incremental search is going
    // to look for the next occurrence instead of trying to lengthen
    // the current match
    if (!next) {
        QTextCursor cursor = edit->textCursor();
        cursor.setPosition(cursor.anchor(), QTextCursor::KeepAnchor);
        edit->setTextCursor(cursor);
    }
    Chrono chron;
    LOGDEB("Preview::doSearch: first find call\n");
    QTextDocument::FindFlags flags = 0;
    if (reverse)
        flags |= QTextDocument::FindBackward;
    if (wordOnly)
        flags |= QTextDocument::FindWholeWords;
    if (matchCase)
        flags |= QTextDocument::FindCaseSensitively;
    bool found = edit->find(text, flags);
    LOGDEB("Preview::doSearch: first find call return: found " << found <<
           " " << chron.secs() << " S\n");
    // If not found, try to wrap around. 
    if (!found) { 
        LOGDEB("Preview::doSearch: wrapping around\n");
        if (reverse) {
            edit->moveCursor (QTextCursor::End);
        } else {
            edit->moveCursor (QTextCursor::Start);
        }
        LOGDEB("Preview::doSearch: 2nd find call\n");
        chron.restart();
        found = edit->find(text, flags);
        LOGDEB("Preview::doSearch: 2nd find call return found " << found <<
               " " << chron.secs() << " S\n");
    }

    if (found) {
        m_canBeep = true;
    } else {
        if (m_canBeep && !prefs.noBeeps)
            QApplication::beep();
        m_canBeep = false;
    }
    LOGDEB("Preview::doSearch: return\n");
}

void Preview::nextPressed()
{
    LOGDEB2("Preview::nextPressed\n");
    doSearch(searchTextCMB->currentText(), true, false);
}

void Preview::prevPressed()
{
    LOGDEB2("Preview::prevPressed\n");
    doSearch(searchTextCMB->currentText(), true, true);
}

// Called when user clicks on tab
void Preview::currentChanged(int index)
{
    LOGDEB2("PreviewTextEdit::currentChanged\n");
    PreviewTextEdit *edit = editor(index);
    LOGDEB1("Preview::currentChanged(). Editor: " << edit << "\n");
    
    if (edit == 0) {
        LOGERR("Editor child not found\n");
        return;
    }
    edit->setFocus();

    editPB->setEnabled(canOpen(&edit->m_dbdoc, theconfig));

    // Disconnect the print signal and reconnect it to the current editor
    LOGDEB1("Disconnecting reconnecting print signal\n");
    disconnect(this, SIGNAL(printCurrentPreviewRequest()), 0, 0);
    connect(this, SIGNAL(printCurrentPreviewRequest()), edit, SLOT(print()));
    edit->installEventFilter(this);
    edit->viewport()->installEventFilter(this);
    searchTextCMB->installEventFilter(this);
    emit(previewExposed(this, m_searchId, edit->m_docnum));
}

void Preview::closeCurrentTab()
{
    LOGDEB1("Preview::closeCurrentTab: m_loading " << m_loading << "\n");
    if (m_loading) {
        CancelCheck::instance().setCancel();
        return;
    }
    closeTab(pvTab->currentIndex());
}

void Preview::closeTab(int index)
{
    LOGDEB1("Preview::closeTab: m_loading " << m_loading << "\n");
    if (m_loading) {
        CancelCheck::instance().setCancel();
        return;
    }
    PreviewTextEdit *edit = editor(index);
    if (edit)
        forgetTempFile(edit->m_tmpfilename);
    if (pvTab->count() > 1) {
        pvTab->removeTab(index);
    } else {
        close();
    }
}

PreviewTextEdit *Preview::editor(int index)
{
    return dynamic_cast<PreviewTextEdit *>(pvTab->widget(index));
}

PreviewTextEdit *Preview::currentEditor()
{
    LOGDEB2("Preview::currentEditor()\n");
    return editor(pvTab->currentIndex());
}

PreviewTextEdit *Preview::addEditorTab()
{
    LOGDEB1("PreviewTextEdit::addEditorTab()\n");
    PreviewTextEdit *editor = new PreviewTextEdit(pvTab, "pvEdit", this);
    editor->setReadOnly(true);
    editor->setUndoRedoEnabled(false );
    pvTab->addTab(editor, "Tab");
    pvTab->setCurrentIndex(pvTab->count() - 1);
    return editor;
}

void Preview::setCurTabProps(const Rcl::Doc &doc, int docnum)
{
    LOGDEB1("Preview::setCurTabProps\n");
    QString title;
    string ctitle;
    if (doc.getmeta(Rcl::Doc::keytt, &ctitle) && !ctitle.empty()) {
        title = QString::fromUtf8(ctitle.c_str(), ctitle.length());
    } else {
        title = QString::fromLocal8Bit(path_getsimple(doc.url).c_str());
    }
    if (title.length() > 20) {
        title = title.left(10) + "..." + title.right(10);
    }
    int curidx = pvTab->currentIndex();
    pvTab->setTabText(curidx, title);

    char datebuf[100];
    datebuf[0] = 0;
    if (!doc.fmtime.empty() || !doc.dmtime.empty()) {
        time_t mtime = doc.dmtime.empty() ? 
            atoll(doc.fmtime.c_str()) : atoll(doc.dmtime.c_str());
        struct tm *tm = localtime(&mtime);
        strftime(datebuf, 99, "%Y-%m-%d %H:%M:%S", tm);
    }
    LOGDEB("Doc.url: [" << doc.url << "]\n");
    string url;
    printableUrl(theconfig->getDefCharset(), doc.url, url);
    string tiptxt = url + string("\n");
    tiptxt += doc.mimetype + " " + string(datebuf) + "\n";
    if (!ctitle.empty())
        tiptxt += ctitle + "\n";
    pvTab->setTabToolTip(curidx,
                         QString::fromUtf8(tiptxt.c_str(), tiptxt.length()));

    PreviewTextEdit *e = currentEditor();
    if (e) {
        e->m_url = doc.url;
        e->m_ipath = doc.ipath;
        e->m_docnum = docnum;
    }
}

bool Preview::makeDocCurrent(const Rcl::Doc& doc, int docnum, bool sametab)
{
    LOGDEB("Preview::makeDocCurrent: " << doc.url << "\n");

    if (m_loading) {
        LOGERR("Already loading\n");
        return false;
    }

    /* Check if we already have this page */
    for (int i = 0; i < pvTab->count(); i++) {
        PreviewTextEdit *edit =  editor(i);
        if (edit && !edit->m_url.compare(doc.url) && 
            !edit->m_ipath.compare(doc.ipath)) {
            pvTab->setCurrentIndex(i);
            return true;
        }
    }

    // If the window was just created, the first tab was already created during init
    if (!sametab && !m_justCreated && !addEditorTab()) {
        return false;
    }
    m_justCreated = false;
    if (!loadDocInCurrentTab(doc, docnum)) {
        closeCurrentTab();
        return false;
    }
    raise();
    return true;
}

void Preview::togglePlainPre()
{
    switch (prefs.previewPlainPre) {
    case PrefsPack::PP_BR:
        prefs.previewPlainPre = PrefsPack::PP_PRE;
        break;
    case PrefsPack::PP_PRE:
        prefs.previewPlainPre = PrefsPack::PP_BR;
        break;
    case PrefsPack::PP_PREWRAP:
    default:
        prefs.previewPlainPre = PrefsPack::PP_PRE;
        break;
    }
    
    PreviewTextEdit *editor = currentEditor();
    if (editor)
        loadDocInCurrentTab(editor->m_dbdoc, editor->m_docnum);
}

void Preview::emitWordSelect(QString word)
{
    emit(wordSelect(word));
}

/*
  Code for loading a file into an editor window. The operations that
  we call have no provision to indicate progression, and it would be
  complicated or impossible to modify them to do so (Ie: for external 
  format converters).

  We implement a complicated and ugly mechanism based on threads to indicate 
  to the user that the app is doing things: lengthy operations are done in 
  threads and we update a progress indicator while they proceed (but we have 
  no estimate of their total duration).
  
  It might be possible, but complicated (need modifications in
  handler) to implement a kind of bucket brigade, to have the
  beginning of the text displayed faster
*/


// Insert into editor by chunks so that the top becomes visible
// earlier for big texts. This provokes some artifacts (adds empty line),
// so we can't set it too low.
#define CHUNKL 500*1000
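
// Illustrative sketch (not part of the original source): the chunked
// insertion essentially means splitting the final text and appending the
// pieces one at a time, processing events in between so the top becomes
// visible early (qr is the full QString and editor the target
// PreviewTextEdit, as in the loading code below).
#if 0
    for (int pos = 0; pos < (int)qr.length(); pos += CHUNKL) {
        editor->append(qr.mid(pos, CHUNKL));
        qApp->processEvents();
    }
#endif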

// Make sure we don't ever reenter loadDocInCurrentTab: note that I
// don't think it's actually possible, this must be the result of a
// misguided debug session.
class LoadGuard {
    bool *m_bp;
public:
    LoadGuard(bool *bp) {m_bp = bp ; *m_bp = true;}
    ~LoadGuard() {*m_bp = false; CancelCheck::instance().setCancel(false);}
};

bool Preview::loadDocInCurrentTab(const Rcl::Doc &idoc, int docnum)
{
    LOGDEB1("Preview::loadDocInCurrentTab()\n");

    LoadGuard guard(&m_loading);
    CancelCheck::instance().setCancel(false);

    setCurTabProps(idoc, docnum);

    QString msg = QString("Loading: %1 (size %2 bytes)")
        .arg(QString::fromLocal8Bit(idoc.url.c_str()))
        .arg(QString::fromUtf8(idoc.fbytes.c_str()));

    QProgressDialog progress(msg, tr("Cancel"), 0, 0, this);
    progress.setMinimumDuration(2000);
    QEventLoop loop;
    QTimer tT;
    tT.setSingleShot(true);
    connect(&tT, SIGNAL(timeout()), &loop, SLOT(quit()));

    ////////////////////////////////////////////////////////////////////////
    // Load and convert document
    // idoc came out of the index data (main text and some fields missing). 
    // fdoc is the complete one that we are going to extract from storage.
    LoadThread lthr(theconfig, idoc, prefs.previewHtml, this);
    connect(&lthr, SIGNAL(finished()), &loop, SLOT(quit()));

    lthr.start();
    for (int i = 0;;i++) {
        tT.start(1000); 
        loop.exec();
        if (lthr.isFinished())
            break;
        if (progress.wasCanceled()) {
            CancelCheck::instance().setCancel();
        }
        if (i == 1)
            progress.show();
    }

    LOGDEB("loadDocInCurrentTab: after file load: cancel " <<
           CancelCheck::instance().cancelState() << " status " << lthr.status <<
           " text length " << lthr.fdoc.text.length() << "\n");

    if (CancelCheck::instance().cancelState())
        return false;
    if (lthr.status != 0) {
        bool canGetRawText = rcldb && rcldb->storesDocText();
        QString explain;
        if (!lthr.missing.empty()) {
            explain = QString::fromUtf8("
") + tr("Missing helper program: ") + QString::fromLocal8Bit(lthr.missing.c_str()); QMessageBox::warning(0, "Recoll", tr("Can't turn doc into internal " "representation for ") + lthr.fdoc.mimetype.c_str() + explain); } else { if (progress.wasCanceled()) { QMessageBox::warning(0, "Recoll", tr("Canceled")); } else { progress.reset(); // Note that we can't easily check for a readable file // because it's possible that only a region is locked // (e.g. on Windows for an ost file the first block is // readable even if Outlook is running). QString msg; switch (lthr.explain) { case FileInterner::FetchMissing: msg = tr("Error loading the document: file missing."); break; case FileInterner::FetchPerm: msg = tr("Error loading the document: no permission."); break; case FileInterner::FetchNoBackend: msg = tr("Error loading: backend not configured."); break; case FileInterner::InternfileOther: #ifdef _WIN32 msg = tr("Error loading the document: " "other handler error
" "Maybe the application is locking the file ?"); #else msg = tr("Error loading the document: other handler error."); #endif break; } if (canGetRawText) { msg += tr("
Attempting to display from stored text."); } QMessageBox::warning(0, "Recoll", msg); } } if (canGetRawText) { lthr.fdoc = idoc; if (!rcldb->getDocRawText(lthr.fdoc)) { QMessageBox::warning(0, "Recoll", tr("Could not fetch stored text")); progress.close(); return false; } } else { progress.close(); } } // Reset config just in case. theconfig->setKeyDir(""); //////////////////////////////////////////////////////////////////////// // Create preview text: highlight search terms // We don't do the highlighting for very big texts: too long. We // should at least do special char escaping, in case a '&' or '<' // somehow slipped through previous processing. bool highlightTerms = lthr.fdoc.text.length() < (unsigned long)prefs.maxhltextmbs * 1024 * 1024; // Final text is produced in chunks so that we can display the top // while still inserting at bottom PreviewTextEdit *editor = currentEditor(); editor->m_plaintorich->clear(); // For an actual html file, if we want to have the images and // style loaded in the preview, we need to set the search // path. Not too sure this is a good idea as I find them rather // distracting when looking for text, esp. with qtextedit // relatively limited html support (text sometimes get hidden by // images). #if 0 string path = fileurltolocalpath(idoc.url); if (!path.empty()) { path = path_getfather(path); QStringList paths(QString::fromLocal8Bit(path.c_str())); editor->setSearchPaths(paths); } #endif editor->setHtml(""); editor->m_format = Qt::RichText; bool inputishtml = !lthr.fdoc.mimetype.compare("text/html"); QStringList qrichlst; editor->m_plaintorich->set_activatelinks(prefs.previewActiveLinks); #if 1 if (highlightTerms) { progress.setLabelText(tr("Creating preview text")); qApp->processEvents(); if (inputishtml) { LOGDEB1("Preview: got html " << lthr.fdoc.text << "\n"); editor->m_plaintorich->set_inputhtml(true); } else { LOGDEB1("Preview: got plain " << lthr.fdoc.text << "\n"); editor->m_plaintorich->set_inputhtml(false); } ToRichThread rthr(lthr.fdoc.text, m_hData, editor->m_plaintorich, qrichlst, this); connect(&rthr, SIGNAL(finished()), &loop, SLOT(quit())); rthr.start(); for (;;) { tT.start(1000); loop.exec(); if (rthr.isFinished()) break; if (progress.wasCanceled()) { CancelCheck::instance().setCancel(); } } // Conversion to rich text done if (CancelCheck::instance().cancelState()) { if (qrichlst.size() == 0 || qrichlst.front().size() == 0) { // We can't call closeCurrentTab here as it might delete // the object which would be a nasty surprise to our // caller. return false; } else { qrichlst.back() += "Cancelled !"; } } } else { LOGDEB("Preview: no highlighting, loading " << lthr.fdoc.text.size() << " bytes\n"); // No plaintorich() call. In this case, either the text is // html and the html quoting is hopefully correct, or it's // plain-text and there is no need to escape special // characters. We'd still want to split in chunks (so that the // top is displayed faster), but we must not cut tags, and // it's too difficult on html. For text we do the splitting on // a QString to avoid utf8 issues. QString qr = QString::fromUtf8(lthr.fdoc.text.c_str(), lthr.fdoc.text.length()); int l = 0; if (inputishtml) { qrichlst.push_back(qr); } else { editor->setPlainText(""); editor->m_format = Qt::PlainText; for (int pos = 0; pos < (int)qr.length(); pos += l) { l = MIN(CHUNKL, qr.length() - pos); qrichlst.push_back(qr.mid(pos, l)); } } } #else // For testing qtextedit bugs... 
highlightTerms = true; const char *textlist[] = { "Du plain text avec un\n termtag fin de ligne:", "texte apres le tag\n", }; const int listl = sizeof(textlist) / sizeof(char*); for (int i = 0 ; i < listl ; i++) qrichlst.push_back(QString::fromUtf8(textlist[i])); #endif /////////////////////////////////////////////////////////// // Load text into editor window. progress.setLabelText(tr("Loading preview text into editor")); qApp->processEvents(); for (QStringList::iterator it = qrichlst.begin(); it != qrichlst.end(); it++) { qApp->processEvents(); editor->append(*it); // We need to save the rich text for printing, the editor does // not do it consistently for us. editor->m_richtxt.append(*it); if (progress.wasCanceled()) { editor->append("Cancelled !"); LOGDEB("loadDocInCurrentTab: cancelled in editor load\n"); break; } } progress.close(); editor->m_curdsp = PreviewTextEdit::PTE_DSPTXT; //////////////////////////////////////////////////////////////////////// // Finishing steps // Maybe the text was actually empty ? Switch to fields then. Else free-up // the text memory in the loaded document. We still have a copy of the text // in editor->m_richtxt bool textempty = lthr.fdoc.text.empty(); if (!textempty) lthr.fdoc.text.clear(); editor->m_fdoc = lthr.fdoc; editor->m_dbdoc = idoc; editPB->setEnabled(canOpen(&editor->m_dbdoc, theconfig)); if (textempty) editor->displayFields(); // If this is an image, display it instead of the text. if (!idoc.mimetype.compare(0, 6, "image/")) { string fn = fileurltolocalpath(idoc.url); theconfig->setKeyDir(fn.empty() ? "" : path_getfather(fn)); // We want a real file, so if this comes from data or we have // an ipath, create it. if (fn.empty() || !idoc.ipath.empty()) { TempFile temp = lthr.tmpimg; if (temp.ok()) { LOGDEB1("Preview: load: got temp file from internfile\n"); } else if (!FileInterner::idocToFile(temp, string(), theconfig, idoc)) { temp = TempFile(); // just in case. } if (temp.ok()) { rememberTempFile(temp); fn = temp.filename(); editor->m_tmpfilename = fn; } else { editor->m_tmpfilename.erase(); fn.erase(); } } if (!fn.empty()) { editor->m_image = QImage(fn.c_str()); if (!editor->m_image.isNull()) editor->displayImage(); } } // Position the editor so that the first search term is visible if (searchTextCMB->currentText().length() != 0) { // If there is a current search string, perform the search. // Do not beep for an automatic search, this is ennoying. 
m_canBeep = false; doSearch(searchTextCMB->currentText(), true, false); } else { // Position to the first query term if (editor->m_plaintorich->haveAnchors()) { QString aname = editor->m_plaintorich->curAnchorName(); LOGDEB2("Call movetoanchor(" << qs2utf8s(aname) << ")\n"); editor->scrollToAnchor(aname); // Position the cursor approximately at the anchor (top of // viewport) so that searches start from here QTextCursor cursor = editor->cursorForPosition(QPoint(0, 0)); editor->setTextCursor(cursor); } } // Enter document in document history historyEnterDoc(rcldb.get(), g_dynconf, idoc); editor->setFocus(); emit(previewExposed(this, m_searchId, docnum)); LOGDEB("loadDocInCurrentTab: returning true\n"); return true; } PreviewTextEdit::PreviewTextEdit(QWidget* parent, const char* nm, Preview *pv) : QTextBrowser(parent), m_preview(pv), m_plaintorich(new PlainToRichQtPreview()), m_dspflds(false), m_docnum(-1) { setContextMenuPolicy(Qt::CustomContextMenu); setObjectName(nm); connect(this, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); connect(this, SIGNAL(anchorClicked(const QUrl &)), this, SLOT(onAnchorClicked(const QUrl&))); setOpenExternalLinks(false); setOpenLinks(false); } void PreviewTextEdit::onAnchorClicked(const QUrl& url) { LOGDEB("PreviewTextEdit::onAnchorClicked: " << qs2utf8s(url.toString()) << std::endl); if (prefs.previewActiveLinks && m_preview->m_rclmain) { Rcl::Doc doc; doc.url = qs2utf8s(url.toString()).c_str(); doc.mimetype = "text/html"; m_preview->m_rclmain->startNativeViewer(doc); } } void PreviewTextEdit::createPopupMenu(const QPoint& pos) { LOGDEB1("PreviewTextEdit::createPopupMenu()\n"); QMenu *popup = new QMenu(this); switch (m_curdsp) { case PTE_DSPTXT: popup->addAction(tr("Show fields"), this, SLOT(displayFields())); if (!m_image.isNull()) popup->addAction(tr("Show image"), this, SLOT(displayImage())); break; case PTE_DSPFLDS: popup->addAction(tr("Show main text"), this, SLOT(displayText())); if (!m_image.isNull()) popup->addAction(tr("Show image"), this, SLOT(displayImage())); break; case PTE_DSPIMG: default: popup->addAction(tr("Show fields"), this, SLOT(displayFields())); popup->addAction(tr("Show main text"), this, SLOT(displayText())); break; } popup->addAction(tr("Select All"), this, SLOT(selectAll())); popup->addAction(tr("Copy"), this, SLOT(copy())); popup->addAction(tr("Print"), this, SLOT(print())); if (prefs.previewPlainPre) { popup->addAction(tr("Fold lines"), m_preview, SLOT(togglePlainPre())); } else { popup->addAction(tr("Preserve indentation"), m_preview, SLOT(togglePlainPre())); } if (!m_dbdoc.url.empty()) { popup->addAction(tr("Save document to file"), m_preview, SLOT(emitSaveDocToFile())); if (canOpen(&m_dbdoc, theconfig)) { popup->addAction(tr("Open document"), m_preview, SLOT(emitEditRequested())); } } popup->popup(mapToGlobal(pos)); } // Display main text void PreviewTextEdit::displayText() { LOGDEB1("PreviewTextEdit::displayText()\n"); if (m_format == Qt::PlainText) setPlainText(m_richtxt); else setHtml(m_richtxt); m_curdsp = PTE_DSPTXT; } // Display field values void PreviewTextEdit::displayFields() { LOGDEB1("PreviewTextEdit::displayFields()\n"); QString txt = "\n"; txt += "" + QString::fromLocal8Bit(m_url.c_str()); if (!m_ipath.empty()) txt += "|" + QString::fromUtf8(m_ipath.c_str()); txt += "

"; txt += "
\n"; for (const auto& entry: m_fdoc.meta) { if (!entry.second.empty()) txt += "
" + QString::fromUtf8(entry.first.c_str()) + "
" + "
" + QString::fromUtf8(escapeHtml(entry.second).c_str()) + "
\n"; } txt += "
"; setHtml(txt); m_curdsp = PTE_DSPFLDS; } void PreviewTextEdit::displayImage() { LOGDEB1("PreviewTextEdit::displayImage()\n"); if (m_image.isNull()) displayText(); setPlainText(""); if (m_image.width() > width() || m_image.height() > height()) { m_image = m_image.scaled(width(), height(), Qt::KeepAspectRatio); } document()->addResource(QTextDocument::ImageResource, QUrl("image"), m_image); QTextCursor cursor = textCursor(); cursor.insertImage("image"); m_curdsp = PTE_DSPIMG; } void PreviewTextEdit::mouseDoubleClickEvent(QMouseEvent *event) { LOGDEB2("PreviewTextEdit::mouseDoubleClickEvent\n"); QTextEdit::mouseDoubleClickEvent(event); if (textCursor().hasSelection() && m_preview) m_preview->emitWordSelect(textCursor().selectedText()); } void PreviewTextEdit::print() { LOGDEB("PreviewTextEdit::print\n"); if (!m_preview) return; #ifndef QT_NO_PRINTER QPrinter printer; QPrintDialog *dialog = new QPrintDialog(&printer, this); dialog->setWindowTitle(tr("Print Current Preview")); if (dialog->exec() != QDialog::Accepted) return; QTextEdit::print(&printer); #endif } recoll-1.26.3/qtgui/rclzg.h0000644000175000017500000000210113533651561012431 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RCLZG_H_INCLUDED_ #define _RCLZG_H_INCLUDED_ #include "rcldoc.h" enum ZgSendType {ZGSEND_PREVIEW, ZGSEND_OPEN}; #ifndef USE_ZEITGEIST inline void zg_send_event(ZgSendType, const Rcl::Doc&){} #else extern void zg_send_event(ZgSendType tp, const Rcl::Doc& doc); #endif #endif // _RCLZG_H_INCLUDED_ recoll-1.26.3/qtgui/preview_w.h0000644000175000017500000001336213566424763013342 00000000000000/* Copyright (C) 2006-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _PREVIEW_W_H_INCLUDED_ #define _PREVIEW_W_H_INCLUDED_ #include "autoconfig.h" // Always use a qtextbrowser for now, there is no compelling reason to // switch to webkit here #if 1 || defined(RESLIST_TEXTBROWSER) #define PREVIEW_TEXTBROWSER #endif #include #include #include #include #include #ifdef PREVIEW_TEXTBROWSER #include #define PREVIEW_PARENTCLASS QTextBrowser #else #include #define PREVIEW_PARENTCLASS QWebView #endif #include #include "rcldb.h" #include "plaintorich.h" #include "rclmain_w.h" #include "ui_preview.h" class QTabWidget; class QLabel; class QPushButton; class QCheckBox; class Preview; class PlainToRichQtPreview; class QUrl; class RclMain; class PreviewTextEdit : public PREVIEW_PARENTCLASS { Q_OBJECT; public: PreviewTextEdit(QWidget* parent, const char* name, Preview *pv); void moveToAnchor(const QString& name); enum DspType {PTE_DSPTXT, PTE_DSPFLDS, PTE_DSPIMG}; public slots: virtual void displayFields(); virtual void displayText(); virtual void displayImage(); virtual void print(); virtual void createPopupMenu(const QPoint& pos); void onAnchorClicked(const QUrl& url); friend class Preview; protected: void mouseDoubleClickEvent(QMouseEvent *); private: Preview *m_preview; std::shared_ptr m_plaintorich; bool m_dspflds; string m_url; // filename for this tab string m_ipath; // Internal doc path inside file int m_docnum; // Index of doc in db search results. // doc out of internfile (previous fields come from the index) with // main text erased (for space). Rcl::Doc m_fdoc; // The input doc out of the index/query list Rcl::Doc m_dbdoc; // Saved rich (or plain actually) text: the textedit seems to // sometimes (but not always) return its text stripped of tags, so // this is needed (for printing for example) QString m_richtxt; Qt::TextFormat m_format; // Temporary file name (possibly, if displaying image). The // TempFile itself is kept inside main.cpp (because that's where // signal cleanup happens), but we use its name to ask for release // when the tab is closed. string m_tmpfilename; QImage m_image; DspType m_curdsp; }; class Preview : public QDialog, public Ui::Preview { Q_OBJECT public: Preview(RclMain *m, int sid, // Search Id const HighlightData& hdata) // Search terms etc. for highlighting : m_rclmain(m), m_searchId(sid), m_hData(hdata) { setupUi(this); init(); } virtual void closeEvent(QCloseEvent *e); virtual bool eventFilter(QObject *target, QEvent *event); /** * Arrange for the document to be displayed either by exposing the tab * if already loaded, or by creating a new tab and loading it. * @para docnum is used to link back to the result list (to highlight * paragraph when tab exposed etc. 
*/ virtual bool makeDocCurrent(const Rcl::Doc& idoc, int docnum, bool sametab = false); void emitWordSelect(QString); friend class PreviewTextEdit; public slots: // Search stuff virtual void searchTextChanged(const QString& text); virtual void doSearch(const QString& str, bool next, bool reverse, bool wo = false); virtual void nextPressed(); virtual void prevPressed(); // Tabs management virtual void currentChanged(int); virtual void closeCurrentTab(); virtual void closeTab(int index); virtual void emitShowNext(); virtual void emitShowPrev(); virtual void emitSaveDocToFile(); virtual void emitEditRequested(); virtual void togglePlainPre(); signals: void previewClosed(Preview *); void wordSelect(QString); void showNext(Preview *w, int sid, int docnum); void showPrev(Preview *w, int sid, int docnum); void previewExposed(Preview *w, int sid, int docnum); void printCurrentPreviewRequest(); void saveDocToFile(Rcl::Doc); void editRequested(Rcl::Doc); private: RclMain *m_rclmain; // Identifier of search in main window. This is used to check that // we make sense when requesting the next document when browsing // successive search results in a tab. int m_searchId; bool m_dynSearchActive{false}; // Index value the search text comes from. -1 if text was edited int m_searchTextFromIndex{0}; bool m_canBeep{true}; bool m_loading{false}; HighlightData m_hData; bool m_justCreated{true}; // First tab create is different void init(); virtual void setCurTabProps(const Rcl::Doc& doc, int docnum); virtual PreviewTextEdit *editor(int); virtual PreviewTextEdit *currentEditor(); virtual PreviewTextEdit *addEditorTab(); virtual bool loadDocInCurrentTab(const Rcl::Doc& idoc, int dnm); }; #endif /* _PREVIEW_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/uiprefs_w.cpp0000644000175000017500000005067513566424763013701 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "safesysstat.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "recoll.h" #include "guiutils.h" #include "rclconfig.h" #include "pathut.h" #include "uiprefs_w.h" #include "viewaction_w.h" #include "log.h" #include "editdialog.h" #include "rclmain_w.h" #include "ptrans_w.h" void UIPrefsDialog::init() { m_viewAction = 0; // See enum above and keep in order ! 
ssearchTypCMB->addItem(tr("Any term")); ssearchTypCMB->addItem(tr("All terms")); ssearchTypCMB->addItem(tr("File name")); ssearchTypCMB->addItem(tr("Query language")); ssearchTypCMB->addItem(tr("Value from previous program exit")); connect(viewActionPB, SIGNAL(clicked()), this, SLOT(showViewAction())); connect(reslistFontPB, SIGNAL(clicked()), this, SLOT(showFontDialog())); connect(resetFontPB, SIGNAL(clicked()), this, SLOT(resetReslistFont())); connect(stylesheetPB, SIGNAL(clicked()),this, SLOT(showStylesheetDialog())); connect(resetSSPB, SIGNAL(clicked()), this, SLOT(resetStylesheet())); connect(snipCssPB, SIGNAL(clicked()),this, SLOT(showSnipCssDialog())); connect(synFilePB, SIGNAL(clicked()),this, SLOT(showSynFileDialog())); connect(resetSnipCssPB, SIGNAL(clicked()), this, SLOT(resetSnipCss())); connect(idxLV, SIGNAL(itemSelectionChanged()), this, SLOT(extradDbSelectChanged())); connect(ptransPB, SIGNAL(clicked()), this, SLOT(extraDbEditPtrans())); connect(addExtraDbPB, SIGNAL(clicked()), this, SLOT(addExtraDbPB_clicked())); connect(delExtraDbPB, SIGNAL(clicked()), this, SLOT(delExtraDbPB_clicked())); connect(togExtraDbPB, SIGNAL(clicked()), this, SLOT(togExtraDbPB_clicked())); connect(actAllExtraDbPB, SIGNAL(clicked()), this, SLOT(actAllExtraDbPB_clicked())); connect(unacAllExtraDbPB, SIGNAL(clicked()), this, SLOT(unacAllExtraDbPB_clicked())); connect(CLEditPara, SIGNAL(clicked()), this, SLOT(editParaFormat())); connect(CLEditHeader, SIGNAL(clicked()), this, SLOT(editHeaderText())); connect(buttonOk, SIGNAL(clicked()), this, SLOT(accept())); connect(buttonCancel, SIGNAL(clicked()), this, SLOT(reject())); connect(buildAbsCB, SIGNAL(toggled(bool)), replAbsCB, SLOT(setEnabled(bool))); connect(ssNoCompleteCB, SIGNAL(toggled(bool)), ssSearchOnCompleteCB, SLOT(setDisabled(bool))); setFromPrefs(); } // Update dialog state from stored prefs void UIPrefsDialog::setFromPrefs() { // Most values are stored in the prefs struct. Some rarely used // ones go directly through the settings QSettings settings("Recoll.org", "recoll"); // Entries per result page spinbox pageLenSB->setValue(prefs.respagesize); maxHistSizeSB->setValue(prefs.historysize); collapseDupsCB->setChecked(prefs.collapseDuplicates); maxHLTSB->setValue(prefs.maxhltextmbs); if (prefs.ssearchTypSav) { ssearchTypCMB->setCurrentIndex(4); } else { ssearchTypCMB->setCurrentIndex(prefs.ssearchTyp); } switch (prefs.filterCtlStyle) { case PrefsPack::FCS_MN: filterMN_RB->setChecked(1); break; case PrefsPack::FCS_CMB: filterCMB_RB->setChecked(1); break; case PrefsPack::FCS_BT: default: filterBT_RB->setChecked(1); break; } noBeepsCB->setChecked(prefs.noBeeps); ssNoCompleteCB->setChecked(prefs.ssearchNoComplete); ssSearchOnCompleteCB->setChecked(prefs.ssearchStartOnComplete); ssSearchOnCompleteCB->setEnabled(!prefs.ssearchNoComplete); syntlenSB->setValue(prefs.syntAbsLen); syntctxSB->setValue(prefs.syntAbsCtx); initStartAdvCB->setChecked(prefs.startWithAdvSearchOpen); keepSortCB->setChecked(prefs.keepSort); showTrayIconCB->setChecked(prefs.showTrayIcon); if (!prefs.showTrayIcon) { prefs.closeToTray = false; prefs.trayMessages = false; } closeToTrayCB->setEnabled(showTrayIconCB->checkState()); trayMessagesCB->setEnabled(showTrayIconCB->checkState()); closeToTrayCB->setChecked(prefs.closeToTray); trayMessagesCB->setChecked(prefs.trayMessages); // See qxtconfirmationmessage. Needs to be -1 for the dialog to show. 
showTempFileWarningCB->setChecked(prefs.showTempFileWarning == -1); anchorTamilHackCB->setChecked(settings.value("anchorSpcHack", 0).toBool()); previewHtmlCB->setChecked(prefs.previewHtml); previewActiveLinksCB->setChecked(prefs.previewActiveLinks); switch (prefs.previewPlainPre) { case PrefsPack::PP_BR: plainBRRB->setChecked(1); break; case PrefsPack::PP_PRE: plainPRERB->setChecked(1); break; case PrefsPack::PP_PREWRAP: default: plainPREWRAPRB->setChecked(1); break; } // Query terms color qtermStyleLE->setText(prefs.qtermstyle); // Abstract snippet separator string abssepLE->setText(prefs.abssep); dateformatLE->setText(prefs.reslistdateformat); // Result list font family and size reslistFontFamily = prefs.reslistfontfamily; reslistFontSize = prefs.reslistfontsize; setupReslistFontPB(); // Style sheet qssFile = prefs.qssFile; if (qssFile.isEmpty()) { stylesheetPB->setText(tr("Choose")); } else { string nm = path_getsimple((const char *)qssFile.toLocal8Bit()); stylesheetPB->setText(QString::fromLocal8Bit(nm.c_str())); } snipCssFile = prefs.snipCssFile; if (snipCssFile.isEmpty()) { snipCssPB->setText(tr("Choose")); } else { string nm = path_getsimple((const char *)snipCssFile.toLocal8Bit()); snipCssPB->setText(QString::fromLocal8Bit(nm.c_str())); } snipwMaxLenSB->setValue(prefs.snipwMaxLength); snipwByPageCB->setChecked(prefs.snipwSortByPage); paraFormat = prefs.reslistformat; headerText = prefs.reslistheadertext; // Stemming language combobox stemLangCMB->clear(); stemLangCMB->addItem(g_stringNoStem); stemLangCMB->addItem(g_stringAllStem); vector langs; if (!getStemLangs(langs)) { QMessageBox::warning(0, "Recoll", tr("error retrieving stemming languages")); } int cur = prefs.queryStemLang == "" ? 0 : 1; for (vector::const_iterator it = langs.begin(); it != langs.end(); it++) { stemLangCMB-> addItem(QString::fromUtf8(it->c_str(), it->length())); if (cur == 0 && !strcmp((const char*)prefs.queryStemLang.toUtf8(), it->c_str())) { cur = stemLangCMB->count(); } } stemLangCMB->setCurrentIndex(cur); autoPhraseCB->setChecked(prefs.ssearchAutoPhrase); autoPThreshSB->setValue(prefs.ssearchAutoPhraseThreshPC); buildAbsCB->setChecked(prefs.queryBuildAbstract); replAbsCB->setEnabled(prefs.queryBuildAbstract); replAbsCB->setChecked(prefs.queryReplaceAbstract); autoSuffsCB->setChecked(prefs.autoSuffsEnable); autoSuffsLE->setText(prefs.autoSuffs); synFileCB->setChecked(prefs.synFileEnable); synFile = prefs.synFile; if (synFile.isEmpty()) { synFilePB->setText(tr("Choose")); } else { string nm = path_getsimple((const char *)synFile.toLocal8Bit()); synFilePB->setText(QString::fromLocal8Bit(nm.c_str())); } // Initialize the extra indexes listboxes idxLV->clear(); for (const auto& dbdir : prefs.allExtraDbs) { QListWidgetItem *item = new QListWidgetItem(QString::fromLocal8Bit(dbdir.c_str()), idxLV); if (item) item->setCheckState(Qt::Unchecked); } for (const auto& dbdir : prefs.activeExtraDbs) { auto items = idxLV->findItems (QString::fromLocal8Bit(dbdir.c_str()), Qt::MatchFixedString|Qt::MatchCaseSensitive); for (auto& entry : items) { entry->setCheckState(Qt::Checked); } } idxLV->sortItems(); } void UIPrefsDialog::setupReslistFontPB() { QString s; if (reslistFontFamily.length() == 0) { reslistFontPB->setText(tr("Default QtWebkit font")); } else { reslistFontPB->setText(reslistFontFamily + "-" + s.setNum(reslistFontSize)); } } void UIPrefsDialog::accept() { // Most values are stored in the prefs struct. 
Some rarely used // ones go directly through the settings QSettings settings("Recoll.org", "recoll"); prefs.noBeeps = noBeepsCB->isChecked(); prefs.ssearchNoComplete = ssNoCompleteCB->isChecked(); prefs.ssearchStartOnComplete = ssSearchOnCompleteCB->isChecked(); if (ssearchTypCMB->currentIndex() == 4) { prefs.ssearchTypSav = true; // prefs.ssearchTyp will be set from the current value when // exiting the program } else { prefs.ssearchTypSav = false; prefs.ssearchTyp = ssearchTypCMB->currentIndex(); } if (filterMN_RB->isChecked()) { prefs.filterCtlStyle = PrefsPack::FCS_MN; } else if (filterCMB_RB->isChecked()) { prefs.filterCtlStyle = PrefsPack::FCS_CMB; } else { prefs.filterCtlStyle = PrefsPack::FCS_BT; } m_mainWindow->setFilterCtlStyle(prefs.filterCtlStyle); prefs.respagesize = pageLenSB->value(); prefs.historysize = maxHistSizeSB->value(); prefs.collapseDuplicates = collapseDupsCB->isChecked(); prefs.maxhltextmbs = maxHLTSB->value(); prefs.qtermstyle = qtermStyleLE->text(); prefs.abssep = abssepLE->text(); prefs.reslistdateformat = dateformatLE->text(); prefs.creslistdateformat = (const char*)prefs.reslistdateformat.toUtf8(); prefs.reslistfontfamily = reslistFontFamily; prefs.reslistfontsize = reslistFontSize; prefs.qssFile = qssFile; QTimer::singleShot(0, m_mainWindow, SLOT(applyStyleSheet())); prefs.snipCssFile = snipCssFile; prefs.reslistformat = paraFormat; prefs.reslistheadertext = headerText; if (prefs.reslistformat.trimmed().isEmpty()) { prefs.reslistformat = prefs.dfltResListFormat; paraFormat = prefs.reslistformat; } prefs.snipwMaxLength = snipwMaxLenSB->value(); prefs.snipwSortByPage = snipwByPageCB->isChecked(); prefs.creslistformat = (const char*)prefs.reslistformat.toUtf8(); if (stemLangCMB->currentIndex() == 0) { prefs.queryStemLang = ""; } else if (stemLangCMB->currentIndex() == 1) { prefs.queryStemLang = "ALL"; } else { prefs.queryStemLang = stemLangCMB->currentText(); } prefs.ssearchAutoPhrase = autoPhraseCB->isChecked(); prefs.ssearchAutoPhraseThreshPC = autoPThreshSB->value(); prefs.queryBuildAbstract = buildAbsCB->isChecked(); prefs.queryReplaceAbstract = buildAbsCB->isChecked() && replAbsCB->isChecked(); prefs.startWithAdvSearchOpen = initStartAdvCB->isChecked(); prefs.keepSort = keepSortCB->isChecked(); prefs.showTrayIcon = showTrayIconCB->isChecked(); m_mainWindow->enableTrayIcon(prefs.showTrayIcon); prefs.closeToTray = closeToTrayCB->isChecked(); prefs.trayMessages = trayMessagesCB->isChecked(); prefs.showTempFileWarning = showTempFileWarningCB->isChecked() ? 
-1 : 1024; settings.setValue("anchorSpcHack", anchorTamilHackCB->isChecked()); prefs.previewHtml = previewHtmlCB->isChecked(); prefs.previewActiveLinks = previewActiveLinksCB->isChecked(); if (plainBRRB->isChecked()) { prefs.previewPlainPre = PrefsPack::PP_BR; } else if (plainPRERB->isChecked()) { prefs.previewPlainPre = PrefsPack::PP_PRE; } else { prefs.previewPlainPre = PrefsPack::PP_PREWRAP; } prefs.syntAbsLen = syntlenSB->value(); prefs.syntAbsCtx = syntctxSB->value(); prefs.autoSuffsEnable = autoSuffsCB->isChecked(); prefs.autoSuffs = autoSuffsLE->text(); prefs.synFileEnable = synFileCB->isChecked(); prefs.synFile = synFile; prefs.allExtraDbs.clear(); prefs.activeExtraDbs.clear(); for (int i = 0; i < idxLV->count(); i++) { QListWidgetItem *item = idxLV->item(i); if (item) { prefs.allExtraDbs.push_back((const char *)item->text().toLocal8Bit()); if (item->checkState() == Qt::Checked) { prefs.activeExtraDbs.push_back((const char *) item->text().toLocal8Bit()); } } } rwSettings(true); string reason; maybeOpenDb(reason, true); emit uiprefsDone(); QDialog::accept(); } void UIPrefsDialog::editParaFormat() { EditDialog dialog(this); dialog.setWindowTitle(tr("Result list paragraph format " "(erase all to reset to default)")); dialog.plainTextEdit->setPlainText(paraFormat); int result = dialog.exec(); if (result == QDialog::Accepted) paraFormat = dialog.plainTextEdit->toPlainText(); } void UIPrefsDialog::editHeaderText() { EditDialog dialog(this); dialog.setWindowTitle(tr("Result list header (default is empty)")); dialog.plainTextEdit->setPlainText(headerText); int result = dialog.exec(); if (result == QDialog::Accepted) headerText = dialog.plainTextEdit->toPlainText(); } void UIPrefsDialog::reject() { setFromPrefs(); QDialog::reject(); } void UIPrefsDialog::setStemLang(const QString& lang) { int cur = 0; if (lang == "") { cur = 0; } else if (lang == "ALL") { cur = 1; } else { for (int i = 1; i < stemLangCMB->count(); i++) { if (lang == stemLangCMB->itemText(i)) { cur = i; break; } } } stemLangCMB->setCurrentIndex(cur); } void UIPrefsDialog::showFontDialog() { bool ok; QFont font; if (prefs.reslistfontfamily.length()) { font.setFamily(prefs.reslistfontfamily); font.setPointSize(prefs.reslistfontsize); } font = QFontDialog::getFont(&ok, font, this); if (ok) { // We used to check if the default font was set, in which case // we erased the preference, but this would result in letting // webkit make a choice of default font which it usually seems // to do wrong. So now always set the font. There is still a // way for the user to let webkit choose the default though: // click reset, then the font name and size will be empty. 
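// Illustration only (hypothetical helper, kept disabled): this dialog
// follows the usual QFontDialog::getFont() round trip -- seed the dialog
// with the stored family/size (done above), then overwrite the preference
// only when the user validates (done just below). Names here are made up
// for the sketch.
#if 0
#include <QFontDialog>
static void pickReslistFont(QWidget *parent, QString& family, int& size)
{
    QFont seed;
    if (!family.isEmpty()) {
        seed.setFamily(family);
        seed.setPointSize(size);
    }
    bool ok = false;
    QFont chosen = QFontDialog::getFont(&ok, seed, parent);
    if (ok) {
        // Always record an explicit font. An empty family is reserved
        // for "reset": let the display widget pick its own default.
        family = chosen.family();
        size = chosen.pointSize();
    }
}
#endif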
reslistFontFamily = font.family(); reslistFontSize = font.pointSize(); setupReslistFontPB(); } } void UIPrefsDialog::showStylesheetDialog() { qssFile = myGetFileName(false, "Select stylesheet file", true); string nm = path_getsimple((const char *)qssFile.toLocal8Bit()); if (!nm.empty()) { stylesheetPB->setText(QString::fromLocal8Bit(nm.c_str())); } else { stylesheetPB->setText(tr("Choose")); } } void UIPrefsDialog::resetStylesheet() { qssFile = ""; stylesheetPB->setText(tr("Choose")); } void UIPrefsDialog::showSnipCssDialog() { snipCssFile = myGetFileName(false, "Select snippets window CSS file", true); string nm = path_getsimple((const char *)snipCssFile.toLocal8Bit()); snipCssPB->setText(QString::fromLocal8Bit(nm.c_str())); } void UIPrefsDialog::resetSnipCss() { snipCssFile = ""; snipCssPB->setText(tr("Choose")); } void UIPrefsDialog::showSynFileDialog() { synFile = myGetFileName(false, "Select synonyms file", true); if (synFile.isEmpty()) return; string nm = path_getsimple((const char *)synFile.toLocal8Bit()); synFilePB->setText(QString::fromLocal8Bit(nm.c_str())); } void UIPrefsDialog::resetReslistFont() { reslistFontFamily = ""; reslistFontSize = 0; setupReslistFontPB(); } void UIPrefsDialog::showViewAction() { if (m_viewAction== 0) { m_viewAction = new ViewAction(0); } else { // Close and reopen, in hope that makes us visible... m_viewAction->close(); } m_viewAction->show(); } void UIPrefsDialog::showViewAction(const QString& mt) { showViewAction(); m_viewAction->selectMT(mt); } //////////////////////////////////////////// // External / extra search indexes setup void UIPrefsDialog::extradDbSelectChanged() { if (idxLV->selectedItems().size() <= 1) ptransPB->setEnabled(true); else ptransPB->setEnabled(false); } void UIPrefsDialog::extraDbEditPtrans() { string dbdir; if (idxLV->selectedItems().size() == 0) { dbdir = theconfig->getDbDir(); } else if (idxLV->selectedItems().size() == 1) { QListWidgetItem *item = idxLV->selectedItems()[0]; QString qd = item->data(Qt::DisplayRole).toString(); dbdir = (const char *)qd.toLocal8Bit(); } else { QMessageBox::warning( 0, "Recoll", tr("At most one index should be selected")); return; } dbdir = path_canon(dbdir); EditTrans *etrans = new EditTrans(dbdir, this); etrans->show(); } void UIPrefsDialog::togExtraDbPB_clicked() { for (int i = 0; i < idxLV->count(); i++) { QListWidgetItem *item = idxLV->item(i); if (item->isSelected()) { if (item->checkState() == Qt::Checked) { item->setCheckState(Qt::Unchecked); } else { item->setCheckState(Qt::Checked); } } } } void UIPrefsDialog::actAllExtraDbPB_clicked() { for (int i = 0; i < idxLV->count(); i++) { QListWidgetItem *item = idxLV->item(i); item->setCheckState(Qt::Checked); } } void UIPrefsDialog::unacAllExtraDbPB_clicked() { for (int i = 0; i < idxLV->count(); i++) { QListWidgetItem *item = idxLV->item(i); item->setCheckState(Qt::Unchecked); } } void UIPrefsDialog::delExtraDbPB_clicked() { QList items = idxLV->selectedItems(); for (QList::iterator it = items.begin(); it != items.end(); it++) { delete *it; } } static bool samedir(const string& dir1, const string& dir2) { #ifdef _WIN32 return !dir1.compare(dir2); #else struct stat st1, st2; if (stat(dir1.c_str(), &st1)) return false; if (stat(dir2.c_str(), &st2)) return false; if (st1.st_dev == st2.st_dev && st1.st_ino == st2.st_ino) { return true; } return false; #endif } void UIPrefsDialog::on_showTrayIconCB_clicked() { if (!showTrayIconCB->checkState()) { closeToTrayCB->setChecked(false); trayMessagesCB->setChecked(false); } 
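// Illustration only (hypothetical example, kept disabled): the samedir()
// helper defined a little above compares directories by (st_dev, st_ino)
// on Unix, so two different spellings of the same location are detected
// as duplicates when adding an external index. The paths below are made up.
#if 0
static void samedirExample()
{
    // True: a redundant slash or a symbolic link still resolves to the
    // same device/inode pair.
    bool dup = samedir("/home/me/.recoll/xapiandb",
                       "/home/me/.recoll//xapiandb/");
    // On Windows samedir() falls back to a plain string comparison.
    (void)dup;
}
#endif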
closeToTrayCB->setEnabled(showTrayIconCB->checkState()); trayMessagesCB->setEnabled(showTrayIconCB->checkState()); } /** * Browse to add another index. * We do a textual comparison to check for duplicates, except for * the main db for which we check inode numbers. */ void UIPrefsDialog::addExtraDbPB_clicked() { QString input = myGetFileName(true, tr("Select recoll config directory or " "xapian index directory " "(e.g.: /home/me/.recoll or " "/home/me/.recoll/xapiandb)")); if (input.isEmpty()) return; string dbdir = (const char *)input.toLocal8Bit(); if (path_exists(path_cat(dbdir, "recoll.conf"))) { // Chosen dir is config dir. RclConfig conf(&dbdir); dbdir = conf.getDbDir(); if (dbdir.empty()) { QMessageBox::warning( 0, "Recoll", tr("The selected directory looks like a Recoll " "configuration directory but the configuration " "could not be read")); return; } } LOGDEB("ExtraDbDial: got: [" << (dbdir) << "]\n" ); bool stripped; if (!Rcl::Db::testDbDir(dbdir, &stripped)) { QMessageBox::warning(0, "Recoll", tr("The selected directory does not appear to be a Xapian index")); return; } if (o_index_stripchars != stripped) { QMessageBox::warning(0, "Recoll", tr("Cant add index with different case/diacritics" " stripping option")); return; } if (samedir(dbdir, theconfig->getDbDir())) { QMessageBox::warning(0, "Recoll", tr("This is the main/local index!")); return; } for (int i = 0; i < idxLV->count(); i++) { QListWidgetItem *item = idxLV->item(i); string existingdir = (const char *)item->text().toLocal8Bit(); if (samedir(dbdir, existingdir)) { QMessageBox::warning( 0, "Recoll", tr("The selected directory is already in the " "index list")); return; } } QListWidgetItem *item = new QListWidgetItem(QString::fromLocal8Bit(dbdir.c_str()), idxLV); item->setCheckState(Qt::Checked); idxLV->sortItems(); } recoll-1.26.3/qtgui/specialindex.h0000644000175000017500000000303213533651561013764 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _SPECIDX_W_H_INCLUDED_ #define _SPECIDX_W_H_INCLUDED_ #include #include #include "ui_specialindex.h" class QPushButton; class SpecIdxW : public QDialog, public Ui::SpecIdxW { Q_OBJECT public: SpecIdxW(QWidget * parent = 0) : QDialog(parent) { setupUi(this); selPatsLE->setEnabled(false); connect(browsePB, SIGNAL(clicked()), this, SLOT(onBrowsePB_clicked())); connect(targLE, SIGNAL(textChanged(const QString&)), this, SLOT(onTargLE_textChanged(const QString&))); } bool noRetryFailed(); bool eraseFirst(); std::vector selpatterns(); std::string toptarg(); public slots: void onTargLE_textChanged(const QString&); void onBrowsePB_clicked(); }; #endif /* _SPECIDX_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/uiprefs.ui0000644000175000017500000011506613566424763013202 00000000000000 uiPrefsDialogBase 0 0 542 449 Recoll - User Preferences true 0 User interface Highlight CSS style for query terms false 50 0 Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Application Qt style sheet false Opens a dialog to select the style sheet file Choose Resets the style sheet to default Reset 1 0 Texts over this size will not be highlighted in preview (too slow). Maximum text size highlighted for preview (megabytes) false 1 3 Prefer Html to plain text for preview. false Make links inside the preview window clickable, and start an external browser when they are clicked. Activate links in preview. false Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Plain text to HTML line style <BR> buttonGroup <PRE> buttonGroup <PRE> + wrap buttonGroup Choose editor applications Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel buttonGroup_2 Toolbar Combobox buttonGroup_2 Menu buttonGroup_2 Start with simple search mode false 1 0 Limit the size of the search history. Use 0 to disable, -1 for unlimited. Maximum size of search history (0: disable, -1: unlimited): false -1 0 Disable Qt autocompletion in search entry. false Start search on completer popup activation. true Start with advanced search dialog open. false Remember sort activation state. false Show system tray icon. false Close to tray instead of exiting. false Generate desktop notifications. false Show warning when opening temporary file. true Suppress all beeps. false Qt::Vertical QSizePolicy::Expanding 20 70 Result List 1 0 Number of entries in a result page false 1 9999 8 Result list font false Opens a dialog to select the result list font Helvetica-10 Resets the result list font to the system default Reset Edit result paragraph format string Edit result page html header insert Date format (strftime(3)) false 30 0 Abstract snippet separator false 30 0 User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Snippets window CSS file false Opens a dialog to select the Snippets window CSS style sheet file Choose Resets the Snippets window style Reset 1 0 Maximum number of snippets displayed in the snippets window false 1 10000000 10 1000 Sort snippets by page number (default: by weight). false Qt::Vertical 20 40 Search parameters If checked, results with the same content under different names will only be shown once. Hide duplicate results. 
false Stemming language false QFrame::HLine QFrame::Sunken A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Automatically add phrase to simple searches 1 0 Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Autophrase term frequency threshold percentage false 0.200000000000000 2.000000000000000 QFrame::HLine QFrame::Sunken Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Dynamically build abstracts Do we synthetize an abstract even if the document seemed to have one? Replace abstracts from documents 2 0 Synthetic abstract size (characters) false 1 0 80 100000 10 250 1 0 Synthetic abstract context words false 2 20000 4 QFrame::HLine QFrame::Sunken 1 0 The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Query language magic file name suffixes. false Enable 30 0 QFrame::HLine QFrame::Sunken 1 0 Synonyms file false Enable 30 0 Choose Qt::Vertical QSizePolicy::Expanding 20 70 External Indexes QAbstractItemView::ExtendedSelection Toggle selected Activate All Deactivate All Set path translations for the selected index or for the main one if no selection exists. Paths translations QFrame::HLine QFrame::Sunken Remove from list. This has no effect on the disk index. Remove selected Qt::Horizontal QSizePolicy::Expanding 16 20 true Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Add index Misc Work around QTBUG-78923 by inserting space before anchor text Qt::Vertical 20 40 Qt::Horizontal QSizePolicy::Expanding 210 20 Apply changes &OK true true Discard changes &Cancel true recoll-1.26.3/qtgui/snippets_w.cpp0000644000175000017500000002573413566714503014061 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #if defined(USING_WEBKIT) # include # include # include # define QWEBSETTINGS QWebSettings # define QWEBPAGE QWebPage #elif defined(USING_WEBENGINE) // Notes for WebEngine // - All links must begin with http:// for acceptNavigationRequest to be // called. // - The links passed to acceptNav.. 
have the host part // lowercased -> we change S0 to http://h/S0, not http://S0 # include # include # include # define QWEBSETTINGS QWebEngineSettings # define QWEBPAGE QWebEnginePage #else #include #endif #include #include "log.h" #include "recoll.h" #include "snippets_w.h" #include "guiutils.h" #include "rcldb.h" #include "rclhelp.h" #include "plaintorich.h" using namespace std; #if defined(USING_WEBKIT) #define browser ((QWebView*)browserw) #elif defined(USING_WEBENGINE) #define browser ((QWebEngineView*)browserw) #else #define browser ((QTextBrowser*)browserw) #endif class PlainToRichQtSnippets : public PlainToRich { public: virtual string startMatch(unsigned int) { return string(""); } virtual string endMatch() { return string(""); } }; static PlainToRichQtSnippets g_hiliter; void SnippetsW::init() { m_sortingByPage = prefs.snipwSortByPage; QPushButton *searchButton = new QPushButton(tr("Search")); searchButton->setAutoDefault(false); buttonBox->addButton(searchButton, QDialogButtonBox::ActionRole); // setWindowFlags(Qt::WindowStaysOnTopHint); searchFM->hide(); new QShortcut(QKeySequence::Find, this, SLOT(slotEditFind())); new QShortcut(QKeySequence(Qt::Key_Slash), this, SLOT(slotEditFind())); new QShortcut(QKeySequence(Qt::Key_Escape), searchFM, SLOT(hide())); new QShortcut(QKeySequence::FindNext, this, SLOT(slotEditFindNext())); new QShortcut(QKeySequence(Qt::Key_F3), this, SLOT(slotEditFindNext())); new QShortcut(QKeySequence::FindPrevious, this, SLOT(slotEditFindPrevious())); new QShortcut(QKeySequence(Qt::SHIFT + Qt::Key_F3), this, SLOT(slotEditFindPrevious())); QPushButton *closeButton = buttonBox->button(QDialogButtonBox::Close); if (closeButton) connect(closeButton, SIGNAL(clicked()), this, SLOT(close())); connect(searchButton, SIGNAL(clicked()), this, SLOT(slotEditFind())); connect(searchLE, SIGNAL(textChanged(const QString&)), this, SLOT(slotSearchTextChanged(const QString&))); connect(nextPB, SIGNAL(clicked()), this, SLOT(slotEditFindNext())); connect(prevPB, SIGNAL(clicked()), this, SLOT(slotEditFindPrevious())); // Get rid of the placeholder widget created from the .ui delete browserw; #if defined(USING_WEBKIT) browserw = new QWebView(this); verticalLayout->insertWidget(0, browserw); browser->setUrl(QUrl(QString::fromUtf8("about:blank"))); connect(browser, SIGNAL(linkClicked(const QUrl &)), this, SLOT(onLinkClicked(const QUrl &))); browser->page()->setLinkDelegationPolicy(QWebPage::DelegateAllLinks); browser->page()->currentFrame()->setScrollBarPolicy(Qt::Horizontal, Qt::ScrollBarAlwaysOff); QWEBSETTINGS *ws = browser->page()->settings(); if (prefs.reslistfontfamily != "") { ws->setFontFamily(QWEBSETTINGS::StandardFont, prefs.reslistfontfamily); ws->setFontSize(QWEBSETTINGS::DefaultFontSize, prefs.reslistfontsize); } if (!prefs.snipCssFile.isEmpty()) ws->setUserStyleSheetUrl(QUrl::fromLocalFile(prefs.snipCssFile)); browserw->setContextMenuPolicy(Qt::CustomContextMenu); connect(browserw, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); #elif defined(USING_WEBENGINE) browserw = new QWebEngineView(this); verticalLayout->insertWidget(0, browserw); browser->setPage(new SnipWebPage(this)); QWEBSETTINGS *ws = browser->page()->settings(); if (prefs.reslistfontfamily != "") { ws->setFontFamily(QWEBSETTINGS::StandardFont, prefs.reslistfontfamily); ws->setFontSize(QWEBSETTINGS::DefaultFontSize, prefs.reslistfontsize); } // Stylesheet TBD browserw->setContextMenuPolicy(Qt::CustomContextMenu); connect(browserw, 
SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); #else browserw = new QTextBrowser(this); verticalLayout->insertWidget(0, browserw); connect(browser, SIGNAL(anchorClicked(const QUrl &)), this, SLOT(onLinkClicked(const QUrl &))); browser->setReadOnly(true); browser->setUndoRedoEnabled(false); browser->setOpenLinks(false); browser->setTabChangesFocus(true); if (prefs.reslistfontfamily.length()) { QFont nfont(prefs.reslistfontfamily, prefs.reslistfontsize); browser->setFont(nfont); } else { browser->setFont(QFont()); } #endif } void SnippetsW::createPopupMenu(const QPoint& pos) { QMenu *popup = new QMenu(this); if (m_sortingByPage) { popup->addAction(tr("Sort By Relevance"), this, SLOT(reloadByRelevance())); } else { popup->addAction(tr("Sort By Page"), this, SLOT(reloadByPage())); } popup->popup(mapToGlobal(pos)); } void SnippetsW::reloadByRelevance() { m_sortingByPage = false; onSetDoc(m_doc, m_source); } void SnippetsW::reloadByPage() { m_sortingByPage = true; onSetDoc(m_doc, m_source); } void SnippetsW::onSetDoc(Rcl::Doc doc, std::shared_ptr source) { m_doc = doc; m_source = source; if (!source) return; // Make title out of file name if none yet string titleOrFilename; string utf8fn; m_doc.getmeta(Rcl::Doc::keytt, &titleOrFilename); m_doc.getmeta(Rcl::Doc::keyfn, &utf8fn); if (titleOrFilename.empty()) { titleOrFilename = utf8fn; } QString title("Recoll - Snippets"); if (!titleOrFilename.empty()) { title += QString(" : ") + QString::fromUtf8(titleOrFilename.c_str()); } setWindowTitle(title); vector vpabs; source->getAbstract(m_doc, vpabs, prefs.snipwMaxLength, m_sortingByPage); HighlightData hdata; source->getTerms(hdata); ostringstream oss; oss << "" ""; oss << "\n"; oss << qs2utf8s(prefs.reslistheadertext); oss << "" "" "" ; g_hiliter.set_inputhtml(false); bool nomatch = true; for (const auto& snippet : vpabs) { if (snippet.page == -1) { oss << "" << endl; continue; } list lr; if (!g_hiliter.plaintorich(snippet.snippet, lr, hdata)) { LOGDEB1("No match for [" << snippet.snippet << "]\n"); continue; } nomatch = false; oss << "" << endl; } oss << "
" << snippet.snippet << "
"; if (snippet.page > 0) { oss << "" << "P. " << snippet.page << ""; } oss << "" << lr.front().c_str() << "
" << endl; if (nomatch) { oss.str("\n"); oss << qs2utf8s(tr("

Sorry, no exact match was found within limits. " "Probably the document is very big and the snippets " "generator got lost in a maze...

")); } oss << "\n"; #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) browser->setHtml(QString::fromUtf8(oss.str().c_str())); #else browser->insertHtml(QString::fromUtf8(oss.str().c_str())); browser->moveCursor (QTextCursor::Start); browser->ensureCursorVisible(); #endif raise(); } void SnippetsW::slotEditFind() { searchFM->show(); searchLE->selectAll(); searchLE->setFocus(); } void SnippetsW::slotEditFindNext() { if (!searchFM->isVisible()) slotEditFind(); #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) browser->findText(searchLE->text()); #else browser->find(searchLE->text()); #endif } void SnippetsW::slotEditFindPrevious() { if (!searchFM->isVisible()) slotEditFind(); #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) browser->findText(searchLE->text(), QWEBPAGE::FindBackward); #else browser->find(searchLE->text(), QTextDocument::FindBackward); #endif } void SnippetsW::slotSearchTextChanged(const QString& txt) { #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) browser->findText(txt); #else // Cursor thing is so that we don't go to the next occurrence with // each character, but rather try to extend the current match QTextCursor cursor = browser->textCursor(); cursor.setPosition(cursor.anchor(), QTextCursor::KeepAnchor); browser->setTextCursor(cursor); browser->find(txt, 0); #endif } void SnippetsW::onLinkClicked(const QUrl &url) { string ascurl = qs2u8s(url.toString()).substr(9); LOGDEB("Snippets::onLinkClicked: [" << ascurl << "]\n"); if (ascurl.size() > 3) { int what = ascurl[0]; switch (what) { case 'P': { string::size_type numpos = ascurl.find_first_of("0123456789"); if (numpos == string::npos) return; int page = atoi(ascurl.c_str() + numpos); string::size_type termpos = ascurl.find_first_of("T"); string term; if (termpos != string::npos) term = ascurl.substr(termpos+1); emit startNativeViewer(m_doc, page, QString::fromUtf8(term.c_str())); return; } } } LOGERR("Snippets::onLinkClicked: bad link [" << ascurl << "]\n"); } recoll-1.26.3/qtgui/crontool.ui0000644000175000017500000001573713303776056013362 00000000000000 CronToolW 0 0 508 416 Cron Dialog 0 1 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> true Days of week (* or 0-7, 0 or 7 is Sunday) Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter Hours (* or 0-23) Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter 0 0 Minutes (0-59) Qt::AlignRight|Qt::AlignTrailing|Qt::AlignVCenter <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> true Qt::Horizontal QDialogButtonBox::Cancel buttonBox accepted() CronToolW accept() 248 254 157 274 buttonBox rejected() CronToolW reject() 316 260 286 274 recoll-1.26.3/qtgui/recoll.pro.in0000644000175000017500000001203113552273240013544 00000000000000TEMPLATE = app LANGUAGE = C++ VPATH = @srcdir@ DEFINES += BUILDING_RECOLL @QMAKE_ENABLE_WEBKIT@ QT += webkit @QMAKE_ENABLE_WEBKIT@ DEFINES += USING_WEBKIT @QMAKE_ENABLE_WEBENGINE@ QT += webengine @QMAKE_ENABLE_WEBENGINE@ DEFINES += USING_WEBENGINE QMAKE_CXXFLAGS += -std=c++11 @QMAKE_ENABLE_ZEITGEIST@ QT += dbus @QMAKE_ENABLE_ZEITGEIST@ QMAKE_CXXFLAGS += -DUSE_ZEITGEIST QT += xml greaterThan(QT_MAJOR_VERSION, 4): QT += widgets printsupport @QMAKE_ENABLE_WEBKIT@ greaterThan(QT_MAJOR_VERSION, 4): QT += webkitwidgets @QMAKE_ENABLE_WEBENGINE@ greaterThan(QT_MAJOR_VERSION, 4): QT += webenginewidgets CONFIG += qt warn_on thread release HEADERS += \ advsearch_w.h \ advshist.h \ confgui/confgui.h \ confgui/confguiindex.h \ crontool.h \ firstidx.h \ fragbuts.h \ idxsched.h \ preview_load.h \ preview_plaintorich.h \ preview_w.h \ ptrans_w.h \ rclhelp.h \ rclmain_w.h \ reslist.h \ restable.h \ rtitool.h \ searchclause_w.h \ snippets_w.h \ specialindex.h \ spell_w.h \ ssearch_w.h \ systray.h \ uiprefs_w.h \ viewaction_w.h \ webcache.h \ widgets/editdialog.h \ widgets/listdialog.h \ widgets/qxtconfirmationmessage.h SOURCES += \ advsearch_w.cpp \ advshist.cpp \ confgui/confgui.cpp \ confgui/confguiindex.cpp \ crontool.cpp \ fragbuts.cpp \ guiutils.cpp \ main.cpp \ multisave.cpp \ 
preview_load.cpp \ preview_plaintorich.cpp \ preview_w.cpp \ ptrans_w.cpp \ rclhelp.cpp \ rclm_idx.cpp \ rclm_preview.cpp \ rclm_saveload.cpp \ rclm_view.cpp \ rclm_wins.cpp \ rclmain_w.cpp \ rclzg.cpp \ reslist.cpp \ respopup.cpp \ restable.cpp \ rtitool.cpp \ searchclause_w.cpp \ snippets_w.cpp \ spell_w.cpp \ ssearch_w.cpp \ systray.cpp \ uiprefs_w.cpp \ viewaction_w.cpp \ webcache.cpp \ widgets/qxtconfirmationmessage.cpp \ xmltosd.cpp FORMS = \ advsearch.ui \ crontool.ui \ firstidx.ui \ idxsched.ui \ preview.ui \ ptrans.ui \ rclmain.ui \ restable.ui \ rtitool.ui \ snippets.ui \ specialindex.ui \ spell.ui \ ssearchb.ui \ uiprefs.ui \ viewaction.ui \ webcache.ui \ widgets/editdialog.ui \ widgets/listdialog.ui RESOURCES = recoll.qrc unix { UI_DIR = .ui MOC_DIR = .moc OBJECTS_DIR = .obj LIBS += -L../.libs -lrecoll !macx { # Note: libdir may be substituted with sthing like $(exec_prefix)/lib # at this point and will go as such in the Makefile. Expansion will be # completed at make time. LIBS += -Wl,-rpath=@libdir@/recoll } LIBS += @LIBXAPIAN@ $(LIBXAPIANSTATICEXTRA) \ @LIBICONV@ $(BDYNAMIC) @LIBQZEITGEIST@ -lz INCLUDEPATH += ../common @srcdir@/../common @srcdir@/../index \ @srcdir@/../internfile @srcdir@/../query @srcdir@/../unac \ @srcdir@/../utils @srcdir@/../aspell @srcdir@/../rcldb \ @srcdir@/../qtgui @srcdir@/../xaposix @srcdir@/confgui \ @srcdir@/widgets DEPENDPATH += $$INCLUDEPATH } UNAME = $$system(uname -s) contains( UNAME, [lL]inux ) { LIBS += -ldl -lX11 } contains( UNAME, SunOS ) { LIBS += -ldl } macx { ICON = images/recoll.icns } TRANSLATIONS = \ i18n/recoll_cs.ts \ i18n/recoll_da.ts \ i18n/recoll_de.ts \ i18n/recoll_el.ts \ i18n/recoll_es.ts \ i18n/recoll_fr.ts \ i18n/recoll_it.ts \ i18n/recoll_lt.ts \ i18n/recoll_ru.ts \ i18n/recoll_tr.ts \ i18n/recoll_uk.ts \ i18n/recoll_xx.ts \ i18n/recoll_zh_CN.ts \ i18n/recoll_zh.ts \ unix { isEmpty(PREFIX) { PREFIX = /usr/local } message("Prefix is $$PREFIX") DEFINES += PREFIX=\\\"$$PREFIX\\\" # Installation stuff target.path = "$$PREFIX/bin" imdata.files = @srcdir@/mtpics/*.png imdata.path = $$PREFIX/share/recoll/images trdata.files = @srcdir@/i18n/*.qm trdata.path = $$PREFIX/share/recoll/translations desktop.files += @srcdir@/../desktop/recoll-searchgui.desktop desktop.path = $$PREFIX/share/applications/ icona.files += @srcdir@/../desktop/recoll.png icona.path = $$PREFIX/share/icons/hicolor/48x48/apps/ iconb.files += @srcdir@/../desktop/recoll.png iconb.path = $$PREFIX/share/pixmaps/ appdata.files = @srcdir@/../desktop/recoll.appdata.xml appdata.path = $$PREFIX/share/appdata/ INSTALLS += target imdata trdata desktop icona iconb appdata # The recollinstall script used to do the following to install zh_CN as # zh. Is this still needed? #${INSTALL} -m 0444 ${I18N}/recoll_zh_CN.qm \ # ${datadir}/recoll/translations/recoll_zh.qm || exit 1 } recoll-1.26.3/qtgui/rtitool.h0000644000175000017500000000235513533651561013017 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RTITOOL_W_H_INCLUDED_ #define _RTITOOL_W_H_INCLUDED_ #include "ui_rtitool.h" class QPushButton; class RTIToolW : public QDialog, public Ui::RTIToolW { Q_OBJECT public: RTIToolW(QWidget * parent = 0) : QDialog(parent) { setupUi(this); init(); } public slots: #ifdef _WIN32 void sesclicked(bool) {} void accept() {} private: void init() {} #else void sesclicked(bool); void accept(); private: void init(); #endif }; #endif /* _RTITOOL_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/rtitool.cpp0000644000175000017500000001136213533651561013350 00000000000000#ifndef _WIN32 /* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include "safesysstat.h" #include "safeunistd.h" #include #include #include #include #include "recoll.h" #include "rtitool.h" #include "smallut.h" #include "pathut.h" #include "copyfile.h" #include "readfile.h" #include "execmd.h" using std::string; static const char *rautostartfile = ".config/autostart/recollindex.desktop"; // Just in case we don't find the file in the shared dir, have a // default text ready static const char *desktopfiletext = "[Desktop Entry]\n" "Name=Recoll real time indexer\n" "Comment=Runs in background to extract and index text from modified " "documents\n" "Icon=system-run\n" "Exec=recollindex -w 60 -m\n" "Terminal=false\n" "TerminalOptions=\n" "Type=Application\n" "Categories=Utility;Filesystem;Database;\n" "NoDisplay=true\n" "X-GNOME-Autostart-enabled=true\n" "X-KDE-autostart-after=panel\n" "X-KDE-UniqueApplet=true\n" ; void RTIToolW::init() { connect(this->sesCB, SIGNAL(clicked(bool)), this, SLOT(sesclicked(bool))); string autostartfile = path_cat(path_home(), rautostartfile); if (path_exists(autostartfile)) { sesCB->setChecked(true); } } void RTIToolW::sesclicked(bool on) { nowCB->setEnabled(on); if (!on) nowCB->setChecked(false); } void RTIToolW::accept() { bool exitdial = false; string autostartfile = path_cat(path_home(), rautostartfile); if (sesCB->isChecked()) { // Setting up daemon indexing autostart if (path_exists(autostartfile)) { QString msg = tr("Replacing: ") + QString::fromLocal8Bit(autostartfile.c_str()); QMessageBox::Button rep = QMessageBox::question(this, tr("Replacing file"), msg, QMessageBox::Ok | QMessageBox::Cancel); if (rep != QMessageBox::Ok) { goto out; } } string text; if (theconfig) { string sourcefile = path_cat(theconfig->getDatadir(), "examples"); sourcefile = path_cat(sourcefile, "recollindex.desktop"); if (path_exists(sourcefile)) { file_to_string(sourcefile, text); } } if (text.empty()) text = desktopfiletext; // Try to create .config and autostart anyway. If they exists this will // do nothing. 
An error will be detected when we try to create the file string dir = path_cat(path_home(), ".config"); mkdir(dir.c_str(), 0700); dir = path_cat(dir, "autostart"); mkdir(dir.c_str(), 0700); string reason; if (!stringtofile(text, autostartfile.c_str(), reason)) { QString msg = tr("Can't create: ") + QString::fromLocal8Bit(autostartfile.c_str()); QMessageBox::warning(0, tr("Warning"), msg, QMessageBox::Ok); return; } if (nowCB->isChecked()) { ExecCmd cmd; vector args; int status; args.push_back("-m"); args.push_back("-w"); args.push_back("0"); status = cmd.doexec("recollindex", args, 0, 0); if (status) { QMessageBox::warning(0, tr("Warning"), tr("Could not execute recollindex"), QMessageBox::Ok); goto out; } } exitdial = true; } else { // Turning autostart off if (path_exists(autostartfile)) { QString msg = tr("Deleting: ") + QString::fromLocal8Bit(autostartfile.c_str()); QMessageBox::Button rep = QMessageBox::question(this, tr("Deleting file"), msg, QMessageBox::Ok | QMessageBox::Cancel); if (rep == QMessageBox::Ok) { exitdial = true; unlink(autostartfile.c_str()); if (theconfig) { Pidfile pidfile(theconfig->getPidfile()); pid_t pid; if ((pid = pidfile.open()) != 0) { QMessageBox::Button rep = QMessageBox::question(this, tr("Removing autostart"), tr("Autostart file deleted. Kill current process too ?"), QMessageBox::Yes | QMessageBox::No); if (rep == QMessageBox::Yes) { kill(pid, SIGTERM); } } } } } else { exitdial = true; } } out: if (exitdial) QDialog::accept(); } #endif recoll-1.26.3/qtgui/rtitool.ui0000644000175000017500000000754513303776056013215 00000000000000 RTIToolW 0 0 423 207 Real time indexing automatic start <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> true Start indexing daemon with my desktop session. Qt::Horizontal 28 20 false 1 0 Also start indexing daemon right now. Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok buttonBox accepted() RTIToolW accept() 204 159 204 90 buttonBox rejected() RTIToolW reject() 204 159 204 90 recoll-1.26.3/qtgui/xmltosd.h0000644000175000017500000000602413533651561013012 00000000000000/* Copyright (C) 2014 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef XMLTOSD_H_INCLUDED #define XMLTOSD_H_INCLUDED #include "autoconfig.h" /** Parsing XML from saved queries or advanced search history. * * Here is how the schemas looks like: * * For advanced search * * * * AND|OR * * [] * AND|OR|FN|PH|NE * [base64data] * [base64data] * slack * * * [base64 path] * [base64 path] * * * 162014 <--! datemin --> * 3062014 <--! datemax --> * minsize * maxsize * space-sep mtypes * space-sep mtypes * * * * For Simple search: * * * base64-encoded query text * OR|AND|FN|QL * space-separated lang list * * space-separated suffix list * base64-encoded config path>/EX> * */ #include #include "searchdata.h" // Parsing XML from advanced search history or saved advanced search into // a SearchData structure: std::shared_ptr xmlToSearchData(const string& xml, bool complain = true); // Parsing XML from saved simple search to ssearch parameters struct SSearchDef { SSearchDef() : autophrase(false), mode(0) {} std::vector stemlangs; std::vector autosuffs; std::vector extindexes; std::string text; bool autophrase; int mode; }; bool xmlToSSearch(const string& xml, SSearchDef&); #endif /* XMLTOSD_H_INCLUDED */ recoll-1.26.3/qtgui/preview.ui0000644000175000017500000001017513566424763013201 00000000000000 Preview 0 0 777 432 Form 0 true Tab 1 &Search for: searchTextCMB true QComboBox::NoInsert &Next &Previous false Clear Match &Case Qt::Horizontal 40 20 Qt::Vertical Previous result document :/images/prevpage.pngimages/prevpage.png Next result document :/images/nextpage.pngimages/nextpage.png Qt::Vertical Qt::Horizontal 40 20 Open recoll-1.26.3/qtgui/xmltosd.cpp0000644000175000017500000002502113533651561013343 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include "ssearch_w.h" #include "guiutils.h" #include "log.h" #include "xmltosd.h" #include "smallut.h" #include "recoll.h" using namespace std; using namespace Rcl; class SDHXMLHandler : public QXmlDefaultHandler { public: SDHXMLHandler() : isvalid(false) { resetTemps(); } bool startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &attributes); bool endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName); bool characters(const QString &str) { currentText += str; return true; } // The object we set up std::shared_ptr sd; bool isvalid; private: void resetTemps() { currentText = whatclause = ""; text.clear(); text2.clear(); field.clear(); slack = 0; d = m = y = di.d1 = di.m1 = di.y1 = di.d2 = di.m2 = di.y2 = 0; hasdates = false; exclude = false; } // Temporary data while parsing. QString currentText; QString whatclause; string field, text, text2; int slack; int d, m, y; DateInterval di; bool hasdates; bool exclude; }; bool SDHXMLHandler::startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &attrs) { LOGDEB2("SDHXMLHandler::startElement: name [" << qs2utf8s(qName) << "]\n"); if (qName == "SD") { // Advanced search history entries have no type. So we're good // either if type is absent, or if it's searchdata int idx = attrs.index("type"); if (idx >= 0 && attrs.value(idx).compare("searchdata")) { LOGDEB("XMLTOSD: bad type: " << qs2utf8s(attrs.value(idx)) << endl); return false; } resetTemps(); // A new search descriptor. Allocate data structure sd = std::shared_ptr(new SearchData); if (!sd) { LOGERR("SDHXMLHandler::startElement: out of memory\n"); return false; } } return true; } bool SDHXMLHandler::endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName) { LOGDEB2("SDHXMLHandler::endElement: name [" << qs2utf8s(qName) << "]\n"); if (qName == "CLT") { if (currentText == "OR") { sd->setTp(SCLT_OR); } } else if (qName == "CT") { whatclause = currentText.trimmed(); } else if (qName == "NEG") { exclude = true; } else if (qName == "F") { field = base64_decode(qs2utf8s(currentText.trimmed())); } else if (qName == "T") { text = base64_decode(qs2utf8s(currentText.trimmed())); } else if (qName == "T2") { text2 = base64_decode(qs2utf8s(currentText.trimmed())); } else if (qName == "S") { slack = atoi((const char *)currentText.toUtf8()); } else if (qName == "C") { SearchDataClause *c; if (whatclause == "AND" || whatclause.isEmpty()) { c = new SearchDataClauseSimple(SCLT_AND, text, field); c->setexclude(exclude); } else if (whatclause == "OR") { c = new SearchDataClauseSimple(SCLT_OR, text, field); c->setexclude(exclude); } else if (whatclause == "RG") { c = new SearchDataClauseRange(text, text2, field); c->setexclude(exclude); } else if (whatclause == "EX") { // Compat with old hist. 
We don't generete EX (SCLT_EXCL) anymore // it's replaced with OR + exclude flag c = new SearchDataClauseSimple(SCLT_OR, text, field); c->setexclude(true); } else if (whatclause == "FN") { c = new SearchDataClauseFilename(text); c->setexclude(exclude); } else if (whatclause == "PH") { c = new SearchDataClauseDist(SCLT_PHRASE, text, slack, field); c->setexclude(exclude); } else if (whatclause == "NE") { c = new SearchDataClauseDist(SCLT_NEAR, text, slack, field); c->setexclude(exclude); } else { LOGERR("Bad clause type [" << qs2utf8s(whatclause) << "]\n"); return false; } sd->addClause(c); whatclause = ""; text.clear(); field.clear(); slack = 0; exclude = false; } else if (qName == "D") { d = atoi((const char *)currentText.toUtf8()); } else if (qName == "M") { m = atoi((const char *)currentText.toUtf8()); } else if (qName == "Y") { y = atoi((const char *)currentText.toUtf8()); } else if (qName == "DMI") { di.d1 = d; di.m1 = m; di.y1 = y; hasdates = true; } else if (qName == "DMA") { di.d2 = d; di.m2 = m; di.y2 = y; hasdates = true; } else if (qName == "MIS") { sd->setMinSize(atoll((const char *)currentText.toUtf8())); } else if (qName == "MAS") { sd->setMaxSize(atoll((const char *)currentText.toUtf8())); } else if (qName == "ST") { string types = (const char *)currentText.toUtf8(); vector vt; stringToTokens(types, vt); for (unsigned int i = 0; i < vt.size(); i++) sd->addFiletype(vt[i]); } else if (qName == "IT") { string types(qs2utf8s(currentText)); vector vt; stringToTokens(types, vt); for (unsigned int i = 0; i < vt.size(); i++) sd->remFiletype(vt[i]); } else if (qName == "YD") { string d; base64_decode(qs2utf8s(currentText.trimmed()), d); sd->addClause(new SearchDataClausePath(d)); } else if (qName == "ND") { string d; base64_decode(qs2utf8s(currentText.trimmed()), d); sd->addClause(new SearchDataClausePath(d, true)); } else if (qName == "SD") { // Closing current search descriptor. Finishing touches... if (hasdates) sd->setDateSpan(&di); resetTemps(); isvalid = true; } currentText.clear(); return true; } std::shared_ptr xmlToSearchData(const string& xml, bool verbose) { SDHXMLHandler handler; QXmlSimpleReader reader; reader.setContentHandler(&handler); reader.setErrorHandler(&handler); QXmlInputSource xmlInputSource; xmlInputSource.setData(QString::fromUtf8(xml.c_str())); if (!reader.parse(xmlInputSource) || !handler.isvalid) { if (verbose) { LOGERR("xmlToSearchData: parse failed for [" << xml << "]\n"); } return std::shared_ptr(); } return handler.sd; } // Handler for parsing saved simple search data class SSHXMLHandler : public QXmlDefaultHandler { public: SSHXMLHandler() : isvalid(false) { resetTemps(); } bool startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &attributes); bool endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName); bool characters(const QString &str) { currentText += str; return true; } // The object we set up SSearchDef data; bool isvalid; private: void resetTemps() { currentText = whatclause = ""; text.clear(); } // Temporary data while parsing. QString currentText; QString whatclause; string text; }; bool SSHXMLHandler::startElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName, const QXmlAttributes &attrs) { LOGDEB2("SSHXMLHandler::startElement: name [" << u8s2qs(qName) << "]\n"); if (qName == "SD") { // Simple search saved data has a type='ssearch' attribute. 
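// Illustration only (reconstructed, kept disabled): an example of the
// saved simple-search XML this handler accepts, inferred from the element
// names handled in endElement() below. The T and EX payloads are
// base64-encoded; every value shown here is made up.
#if 0
static const char *example_ssearch_xml =
    "<SD type='ssearch'>\n"
    " <SL>english french</SL>\n"         // stemming language list
    " <T>c29tZSBxdWVyeQ==</T>\n"         // base64("some query")
    " <SM>QL</SM>\n"                     // mode: QL|FN|OR|AND
    " <AS>html pdf</AS>\n"               // auto filename suffixes
    " <AP/>\n"                           // autophrase was active
    " <EX>L3BhdGgvdG8vY29uZmln</EX>\n"   // base64 extra index confdir
    "</SD>";
// Would typically be fed to xmlToSSearch(example_ssearch_xml, def);
#endif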
int idx = attrs.index("type"); if (idx < 0 || attrs.value(idx).compare("ssearch")) { if (idx < 0) { LOGDEB("XMLTOSSS: bad type\n"); } else { LOGDEB("XMLTOSSS: bad type: " << qs2utf8s(attrs.value(idx)) << endl); } return false; } resetTemps(); } return true; } bool SSHXMLHandler::endElement(const QString & /* namespaceURI */, const QString & /* localName */, const QString &qName) { LOGDEB2("SSHXMLHandler::endElement: name [" << u8s2qs(qName) << "]\n"); currentText = currentText.trimmed(); if (qName == "SL") { stringToStrings(qs2utf8s(currentText), data.stemlangs); } else if (qName == "T") { base64_decode(qs2utf8s(currentText), data.text); } else if (qName == "EX") { data.extindexes.push_back(base64_decode(qs2utf8s(currentText))); } else if (qName == "SM") { if (!currentText.compare("QL")) { data.mode = SSearch::SST_LANG; } else if (!currentText.compare("FN")) { data.mode = SSearch::SST_FNM; } else if (!currentText.compare("OR")) { data.mode = SSearch::SST_ANY; } else if (!currentText.compare("AND")) { data.mode = SSearch::SST_ALL; } else { LOGERR("BAD SEARCH MODE: [" << qs2utf8s(currentText) << "]\n"); return false; } } else if (qName == "AS") { stringToStrings(qs2utf8s(currentText), data.autosuffs); } else if (qName == "AP") { data.autophrase = true; } else if (qName == "SD") { // Closing current search descriptor. Finishing touches... resetTemps(); isvalid = true; } currentText.clear(); return true; } bool xmlToSSearch(const string& xml, SSearchDef& data) { SSHXMLHandler handler; QXmlSimpleReader reader; reader.setContentHandler(&handler); reader.setErrorHandler(&handler); QXmlInputSource xmlInputSource; xmlInputSource.setData(QString::fromUtf8(xml.c_str())); if (!reader.parse(xmlInputSource) || !handler.isvalid) { LOGERR("xmlToSSearch: parse failed for [" << xml << "]\n"); return false; } data = handler.data; return true; } recoll-1.26.3/qtgui/uiprefs_w.h0000644000175000017500000000474513533651561013333 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _UIPREFS_W_H_INCLUDED_ #define _UIPREFS_W_H_INCLUDED_ #include #include #include "ui_uiprefs.h" class QDialog; class ViewAction; class RclMain; class UIPrefsDialog : public QDialog, public Ui::uiPrefsDialogBase { Q_OBJECT public: UIPrefsDialog(RclMain* parent) : QDialog((QWidget*)parent), m_mainWindow(parent) { setupUi(this); init(); } ~UIPrefsDialog(){}; QString reslistFontFamily; int reslistFontSize; QString qssFile; QString snipCssFile; QString synFile; virtual void init(); void setFromPrefs(); public slots: virtual void showFontDialog(); virtual void resetReslistFont(); virtual void showStylesheetDialog(); virtual void showSynFileDialog(); virtual void showSnipCssDialog(); virtual void resetStylesheet(); virtual void resetSnipCss(); virtual void showViewAction(); virtual void showViewAction(const QString& mt); virtual void addExtraDbPB_clicked(); virtual void delExtraDbPB_clicked(); virtual void togExtraDbPB_clicked(); virtual void on_showTrayIconCB_clicked(); virtual void actAllExtraDbPB_clicked(); virtual void unacAllExtraDbPB_clicked(); virtual void setStemLang(const QString& lang); virtual void editParaFormat(); virtual void editHeaderText(); virtual void extradDbSelectChanged(); virtual void extraDbEditPtrans(); signals: void uiprefsDone(); protected slots: virtual void accept(); virtual void reject(); private: void setupReslistFontPB(); // Locally stored data (pending ok/cancel) QString paraFormat; QString headerText; ViewAction *m_viewAction; RclMain *m_mainWindow; }; #endif /* _UIPREFS_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/spell.ui0000644000175000017500000000764113303776056012635 00000000000000 SpellBase 0 0 520 465 0 0 100 100 Term Explorer 100 0 Match Case Accents false Qt::NoFocus &Expand Alt+E true Qt::NoFocus &Close Alt+C 7 No db info. 2 2 false false baseWordLE expandPB dismissPB stemLangCMB recoll-1.26.3/qtgui/reslist.cpp0000644000175000017500000011224613533651561013344 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "log.h" #include "smallut.h" #include "recoll.h" #include "guiutils.h" #include "pathut.h" #include "docseq.h" #include "pathut.h" #include "mimehandler.h" #include "plaintorich.h" #include "internfile.h" #include "indexer.h" #include "snippets_w.h" #include "listdialog.h" #include "reslist.h" #include "moc_reslist.cpp" #include "rclhelp.h" #include "appformime.h" #include "respopup.h" #include "reslistpager.h" static const QKeySequence quitKeySeq("Ctrl+q"); static const QKeySequence closeKeySeq("Ctrl+w"); #if defined(USING_WEBKIT) # include # include # include # define QWEBSETTINGS QWebSettings #elif defined(USING_WEBENGINE) // Notes for WebEngine // - All links must begin with http:// for acceptNavigationRequest to be // called. // - The links passed to acceptNav.. have the host part // lowercased -> we change S0 to http://localhost/S0, not http://S0 # include # include # include # define QWEBSETTINGS QWebEngineSettings #endif #ifdef USING_WEBENGINE // This script saves the location details when a mouse button is // clicked. This is for replacing data provided by Webkit QWebElement // on a right-click as QT WebEngine does not have an equivalent service. static const string locdetailscript(R"raw( var locDetails = ''; function saveLoc(ev) { el = ev.target; locDetails = ''; while (el && el.attributes && !el.attributes.getNamedItem("rcldocnum")) { el = el.parentNode; } rcldocnum = el.attributes.getNamedItem("rcldocnum"); if (rcldocnum) { rcldocnumvalue = rcldocnum.value; } else { rcldocnumvalue = ""; } if (el && el.attributes) { locDetails = 'rcldocnum = ' + rcldocnumvalue } } )raw"); bool RclWebPage::acceptNavigationRequest(const QUrl& url, NavigationType tp, bool isMainFrame) { Q_UNUSED(isMainFrame); LOGDEB0("QWebEnginePage::acceptNavigationRequest. Type: " << tp << " isMainFrame " << isMainFrame << std::endl); if (tp == QWebEnginePage::NavigationTypeLinkClicked) { m_reslist->onLinkClicked(url); return false; } else { return true; } } #endif // WEBENGINE // Decide if we set font family and style with a css section in the // html or with qwebsettings setfont... calls. We currently do // it with websettings because this gives an instant redisplay, and // the css has a tendancy to not find some system fonts. Otoh, // SetFontSize() needs a strange offset of 3, not needed with css. 
#undef SETFONT_WITH_HEADSTYLE class QtGuiResListPager : public ResListPager { public: QtGuiResListPager(ResList *p, int ps) : ResListPager(ps), m_reslist(p) {} virtual bool append(const string& data); virtual bool append(const string& data, int idx, const Rcl::Doc& doc); virtual string trans(const string& in); virtual string detailsLink(); virtual const string &parFormat(); virtual const string &dateFormat(); virtual string nextUrl(); virtual string prevUrl(); virtual string headerContent(); virtual void suggest(const vectoruterms, map >& sugg); virtual string absSep() {return (const char *)(prefs.abssep.toUtf8());} virtual string iconUrl(RclConfig *, Rcl::Doc& doc); #ifdef USING_WEBENGINE virtual string linkPrefix() override {return "http://localhost/";} #endif private: ResList *m_reslist; }; #if 0 FILE *fp; void logdata(const char *data) { if (fp == 0) fp = fopen("/tmp/recolltoto.html", "a"); if (fp) fprintf(fp, "%s", data); } #else #define logdata(X) #endif ////////////////////////////// // /// QtGuiResListPager methods: bool QtGuiResListPager::append(const string& data) { LOGDEB2("QtGuiReslistPager::appendString : " << data << "\n"); logdata(data.c_str()); m_reslist->append(QString::fromUtf8(data.c_str())); return true; } bool QtGuiResListPager::append(const string& data, int docnum, const Rcl::Doc&) { LOGDEB2("QtGuiReslistPager::appendDoc: blockCount " << m_reslist->document()->blockCount() << ", " << data << "\n"); logdata(data.c_str()); #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) QString sdoc = QString( "
").arg(docnum); m_reslist->append(sdoc); m_reslist->append(QString::fromUtf8(data.c_str())); m_reslist->append("
"); #else int blkcnt0 = m_reslist->document()->blockCount(); m_reslist->moveCursor(QTextCursor::End, QTextCursor::MoveAnchor); m_reslist->textCursor().insertBlock(); m_reslist->insertHtml(QString::fromUtf8(data.c_str())); m_reslist->moveCursor(QTextCursor::Start, QTextCursor::MoveAnchor); m_reslist->ensureCursorVisible(); int blkcnt1 = m_reslist->document()->blockCount(); for (int block = blkcnt0; block < blkcnt1; block++) { m_reslist->m_pageParaToReldocnums[block] = docnum; } #endif return true; } string QtGuiResListPager::trans(const string& in) { return string((const char*)ResList::tr(in.c_str()).toUtf8()); } string QtGuiResListPager::detailsLink() { string chunk = string(""; chunk += trans("(show query)"); chunk += ""; return chunk; } const string& QtGuiResListPager::parFormat() { return prefs.creslistformat; } const string& QtGuiResListPager::dateFormat() { return prefs.creslistdateformat; } string QtGuiResListPager::nextUrl() { return "n-1"; } string QtGuiResListPager::prevUrl() { return "p-1"; } string QtGuiResListPager::headerContent() { string out; out = "\n"); #if defined(USING_WEBENGINE) out += "\n"; #endif out += qs2utf8s(prefs.reslistheadertext); return out; } void QtGuiResListPager::suggest(const vectoruterms, map >& sugg) { sugg.clear(); bool issimple = m_reslist && m_reslist->m_rclmain && m_reslist->m_rclmain->lastSearchSimple(); for (const auto& uit : uterms) { vector tsuggs; // If the term is in the dictionary, Aspell::suggest won't // list alternatives. In fact we may want to check the // frequencies and propose something anyway if a possible // variation is much more common (as google does) ? if (!rcldb->getSpellingSuggestions(uit, tsuggs)) { continue; } // We should check that the term stems differently from the // base word (else it's not useful to expand the search). Or // is it ? This should depend if stemming is turned on or not if (!tsuggs.empty()) { sugg[uit] = vector(tsuggs.begin(), tsuggs.end()); if (sugg[uit].size() > 5) sugg[uit].resize(5); // Set up the links as a . 
for (auto& it : sugg[uit]) { if (issimple) { it = string("" + it + ""; } } } } } string QtGuiResListPager::iconUrl(RclConfig *config, Rcl::Doc& doc) { if (doc.ipath.empty()) { vector docs; docs.push_back(doc); vector paths; Rcl::docsToPaths(docs, paths); if (!paths.empty()) { string path; LOGDEB2("ResList::iconUrl: source path [" << paths[0] << "]\n"); if (thumbPathForUrl(cstr_fileu + paths[0], 128, path)) { LOGDEB2("ResList::iconUrl: icon path [" << path << "]\n"); return cstr_fileu + path; } else { LOGDEB2("ResList::iconUrl: no icon: path [" << path << "]\n"); } } else { LOGDEB("ResList::iconUrl: docsToPaths failed\n"); } } return ResListPager::iconUrl(config, doc); } /////// /////// End reslistpager methods string PlainToRichQtReslist::startMatch(unsigned int idx) { (void)idx; #if 0 if (m_hdata) { string s1, s2; stringsToString >(m_hdata->groups[idx], s1); stringsToString >( m_hdata->ugroups[m_hdata->grpsugidx[idx]], s2); LOGDEB2("Reslist startmatch: group " << s1 << " user group " << s2 << "\n"); } #endif return string(""); } string PlainToRichQtReslist::endMatch() { return string(""); } static PlainToRichQtReslist g_hiliter; ///////////////////////////////////// ResList::ResList(QWidget* parent, const char* name) : RESLIST_PARENTCLASS(parent) { if (!name) setObjectName("resList"); else setObjectName(name); #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) setPage(new RclWebPage(this)); #ifdef USING_WEBKIT LOGDEB("Reslist: using Webkit\n"); page()->setLinkDelegationPolicy(QWebPage::DelegateAllLinks); // signals and slots connections connect(this, SIGNAL(linkClicked(const QUrl &)), this, SLOT(onLinkClicked(const QUrl &))); #else LOGDEB("Reslist: using Webengine\n"); #endif settings()->setAttribute(QWEBSETTINGS::JavascriptEnabled, true); #else LOGDEB("Reslist: using QTextBrowser\n"); setReadOnly(true); setUndoRedoEnabled(false); setOpenLinks(false); setTabChangesFocus(true); // signals and slots connections connect(this, SIGNAL(anchorClicked(const QUrl &)), this, SLOT(onLinkClicked(const QUrl &))); #endif setFont(); languageChange(); (void)new HelpClient(this); HelpClient::installMap(qs2utf8s(this->objectName()), "RCL.SEARCH.GUI.RESLIST"); #if 0 // See comments in "highlighted connect(this, SIGNAL(highlighted(const QString &)), this, SLOT(highlighted(const QString &))); #endif setContextMenuPolicy(Qt::CustomContextMenu); connect(this, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); m_pager = new QtGuiResListPager(this, prefs.respagesize); m_pager->setHighLighter(&g_hiliter); } ResList::~ResList() { // These have to exist somewhere for translations to work #ifdef __GNUC__ __attribute__((unused)) #endif static const char* strings[] = { QT_TR_NOOP("

No results found
"), QT_TR_NOOP("Documents"), QT_TR_NOOP("out of at least"), QT_TR_NOOP("for"), QT_TR_NOOP("Previous"), QT_TR_NOOP("Next"), QT_TR_NOOP("Unavailable document"), QT_TR_NOOP("Preview"), QT_TR_NOOP("Open"), QT_TR_NOOP("Snippets"), QT_TR_NOOP("(show query)"), QT_TR_NOOP("

Alternate spellings (accents suppressed): "), QT_TR_NOOP("

Alternate spellings: "), }; } void ResList::setRclMain(RclMain *m, bool ismain) { m_rclmain = m; m_ismainres = ismain; if (!m_ismainres) { connect(new QShortcut(closeKeySeq, this), SIGNAL (activated()), this, SLOT (close())); connect(new QShortcut(quitKeySeq, this), SIGNAL (activated()), m_rclmain, SLOT (fileExit())); connect(this, SIGNAL(previewRequested(Rcl::Doc)), m_rclmain, SLOT(startPreview(Rcl::Doc))); connect(this, SIGNAL(docSaveToFileClicked(Rcl::Doc)), m_rclmain, SLOT(saveDocToFile(Rcl::Doc))); connect(this, SIGNAL(editRequested(Rcl::Doc)), m_rclmain, SLOT(startNativeViewer(Rcl::Doc))); } } void ResList::runStoredJS() { runJS(m_js); m_js.clear(); } void ResList::runJS(const QString& js) { #if defined(USING_WEBKIT) page()->mainFrame()->evaluateJavaScript(js); #elif defined(USING_WEBENGINE) page()->runJavaScript(js); #else Q_UNUSED(js); #endif } void ResList::setFont() { #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) # ifndef SETFONT_WITH_HEADSTYLE if (prefs.reslistfontfamily.length()) { // For some reason there is (12-2014) an offset of 3 between what // we request from webkit and what we get. settings()->setFontSize(QWEBSETTINGS::DefaultFontSize, prefs.reslistfontsize + 3); settings()->setFontFamily(QWEBSETTINGS::StandardFont, prefs.reslistfontfamily); } else { settings()->resetFontSize(QWEBSETTINGS::DefaultFontSize); settings()->resetFontFamily(QWEBSETTINGS::StandardFont); } # endif #else if (prefs.reslistfontfamily.length()) { QFont nfont(prefs.reslistfontfamily, prefs.reslistfontsize); QTextBrowser::setFont(nfont); } else { QTextBrowser::setFont(QFont()); } #endif } int ResList::newListId() { static int id; return ++id; } void ResList::setDocSource(std::shared_ptr nsource) { LOGDEB("ResList::setDocSource()\n"); m_source = std::shared_ptr(new DocSource(theconfig, nsource)); if (m_pager) m_pager->setDocSource(m_source); } // A query was executed, or the filtering/sorting parameters changed, // re-read the results. void ResList::readDocSource() { LOGDEB("ResList::readDocSource()\n"); resetView(); if (!m_source) return; m_listId = newListId(); // Reset the page size in case the preference was changed m_pager->setPageSize(prefs.respagesize); m_pager->setDocSource(m_source); resultPageNext(); emit hasResults(m_source->getResCnt()); } void ResList::resetList() { LOGDEB("ResList::resetList()\n"); setDocSource(std::shared_ptr()); resetView(); } void ResList::resetView() { m_curPvDoc = -1; // There should be a progress bar for long searches but there isn't // We really want the old result list to go away, otherwise, for a // slow search, the user will wonder if anything happened. The // following helps making sure that the textedit is really // blank. Else, there are often icons or text left around #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) m_text = ""; setHtml(""); #else m_pageParaToReldocnums.clear(); clear(); QTextBrowser::append("."); clear(); #endif } bool ResList::displayingHistory() { // We want to reset the displayed history if it is currently // shown. 
Using the title value is an ugly hack string htstring = string((const char *)tr("Document history").toUtf8()); if (!m_source || m_source->title().empty()) return false; return m_source->title().find(htstring) == 0; } void ResList::languageChange() { setWindowTitle(tr("Result list")); } #if !defined(USING_WEBKIT) && !defined(USING_WEBENGINE) // Get document number from text block number int ResList::docnumfromparnum(int block) { if (m_pager->pageNumber() < 0) return -1; // Try to find the first number < input and actually in the map // (result blocks can be made of several text blocks) std::map::iterator it; do { it = m_pageParaToReldocnums.find(block); if (it != m_pageParaToReldocnums.end()) return pageFirstDocNum() + it->second; } while (--block >= 0); return -1; } // Get range of paragraph numbers which make up the result for document number pair ResList::parnumfromdocnum(int docnum) { LOGDEB("parnumfromdocnum: docnum " << docnum << "\n"); if (m_pager->pageNumber() < 0) { LOGDEB("parnumfromdocnum: no page return -1,-1\n"); return pair(-1,-1); } int winfirst = pageFirstDocNum(); if (docnum - winfirst < 0) { LOGDEB("parnumfromdocnum: docnum " << docnum << " < winfirst " << winfirst << " return -1,-1\n"); return pair(-1,-1); } docnum -= winfirst; for (std::map::iterator it = m_pageParaToReldocnums.begin(); it != m_pageParaToReldocnums.end(); it++) { if (docnum == it->second) { int first = it->first; int last = first+1; std::map::iterator it1; while ((it1 = m_pageParaToReldocnums.find(last)) != m_pageParaToReldocnums.end() && it1->second == docnum) { last++; } LOGDEB("parnumfromdocnum: return " << first << "," << last << "\n"); return pair(first, last); } } LOGDEB("parnumfromdocnum: not found return -1,-1\n"); return pair(-1,-1); } #endif // TEXTBROWSER // Return doc from current or adjacent result pages. We can get called // for a document not in the current page if the user browses through // results inside a result window (with shift-arrow). This can only // result in a one-page change. bool ResList::getDoc(int docnum, Rcl::Doc &doc) { LOGDEB("ResList::getDoc: docnum " << docnum << " winfirst " << pageFirstDocNum() << "\n"); int winfirst = pageFirstDocNum(); int winlast = m_pager->pageLastDocNum(); if (docnum < 0 || winfirst < 0 || winlast < 0) return false; // Is docnum in current page ? 
Then all Ok if (docnum >= winfirst && docnum <= winlast) { return m_pager->getDoc(docnum, doc); } // Else we accept to page down or up but not further if (docnum < winfirst && docnum >= winfirst - prefs.respagesize) { resultPageBack(); } else if (docnum < winlast + 1 + prefs.respagesize) { resultPageNext(); } winfirst = pageFirstDocNum(); winlast = m_pager->pageLastDocNum(); if (docnum >= winfirst && docnum <= winlast) { return m_pager->getDoc(docnum, doc); } return false; } void ResList::keyPressEvent(QKeyEvent * e) { if ((e->modifiers() & Qt::ShiftModifier)) { if (e->key() == Qt::Key_PageUp) { // Shift-PageUp -> first page of results resultPageFirst(); return; } } else { if (e->key() == Qt::Key_PageUp || e->key() == Qt::Key_Backspace) { resPageUpOrBack(); return; } else if (e->key() == Qt::Key_PageDown || e->key() == Qt::Key_Space) { resPageDownOrNext(); return; } } RESLIST_PARENTCLASS::keyPressEvent(e); } void ResList::mouseReleaseEvent(QMouseEvent *e) { m_lstClckMod = 0; if (e->modifiers() & Qt::ControlModifier) { m_lstClckMod |= Qt::ControlModifier; } if (e->modifiers() & Qt::ShiftModifier) { m_lstClckMod |= Qt::ShiftModifier; } RESLIST_PARENTCLASS::mouseReleaseEvent(e); } void ResList::highlighted(const QString& ) { // This is supposedly called when a link is preactivated (hover or tab // traversal, but is not actually called for tabs. We would have liked to // give some kind of visual feedback for tab traversal } // Page Up/Down: we don't try to check if current paragraph is last or // first. We just page up/down and check if viewport moved. If it did, // fair enough, else we go to next/previous result page. void ResList::resPageUpOrBack() { #if defined(USING_WEBKIT) if (scrollIsAtTop()) { resultPageBack(); } else { page()->mainFrame()->scroll(0, -int(0.9*geometry().height())); } setupArrows(); #elif defined(USING_WEBENGINE) if (scrollIsAtTop()) { resultPageBack(); } else { QString js = "window.scrollBy(" + QString::number(0) + ", " + QString::number(-int(0.9*geometry().height())) + ");"; runJS(js); } setupArrows(); #else int vpos = verticalScrollBar()->value(); verticalScrollBar()->triggerAction(QAbstractSlider::SliderPageStepSub); if (vpos == verticalScrollBar()->value()) resultPageBack(); #endif } void ResList::resPageDownOrNext() { #if defined(USING_WEBKIT) if (scrollIsAtBottom()) { resultPageNext(); } else { page()->mainFrame()->scroll(0, int(0.9*geometry().height())); } setupArrows(); #elif defined(USING_WEBENGINE) if (scrollIsAtBottom()) { resultPageNext(); } else { QString js = "window.scrollBy(" + QString::number(0) + ", " + QString::number(int(0.9*geometry().height())) + ");"; runJS(js); } setupArrows(); #else int vpos = verticalScrollBar()->value(); verticalScrollBar()->triggerAction(QAbstractSlider::SliderPageStepAdd); LOGDEB("ResList::resPageDownOrNext: vpos before " << vpos << ", after " << verticalScrollBar()->value() << "\n"); if (vpos == verticalScrollBar()->value()) resultPageNext(); #endif } void ResList::setupArrows() { emit prevPageAvailable(m_pager->hasPrev() || !scrollIsAtTop()); emit nextPageAvailable(m_pager->hasNext() || !scrollIsAtBottom()); } bool ResList::scrollIsAtBottom() { #if defined(USING_WEBKIT) QWebFrame *frame = page()->mainFrame(); bool ret; if (!frame || frame->scrollBarGeometry(Qt::Vertical).isEmpty()) { ret = true; } else { int max = frame->scrollBarMaximum(Qt::Vertical); int cur = frame->scrollBarValue(Qt::Vertical); ret = (max != 0) && (cur == max); LOGDEB2("Scrollatbottom: cur " << cur << " max " << max << "\n"); } 
LOGDEB2("scrollIsAtBottom: returning " << ret << "\n"); return ret; #elif defined(USING_WEBENGINE) QSize css = page()->contentsSize().toSize(); QSize wss = size(); QPoint sp = page()->scrollPosition().toPoint(); LOGDEB1("atBottom: contents W " << css.width() << " H " << css.height() << " widget W " << wss.width() << " Y " << wss.height() << " scroll X " << sp.x() << " Y " << sp.y() << "\n"); // This seems to work but it's mysterious as points and pixels // should not be the same return wss.height() + sp.y() >= css.height() - 10; #else return false; #endif } bool ResList::scrollIsAtTop() { #if defined(USING_WEBKIT) QWebFrame *frame = page()->mainFrame(); bool ret; if (!frame || frame->scrollBarGeometry(Qt::Vertical).isEmpty()) { ret = true; } else { int cur = frame->scrollBarValue(Qt::Vertical); int min = frame->scrollBarMinimum(Qt::Vertical); LOGDEB("Scrollattop: cur " << cur << " min " << min << "\n"); ret = (cur == min); } LOGDEB2("scrollIsAtTop: returning " << ret << "\n"); return ret; #elif defined(USING_WEBENGINE) return page()->scrollPosition().toPoint().ry() == 0; #else return false; #endif } // Show previous page of results. We just set the current number back // 2 pages and show next page. void ResList::resultPageBack() { if (m_pager->hasPrev()) { m_pager->resultPageBack(); displayPage(); } } // Go to the first page void ResList::resultPageFirst() { // In case the preference was changed m_pager->setPageSize(prefs.respagesize); m_pager->resultPageFirst(); displayPage(); } // Fill up result list window with next screen of hits void ResList::resultPageNext() { if (m_pager->hasNext()) { m_pager->resultPageNext(); displayPage(); } } void ResList::resultPageFor(int docnum) { m_pager->resultPageFor(docnum); displayPage(); } void ResList::append(const QString &text) { LOGDEB2("QtGuiReslistPager::appendQString : " << qs2utf8s(text) << "\n"); #if defined(USING_WEBKIT) || defined(USING_WEBENGINE) m_text += text; #else QTextBrowser::append(text); #endif } void ResList::displayPage() { resetView(); m_pager->displayPage(theconfig); #if defined(USING_WEBENGINE) || defined(USING_WEBKIT) setHtml(m_text); #endif #if defined(USING_WEBENGINE) // Have to delay running this. Alternative would be to set it as // onload on the body element in the html, like upplay does, but // this would need an ennoying reslistpager modification. 
m_js = "elt=document.getElementsByTagName('body')[0];" "elt.addEventListener('contextmenu', saveLoc);"; QTimer::singleShot(200, this, SLOT(runStoredJS())); #endif LOGDEB0("ResList::displayPg: hasNext " << m_pager->hasNext() << " atBot " << scrollIsAtBottom() << " hasPrev " << m_pager->hasPrev() << " at Top " << scrollIsAtTop() << " \n"); setupArrows(); // Possibly color paragraph of current preview if any previewExposed(m_curPvDoc); } // Color paragraph (if any) of currently visible preview void ResList::previewExposed(int docnum) { LOGDEB("ResList::previewExposed: doc " << docnum << "\n"); // Possibly erase old one to white if (m_curPvDoc != -1) { #if defined(USING_WEBKIT) QString sel = QString("div[rcldocnum=\"%1\"]").arg(m_curPvDoc - pageFirstDocNum()); LOGDEB2("Searching for element, selector: [" << qs2utf8s(sel) << "]\n"); QWebElement elt = page()->mainFrame()->findFirstElement(sel); if (!elt.isNull()) { LOGDEB2("Found\n"); elt.removeAttribute("style"); } else { LOGDEB2("Not Found\n"); } #elif defined(USING_WEBENGINE) QString js = QString( "elt=document.getElementById('%1');" "if (elt){elt.removeAttribute('style');}" ).arg(m_curPvDoc - pageFirstDocNum()); runJS(js); #else pair blockrange = parnumfromdocnum(m_curPvDoc); if (blockrange.first != -1) { for (int blockn = blockrange.first; blockn < blockrange.second; blockn++) { QTextBlock block = document()->findBlockByNumber(blockn); QTextCursor cursor(block); QTextBlockFormat format = cursor.blockFormat(); format.clearBackground(); cursor.setBlockFormat(format); } } #endif m_curPvDoc = -1; } // Set background for active preview's doc entry m_curPvDoc = docnum; #if defined(USING_WEBKIT) QString sel = QString("div[rcldocnum=\"%1\"]").arg(docnum - pageFirstDocNum()); LOGDEB2("Searching for element, selector: [" << qs2utf8s(sel) << "]\n"); QWebElement elt = page()->mainFrame()->findFirstElement(sel); if (!elt.isNull()) { LOGDEB2("Found\n"); elt.setAttribute("style", "background: LightBlue;}"); } else { LOGDEB2("Not Found\n"); } #elif defined(USING_WEBENGINE) QString js = QString( "elt=document.getElementById('%1');" "if(elt){elt.setAttribute('style', 'background: LightBlue');}" ).arg(docnum - pageFirstDocNum()); runJS(js); #else pair blockrange = parnumfromdocnum(docnum); // Maybe docnum is -1 or not in this window, if (blockrange.first < 0) return; // Color the new active paragraph QColor color("LightBlue"); for (int blockn = blockrange.first+1; blockn < blockrange.second; blockn++) { QTextBlock block = document()->findBlockByNumber(blockn); QTextCursor cursor(block); QTextBlockFormat format; format.setBackground(QBrush(color)); cursor.mergeBlockFormat(format); setTextCursor(cursor); ensureCursorVisible(); } #endif } // Double click in res list: add selection to simple search void ResList::mouseDoubleClickEvent(QMouseEvent *event) { RESLIST_PARENTCLASS::mouseDoubleClickEvent(event); #if defined(USING_WEBKIT) emit(wordSelect(selectedText())); #elif defined(USING_WEBENGINE) // webengineview does not have such an event function, and // reimplementing event() itself is not useful (tried) as it does // not get mouse clicks. We'd need javascript to do this, but it's // not that useful, so left aside for now. #else if (textCursor().hasSelection()) emit(wordSelect(textCursor().selectedText())); #endif } void ResList::showQueryDetails() { if (!m_source) return; string oq = breakIntoLines(m_source->getDescription(), 100, 50); QString str; QString desc = tr("Result count (est.)") + ": " + str.setNum(m_source->getResCnt()) + "
"; desc += tr("Query details") + ": " + QString::fromUtf8(oq.c_str()); QMessageBox::information(this, tr("Query details"), desc); } void ResList::onLinkClicked(const QUrl &qurl) { // qt5: url.toString() does not accept FullyDecoded, but that's what we // want. e.g. Suggestions links are like Sterm|spelling which we // receive as Sterm%7CSpelling string strurl = url_decode(qs2utf8s(qurl.toString())); LOGDEB1("ResList::onLinkClicked: [" << strurl << "] prefix " << m_pager->linkPrefix() << "\n"); strurl = strurl.substr(m_pager->linkPrefix().size()); int what = strurl[0]; switch (what) { // Open abstract/snippets window case 'A': { if (!m_source) return; int i = atoi(strurl.c_str()+1) - 1; Rcl::Doc doc; if (!getDoc(i, doc)) { LOGERR("ResList::onLinkClicked: can't get doc for " << i << "\n"); return; } emit(showSnippets(doc)); } break; // Show duplicates case 'D': { if (!m_source) return; int i = atoi(strurl.c_str()+1) - 1; Rcl::Doc doc; if (!getDoc(i, doc)) { LOGERR("ResList::onLinkClicked: can't get doc for " << i << "\n"); return; } vector dups; if (m_source->docDups(doc, dups) && m_rclmain) { m_rclmain->newDupsW(doc, dups); } } break; // Open parent folder case 'F': { int i = atoi(strurl.c_str()+1) - 1; Rcl::Doc doc; if (!getDoc(i, doc)) { LOGERR("ResList::onLinkClicked: can't get doc for " << i << "\n"); return; } emit editRequested(ResultPopup::getParent(std::shared_ptr(), doc)); } break; // Show query details case 'h': case 'H': { showQueryDetails(); break; } // Preview and edit case 'P': case 'E': { int i = atoi(strurl.c_str()+1) - 1; Rcl::Doc doc; if (!getDoc(i, doc)) { LOGERR("ResList::onLinkClicked: can't get doc for " << i << "\n"); return; } if (what == 'P') { if (m_ismainres) { emit docPreviewClicked(i, doc, m_lstClckMod); } else { emit previewRequested(doc); } } else { emit editRequested(doc); } } break; // Next/prev page case 'n': resultPageNext(); break; case 'p': resultPageBack(); break; // Run script. Link format Rnn|Script Name case 'R': { int i = atoi(strurl.c_str() + 1) - 1; QString s = qurl.toString(); int bar = s.indexOf("|"); if (bar == -1 || bar >= s.size()-1) break; string cmdname = qs2utf8s(s.right(s.size() - (bar + 1))); DesktopDb ddb(path_cat(theconfig->getConfDir(), "scripts")); DesktopDb::AppDef app; if (ddb.appByName(cmdname, app)) { QAction act(QString::fromUtf8(app.name.c_str()), this); QVariant v(QString::fromUtf8(app.command.c_str())); act.setData(v); m_popDoc = i; menuOpenWith(&act); } } break; // Spelling: replacement suggestion clicked case 'S': { string s; if (!strurl.empty()) s = strurl.substr(1); string::size_type bar = s.find_first_of("|"); if (bar != string::npos && bar < s.size() - 1) { string o = s.substr(0, bar); string n = s.substr(bar+1); LOGDEB2("Emitting wordreplace " << o << " -> " << n << std::endl); emit wordReplace(u8s2qs(o), u8s2qs(n)); } } break; default: LOGERR("ResList::onLinkClicked: bad link [" << strurl.substr(0,20) << "]\n"); break;// ?? 
} } void ResList::onPopupJsDone(const QVariant &jr) { QString qs(jr.toString()); LOGDEB("onPopupJsDone: parameter: " << qs2utf8s(qs) << "\n"); QStringList qsl = qs.split("\n", QString::SkipEmptyParts); for (int i = 0 ; i < qsl.size(); i++) { int eq = qsl[i].indexOf("="); if (eq > 0) { QString nm = qsl[i].left(eq).trimmed(); QString value = qsl[i].right(qsl[i].size() - (eq+1)).trimmed(); if (!nm.compare("rcldocnum")) { m_popDoc = atoi(qs2utf8s(value).c_str()); } else { LOGERR("onPopupJsDone: unknown key: " << qs2utf8s(nm) << "\n"); } } } doCreatePopupMenu(); } void ResList::createPopupMenu(const QPoint& pos) { LOGDEB("ResList::createPopupMenu(" << pos.x() << ", " << pos.y() << ")\n"); m_popDoc = -1; m_popPos = pos; #if defined(USING_WEBKIT) QWebHitTestResult htr = page()->mainFrame()->hitTestContent(pos); if (htr.isNull()) return; QWebElement el = htr.enclosingBlockElement(); while (!el.isNull() && !el.hasAttribute("rcldocnum")) el = el.parent(); if (el.isNull()) return; QString snum = el.attribute("rcldocnum"); m_popDoc = pageFirstDocNum() + snum.toInt(); #elif defined(USING_WEBENGINE) QString js("window.locDetails;"); RclWebPage *mypage = dynamic_cast(page()); mypage->runJavaScript(js, [this](const QVariant &v) {onPopupJsDone(v);}); #else QTextCursor cursor = cursorForPosition(pos); int blocknum = cursor.blockNumber(); LOGDEB("ResList::createPopupMenu(): block " << blocknum << "\n"); m_popDoc = docnumfromparnum(blocknum); #endif doCreatePopupMenu(); } void ResList::doCreatePopupMenu() { if (m_popDoc < 0) return; Rcl::Doc doc; if (!getDoc(m_popDoc, doc)) return; int options = ResultPopup::showSaveOne; if (m_ismainres) options |= ResultPopup::isMain; QMenu *popup = ResultPopup::create(this, options, m_source, doc); popup->popup(mapToGlobal(m_popPos)); } void ResList::menuPreview() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) { if (m_ismainres) { emit docPreviewClicked(m_popDoc, doc, 0); } else { emit previewRequested(doc); } } } void ResList::menuSaveToFile() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit docSaveToFileClicked(doc); } void ResList::menuPreviewParent() { Rcl::Doc doc; if (getDoc(m_popDoc, doc) && m_source) { Rcl::Doc pdoc = ResultPopup::getParent(m_source, doc); if (pdoc.mimetype == "inode/directory") { emit editRequested(pdoc); } else { emit previewRequested(pdoc); } } } void ResList::menuOpenParent() { Rcl::Doc doc; if (getDoc(m_popDoc, doc) && m_source) emit editRequested(ResultPopup::getParent(m_source, doc)); } void ResList::menuShowSnippets() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit showSnippets(doc); } void ResList::menuShowSubDocs() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit showSubDocs(doc); } void ResList::menuEdit() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit editRequested(doc); } void ResList::menuOpenWith(QAction *act) { if (act == 0) return; string cmd = qs2utf8s(act->data().toString()); Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit openWithRequested(doc, cmd); } void ResList::menuCopyFN() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) ResultPopup::copyFN(doc); } void ResList::menuCopyURL() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) ResultPopup::copyURL(doc); } void ResList::menuExpand() { Rcl::Doc doc; if (getDoc(m_popDoc, doc)) emit docExpand(doc); } int ResList::pageFirstDocNum() { return m_pager->pageFirstDocNum(); } recoll-1.26.3/qtgui/rclmain_w.h0000644000175000017500000002110013566424763013273 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as 
published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef RCLMAIN_W_H #define RCLMAIN_W_H #include "autoconfig.h" #include #include #include #include "sortseq.h" #include "preview_w.h" #include "recoll.h" #include "advsearch_w.h" #include "uiprefs_w.h" #include "rcldb.h" #include "searchdata.h" #include "spell_w.h" #include #include "pathut.h" #include "guiutils.h" #include "rclutil.h" class SnippetsW; class IdxSchedW; class ExecCmd; class Preview; class ResTable; class CronToolW; class WinSchedToolW; class RTIToolW; class FragButs; class SpecIdxW; class WebcacheEdit; class ConfIndexW; class RclTrayIcon; #include "ui_rclmain.h" class RclMain : public QMainWindow, public Ui::RclMainBase { Q_OBJECT; public: RclMain(QWidget * parent = 0) : QMainWindow(parent) { setupUi(this); init(); } ~RclMain() {} QString getQueryDescription(); /** This is only called from main() to set an URL to be displayed (using recoll as a doc extracter for embedded docs */ virtual void setUrlToView(const QString& u) { m_urltoview = u; } /** Same usage: actually display the current urltoview */ virtual void viewUrl(); bool lastSearchSimple() { return m_searchIsSimple; } // Takes copies of the args instead of refs. Lazy and safe. void newDupsW(const Rcl::Doc doc, const std::vector dups); enum IndexerState {IXST_UNKNOWN, IXST_NOTRUNNING, IXST_RUNNINGMINE, IXST_RUNNINGNOTMINE}; IndexerState indexerState() const { return m_indexerState; } void enableTrayIcon(bool onoff); public slots: virtual void fileExit(); virtual void periodic100(); virtual void toggleIndexing(); virtual void bumpIndexing(); virtual void rebuildIndex(); virtual void specialIndex(); virtual void startSearch(std::shared_ptr sdata, bool issimple); virtual void previewClosed(Preview *w); virtual void showAdvSearchDialog(); virtual void showSpellDialog(); virtual void showWebcacheDialog(); virtual void showIndexStatistics(); virtual void showFragButs(); virtual void showSpecIdx(); virtual void showAboutDialog(); virtual void showMissingHelpers(); virtual void showActiveTypes(); virtual void startManual(); virtual void startManual(const string&); virtual void showDocHistory(); virtual void showExtIdxDialog(); virtual void setSynEnabled(bool); virtual void showUIPrefs(); virtual void showIndexConfig(); virtual void execIndexConfig(); virtual void showCronTool(); virtual void execCronTool(); virtual void showRTITool(); virtual void execRTITool(); virtual void showIndexSched(); virtual void execIndexSched(); virtual void setUIPrefs(); virtual void enableNextPage(bool); virtual void enablePrevPage(bool); virtual void docExpand(Rcl::Doc); virtual void showSubDocs(Rcl::Doc); virtual void showSnippets(Rcl::Doc); virtual void startPreview(int docnum, Rcl::Doc doc, int keymods); virtual void startPreview(Rcl::Doc); virtual void startNativeViewer(Rcl::Doc, int pagenum = -1, QString term = QString()); virtual void openWith(Rcl::Doc, string); virtual void saveDocToFile(Rcl::Doc); virtual void previewNextInTab(Preview *, int sid, int docnum); virtual 
void previewPrevInTab(Preview *, int sid, int docnum); virtual void previewExposed(Preview *, int sid, int docnum); virtual void resetSearch(); virtual void eraseDocHistory(); virtual void eraseSearchHistory(); virtual void saveLastQuery(); virtual void loadSavedQuery(); virtual void setStemLang(QAction *id); virtual void adjustPrefsMenu(); virtual void catgFilter(int); virtual void catgFilter(QAction *); virtual void onFragmentsChanged(); virtual void initDbOpen(); virtual void toggleFullScreen(); virtual void on_actionSortByDateAsc_toggled(bool on); virtual void on_actionSortByDateDesc_toggled(bool on); virtual void on_actionShowResultsAsTable_toggled(bool on); virtual void onSortDataChanged(DocSeqSortSpec); virtual void resultCount(int); virtual void applyStyleSheet(); virtual void setFilterCtlStyle(int stl); virtual void showTrayMessage(const QString& text); virtual void onSetDescription(QString); private slots: virtual void updateIdxStatus(); virtual void onWebcacheDestroyed(QObject *); signals: void docSourceChanged(std::shared_ptr); void stemLangChanged(const QString& lang); void sortDataChanged(DocSeqSortSpec); void resultsReady(); void searchReset(); protected: virtual void closeEvent(QCloseEvent *); virtual void showEvent(QShowEvent *); private: SnippetsW *m_snippets{0}; Preview *curPreview{0}; AdvSearch *asearchform{0}; UIPrefsDialog *uiprefs{0}; ConfIndexW *indexConfig{0}; IdxSchedW *indexSched{0}; #ifdef _WIN32 WinSchedToolW *cronTool{0}; #else CronToolW *cronTool{0}; #endif RTIToolW *rtiTool{0}; SpellW *spellform{0}; FragButs *fragbuts{0}; SpecIdxW *specidx{0}; QTimer *periodictimer{0}; WebcacheEdit *webcache{0}; ResTable *restable{0}; bool displayingTable{false}; QAction *m_idNoStem{0}; QAction *m_idAllStem{0}; QToolBar *m_toolsTB{0}; QToolBar *m_resTB{0}; QFrame *m_filtFRM{0}; QComboBox *m_filtCMB{0}; QButtonGroup *m_filtBGRP{0}; QMenu *m_filtMN{0}; QFileSystemWatcher m_watcher; vector m_viewers; ExecCmd *m_idxproc{0}; // Indexing process bool m_idxkilled{false}; // Killed my process TempFile *m_idxreasontmp{nullptr}; map m_stemLangToId; vector m_catgbutvec; int m_catgbutvecidx{0}; DocSeqFiltSpec m_filtspec; bool m_sortspecnochange{false}; DocSeqSortSpec m_sortspec; std::shared_ptr m_source; IndexerState m_indexerState{IXST_UNKNOWN}; bool m_queryActive{false}; bool m_firstIndexing{false}; // Last search was started from simple bool m_searchIsSimple{false}; // This is set to the query string by ssearch, and to empty by // advsearch, and used for the Preview window title. If empty, we // use the Xapian Query string. 
QString m_queryDescription; // If set on init, will be displayed either through ext app, or // preview (if no ext app set) QString m_urltoview; RclTrayIcon *m_trayicon{0}; // We sometimes take the indexer lock (e.g.: when editing the webcache) Pidfile *m_pidfile{0}; virtual void init(); virtual void setupResTB(bool combo); virtual void previewPrevOrNextInTab(Preview *, int sid, int docnum, bool next); // flags may contain ExecCmd::EXF_xx values virtual void execViewer(const map& subs, bool enterHistory, const string& execpath, const vector& lcmd, const string& cmd, Rcl::Doc doc, int flags=0); virtual void setStemLang(const QString& lang); virtual void onSortCtlChanged(); virtual void showIndexConfig(bool modal); virtual void showIndexSched(bool modal); virtual void showCronTool(bool modal); virtual void showRTITool(bool modal); virtual void updateIdxForDocs(vector&); virtual void initiateQuery(); virtual bool containerUpToDate(Rcl::Doc& doc); virtual void setFiltSpec(); virtual bool checkIdxPaths(); }; #endif // RCLMAIN_W_H recoll-1.26.3/qtgui/advsearch_w.h0000644000175000017500000000462313533651561013611 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _ADVSEARCH_W_H_INCLUDED_ #define _ADVSEARCH_W_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include "searchclause_w.h" #include "recoll.h" #include #include "searchdata.h" #include "advshist.h" class QDialog; #include "ui_advsearch.h" class AdvSearch : public QDialog, public Ui::AdvSearchBase { Q_OBJECT public: AdvSearch(QDialog* parent = 0) : QDialog(parent) { setupUi(this); init(); } public slots: virtual void delFiltypPB_clicked(); virtual void delAFiltypPB_clicked(); virtual void addFiltypPB_clicked(); virtual void addAFiltypPB_clicked(); virtual void guiListsToIgnTypes(); virtual void filterDatesCB_toggled(bool); virtual void filterSizesCB_toggled(bool); virtual void restrictFtCB_toggled(bool); virtual void restrictCtCB_toggled(bool); virtual void runSearch(); virtual void fromSearch(std::shared_ptr sdata); virtual void browsePB_clicked(); virtual void saveFileTypes(); virtual void delClause(bool updsaved=true); virtual void addClause(bool updsaved=true); virtual void addClause(int, bool updsaved=true); virtual void slotHistoryNext(); virtual void slotHistoryPrev(); signals: void startSearch(std::shared_ptr, bool); void setDescription(QString); private: virtual void init(); std::vector m_clauseWins; QStringList m_ignTypes; bool m_ignByCats; void saveCnf(); void fillFileTypes(); size_t stringToSize(QString); }; #endif /* _ADVSEARCH_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/multisave.cpp0000644000175000017500000001134113533651561013662 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include using namespace std; #include #include #include #include "recoll.h" #include "multisave.h" #include "smallut.h" #include "log.h" #include "pathut.h" #include "internfile.h" const unsigned int maxlen = 200; void multiSave(QWidget *p, vector& docs) { QFileDialog fdialog(p, QWidget::tr("Create or choose save directory")); fdialog.setAcceptMode(QFileDialog::AcceptSave); fdialog.setFileMode(QFileDialog::Directory); fdialog.setOption(QFileDialog::ShowDirsOnly); if (fdialog.exec() == 0) return; QStringList dirl = fdialog.selectedFiles(); if (dirl.size() != 1) { // Can't happen ? QMessageBox::warning(0, "Recoll", QWidget::tr("Choose exactly one directory")); return; } string dir((const char *)dirl[0].toLocal8Bit()); LOGDEB2("multiSave: got dir " << (dir) << "\n" ); /* Save doc to files in target directory. Issues: - It is quite common to have docs in the array with the same file names, e.g. all messages in a folder have the same file name (the folder's). - There is no warranty that the ipath is going to be acceptable as a file name or interesting at all. We don't use it. - We have to make sure the names don't end up too long. If collisions occur, we add a numeric infix (e.g. somefile.23.pdf). 
We never overwrite existing files and don't give the user an option to do it (they can just as well save to an empty directory and use the file manager to accomplish whatever they want). We don't try hard to protect against race-conditions though. The existing file names are read before beginning the save sequence, and collisions appearing after this are handled by aborting. There is a window between existence check and creation because idoctofile does not use O_EXCL */ set existingNames; string reason; if (!readdir(dir, reason, existingNames)) { QMessageBox::warning(0, "Recoll", QWidget::tr("Could not read directory: ") + QString::fromLocal8Bit(reason.c_str())); return; } set toBeCreated; vector filenames; for (vector::iterator it = docs.begin(); it != docs.end(); it++) { string utf8fn; it->getmeta(Rcl::Doc::keyfn, &utf8fn); string suffix = path_suffix(utf8fn); LOGDEB("Multisave: [" << (utf8fn) << "] suff [" << (suffix) << "]\n" ); if (suffix.empty() || suffix.size() > 10) { suffix = theconfig->getSuffixFromMimeType(it->mimetype); LOGDEB("Multisave: suff from config [" << (suffix) << "]\n" ); } string simple = path_basename(utf8fn, string(".") + suffix); LOGDEB("Multisave: simple [" << (simple) << "]\n" ); if (simple.empty()) simple = "rclsave"; if (simple.size() > maxlen) { simple = simple.substr(0, maxlen); } for (int vers = 0; ; vers++) { ostringstream ss; ss << simple; if (vers) ss << "." << vers; if (!suffix.empty()) ss << "." << suffix; string fn = (const char *)QString::fromUtf8(ss.str().c_str()).toLocal8Bit(); if (existingNames.find(fn) == existingNames.end() && toBeCreated.find(fn) == toBeCreated.end()) { toBeCreated.insert(fn); filenames.push_back(fn); break; } } } for (unsigned int i = 0; i != docs.size(); i++) { string fn = path_cat(dir, filenames[i]); if (path_exists(fn)) { QMessageBox::warning(0, "Recoll", QWidget::tr("Unexpected file name collision, " "cancelling.")); return; } // There is still a race condition here, should we care ? TempFile temp;// not used if (!FileInterner::idocToFile(temp, fn, theconfig, docs[i], false)) { QMessageBox::warning(0, "Recoll", QWidget::tr("Cannot extract document: ") + QString::fromLocal8Bit(docs[i].url.c_str()) + " | " + QString::fromLocal8Bit(docs[i].ipath.c_str()) ); } } } recoll-1.26.3/qtgui/idxsched.h0000644000175000017500000000223713533651561013115 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _IDXSCHED_H_INCLUDED_ #define _IDXSCHED_H_INCLUDED_ #include "ui_idxsched.h" #include "rclhelp.h" class IdxSchedW : public QDialog, public Ui::IdxSchedW { Q_OBJECT public: IdxSchedW(QWidget * parent = 0) : QDialog(parent) { setupUi(this); (void)new HelpClient(this); HelpClient::installMap((const char *)this->objectName().toUtf8(), "RCL.INDEXING"); } }; #endif /* _IDXSCHED_H_INCLUDED_ */ recoll-1.26.3/qtgui/ssearch_w.cpp0000644000175000017500000005731513567777133013655 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "log.h" #include "guiutils.h" #include "searchdata.h" #include "ssearch_w.h" #include "textsplit.h" #include "wasatorcl.h" #include "rclhelp.h" #include "xmltosd.h" #include "smallut.h" #include "rcldb.h" #include "recoll.h" using namespace std; // Max search history matches displayed in completer static const int maxhistmatch = 10; // Max db term matches fetched from the index static const int maxdbtermmatch = 20; // Visible rows for the completer listview static const int completervisibleitems = 20; void RclCompleterModel::init() { if (!clockPixmap.load(":/images/clock.png") || !interroPixmap.load(":/images/interro.png")) { LOGERR("SSearch: pixmap loading failed\n"); } } int RclCompleterModel::rowCount(const QModelIndex &) const { LOGDEB1("RclCompleterModel::rowCount: " << currentlist.size() << "\n"); return currentlist.size(); } QVariant RclCompleterModel::data(const QModelIndex &index, int role) const { LOGDEB1("RclCompleterModel::data: row: " << index.row() << " role " << role << "\n"); if (role != Qt::DisplayRole && role != Qt::EditRole && role != Qt::DecorationRole) { return QVariant(); } if (index.row() < 0 || index.row() >= int(currentlist.size())) { return QVariant(); } if (role == Qt::DecorationRole) { LOGDEB1("RclCompleterModel::data: returning pixmap\n"); return index.row() < firstfromindex ? QVariant(clockPixmap) : QVariant(interroPixmap); } else { LOGDEB1("RclCompleterModel::data: return: " << qs2u8s(currentlist[index.row()]) << endl); return QVariant(currentlist[index.row()]); } } void RclCompleterModel::onPartialWord( int tp, const QString& _qtext, const QString& qpartial) { string partial = qs2u8s(qpartial); QString qtext = _qtext.trimmed(); bool onlyspace = qtext.isEmpty(); LOGDEB1("RclCompleterModel::onPartialWord: [" << partial << "] onlyspace "<< onlyspace << "\n"); currentlist.clear(); beginResetModel(); if ((prefs.ssearchNoComplete && !onlyspace) || tp == SSearch::SST_FNM) { // Nocomplete: only look at history by entering space // Filename: no completion for now. We'd need to termatch with // the right prefix? 
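// In both cases the model is left empty after the reset, so the completer
// popup has nothing to show.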
endResetModel(); return; } int histmatch = 0; // Look for matches between the full entry and the search history // (anywhere in the string) for (int i = 0; i < prefs.ssearchHistory.count(); i++) { LOGDEB1("[" << qs2u8s(prefs.ssearchHistory[i]) << "] contains [" << qs2u8s(qtext) << "] ?\n"); // If there is current text, only show a limited count of // matching entries, else show the full history. if (onlyspace || prefs.ssearchHistory[i].contains(qtext, Qt::CaseInsensitive)) { currentlist.push_back(prefs.ssearchHistory[i]); if (!onlyspace && ++histmatch >= maxhistmatch) break; } } firstfromindex = currentlist.size(); // Look for Recoll terms beginning with the partial word if (!qpartial.trimmed().isEmpty()) { Rcl::TermMatchResult rclmatches; if (!rcldb->termMatch(Rcl::Db::ET_WILD, string(), partial + "*", rclmatches, maxdbtermmatch)) { LOGDEB1("RclCompleterModel: termMatch failed: [" << partial + "*" << "]\n"); } else { LOGDEB1("RclCompleterModel: termMatch cnt: " << rclmatches.entries.size() << endl); } for (const auto& entry : rclmatches.entries) { LOGDEB1("RclCompleterModel: match " << entry.term << endl); string data = entry.term; currentlist.push_back(u8s2qs(data)); } } endResetModel(); QTimer::singleShot(0, m_parent, SLOT(onCompleterShown())); } void SSearch::init() { // See enum in .h and keep in order ! searchTypCMB->addItem(tr("Any term")); searchTypCMB->addItem(tr("All terms")); searchTypCMB->addItem(tr("File name")); searchTypCMB->addItem(tr("Query language")); connect(queryText, SIGNAL(returnPressed()), this, SLOT(startSimpleSearch())); connect(queryText, SIGNAL(textChanged(const QString&)), this, SLOT(searchTextChanged(const QString&))); connect(queryText, SIGNAL(textEdited(const QString&)), this, SLOT(searchTextEdited(const QString&))); connect(clearqPB, SIGNAL(clicked()), queryText, SLOT(clear())); connect(searchPB, SIGNAL(clicked()), this, SLOT(startSimpleSearch())); connect(searchTypCMB, SIGNAL(activated(int)), this, SLOT(searchTypeChanged(int))); m_completermodel = new RclCompleterModel(this); m_completer = new QCompleter(m_completermodel, this); m_completer->setCompletionMode(QCompleter::UnfilteredPopupCompletion); m_completer->setFilterMode(Qt::MatchContains); m_completer->setCaseSensitivity(Qt::CaseInsensitive); m_completer->setMaxVisibleItems(completervisibleitems); queryText->setCompleter(m_completer); m_completer->popup()->installEventFilter(this); queryText->installEventFilter(this); connect(this, SIGNAL(partialWord(int, const QString&, const QString&)), m_completermodel, SLOT(onPartialWord(int,const QString&,const QString&))); connect(m_completer, SIGNAL(activated(const QString&)), this, SLOT(onCompletionActivated(const QString&))); connect(historyPB, SIGNAL(clicked()), this, SLOT(onHistoryClicked())); } void SSearch::takeFocus() { LOGDEB1("SSearch: take focus\n"); queryText->setFocus(Qt::ShortcutFocusReason); // If the focus was already in the search entry, the text is not selected. 
// Do it for consistency queryText->selectAll(); } QString SSearch::currentText() { return queryText->text(); } void SSearch::clearAll() { queryText->clear(); } void SSearch::onCompleterShown() { LOGDEB("SSearch::onCompleterShown\n"); QCompleter *completer = queryText->completer(); if (!completer) { LOGDEB0("SSearch::onCompleterShown: no completer\n"); return; } QAbstractItemView *popup = completer->popup(); if (!popup) { LOGDEB0("SSearch::onCompleterShown: no popup\n"); return; } QVariant data = popup->model()->data(popup->currentIndex()); if (!data.isValid()) { LOGDEB0("SSearch::onCompleterShown: data not valid\n"); return; } // Test if the completer text begins with the current input. QString text = data.toString(); if (!text.lastIndexOf(queryText->text()) == 0) { return; } LOGDEB0("SSearch::onCompleterShown:" << " current [" << qs2utf8s(currentText()) << "] saved [" << qs2utf8s(m_savedEditText) << "] popup [" << qs2utf8s(text) << "]\n"); // We append the completion part to the end of the current input, // line, and select it so that the user has a clear indication of // what will happen if they type Enter. int pos = queryText->cursorPosition(); int len = text.size() - currentText().size(); queryText->setText(text); queryText->setCursorPosition(pos); queryText->setSelection(pos, len); } // This is to avoid that if the user types Backspace or Del while we // have inserted / selected the current completion, the lineedit text // goes back to what it was, the completion fires, and it looks like // nothing was typed. So we disable the completion if a bool SSearch::eventFilter(QObject *target, QEvent *event) { Q_UNUSED(target); LOGDEB1("SSearch::eventFilter: event\n"); if (event->type() != QEvent::KeyPress) { return false; } LOGDEB1("SSearch::eventFilter: KeyPress event. Target " << target << " popup "<popup() << " lineedit "<key() == Qt::Key_Backspace) { LOGDEB("SSearch::eventFilter: backspace\n"); queryText->setCompleter(nullptr); queryText->backspace(); return true; } else if (keyEvent->key()==Qt::Key_Delete) { LOGDEB("SSearch::eventFilter: delete\n"); queryText->setCompleter(nullptr); queryText->del(); return true; } else { if (nullptr == queryText->completer()) { queryText->setCompleter(m_completer); } } return false; } // onCompletionActivated() is called when an entry is selected in the // popup, but the edit text is going to be replaced in any case if // there is a current match (we can't prevent it in the signal). If // there is no match (e.g. the user clicked the history button and // selected an entry), the query text will not be set. // So: // - We set the query text to the popup activation value in all cases // - We schedule a callback to set the text to what we want (which is the // concatenation of the user entry before the current partial word and the // pop up data. // - Note that a history click will replace a current partial word, // so that the effect is different if there is a space at the end // of the entry or not: pure concatenation vs replacement of the // last (partial) word. 
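// Worked example of that last point (illustrative, following the
// description above):
//   "foo qux"  + history pick "bar" -> "foo bar"     (partial word replaced)
//   "foo qux " + history pick "bar" -> "foo qux bar" (pure concatenation)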
void SSearch::restoreText() { LOGDEB("SSearch::restoreText: savedEdit: " << qs2u8s(m_savedEditText) << endl); if (!m_savedEditText.trimmed().isEmpty()) { // If the popup text begins with the saved text, just let it replace if (currentText().lastIndexOf(m_savedEditText) != 0) { queryText->setText(m_savedEditText.trimmed() + " " + currentText()); } m_savedEditText = ""; } queryText->setFocus(); if (prefs.ssearchStartOnComplete) { QTimer::singleShot(0, this, SLOT(startSimpleSearch())); } } void SSearch::onCompletionActivated(const QString& text) { LOGDEB("SSearch::onCompletionActivated: queryText [" << qs2u8s(currentText()) << "] text [" << qs2u8s(text) << "]\n"); queryText->setText(text); QTimer::singleShot(0, this, SLOT(restoreText())); } void SSearch::onHistoryClicked() { if (m_completermodel) { queryText->setCompleter(m_completer); m_completermodel->onPartialWord(SST_LANG, "", ""); queryText->completer()->complete(); } } void SSearch::searchTextEdited(const QString& text) { LOGDEB1("SSearch::searchTextEdited: text [" << qs2u8s(text) << "]\n"); QString pword; int cs = getPartialWord(pword); int tp = searchTypCMB->currentIndex(); m_savedEditText = text.left(cs); LOGDEB1("SSearch::searchTextEdited: cs " << cs << "\n"); if (cs >= 0) { emit partialWord(tp, currentText(), pword); } else { emit partialWord(tp, currentText(), " "); } } void SSearch::searchTextChanged(const QString& text) { LOGDEB1("SSearch::searchTextChanged: text [" << qs2u8s(text) << "]\n"); if (text.isEmpty()) { searchPB->setEnabled(false); clearqPB->setEnabled(false); queryText->setFocus(); emit clearSearch(); } else { searchPB->setEnabled(true); clearqPB->setEnabled(true); } } void SSearch::searchTypeChanged(int typ) { LOGDEB1("Search type now " << typ << "\n"); // Adjust context help if (typ == SST_LANG) { HelpClient::installMap((const char *)this->objectName().toUtf8(), "RCL.SEARCH.LANG"); } else { HelpClient::installMap((const char *)this->objectName().toUtf8(), "RCL.SEARCH.GUI.SIMPLE"); } // Also fix tooltips switch (typ) { case SST_LANG: queryText->setToolTip( // Do not modify the text here, test with the // sshelp/qhelp.html file and a browser, then use // sshelp/helphtmltoc.sh to turn to code and insert here tr("<html><head></head><body>") + tr("<p>Query language cheat-sheet. In doubt: click <i>Show Query</i>. ") + tr("You should really look at the manual (F1)</p>") + tr("<table border=\"1\" cellspacing=\"0\">") + tr("<tr><th>What</th><th>Examples</th></tr>") + tr("<tr><td>And</td><td>one two &nbsp; one AND two &nbsp; one &amp;&amp; two</td></tr>") + tr("<tr><td>Or</td><td>one OR two &nbsp; one || two</td></tr>") + tr("<tr><td>Complex boolean. OR has priority, use parentheses ") + tr("where needed</td><td>(one AND two) OR three</td></tr>") + tr("<tr><td>Not</td><td>-term</td></tr>") + tr("<tr><td>Phrase</td><td>\"pride and prejudice\"</td></tr>") + tr("<tr><td>Ordered proximity (slack=1)</td><td>\"pride prejudice\"o1</td></tr>") + tr("<tr><td>Unordered proximity (slack=1)</td><td>\"prejudice pride\"po1</td></tr>") + tr("<tr><td>Unordered prox. (default slack=10)</td><td>\"prejudice pride\"p</td></tr>") + tr("<tr><td>No stem expansion: capitalize</td><td>Floor</td></tr>") + tr("<tr><td>Field-specific</td><td>author:austen &nbsp; title:prejudice</td></tr>") + tr("<tr><td>AND inside field (no order)</td><td>author:jane,austen</td></tr>") + tr("<tr><td>OR inside field</td><td>author:austen/bronte</td></tr>") + tr("<tr><td>Field names</td><td>title/subject/caption &nbsp; author/from<br>recipient/to &nbsp; filename &nbsp; ext</td></tr>") + tr("<tr><td>Directory path filter</td><td>dir:/home/me &nbsp; dir:doc</td></tr>") + tr("<tr><td>MIME type filter</td><td>mime:text/plain mime:video/*</td></tr>") + tr("<tr><td>Date intervals</td><td>date:2018-01-01/2018-12-31 ") + tr("date:2018 &nbsp; date:2018-01-01/P12M</td></tr>") + tr("<tr><td>Size</td><td>size&gt;100k size&lt;1M</td></tr>") + tr("</table></body></html>
") ); break; case SST_FNM: queryText->setToolTip(tr("Enter file name wildcard expression.")); break; case SST_ANY: case SST_ALL: default: queryText->setToolTip(tr("Enter search terms here.")); } } void SSearch::startSimpleSearch() { if (queryText->completer() && queryText->completer()->popup()->isVisible()) { return; } string u8 = qs2u8s(queryText->text()); trimstring(u8); if (u8.length() == 0) return; if (!startSimpleSearch(u8)) return; // Search terms history. // New text at the front and erase any older identical entry QString txt = currentText().trimmed(); if (txt.isEmpty()) return; if (prefs.historysize) { prefs.ssearchHistory.insert(0, txt); prefs.ssearchHistory.removeDuplicates(); } if (prefs.historysize >= 0) { for (int i = (int)prefs.ssearchHistory.count(); i > prefs.historysize; i--) { prefs.ssearchHistory.removeLast(); } } } void SSearch::setPrefs() { } string SSearch::asXML() { return m_xml; } bool SSearch::startSimpleSearch(const string& u8, int maxexp) { LOGDEB("SSearch::startSimpleSearch(" << u8 << ")\n"); string stemlang = prefs.stemlang(); ostringstream xml; xml << "\n"; xml << " " << stemlang << "\n"; xml << " " << base64_encode(u8) << "\n"; SSearchType tp = (SSearchType)searchTypCMB->currentIndex(); Rcl::SearchData *sdata = 0; if (tp == SST_LANG) { xml << " QL\n"; string reason; if (prefs.autoSuffsEnable) { sdata = wasaStringToRcl(theconfig, stemlang, u8, reason, (const char *)prefs.autoSuffs.toUtf8()); if (!prefs.autoSuffs.isEmpty()) { xml << " " << qs2u8s(prefs.autoSuffs) << "\n"; } } else { sdata = wasaStringToRcl(theconfig, stemlang, u8, reason); } if (sdata == 0) { QMessageBox::warning(0, "Recoll", tr("Bad query string") + ": " + QString::fromUtf8(reason.c_str())); return false; } } else { sdata = new Rcl::SearchData(Rcl::SCLT_OR, stemlang); if (sdata == 0) { QMessageBox::warning(0, "Recoll", tr("Out of memory")); return false; } Rcl::SearchDataClause *clp = 0; if (tp == SST_FNM) { xml << " FN\n"; clp = new Rcl::SearchDataClauseFilename(u8); } else { // ANY or ALL, several words. if (tp == SST_ANY) { xml << " OR\n"; clp = new Rcl::SearchDataClauseSimple(Rcl::SCLT_OR, u8); } else { xml << " AND\n"; clp = new Rcl::SearchDataClauseSimple(Rcl::SCLT_AND, u8); } } sdata->addClause(clp); } if (prefs.ssearchAutoPhrase && rcldb) { xml << " \n"; sdata->maybeAddAutoPhrase(*rcldb, prefs.ssearchAutoPhraseThreshPC / 100.0); } if (maxexp != -1) { sdata->setMaxExpand(maxexp); } for (const auto& dbdir : prefs.activeExtraDbs) { xml << " " << base64_encode(dbdir) << ""; } xml << "\n"; m_xml = xml.str(); LOGDEB("SSearch::startSimpleSearch:xml:[" << m_xml << "]\n"); std::shared_ptr rsdata(sdata); emit setDescription(u8s2qs(u8)); emit startSearch(rsdata, true); return true; } bool SSearch::fromXML(const SSearchDef& fxml) { string asString; set cur; set stored; // Retrieve current list of stemlangs. prefs returns a // space-separated list Warn if stored differs from current, // but don't change the latter. 
stringToStrings(prefs.stemlang(), cur); stored = set(fxml.stemlangs.begin(), fxml.stemlangs.end()); stringsToString(fxml.stemlangs, asString); if (cur != stored) { QMessageBox::warning( 0, "Recoll", tr("Stemming languages for stored query: ") + QString::fromUtf8(asString.c_str()) + tr(" differ from current preferences (kept)")); } // Same for autosuffs stringToStrings(qs2u8s(prefs.autoSuffs), cur); stored = set(fxml.autosuffs.begin(), fxml.autosuffs.end()); stringsToString(fxml.autosuffs, asString); if (cur != stored) { QMessageBox::warning( 0, "Recoll", tr("Auto suffixes for stored query: ") + QString::fromUtf8(asString.c_str()) + tr(" differ from current preferences (kept)")); } cur = set(prefs.activeExtraDbs.begin(), prefs.activeExtraDbs.end()); stored = set(fxml.extindexes.begin(), fxml.extindexes.end()); stringsToString(fxml.extindexes, asString); if (cur != stored) { QMessageBox::warning( 0, "Recoll", tr("External indexes for stored query: ") + QString::fromUtf8(asString.c_str()) + tr(" differ from current preferences (kept)")); } if (prefs.ssearchAutoPhrase && !fxml.autophrase) { QMessageBox::warning( 0, "Recoll", tr("Autophrase is set but it was unset for stored query")); } else if (!prefs.ssearchAutoPhrase && fxml.autophrase) { QMessageBox::warning( 0, "Recoll", tr("Autophrase is unset but it was set for stored query")); } setSearchString(QString::fromUtf8(fxml.text.c_str())); // We used to use prefs.ssearchTyp here. Not too sure why? // Minimize user surprise factor ? Anyway it seems cleaner to // restore the saved search type searchTypCMB->setCurrentIndex(fxml.mode); return true; } void SSearch::setSearchString(const QString& txt) { queryText->setText(txt); } bool SSearch::hasSearchString() { return !currentText().isEmpty(); } // Add term to simple search. Term comes out of double-click in // reslist or preview. // It would probably be better to cleanup in preview.ui.h and // reslist.cpp and do the proper html stuff in the latter case // (which is different because its format is explicit richtext // instead of auto as for preview, needed because it's built by // fragments?). static const char* punct = " \t()<>\"'[]{}!^*.,:;\n\r"; void SSearch::addTerm(QString term) { LOGDEB("SSearch::AddTerm: [" << qs2u8s(term) << "]\n"); string t = (const char *)term.toUtf8(); string::size_type pos = t.find_last_not_of(punct); if (pos == string::npos) return; t = t.substr(0, pos+1); pos = t.find_first_not_of(punct); if (pos != string::npos) t = t.substr(pos); if (t.empty()) return; term = QString::fromUtf8(t.c_str()); QString text = currentText(); text += QString::fromLatin1(" ") + term; queryText->setText(text); } void SSearch::onWordReplace(const QString& o, const QString& n) { LOGDEB("SSearch::onWordReplace: o [" << qs2u8s(o) << "] n [" << qs2u8s(n) << "]\n"); QString txt = currentText(); QRegExp exp = QRegExp(QString("\\b") + o + QString("\\b")); exp.setCaseSensitivity(Qt::CaseInsensitive); txt.replace(exp, n); queryText->setText(txt); Qt::KeyboardModifiers mods = QApplication::keyboardModifiers (); if (mods == Qt::NoModifier) startSimpleSearch(); } void SSearch::setAnyTermMode() { searchTypCMB->setCurrentIndex(SST_ANY); } // If text does not end with space, return last (partial) word and its // start offset (>= 0), else return -1 int SSearch::getPartialWord(QString& word) { // Extract last word in text QString txt = currentText(); if (txt.isEmpty()) { return -1; } int lstidx = txt.size()-1; // If the input ends with a space or dquote (phrase input), or // dquote+qualifiers, no partial word.
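// [Illustrative note added by the editor -- not part of the original file.]
// Expected behaviour of getPartialWord(), as implemented below:
//   current text              word     return value
//   "one tw"                  "tw"     4   (offset where the partial word starts)
//   "one two "                -        -1  (trailing space: no partial word)
//   "\"pride prejudice\"p"    -        -1  (double quote followed only by
//                                           qualifiers, no space after it)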
if (txt[lstidx] == ' ') { return -1; } int cs = txt.lastIndexOf("\""); if (cs > 0) { bool dquoteToEndNoSpace{true}; for (int i = cs; i <= lstidx; i++) { if (txt[i] == ' ') { dquoteToEndNoSpace = false; break; } } if (dquoteToEndNoSpace) { return -1; } } cs = txt.lastIndexOf(" "); if (cs < 0) cs = 0; else cs++; word = txt.right(txt.size() - cs); return cs; } recoll-1.26.3/qtgui/webcache.cpp0000644000175000017500000002245713533651561013424 00000000000000/* Copyright (C) 2016 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include "recoll.h" #include "webcache.h" #include "webstore.h" #include "circache.h" #include "conftree.h" #include "rclmain_w.h" #include "smallut.h" using namespace std; class CEnt { public: CEnt(const string& ud, const string& ur, const string& mt) : udi(ud), url(ur), mimetype(mt) { } string udi; string url; string mimetype; }; class WebcacheModelInternal { public: std::shared_ptr cache; vector all; vector disp; }; WebcacheModel::WebcacheModel(QObject *parent) : QAbstractTableModel(parent), m(new WebcacheModelInternal()) { //qDebug() << "WebcacheModel::WebcacheModel()"; reload(); } WebcacheModel::~WebcacheModel() { delete m; } void WebcacheModel::reload() { m->cache = std::shared_ptr(new WebStore(theconfig)); m->all.clear(); m->disp.clear(); if (m->cache) { bool eof; m->cache->cc()->rewind(eof); while (!eof) { string udi, sdic; m->cache->cc()->getCurrent(udi, sdic); ConfSimple dic(sdic); string mime, url; dic.get("mimetype", mime); dic.get("url", url); if (!udi.empty()) { m->all.push_back(CEnt(udi, url, mime)); m->disp.push_back(CEnt(udi, url, mime)); } if (!m->cache->cc()->next(eof)) break; } } emit dataChanged(createIndex(0,0), createIndex(1, m->all.size())); } bool WebcacheModel::deleteIdx(unsigned int idx) { if (idx > m->disp.size() || !m->cache) return false; return m->cache->cc()->erase(m->disp[idx].udi, true); } string WebcacheModel::getURL(unsigned int idx) { if (idx > m->disp.size() || !m->cache) return string(); return m->disp[idx].url; } int WebcacheModel::rowCount(const QModelIndex&) const { //qDebug() << "WebcacheModel::rowCount(): " << m->disp.size(); return int(m->disp.size()); } int WebcacheModel::columnCount(const QModelIndex&) const { //qDebug() << "WebcacheModel::columnCount()"; return 2; } QVariant WebcacheModel::headerData (int col, Qt::Orientation orientation, int role) const { // qDebug() << "WebcacheModel::headerData()"; if (orientation != Qt::Horizontal || role != Qt::DisplayRole) { return QVariant(); } switch (col) { case 0: return QVariant(tr("MIME")); case 1: return QVariant(tr("Url")); default: return QVariant(); } } QVariant WebcacheModel::data(const QModelIndex& index, int role) const { //qDebug() << "WebcacheModel::data()"; Q_UNUSED(index); if (role 
!= Qt::DisplayRole) { return QVariant(); } int row = index.row(); if (row < 0 || row >= int(m->disp.size())) { return QVariant(); } const string& mime = m->disp[row].mimetype; const string& url = m->disp[row].url; switch (index.column()) { case 0: return QVariant(QString::fromUtf8(mime.c_str())); case 1: return QVariant(QString::fromUtf8(url.c_str())); default: return QVariant(); } } void WebcacheModel::setSearchFilter(const QString& _txt) { SimpleRegexp re(qs2utf8s(_txt), SimpleRegexp::SRE_NOSUB); m->disp.clear(); for (unsigned int i = 0; i < m->all.size(); i++) { if (re(m->all[i].url)) { m->disp.push_back(m->all[i]); } else { //qDebug() << "match failed. exp" << _txt << "data" << // m->all[i].url.c_str(); } } emit dataChanged(createIndex(0,0), createIndex(1, m->all.size())); } static const int ROWHEIGHTPAD = 2; static const char *cwnm = "/Recoll/prefs/webcachecolw"; static const char *wwnm = "/Recoll/prefs/webcachew"; static const char *whnm = "/Recoll/prefs/webcacheh"; static const QKeySequence closeKS(Qt::ControlModifier+Qt::Key_W); WebcacheEdit::WebcacheEdit(RclMain *parent) : QDialog(parent), m_recoll(parent), m_modified(false) { //qDebug() << "WebcacheEdit::WebcacheEdit()"; setupUi(this); m_model = new WebcacheModel(this); tableview->setModel(m_model); tableview->setSelectionBehavior(QAbstractItemView::SelectRows); tableview->setSelectionMode(QAbstractItemView::ExtendedSelection); tableview->setContextMenuPolicy(Qt::CustomContextMenu); tableview->setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff); QSettings settings; QStringList wl; wl = settings.value(cwnm).toStringList(); QHeaderView *header = tableview->horizontalHeader(); if (header) { if (int(wl.size()) == header->count()) { for (int i = 0; i < header->count(); i++) { header->resizeSection(i, wl[i].toInt()); } } } connect(header, SIGNAL(sectionResized(int,int,int)), this, SLOT(saveColState())); header = tableview->verticalHeader(); if (header) { header->setDefaultSectionSize(QApplication::fontMetrics().height() + ROWHEIGHTPAD); } int width = settings.value(wwnm, 0).toInt(); int height = settings.value(whnm, 0).toInt(); if (width && height) { resize(QSize(width, height)); } connect(searchLE, SIGNAL(textEdited(const QString&)), m_model, SLOT(setSearchFilter(const QString&))); connect(new QShortcut(closeKS, this), SIGNAL (activated()), this, SLOT (close())); connect(tableview, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); } void WebcacheEdit::createPopupMenu(const QPoint& pos) { int selsz = tableview->selectionModel()->selectedRows().size(); if (selsz <= 0) { return; } QMenu *popup = new QMenu(this); if (selsz == 1) { popup->addAction(tr("Copy URL"), this, SLOT(copyURL())); } if (m_recoll) { RclMain::IndexerState ixstate = m_recoll->indexerState(); switch (ixstate) { case RclMain::IXST_UNKNOWN: QMessageBox::warning(0, "Recoll", tr("Unknown indexer state. " "Can't edit webcache file.")); break; case RclMain::IXST_RUNNINGMINE: case RclMain::IXST_RUNNINGNOTMINE: QMessageBox::warning(0, "Recoll", tr("Indexer is running. 
" "Can't edit webcache file.")); break; case RclMain::IXST_NOTRUNNING: popup->addAction(tr("Delete selection"), this, SLOT(deleteSelected())); break; } } popup->popup(tableview->mapToGlobal(pos)); } void WebcacheEdit::deleteSelected() { QModelIndexList selection = tableview->selectionModel()->selectedRows(); for (int i = 0; i < selection.size(); i++) { if (m_model->deleteIdx(selection[i].row())) { m_modified = true; } } m_model->reload(); m_model->setSearchFilter(searchLE->text()); tableview->clearSelection(); } void WebcacheEdit::copyURL() { QModelIndexList selection = tableview->selectionModel()->selectedRows(); if (selection.size() != 1) return; string url = m_model->getURL(selection[0].row()); if (!url.empty()) { url = url_encode(url, 7); QApplication::clipboard()->setText(url.c_str(), QClipboard::Selection); QApplication::clipboard()->setText(url.c_str(), QClipboard::Clipboard); } } void WebcacheEdit::saveColState() { //qDebug() << "void WebcacheEdit::saveColState()"; QHeaderView *header = tableview->horizontalHeader(); QStringList newwidths; for (int vi = 0; vi < header->count(); vi++) { int li = header->logicalIndex(vi); newwidths.push_back(lltodecstr(header->sectionSize(li)).c_str()); } QSettings settings; settings.setValue(cwnm, newwidths); } void WebcacheEdit::closeEvent(QCloseEvent *event) { if (m_modified) { QMessageBox::information(0, "Recoll", tr("Webcache was modified, you will need " "to run the indexer after closing this " "window.")); } if (!isFullScreen()) { QSettings settings; settings.setValue(wwnm, width()); settings.setValue(whnm, height()); } event->accept(); } recoll-1.26.3/qtgui/preview_plaintorich.h0000644000175000017500000000457013566424763015411 00000000000000/* Copyright (C) 2015 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PREVIEW_PLAINTORICH_H_INCLUDED_ #define _PREVIEW_PLAINTORICH_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include #include #include #include "plaintorich.h" /** Preview text highlighter */ class PlainToRichQtPreview : public PlainToRich { public: PlainToRichQtPreview(); void clear(); bool haveAnchors(); virtual std::string header(); virtual std::string startMatch(unsigned int grpidx); virtual std::string endMatch(); virtual std::string termAnchorName(int i) const; virtual std::string startChunk(); int nextAnchorNum(int grpidx); int prevAnchorNum(int grpidx); QString curAnchorName() const; private: int m_curanchor; int m_lastanchor; // Lists of anchor numbers (match locations) for the term (groups) // in the query (the map key is and index into HighlightData.groups). 
std::map > m_groupanchors; std::map m_groupcuranchors; bool m_spacehack{false}; }; /* A thread to convert to rich text (mark search terms) */ class ToRichThread : public QThread { Q_OBJECT; public: ToRichThread(const std::string &i, const HighlightData& hd, std::shared_ptr ptr, QStringList& qrichlst, // Output QObject *parent = 0); virtual void run(); private: const std::string &m_input; const HighlightData &m_hdata; std::shared_ptr m_ptr; QStringList &m_output; }; #endif /* _PREVIEW_PLAINTORICH_H_INCLUDED_ */ recoll-1.26.3/qtgui/crontool.h0000644000175000017500000000243513533651561013161 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CRONTOOL_W_H_INCLUDED_ #define _CRONTOOL_W_H_INCLUDED_ #include "ui_crontool.h" class QPushButton; class CronToolW : public QDialog, public Ui::CronToolW { Q_OBJECT public: CronToolW(QWidget * parent = 0) : QDialog(parent), enableButton(0), disableButton(0) { setupUi(this); init(); } QPushButton *enableButton; QPushButton *disableButton; private slots: void enableCron(); void disableCron(); private: void init(); void changeCron(bool enable); }; #endif /* _CRONTOOL_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/crontool.cpp0000644000175000017500000000601513533651561013512 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include #include "recoll.h" #include "crontool.h" #include "ecrontab.h" #include "smallut.h" static string marker; static string idstring(const string& confdir) { // Quote conf dir, there may be spaces and whatelse in there return string("RECOLL_CONFDIR=") + escapeShell(confdir); } void CronToolW::init() { marker = "RCLCRON_RCLINDEX="; enableButton = new QPushButton(tr("Enable")); disableButton = new QPushButton(tr("Disable")); buttonBox->addButton(enableButton, QDialogButtonBox::ActionRole); buttonBox->addButton(disableButton, QDialogButtonBox::ActionRole); connect(enableButton, SIGNAL(clicked()), this, SLOT(enableCron())); connect(disableButton, SIGNAL(clicked()), this, SLOT(disableCron())); // Try to read the current values if (!theconfig) return; if (checkCrontabUnmanaged(marker, "recollindex")) { QMessageBox::warning(0, "Recoll", tr("It seems that manually edited entries exist for recollindex, cannot edit crontab")); QTimer::singleShot(0, this, SLOT(close())); } string id = idstring(theconfig->getConfDir()); vector sched; if (getCrontabSched(marker, id, sched)) { minsLE->setText(QString::fromUtf8(sched[0].c_str())); hoursLE->setText(QString::fromUtf8(sched[1].c_str())); daysLE->setText(QString::fromUtf8(sched[4].c_str())); } } void CronToolW::enableCron() { changeCron(true); } void CronToolW::disableCron() { changeCron(false); } void CronToolW::changeCron(bool enable) { if (!theconfig) return; string id = idstring(theconfig->getConfDir()); string cmd("recollindex"); string reason; if (!enable) { editCrontab(marker, id, "", "", reason); accept(); } else { string mins(qs2utf8s(minsLE->text().remove(QChar(' ')))); string hours(qs2utf8s(hoursLE->text().remove(QChar(' ')))); string days(qs2utf8s(daysLE->text().remove(QChar(' ')))); string sched = mins + " " + hours + " * * " + days; if (editCrontab(marker, id, sched, cmd, reason)) { accept(); } else { QMessageBox::warning(0, "Recoll", tr("Error installing cron entry. Bad syntax in fields ?")); } } } recoll-1.26.3/qtgui/spell_w.h0000644000175000017500000000362413533651561012770 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _SPELL_W_H_INCLUDED_ #define _SPELL_W_H_INCLUDED_ #include #include #include #include "ui_spell.h" class SpellW : public QWidget, public Ui::SpellBase { Q_OBJECT; public: SpellW(QWidget* parent = 0) : QWidget(parent), m_prevmode(TYPECMB_NONE) { setupUi(this); init(); } virtual bool eventFilter(QObject *target, QEvent *event ); enum comboboxchoice {TYPECMB_NONE, TYPECMB_WILD, TYPECMB_REG, TYPECMB_STEM, TYPECMB_SPELL, TYPECMB_STATS, TYPECMB_FAILED}; public slots: virtual void doExpand(); virtual void wordChanged(const QString&); virtual void textDoubleClicked(); virtual void textDoubleClicked(int, int); virtual void setMode(comboboxchoice); private slots: virtual void onModeChanged(int); signals: void wordSelect(QString); private: // combobox index to expansion type std::vector m_c2t; comboboxchoice m_prevmode; void init(); void copy(); void showStats(); void showFailed(); int cmbIdx(comboboxchoice mode); void setModeCommon(comboboxchoice mode); }; #endif /* _SPELL_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/images/0000755000175000017500000000000013570165410012464 500000000000000 [binary image data for the PNG icon files and recoll.icns under qtgui/images/ omitted]
recoll-1.26.3/qtgui/restable.cpp #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "recoll.h" #include "docseq.h" #include "log.h" #include "guiutils.h" #include "reslistpager.h" #include "reslist.h" #include "rclconfig.h" #include "plaintorich.h" #include "indexer.h" #include "respopup.h" #include "rclmain_w.h" #include "multisave.h" #include "appformime.h" #include "transcode.h" static const QKeySequence quitKeySeq("Ctrl+q"); static const QKeySequence closeKeySeq("Ctrl+w"); // Compensate for the default and somewhat bizarre vertical placement // of text in cells static const int ROWHEIGHTPAD = 2; static const int TEXTINCELLVTRANS = -4; static PlainToRichQtReslist g_hiliter; ////////////////////////////////////////////////////////////////////////// // Restable "pager".
We use it to print details for a document in the // detail area /// class ResTablePager : public ResListPager { public: ResTablePager(ResTable *p) : ResListPager(1), m_parent(p) {} virtual bool append(const string& data, int idx, const Rcl::Doc& doc); virtual string trans(const string& in); virtual const string &parFormat(); virtual string absSep() {return (const char *)(prefs.abssep.toUtf8());} virtual string iconUrl(RclConfig *, Rcl::Doc& doc); private: ResTable *m_parent; }; bool ResTablePager::append(const string& data, int, const Rcl::Doc&) { m_parent->m_detail->moveCursor(QTextCursor::End, QTextCursor::MoveAnchor); m_parent->m_detail->textCursor().insertBlock(); m_parent->m_detail->insertHtml(u8s2qs(data)); // LOGDEB("RESTABLEPAGER::APPEND: data : " << data << std::endl); // m_parent->m_detail->setHtml(u8s2qs(data)); return true; } string ResTablePager::trans(const string& in) { return string((const char*)ResList::tr(in.c_str()).toUtf8()); } const string& ResTablePager::parFormat() { return prefs.creslistformat; } string ResTablePager::iconUrl(RclConfig *config, Rcl::Doc& doc) { if (doc.ipath.empty()) { vector docs; docs.push_back(doc); vector paths; Rcl::docsToPaths(docs, paths); if (!paths.empty()) { string path; if (thumbPathForUrl(cstr_fileu + paths[0], 128, path)) { return cstr_fileu + path; } } } return ResListPager::iconUrl(config, doc); } ///////////////////////////////////////////////////////////////////////////// /// Detail text area methods ResTableDetailArea::ResTableDetailArea(ResTable* parent) : QTextBrowser(parent), m_table(parent) { setContextMenuPolicy(Qt::CustomContextMenu); connect(this, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); } void ResTableDetailArea::createPopupMenu(const QPoint& pos) { if (m_table && m_table->m_model && m_table->m_detaildocnum >= 0) { int opts = m_table->m_ismainres ? ResultPopup::showExpand : 0; opts |= ResultPopup::showSaveOne; QMenu *popup = ResultPopup::create(m_table, opts, m_table->m_model->getDocSource(), m_table->m_detaildoc); popup->popup(mapToGlobal(pos)); } } ////////////////////////////////////////////////////////////////////////////// //// Data model methods //// // Routines used to extract named data from an Rcl::Doc. The basic one // just uses the meta map. Others (ie: the date ones) need to do a // little processing static string gengetter(const string& fld, const Rcl::Doc& doc) { const auto it = doc.meta.find(fld); if (it == doc.meta.end()) { return string(); } return it->second; } static string sizegetter(const string& fld, const Rcl::Doc& doc) { const auto it = doc.meta.find(fld); if (it == doc.meta.end()) { return string(); } int64_t size = atoll(it->second.c_str()); return displayableBytes(size) + " (" + it->second + ")"; } static string dategetter(const string&, const Rcl::Doc& doc) { string sdate; if (!doc.dmtime.empty() || !doc.fmtime.empty()) { char datebuf[100]; datebuf[0] = 0; time_t mtime = doc.dmtime.empty() ? atoll(doc.fmtime.c_str()) : atoll(doc.dmtime.c_str()); struct tm *tm = localtime(&mtime); strftime(datebuf, 99, "%Y-%m-%d", tm); transcode(datebuf, sdate, RclConfig::getLocaleCharset(), "UTF-8"); } return sdate; } static string datetimegetter(const string&, const Rcl::Doc& doc) { char datebuf[100]; datebuf[0] = 0; if (!doc.dmtime.empty() || !doc.fmtime.empty()) { time_t mtime = doc.dmtime.empty() ? 
atoll(doc.fmtime.c_str()) : atoll(doc.dmtime.c_str()); struct tm *tm = localtime(&mtime); strftime(datebuf, 99, prefs.creslistdateformat.c_str(), tm); } return datebuf; } // Static map to translate from internal column names to displayable ones map RecollModel::o_displayableFields; FieldGetter *RecollModel::chooseGetter(const string& field) { if (!stringlowercmp("date", field)) return dategetter; else if (!stringlowercmp("datetime", field)) return datetimegetter; else if (!stringlowercmp("bytes", field.substr(1))) return sizegetter; else return gengetter; } string RecollModel::baseField(const string& field) { if (!stringlowercmp("date", field) || !stringlowercmp("datetime", field)) return "mtime"; else return field; } RecollModel::RecollModel(const QStringList fields, ResTable *tb, QObject *parent) : QAbstractTableModel(parent), m_table(tb), m_ignoreSort(false) { // Initialize the translated map for column headers o_displayableFields["abstract"] = tr("Abstract"); o_displayableFields["author"] = tr("Author"); o_displayableFields["dbytes"] = tr("Document size"); o_displayableFields["dmtime"] = tr("Document date"); o_displayableFields["fbytes"] = tr("File size"); o_displayableFields["filename"] = tr("File name"); o_displayableFields["fmtime"] = tr("File date"); o_displayableFields["ipath"] = tr("Ipath"); o_displayableFields["keywords"] = tr("Keywords"); o_displayableFields["mtype"] = tr("MIME type"); o_displayableFields["origcharset"] = tr("Original character set"); o_displayableFields["relevancyrating"] = tr("Relevancy rating"); o_displayableFields["title"] = tr("Title"); o_displayableFields["url"] = tr("URL"); o_displayableFields["mtime"] = tr("Mtime"); o_displayableFields["date"] = tr("Date"); o_displayableFields["datetime"] = tr("Date and time"); // Add dynamic "stored" fields to the full column list. This // could be protected to be done only once, but it's no real // problem if (theconfig) { const set& stored = theconfig->getStoredFields(); for (set::const_iterator it = stored.begin(); it != stored.end(); it++) { if (o_displayableFields.find(*it) == o_displayableFields.end()) { o_displayableFields[*it] = QString::fromUtf8(it->c_str()); } } } // Construct the actual list of column names for (QStringList::const_iterator it = fields.begin(); it != fields.end(); it++) { m_fields.push_back((const char *)(it->toUtf8())); m_getters.push_back(chooseGetter(m_fields.back())); } g_hiliter.set_inputhtml(false); } int RecollModel::rowCount(const QModelIndex&) const { LOGDEB2("RecollModel::rowCount\n"); if (!m_source) return 0; return m_source->getResCnt(); } int RecollModel::columnCount(const QModelIndex&) const { LOGDEB2("RecollModel::columnCount\n"); return m_fields.size(); } void RecollModel::readDocSource() { LOGDEB("RecollModel::readDocSource()\n"); beginResetModel(); endResetModel(); } void RecollModel::setDocSource(std::shared_ptr nsource) { LOGDEB("RecollModel::setDocSource\n"); if (!nsource) { m_source = std::shared_ptr(); } else { // We used to allocate a new DocSource here instead of sharing // the input, but I can't see why. 
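// [Illustrative sketch added by the editor -- not part of the original file.
//  embeddedgetter is a hypothetical name, shown only to illustrate the
//  pattern; it is not wired into chooseGetter().]
// Columns of the result table are driven by FieldGetter functions selected in
// chooseGetter() above: a getter maps (internal field name, Rcl::Doc) to the
// string displayed in the cell, exactly like dategetter()/sizegetter(). For
// example a getter flagging embedded documents could look like this:
#if 0
static string embeddedgetter(const string&, const Rcl::Doc& doc)
{
    // Documents extracted from inside another file have a non-empty ipath.
    return doc.ipath.empty() ? "no" : "yes";
}
#endif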
m_source = nsource; m_hdata.clear(); } } void RecollModel::deleteColumn(int col) { if (col > 0 && col < int(m_fields.size())) { vector::iterator it = m_fields.begin(); it += col; m_fields.erase(it); vector::iterator it1 = m_getters.begin(); it1 += col; m_getters.erase(it1); readDocSource(); } } void RecollModel::addColumn(int col, const string& field) { LOGDEB("AddColumn: col " << col << " fld [" << field << "]\n"); if (col >= 0 && col < int(m_fields.size())) { col++; vector::iterator it = m_fields.begin(); vector::iterator it1 = m_getters.begin(); if (col) { it += col; it1 += col; } m_fields.insert(it, field); m_getters.insert(it1, chooseGetter(field)); readDocSource(); } } QVariant RecollModel::headerData(int idx, Qt::Orientation orientation, int role) const { LOGDEB2("RecollModel::headerData: idx " << idx << " orientation " << (orientation == Qt::Vertical ? "vertical":"horizontal") << " role " << role << "\n"); if (orientation == Qt::Vertical && role == Qt::DisplayRole) { return idx; } if (orientation == Qt::Horizontal && role == Qt::DisplayRole && idx < int(m_fields.size())) { map::const_iterator it = o_displayableFields.find(m_fields[idx]); if (it == o_displayableFields.end()) return QString::fromUtf8(m_fields[idx].c_str()); else return it->second; } return QVariant(); } QVariant RecollModel::data(const QModelIndex& index, int role) const { LOGDEB2("RecollModel::data: row " << index.row() << " col " << index.column() << " role " << role << "\n"); if (!m_source || role != Qt::DisplayRole || !index.isValid() || index.column() >= int(m_fields.size())) { return QVariant(); } Rcl::Doc doc; if (!m_source->getDoc(index.row(), doc)) { return QVariant(); } string colname = m_fields[index.column()]; string data = m_getters[index.column()](colname, doc); #ifndef _WIN32 // Special case url, because it may not be utf-8. URL-encode in this case. // Not on windows, where we always read the paths as Unicode. if (!colname.compare("url")) { int ecnt; string data1; if (!transcode(data, data1, "UTF-8", "UTF-8", &ecnt) || ecnt > 0) { data = url_encode(data); } } #endif list lr; g_hiliter.plaintorich(data, lr, m_hdata); return QString::fromUtf8(lr.front().c_str()); } void RecollModel::saveAsCSV(FILE *fp) { if (!m_source) return; int cols = columnCount(); int rows = rowCount(); vector tokens; for (int col = 0; col < cols; col++) { QString qs = headerData(col, Qt::Horizontal,Qt::DisplayRole).toString(); tokens.push_back((const char *)qs.toUtf8()); } string csv; stringsToCSV(tokens, csv); fprintf(fp, "%s\n", csv.c_str()); tokens.clear(); for (int row = 0; row < rows; row++) { Rcl::Doc doc; if (!m_source->getDoc(row, doc)) { continue; } for (int col = 0; col < cols; col++) { tokens.push_back(m_getters[col](m_fields[col], doc)); } stringsToCSV(tokens, csv); fprintf(fp, "%s\n", csv.c_str()); tokens.clear(); } } // This gets called when the column headers are clicked void RecollModel::sort(int column, Qt::SortOrder order) { if (m_ignoreSort) { return; } LOGDEB("RecollModel::sort(" << column << ", " << order << ")\n"); DocSeqSortSpec spec; if (column >= 0 && column < int(m_fields.size())) { spec.field = m_fields[column]; if (!stringlowercmp("relevancyrating", spec.field) && order != Qt::AscendingOrder) { QMessageBox::warning(0, "Recoll", tr("Can't sort by inverse relevance")); QTimer::singleShot(0, m_table, SLOT(resetSort())); return; } if (!stringlowercmp("date", spec.field) || !stringlowercmp("datetime", spec.field)) spec.field = "mtime"; spec.desc = order == Qt::AscendingOrder ? 
false : true; } emit sortDataChanged(spec); } /////////////////////////// // ResTable panel methods // We use a custom delegate to display the cells because the base // tableview's can't handle rich text to highlight the match terms class ResTableDelegate: public QStyledItemDelegate { public: ResTableDelegate(QObject *parent) : QStyledItemDelegate(parent) {} // We might want to optimize by passing the data to the base // method if the text does not contain any term matches. Would // need a modif to plaintorich to return the match count (easy), // and a way to pass an indicator from data(), a bit more // difficult. Anyway, the display seems fast enough as is. void paint(QPainter *painter, const QStyleOptionViewItem &option, const QModelIndex &index) const { QStyleOptionViewItem opt = option; initStyleOption(&opt, index); QVariant value = index.data(Qt::DisplayRole); if (value.isValid() && !value.isNull()) { QString text = value.toString(); if (!text.isEmpty()) { QTextDocument document; painter->save(); if (opt.state & QStyle::State_Selected) { painter->fillRect(opt.rect, opt.palette.highlight()); // Set the foreground color. The pen approach does // not seem to work, probably it's reset by the // textdocument. Couldn't use // setdefaultstylesheet() either. the div thing is // an ugly hack. Works for now #if 0 QPen pen = painter->pen(); pen.setBrush(opt.palette.brush(QPalette::HighlightedText)); painter->setPen(pen); #else text = QString::fromUtf8("
") + text + QString::fromUtf8("
"); #endif } painter->setClipRect(option.rect); QPoint where = option.rect.topLeft(); where.ry() += TEXTINCELLVTRANS; painter->translate(where); document.setHtml(text); document.drawContents(painter); painter->restore(); return; } } QStyledItemDelegate::paint(painter, option, index); } }; void ResTable::init() { if (!(m_model = new RecollModel(prefs.restableFields, this))) return; tableView->setModel(m_model); tableView->setMouseTracking(true); tableView->setSelectionBehavior(QAbstractItemView::SelectRows); tableView->setItemDelegate(new ResTableDelegate(this)); tableView->setContextMenuPolicy(Qt::CustomContextMenu); new QShortcut(QKeySequence("Ctrl+o"), this, SLOT(menuEdit())); new QShortcut(QKeySequence("Ctrl+Shift+o"), this, SLOT(menuEditAndQuit())); new QShortcut(QKeySequence("Ctrl+d"), this, SLOT(menuPreview())); new QShortcut(QKeySequence("Ctrl+e"), this, SLOT(menuShowSnippets())); connect(tableView, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createPopupMenu(const QPoint&))); QHeaderView *header = tableView->horizontalHeader(); if (header) { if (int(prefs.restableColWidths.size()) == header->count()) { for (int i = 0; i < header->count(); i++) { header->resizeSection(i, prefs.restableColWidths[i]); } } header->setSortIndicatorShown(true); header->setSortIndicator(-1, Qt::AscendingOrder); header->setContextMenuPolicy(Qt::CustomContextMenu); header->setStretchLastSection(1); connect(header, SIGNAL(sectionResized(int,int,int)), this, SLOT(saveColState())); connect(header, SIGNAL(customContextMenuRequested(const QPoint&)), this, SLOT(createHeaderPopupMenu(const QPoint&))); } #if (QT_VERSION >= QT_VERSION_CHECK(5, 0, 0)) header->setSectionsMovable(true); #else header->setMovable(true); #endif header = tableView->verticalHeader(); if (header) { header->setDefaultSectionSize(QApplication::fontMetrics().height() + ROWHEIGHTPAD); } QShortcut *sc = new QShortcut(QKeySequence(Qt::Key_Escape), this); connect(sc, SIGNAL(activated()), tableView->selectionModel(), SLOT(clear())); connect(tableView->selectionModel(), SIGNAL(currentChanged(const QModelIndex&, const QModelIndex &)), this, SLOT(onTableView_currentChanged(const QModelIndex&))); connect(tableView, SIGNAL(doubleClicked(const QModelIndex&)), this, SLOT(onDoubleClick(const QModelIndex&))); m_pager = new ResTablePager(this); m_pager->setHighLighter(&g_hiliter); QSettings settings; QVariant saved = settings.value("resTableSplitterSizes"); if (saved != QVariant()) { splitter->restoreState(saved.toByteArray()); } else { QList sizes; sizes << 355 << 125; splitter->setSizes(sizes); } delete textBrowser; m_detail = new ResTableDetailArea(this); m_detail->setReadOnly(true); m_detail->setUndoRedoEnabled(false); m_detail->setOpenLinks(false); // signals and slots connections connect(m_detail, SIGNAL(anchorClicked(const QUrl &)), this, SLOT(linkWasClicked(const QUrl &))); splitter->addWidget(m_detail); splitter->setOrientation(Qt::Vertical); installEventFilter(this); } bool ResTable::eventFilter(QObject* obj, QEvent* event) { if (event->type() == QEvent::KeyPress) { QKeyEvent* key = static_cast(event); if ((key->key() == Qt::Key_Enter) || (key->key() == Qt::Key_Return)) { menuEdit(); return true; } else { return QObject::eventFilter(obj, event); } } else { return QObject::eventFilter(obj, event); } return false; } void ResTable::setRclMain(RclMain *m, bool ismain) { m_rclmain = m; m_ismainres = ismain; // We allow single selection only in the main table because this // may have a mix of file-level docs and subdocs and multisave // 
only works for subdocs if (m_ismainres) tableView->setSelectionMode(QAbstractItemView::SingleSelection); else tableView->setSelectionMode(QAbstractItemView::ExtendedSelection); if (!m_ismainres) { // don't set this shortcut when we are a child of main, would // be duplicate/ambiguous connect(new QShortcut(quitKeySeq, this), SIGNAL(activated()), m_rclmain, SLOT (fileExit())); } new QShortcut(closeKeySeq, this, SLOT (close())); connect(this, SIGNAL(previewRequested(Rcl::Doc)), m_rclmain, SLOT(startPreview(Rcl::Doc))); connect(this, SIGNAL(editRequested(Rcl::Doc)), m_rclmain, SLOT(startNativeViewer(Rcl::Doc))); connect(this, SIGNAL(docSaveToFileClicked(Rcl::Doc)), m_rclmain, SLOT(saveDocToFile(Rcl::Doc))); connect(this, SIGNAL(showSnippets(Rcl::Doc)), m_rclmain, SLOT(showSnippets(Rcl::Doc))); } int ResTable::getDetailDocNumOrTopRow() { if (m_detaildocnum >= 0) return m_detaildocnum; QModelIndex modelIndex = tableView->indexAt(QPoint(0, 0)); return modelIndex.row(); } void ResTable::makeRowVisible(int row) { LOGDEB("ResTable::showRow(" << row << ")\n"); QModelIndex modelIndex = m_model->index(row, 0); tableView->scrollTo(modelIndex, QAbstractItemView::PositionAtTop); tableView->selectionModel()->clear(); m_detail->clear(); m_detaildocnum = -1; } // This is called by rclmain_w prior to exiting void ResTable::saveColState() { if (!m_ismainres) return; QSettings settings; settings.setValue("resTableSplitterSizes", splitter->saveState()); QHeaderView *header = tableView->horizontalHeader(); const vector& vf = m_model->getFields(); if (!header) { LOGERR("ResTable::saveColState: no table header ??\n"); return; } // Remember the current column order. Walk in visual order and // create new list QStringList newfields; vector newwidths; for (int vi = 0; vi < header->count(); vi++) { int li = header->logicalIndex(vi); if (li < 0 || li >= int(vf.size())) { LOGERR("saveColState: logical index beyond list size!\n"); continue; } newfields.push_back(QString::fromUtf8(vf[li].c_str())); newwidths.push_back(header->sectionSize(li)); } prefs.restableFields = newfields; prefs.restableColWidths = newwidths; } void ResTable::onTableView_currentChanged(const QModelIndex& index) { LOGDEB2("ResTable::onTableView_currentChanged(" << index.row() << ", " << index.column() << ")\n"); if (!m_model || !m_model->getDocSource()) return; Rcl::Doc doc; if (m_model->getDocSource()->getDoc(index.row(), doc)) { m_detail->clear(); m_detaildocnum = index.row(); m_detaildoc = doc; m_pager->displayDoc(theconfig, index.row(), m_detaildoc, m_model->m_hdata); emit(detailDocChanged(doc, m_model->getDocSource())); } else { m_detaildocnum = -1; } } void ResTable::on_tableView_entered(const QModelIndex& index) { LOGDEB2("ResTable::on_tableView_entered(" << index.row() << ", " << index.column() << ")\n"); if (!tableView->selectionModel()->hasSelection()) onTableView_currentChanged(index); } void ResTable::takeFocus() { // LOGDEB("resTable: take focus\n"); tableView->setFocus(Qt::ShortcutFocusReason); } void ResTable::setDocSource(std::shared_ptr nsource) { LOGDEB("ResTable::setDocSource\n"); if (m_model) m_model->setDocSource(nsource); if (m_pager) m_pager->setDocSource(nsource, 0); if (m_detail) m_detail->clear(); m_detaildocnum = -1; } void ResTable::resetSource() { LOGDEB("ResTable::resetSource\n"); setDocSource(std::shared_ptr()); readDocSource(); } void ResTable::saveAsCSV() { LOGDEB("ResTable::saveAsCSV\n"); if (!m_model) return; QString s = QFileDialog::getSaveFileName(this, //parent tr("Save table to CSV file"), 
QString::fromLocal8Bit(path_home().c_str()) ); if (s.isEmpty()) return; const char *tofile = s.toLocal8Bit(); FILE *fp = fopen(tofile, "w"); if (fp == 0) { QMessageBox::warning(0, "Recoll", tr("Can't open/create file: ") + s); return; } m_model->saveAsCSV(fp); fclose(fp); } // This is called when the sort order is changed from another widget void ResTable::onSortDataChanged(DocSeqSortSpec spec) { LOGDEB("ResTable::onSortDataChanged: [" << spec.field << "] desc " << spec.desc << "\n"); QHeaderView *header = tableView->horizontalHeader(); if (!header || !m_model) return; // Check if the specified field actually matches one of columns // and set indicator m_model->setIgnoreSort(true); bool matched = false; const vector fields = m_model->getFields(); for (unsigned int i = 0; i < fields.size(); i++) { if (!spec.field.compare(m_model->baseField(fields[i]))) { header->setSortIndicator(i, spec.desc ? Qt::DescendingOrder : Qt::AscendingOrder); matched = true; } } if (!matched) header->setSortIndicator(-1, Qt::AscendingOrder); m_model->setIgnoreSort(false); } void ResTable::resetSort() { LOGDEB("ResTable::resetSort()\n"); QHeaderView *header = tableView->horizontalHeader(); if (header) header->setSortIndicator(-1, Qt::AscendingOrder); // the model's sort slot is not called by qt in this case (qt 4.7) if (m_model) m_model->sort(-1, Qt::AscendingOrder); } void ResTable::readDocSource(bool resetPos) { LOGDEB("ResTable::readDocSource(" << resetPos << ")\n"); if (resetPos) tableView->verticalScrollBar()->setSliderPosition(0); if (m_model->m_source) { m_model->m_source->getTerms(m_model->m_hdata); } else { m_model->m_hdata.clear(); } m_model->readDocSource(); m_detail->clear(); m_detaildocnum = -1; } void ResTable::linkWasClicked(const QUrl &url) { if (m_detaildocnum < 0) { return; } QString s = url.toString(); const char *ascurl = s.toUtf8(); LOGDEB("ResTable::linkWasClicked: [" << ascurl << "]\n"); int i = atoi(ascurl+1) -1; int what = ascurl[0]; switch (what) { // Open abstract/snippets window case 'A': if (m_detaildocnum >= 0) emit(showSnippets(m_detaildoc)); break; case 'D': { vector dups; if (m_detaildocnum >= 0 && m_rclmain && m_model->getDocSource()->docDups(m_detaildoc, dups)) { m_rclmain->newDupsW(m_detaildoc, dups); } } break; // Open parent folder case 'F': { emit editRequested(ResultPopup::getParent(std::shared_ptr(), m_detaildoc)); } break; case 'P': case 'E': { if (what == 'P') { if (m_ismainres) { emit docPreviewClicked(i, m_detaildoc, 0); } else { emit previewRequested(m_detaildoc); } } else { emit editRequested(m_detaildoc); } } break; // Run script. Link format Rnn|Script Name case 'R': { int bar = s.indexOf("|"); if (bar == -1 || bar >= s.size()-1) break; string cmdname = qs2utf8s(s.right(s.size() - (bar + 1))); DesktopDb ddb(path_cat(theconfig->getConfDir(), "scripts")); DesktopDb::AppDef app; if (ddb.appByName(cmdname, app)) { QAction act(QString::fromUtf8(app.name.c_str()), this); QVariant v(QString::fromUtf8(app.command.c_str())); act.setData(v); menuOpenWith(&act); } } break; default: LOGERR("ResTable::linkWasClicked: bad link [" << ascurl << "]\n"); break;// ?? 
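// [Descriptive note added by the editor -- not part of the original file.]
// Summary of the single-letter link scheme decoded above (the characters
// after the letter are a 1-based result number, except for 'R'):
//   'A'  open the snippets/abstract window      'D'  show duplicate documents
//   'F'  open the parent folder                 'P'  preview the document
//   'E'  open with the native viewer            'R'  run a script; link format
//        is Rnn|Script Name, looked up in the "scripts" desktop database
//        under the configuration directory.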
} } void ResTable::onDoubleClick(const QModelIndex& index) { if (!m_model || !m_model->getDocSource()) return; Rcl::Doc doc; if (m_model->getDocSource()->getDoc(index.row(), doc)) { if (m_detaildocnum != index.row()) { m_detail->clear(); m_detaildocnum = index.row(); m_pager->displayDoc(theconfig, index.row(), m_detaildoc, m_model->m_hdata); } m_detaildoc = doc; if (m_detaildocnum >= 0) emit editRequested(m_detaildoc); } else { m_detaildocnum = -1; } } void ResTable::createPopupMenu(const QPoint& pos) { LOGDEB("ResTable::createPopupMenu: m_detaildocnum " << m_detaildocnum << "\n"); if (m_detaildocnum >= 0 && m_model) { int opts = m_ismainres? ResultPopup::isMain : 0; int selsz = tableView->selectionModel()->selectedRows().size(); if (selsz == 1) { opts |= ResultPopup::showSaveOne; } else if (selsz > 1 && !m_ismainres) { // We don't show save multiple for the main list because not all // docs are necessary subdocs and multisave only works with those. opts |= ResultPopup::showSaveSel; } QMenu *popup = ResultPopup::create(this, opts, m_model->getDocSource(), m_detaildoc); popup->popup(mapToGlobal(pos)); } } void ResTable::menuPreview() { if (m_detaildocnum >= 0) { if (m_ismainres) { emit docPreviewClicked(m_detaildocnum, m_detaildoc, 0); } else { emit previewRequested(m_detaildoc); } } } void ResTable::menuSaveToFile() { if (m_detaildocnum >= 0) emit docSaveToFileClicked(m_detaildoc); } void ResTable::menuSaveSelection() { if (m_model == 0 || !m_model->getDocSource()) return; QModelIndexList indexl = tableView->selectionModel()->selectedRows(); vector v; for (int i = 0; i < indexl.size(); i++) { Rcl::Doc doc; if (m_model->getDocSource()->getDoc(indexl[i].row(), doc)) v.push_back(doc); } if (v.size() == 0) { return; } else if (v.size() == 1) { emit docSaveToFileClicked(v[0]); } else { multiSave(this, v); } } void ResTable::menuPreviewParent() { if (m_detaildocnum >= 0 && m_model && m_model->getDocSource()) { Rcl::Doc pdoc = ResultPopup::getParent(m_model->getDocSource(), m_detaildoc); if (pdoc.mimetype == "inode/directory") { emit editRequested(pdoc); } else { emit previewRequested(pdoc); } } } void ResTable::menuOpenParent() { if (m_detaildocnum >= 0 && m_model && m_model->getDocSource()) emit editRequested( ResultPopup::getParent(m_model->getDocSource(), m_detaildoc)); } void ResTable::menuEdit() { if (m_detaildocnum >= 0) emit editRequested(m_detaildoc); } void ResTable::menuEditAndQuit() { if (m_detaildocnum >= 0) { emit editRequested(m_detaildoc); m_rclmain->fileExit(); } } void ResTable::menuOpenWith(QAction *act) { if (act == 0) return; string cmd = qs2utf8s(act->data().toString()); if (m_detaildocnum >= 0) emit openWithRequested(m_detaildoc, cmd); } void ResTable::menuCopyFN() { if (m_detaildocnum >= 0) ResultPopup::copyFN(m_detaildoc); } void ResTable::menuCopyURL() { if (m_detaildocnum >= 0) ResultPopup::copyURL(m_detaildoc); } void ResTable::menuExpand() { if (m_detaildocnum >= 0) emit docExpand(m_detaildoc); } void ResTable::menuShowSnippets() { if (m_detaildocnum >= 0) emit showSnippets(m_detaildoc); } void ResTable::menuShowSubDocs() { if (m_detaildocnum >= 0) emit showSubDocs(m_detaildoc); } void ResTable::createHeaderPopupMenu(const QPoint& pos) { LOGDEB("ResTable::createHeaderPopupMenu(" << pos.x() << ", " << pos.y() << ")\n"); QHeaderView *header = tableView->horizontalHeader(); if (!header || !m_model) return; m_popcolumn = header->logicalIndexAt(pos); if (m_popcolumn < 0) return; const map& allfields = m_model->getAllFields(); const vector& fields = m_model->getFields(); 
QMenu *popup = new QMenu(this); popup->addAction(tr("&Reset sort"), this, SLOT(resetSort())); popup->addSeparator(); popup->addAction(tr("&Save as CSV"), this, SLOT(saveAsCSV())); popup->addSeparator(); popup->addAction(tr("&Delete column"), this, SLOT(deleteColumn())); popup->addSeparator(); QAction *act; for (map::const_iterator it = allfields.begin(); it != allfields.end(); it++) { if (std::find(fields.begin(), fields.end(), it->first) != fields.end()) continue; act = new QAction(tr("Add \"%1\" column").arg(it->second), popup); act->setData(QString::fromUtf8(it->first.c_str())); connect(act, SIGNAL(triggered(bool)), this , SLOT(addColumn())); popup->addAction(act); } popup->popup(mapToGlobal(pos)); } void ResTable::deleteColumn() { if (m_model) m_model->deleteColumn(m_popcolumn); } void ResTable::addColumn() { if (!m_model) return; QAction *action = (QAction *)sender(); LOGDEB("addColumn: text " << qs2utf8s(action->text()) << ", data " << qs2utf8s(action->data().toString()) << "\n"); m_model->addColumn(m_popcolumn, qs2utf8s(action->data().toString())); } recoll-1.26.3/qtgui/searchclause_w.h0000644000175000017500000000276013533651561014313 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef SEARCHCLAUSE_H #define SEARCHCLAUSE_H // A class for entry of a search clause: type (OR/AND/etc.), distance // for PHRASE or NEAR, and text #include #include #include "searchdata.h" class QVBoxLayout; class QHBoxLayout; class QComboBox; class QSpinBox; class QLineEdit; class SearchClauseW : public QWidget { Q_OBJECT public: SearchClauseW(QWidget* parent = 0); ~SearchClauseW(); Rcl::SearchDataClause *getClause(); void setFromClause(Rcl::SearchDataClauseSimple *cl); void clear(); QComboBox* sTpCMB; QComboBox* fldCMB; QSpinBox* proxSlackSB; QLineEdit* wordsLE; public slots: virtual void tpChange(int); protected slots: virtual void languageChange(); }; #endif // SEARCHCLAUSE_H recoll-1.26.3/qtgui/rclhelp.h0000644000175000017500000000232013533651561012744 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef RCLHELP_H #define RCLHELP_H #include #include #include using std::map; using std::string; class HelpClient : public QObject { Q_OBJECT public: HelpClient(QObject *parent, const char *name = 0); // Install mapping from widget name to manual section static void installMap(string wname, string section); protected: bool eventFilter(QObject *obj, QEvent *event); static map helpmap; }; #endif // RCLHELP_H recoll-1.26.3/qtgui/guiutils.cpp0000644000175000017500000005510213567750017013524 00000000000000/* Copyright (C) 2005-2019 Jean-Francois Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include "recoll.h" #include "log.h" #include "smallut.h" #include "guiutils.h" #include "pathut.h" #include "base64.h" #include "advshist.h" #include #include RclDynConf *g_dynconf; AdvSearchHist *g_advshistory; RclConfig *theconfig; // The table should not be necessary, but I found no css way to get // qt 4.6 qtextedit to clear the margins after the float img without // introducing blank space. const char *PrefsPack::dfltResListFormat = "\n" "\n" "\n" "\n" "
%L  %S   %T
\n" "%M %D    %U %i
\n" "%A %K
\n" ; // The global preferences structure PrefsPack prefs; // Using the same macro to read/write a setting. insurance against typing // mistakes #define SETTING_RW(var, nm, tp, def) \ if (writing) { \ settings.setValue(nm , var); \ } else { \ var = settings.value(nm, def).to##tp \ (); \ } /** * Saving and restoring user preferences. These are stored in a global * structure during program execution and saved to disk using the QT * settings mechanism */ /* Remember if settings were actually read (to avoid writing them if * we stopped before reading them (else some kinds of errors would reset * the qt/recoll settings to defaults) */ static bool havereadsettings; void rwSettings(bool writing) { LOGDEB1("rwSettings: write " << writing << "\n"); if (writing && !havereadsettings) return; QSettings settings("Recoll.org", "recoll"); SETTING_RW(prefs.mainwidth, "/Recoll/geometry/width", Int, 0); SETTING_RW(prefs.mainheight, "/Recoll/geometry/height", Int, 0); SETTING_RW(prefs.showmode, "/Recoll/geometry/showmode", Int, 0); SETTING_RW(prefs.pvwidth, "/Recoll/geometry/pvwidth", Int, 0); SETTING_RW(prefs.pvheight, "/Recoll/geometry/pvheight", Int, 0); SETTING_RW(prefs.toolArea, "/Recoll/geometry/toolArea", Int, 0); SETTING_RW(prefs.resArea, "/Recoll/geometry/resArea", Int, 0); SETTING_RW(prefs.ssearchTypSav, "/Recoll/prefs/ssearchTypSav", Bool, 0); SETTING_RW(prefs.ssearchTyp, "/Recoll/prefs/simpleSearchTyp", Int, 3); SETTING_RW(prefs.startWithAdvSearchOpen, "/Recoll/prefs/startWithAdvSearchOpen", Bool, false); SETTING_RW(prefs.previewHtml, "/Recoll/prefs/previewHtml", Bool, true); SETTING_RW(prefs.previewActiveLinks, "/Recoll/prefs/previewActiveLinks", Bool, false); QString advSearchClauses; const int maxclauselistsize = 20; if (writing) { // Limit clause list size to non-absurd size if (prefs.advSearchClauses.size() > maxclauselistsize) { prefs.advSearchClauses.resize(maxclauselistsize); } for (auto clause : prefs.advSearchClauses) { char buf[20]; sprintf(buf, "%d ", clause); advSearchClauses += QString::fromUtf8(buf); } } QString ascdflt; SETTING_RW(advSearchClauses,"/Recoll/prefs/adv/clauseList", String, ascdflt); if (!writing) { vector clauses; stringToStrings(qs2utf8s(advSearchClauses), clauses); // There was a long-lurking bug where the clause list was // growing to absurd sizes. The prefs.advSearchClauses clear() // call was missing (ok with the now false initial assumption // that the prefs were read once per session), which was // causing a doubling of the size each time the prefs were // read. Should be fixed, but in any case, limit the clause // list to a non-absurd size. 
if (clauses.size() > maxclauselistsize) { clauses.resize(maxclauselistsize); } prefs.advSearchClauses.clear(); prefs.advSearchClauses.reserve(clauses.size()); for (auto clause : clauses) { prefs.advSearchClauses.push_back(atoi(clause.c_str())); } } SETTING_RW(prefs.ssearchNoComplete, "/Recoll/prefs/ssearch/noComplete", Bool, false); SETTING_RW(prefs.ssearchStartOnComplete, "/Recoll/prefs/ssearch/startOnComplete", Bool, true); SETTING_RW(prefs.filterCtlStyle, "/Recoll/prefs/filterCtlStyle", Int, 0); SETTING_RW(prefs.ssearchAutoPhrase, "/Recoll/prefs/ssearchAutoPhrase", Bool, true); SETTING_RW(prefs.ssearchAutoPhraseThreshPC, "/Recoll/prefs/ssearchAutoPhraseThreshPC", Double, 2.0); SETTING_RW(prefs.respagesize, "/Recoll/prefs/reslist/pagelen", Int, 8); SETTING_RW(prefs.historysize, "/Recoll/prefs/historysize", Int, -1); SETTING_RW(prefs.collapseDuplicates, "/Recoll/prefs/reslist/collapseDuplicates", Bool, false); SETTING_RW(prefs.showResultsAsTable, "/Recoll/prefs/showResultsAsTable", Bool, false); SETTING_RW(prefs.maxhltextmbs, "/Recoll/prefs/preview/maxhltextmbs", Int, 3); SETTING_RW(prefs.previewPlainPre, "/Recoll/prefs/preview/plainPre", Int, PrefsPack::PP_PREWRAP); // History: used to be able to only set a bare color name. Can now // set any CSS style. Hack on ':' presence to keep compat with old // values SETTING_RW(prefs.qtermstyle, "/Recoll/prefs/qtermcolor", String, "color: blue"); if (!writing && prefs.qtermstyle == "") prefs.qtermstyle = "color: blue"; { // histo compatibility hack int colon = prefs.qtermstyle.indexOf(":"); int semi = prefs.qtermstyle.indexOf(";"); // The 2nd part of the test is to keep compat with the // injection hack of the 1st user who suggested this (had // #ff5000;font-size:110%;... in 'qtermcolor') if (colon == -1 || (colon != -1 && semi != -1 && semi < colon)) { prefs.qtermstyle = QString::fromUtf8("color: ") + prefs.qtermstyle; } } SETTING_RW(prefs.reslistdateformat, "/Recoll/prefs/reslist/dateformat", String," %Y-%m-%d %H:%M:%S %z"); if (!writing && prefs.reslistdateformat == "") prefs.reslistdateformat = " %Y-%m-%d %H:%M:%S %z"; prefs.creslistdateformat = (const char*)prefs.reslistdateformat.toUtf8(); SETTING_RW(prefs.reslistfontfamily, "/Recoll/prefs/reslist/fontFamily", String, ""); SETTING_RW(prefs.reslistfontsize, "/Recoll/prefs/reslist/fontSize", Int, 10); QString rlfDflt = QString::fromUtf8(prefs.dfltResListFormat); if (writing) { if (prefs.reslistformat.compare(rlfDflt)) { settings.setValue("/Recoll/prefs/reslist/format", prefs.reslistformat); } else { settings.remove("/Recoll/prefs/reslist/format"); } } else { prefs.reslistformat = settings.value("/Recoll/prefs/reslist/format", rlfDflt).toString(); prefs.creslistformat = qs2utf8s(prefs.reslistformat); } SETTING_RW(prefs.reslistheadertext, "/Recoll/prefs/reslist/headertext", String, ""); SETTING_RW(prefs.qssFile, "/Recoll/prefs/stylesheet", String, ""); SETTING_RW(prefs.snipCssFile, "/Recoll/prefs/snippets/cssfile", String, ""); SETTING_RW(prefs.queryStemLang, "/Recoll/prefs/query/stemLang", String, "english"); SETTING_RW(prefs.useDesktopOpen, "/Recoll/prefs/useDesktopOpen", Bool, true); SETTING_RW(prefs.keepSort, "/Recoll/prefs/keepSort", Bool, false); SETTING_RW(prefs.sortField, "/Recoll/prefs/sortField", String, ""); SETTING_RW(prefs.sortActive, "/Recoll/prefs/sortActive", Bool, false); SETTING_RW(prefs.sortDesc, "/Recoll/prefs/query/sortDesc", Bool, 0); if (!writing) { // Handle transition from older prefs which did not store sortColumn // (Active always meant sort by date). 
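    // Added example (hypothetical values, not in the original source): a
    // settings file written by such an older version may contain
    // sortActive=true but no sortField entry; the test below then supplies
    // "mtime" so the old "sort by date" behaviour is kept.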
if (prefs.sortActive && prefs.sortField.isNull()) prefs.sortField = "mtime"; } SETTING_RW(prefs.queryBuildAbstract, "/Recoll/prefs/query/buildAbstract", Bool, true); SETTING_RW(prefs.queryReplaceAbstract, "/Recoll/prefs/query/replaceAbstract", Bool, false); SETTING_RW(prefs.syntAbsLen, "/Recoll/prefs/query/syntAbsLen", Int, 250); SETTING_RW(prefs.syntAbsCtx, "/Recoll/prefs/query/syntAbsCtx", Int, 4); // Abstract snippet separator SETTING_RW(prefs.abssep, "/Recoll/prefs/reslist/abssep", String,"…"); if (!writing && prefs.abssep == "") prefs.abssep = "…"; SETTING_RW(prefs.snipwMaxLength, "/Recoll/prefs/snipwin/maxlen", Int, 1000); SETTING_RW(prefs.snipwSortByPage,"/Recoll/prefs/snipwin/bypage", Bool,false); SETTING_RW(prefs.autoSuffs, "/Recoll/prefs/query/autoSuffs", String, ""); SETTING_RW(prefs.autoSuffsEnable, "/Recoll/prefs/query/autoSuffsEnable", Bool, false); SETTING_RW(prefs.synFileEnable, "/Recoll/prefs/query/synFileEnable", Bool, false); SETTING_RW(prefs.synFile, "/Recoll/prefs/query/synfile", String, ""); SETTING_RW(prefs.termMatchType, "/Recoll/prefs/query/termMatchType", Int, 0); SETTING_RW(prefs.noBeeps, "/Recoll/prefs/query/noBeeps", Bool, false); // This is not really the current program version, just a value to // be used in case we have incompatible changes one day SETTING_RW(prefs.rclVersion, "/Recoll/prefs/rclVersion", Int, 1009); // Ssearch combobox history list if (writing) { settings.setValue("/Recoll/prefs/query/ssearchHistory", prefs.ssearchHistory); } else { prefs.ssearchHistory = settings.value("/Recoll/prefs/query/ssearchHistory").toStringList(); } // Ignored file types (advanced search) if (writing) { settings.setValue("/Recoll/prefs/query/asearchIgnFilTyps", prefs.asearchIgnFilTyps); } else { prefs.asearchIgnFilTyps = settings.value( "/Recoll/prefs/query/asearchIgnFilTyps").toStringList(); } // Field list for the restable if (writing) { settings.setValue("/Recoll/prefs/query/restableFields", prefs.restableFields); } else { prefs.restableFields = settings.value("/Recoll/prefs/query/restableFields").toStringList(); if (prefs.restableFields.empty()) { prefs.restableFields.push_back("date"); prefs.restableFields.push_back("title"); prefs.restableFields.push_back("filename"); prefs.restableFields.push_back("author"); prefs.restableFields.push_back("url"); } } // restable col widths QString rtcw; if (writing) { for (const auto& width : prefs.restableColWidths) { char buf[20]; sprintf(buf, "%d ", width); rtcw += QString::fromUtf8(buf); } } SETTING_RW(rtcw, "/Recoll/prefs/query/restableWidths", String, "83 253 132 172 130 "); if (!writing) { prefs.restableColWidths.clear(); vector widths; stringToStrings(qs2utf8s(rtcw), widths); for (const auto& width : widths) { prefs.restableColWidths.push_back(atoi(width.c_str())); } } SETTING_RW(prefs.fileTypesByCats, "/Recoll/prefs/query/asearchFilTypByCat", Bool, false); SETTING_RW(prefs.showTrayIcon, "/Recoll/prefs/showTrayIcon", Bool, false); SETTING_RW(prefs.closeToTray, "/Recoll/prefs/closeToTray", Bool, false); SETTING_RW(prefs.trayMessages, "/Recoll/prefs/trayMessages", Bool, false); // See qxtconfirmationmessage. Needs to be -1 for the dialog to show. SETTING_RW(prefs.showTempFileWarning, "Recoll/prefs/showTempFileWarning", Int, -1); if (g_dynconf == 0) { // Happens return; } // The extra databases settings. These are stored as a list of // xapian directory names, encoded in base64 to avoid any // binary/charset conversion issues. There are 2 lists for all // known dbs and active (searched) ones. 
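    // (Added illustration, not in the original source; the paths are made
    // up: prefs.allExtraDbs could hold {"/data/idx1/", "/data/idx2/"} while
    // prefs.activeExtraDbs holds only the subset currently searched, for
    // instance {"/data/idx1/"}.)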
// When starting up, we also add from the RECOLL_EXTRA_DBS environment // variable. // This are stored inside the dynamic configuration file (aka: history), // as they are likely to depend on RECOLL_CONFDIR. if (writing) { g_dynconf->eraseAll(allEdbsSk); for (const auto& dbdir : prefs.allExtraDbs) { g_dynconf->enterString(allEdbsSk, dbdir); } g_dynconf->eraseAll(actEdbsSk); for (const auto& dbdir : prefs.activeExtraDbs) { g_dynconf->enterString(actEdbsSk, dbdir); } } else { prefs.allExtraDbs = g_dynconf->getStringEntries(allEdbsSk); const char *cp; if ((cp = getenv("RECOLL_EXTRA_DBS")) != 0) { vector dbl; stringToTokens(cp, dbl, ":"); for (const auto& path : dbl) { string dbdir = path_canon(path); path_catslash(dbdir); if (std::find(prefs.allExtraDbs.begin(), prefs.allExtraDbs.end(), dbdir) != prefs.allExtraDbs.end()) continue; bool stripped; if (!Rcl::Db::testDbDir(dbdir, &stripped)) { LOGERR("Not a xapian index: [" << dbdir << "]\n"); continue; } if (stripped != o_index_stripchars) { LOGERR("Incompatible character stripping: [" << dbdir << "]\n"); continue; } prefs.allExtraDbs.push_back(dbdir); } } // Get the remembered "active external indexes": prefs.activeExtraDbs = g_dynconf->getStringEntries(actEdbsSk); // Clean up the list: remove directories which are not // actually there: useful for removable volumes. for (auto it = prefs.activeExtraDbs.begin(); it != prefs.activeExtraDbs.end();) { bool stripped; if (!Rcl::Db::testDbDir(*it, &stripped) || stripped != o_index_stripchars) { LOGINFO("Not a Xapian index or char stripping differs: [" << *it << "]\n"); it = prefs.activeExtraDbs.erase(it); } else { it++; } } // Get active db directives from the environment. This can only add to // the remembered and cleaned up list const char *cp4Act; if ((cp4Act = getenv("RECOLL_ACTIVE_EXTRA_DBS")) != 0) { vector dbl; stringToTokens(cp4Act, dbl, ":"); for (const auto& path : dbl) { string dbdir = path_canon(path); path_catslash(dbdir); if (std::find(prefs.activeExtraDbs.begin(), prefs.activeExtraDbs.end(), dbdir) != prefs.activeExtraDbs.end()) continue; bool strpd; if (!Rcl::Db::testDbDir(dbdir, &strpd) || strpd != o_index_stripchars) { LOGERR("Not a Xapian dir or diff. 
char stripping: [" << dbdir << "]\n"); continue; } prefs.activeExtraDbs.push_back(dbdir); } //for } //if } #if 0 std::cerr << "All extra Dbs:\n"; for (const auto& dir : prefs.allExtraDbs) std::cerr << " [" << dir << "]\n"; std::cerr << "Active extra Dbs:\n"; for (const auto& dir : prefs.activeExtraDbs) std::cerr << " [" << dir << "]\n"; #endif const string asbdSk = "asearchSbd"; if (writing) { while (prefs.asearchSubdirHist.size() > 20) prefs.asearchSubdirHist.pop_back(); g_dynconf->eraseAll(asbdSk); for (const auto& qdbd : prefs.asearchSubdirHist) { g_dynconf->enterString(asbdSk, qs2utf8s(qdbd)); } } else { vector tl = g_dynconf->getStringEntries(asbdSk); for (const auto& dbd: tl) { prefs.asearchSubdirHist.push_back(u8s2qs(dbd.c_str())); } } if (!writing) havereadsettings = true; } string PrefsPack::stemlang() { string stemLang(qs2utf8s(prefs.queryStemLang)); if (stemLang == "ALL") { if (theconfig) theconfig->getConfParam("indexstemminglanguages", stemLang); else stemLang = ""; } return stemLang; } #ifdef SHOWEVENTS const char *eventTypeToStr(int tp) { switch (tp) { case 0: return "None"; case 1: return "Timer"; case 2: return "MouseButtonPress"; case 3: return "MouseButtonRelease"; case 4: return "MouseButtonDblClick"; case 5: return "MouseMove"; case 6: return "KeyPress"; case 7: return "KeyRelease"; case 8: return "FocusIn"; case 9: return "FocusOut"; case 10: return "Enter"; case 11: return "Leave"; case 12: return "Paint"; case 13: return "Move"; case 14: return "Resize"; case 15: return "Create"; case 16: return "Destroy"; case 17: return "Show"; case 18: return "Hide"; case 19: return "Close"; case 20: return "Quit"; case 21: return "ParentChange"; case 131: return "ParentAboutToChange"; case 22: return "ThreadChange"; case 24: return "WindowActivate"; case 25: return "WindowDeactivate"; case 26: return "ShowToParent"; case 27: return "HideToParent"; case 31: return "Wheel"; case 33: return "WindowTitleChange"; case 34: return "WindowIconChange"; case 35: return "ApplicationWindowIconChange"; case 36: return "ApplicationFontChange"; case 37: return "ApplicationLayoutDirectionChange"; case 38: return "ApplicationPaletteChange"; case 39: return "PaletteChange"; case 40: return "Clipboard"; case 42: return "Speech"; case 43: return "MetaCall"; case 50: return "SockAct"; case 132: return "WinEventAct"; case 52: return "DeferredDelete"; case 60: return "DragEnter"; case 61: return "DragMove"; case 62: return "DragLeave"; case 63: return "Drop"; case 64: return "DragResponse"; case 68: return "ChildAdded"; case 69: return "ChildPolished"; case 70: return "ChildInserted"; case 72: return "LayoutHint"; case 71: return "ChildRemoved"; case 73: return "ShowWindowRequest"; case 74: return "PolishRequest"; case 75: return "Polish"; case 76: return "LayoutRequest"; case 77: return "UpdateRequest"; case 78: return "UpdateLater"; case 79: return "EmbeddingControl"; case 80: return "ActivateControl"; case 81: return "DeactivateControl"; case 82: return "ContextMenu"; case 83: return "InputMethod"; case 86: return "AccessibilityPrepare"; case 87: return "TabletMove"; case 88: return "LocaleChange"; case 89: return "LanguageChange"; case 90: return "LayoutDirectionChange"; case 91: return "Style"; case 92: return "TabletPress"; case 93: return "TabletRelease"; case 94: return "OkRequest"; case 95: return "HelpRequest"; case 96: return "IconDrag"; case 97: return "FontChange"; case 98: return "EnabledChange"; case 99: return "ActivationChange"; case 100: return "StyleChange"; case 101: return 
"IconTextChange"; case 102: return "ModifiedChange"; case 109: return "MouseTrackingChange"; case 103: return "WindowBlocked"; case 104: return "WindowUnblocked"; case 105: return "WindowStateChange"; case 110: return "ToolTip"; case 111: return "WhatsThis"; case 112: return "StatusTip"; case 113: return "ActionChanged"; case 114: return "ActionAdded"; case 115: return "ActionRemoved"; case 116: return "FileOpen"; case 117: return "Shortcut"; case 51: return "ShortcutOverride"; case 30: return "Accel"; case 32: return "AccelAvailable"; case 118: return "WhatsThisClicked"; case 120: return "ToolBarChange"; case 121: return "ApplicationActivated"; case 122: return "ApplicationDeactivated"; case 123: return "QueryWhatsThis"; case 124: return "EnterWhatsThisMode"; case 125: return "LeaveWhatsThisMode"; case 126: return "ZOrderChange"; case 127: return "HoverEnter"; case 128: return "HoverLeave"; case 129: return "HoverMove"; case 119: return "AccessibilityHelp"; case 130: return "AccessibilityDescription"; case 150: return "EnterEditFocus"; case 151: return "LeaveEditFocus"; case 152: return "AcceptDropsChange"; case 153: return "MenubarUpdated"; case 154: return "ZeroTimerEvent"; case 155: return "GraphicsSceneMouseMove"; case 156: return "GraphicsSceneMousePress"; case 157: return "GraphicsSceneMouseRelease"; case 158: return "GraphicsSceneMouseDoubleClick"; case 159: return "GraphicsSceneContextMenu"; case 160: return "GraphicsSceneHoverEnter"; case 161: return "GraphicsSceneHoverMove"; case 162: return "GraphicsSceneHoverLeave"; case 163: return "GraphicsSceneHelp"; case 164: return "GraphicsSceneDragEnter"; case 165: return "GraphicsSceneDragMove"; case 166: return "GraphicsSceneDragLeave"; case 167: return "GraphicsSceneDrop"; case 168: return "GraphicsSceneWheel"; case 169: return "KeyboardLayoutChange"; case 170: return "DynamicPropertyChange"; case 171: return "TabletEnterProximity"; case 172: return "TabletLeaveProximity"; default: return "UnknownEvent"; } } #endif recoll-1.26.3/qtgui/ui_rclmain.h-4.50000644000175000017500000003411613303776056013753 00000000000000/******************************************************************************** ** Form generated from reading ui file 'rclmain.ui' ** ** Created: Mon Jan 25 20:45:26 2010 ** by: Qt User Interface Compiler version 4.4.0 ** ** WARNING! All changes made in this file will be lost when recompiling ui file! 
********************************************************************************/ #ifndef UI_RCLMAIN_H #define UI_RCLMAIN_H #include #include #include #include #include #include #include #include #include #include #include #include #include #include "reslist.h" #include "ssearch_w.h" QT_BEGIN_NAMESPACE class Ui_RclMainBase { public: QAction *fileExitAction; QAction *fileToggleIndexingAction; QAction *fileEraseDocHistoryAction; QAction *showMissingHelpers_Action; QAction *helpAbout_RecollAction; QAction *userManualAction; QAction *toolsDoc_HistoryAction; QAction *toolsAdvanced_SearchAction; QAction *toolsSort_parametersAction; QAction *toolsSpellAction; QAction *nextPageAction; QAction *firstPageAction; QAction *prevPageAction; QAction *indexConfigAction; QAction *queryPrefsAction; QAction *extIdxAction; QWidget *widget; QVBoxLayout *vboxLayout; QVBoxLayout *vboxLayout1; SSearch *sSearch; Q3ButtonGroup *catgBGRP; QRadioButton *allRDB; ResList *resList; Q3ToolBar *Toolbar; Q3ToolBar *Toolbar1; QMenuBar *MenuBar; QMenu *fileMenu; QMenu *toolsMenu; QMenu *preferencesMenu; QMenu *helpMenu; void setupUi(Q3MainWindow *RclMainBase) { if (RclMainBase->objectName().isEmpty()) RclMainBase->setObjectName(QString::fromUtf8("RclMainBase")); RclMainBase->resize(800, 600); QSizePolicy sizePolicy(static_cast(5), static_cast(5)); sizePolicy.setHorizontalStretch(0); sizePolicy.setVerticalStretch(0); sizePolicy.setHeightForWidth(RclMainBase->sizePolicy().hasHeightForWidth()); RclMainBase->setSizePolicy(sizePolicy); fileExitAction = new QAction(RclMainBase); fileExitAction->setObjectName(QString::fromUtf8("fileExitAction")); fileExitAction->setName("fileExitAction"); fileToggleIndexingAction = new QAction(RclMainBase); fileToggleIndexingAction->setObjectName(QString::fromUtf8("fileToggleIndexingAction")); fileToggleIndexingAction->setName("fileToggleIndexingAction"); fileEraseDocHistoryAction = new QAction(RclMainBase); fileEraseDocHistoryAction->setObjectName(QString::fromUtf8("fileEraseDocHistoryAction")); fileEraseDocHistoryAction->setName("fileEraseDocHistoryAction"); showMissingHelpers_Action = new QAction(RclMainBase); showMissingHelpers_Action->setObjectName(QString::fromUtf8("showMissingHelpers_Action")); showMissingHelpers_Action->setName("showMissingHelpers_Action"); helpAbout_RecollAction = new QAction(RclMainBase); helpAbout_RecollAction->setObjectName(QString::fromUtf8("helpAbout_RecollAction")); helpAbout_RecollAction->setName("helpAbout_RecollAction"); userManualAction = new QAction(RclMainBase); userManualAction->setObjectName(QString::fromUtf8("userManualAction")); userManualAction->setName("userManualAction"); toolsDoc_HistoryAction = new QAction(RclMainBase); toolsDoc_HistoryAction->setObjectName(QString::fromUtf8("toolsDoc_HistoryAction")); toolsDoc_HistoryAction->setName("toolsDoc_HistoryAction"); toolsAdvanced_SearchAction = new QAction(RclMainBase); toolsAdvanced_SearchAction->setObjectName(QString::fromUtf8("toolsAdvanced_SearchAction")); toolsAdvanced_SearchAction->setName("toolsAdvanced_SearchAction"); toolsSort_parametersAction = new QAction(RclMainBase); toolsSort_parametersAction->setObjectName(QString::fromUtf8("toolsSort_parametersAction")); toolsSort_parametersAction->setName("toolsSort_parametersAction"); toolsSpellAction = new QAction(RclMainBase); toolsSpellAction->setObjectName(QString::fromUtf8("toolsSpellAction")); toolsSpellAction->setName("toolsSpellAction"); nextPageAction = new QAction(RclMainBase); nextPageAction->setObjectName(QString::fromUtf8("nextPageAction")); 
nextPageAction->setName("nextPageAction"); nextPageAction->setEnabled(false); firstPageAction = new QAction(RclMainBase); firstPageAction->setObjectName(QString::fromUtf8("firstPageAction")); firstPageAction->setName("firstPageAction"); firstPageAction->setEnabled(false); prevPageAction = new QAction(RclMainBase); prevPageAction->setObjectName(QString::fromUtf8("prevPageAction")); prevPageAction->setName("prevPageAction"); prevPageAction->setEnabled(false); indexConfigAction = new QAction(RclMainBase); indexConfigAction->setObjectName(QString::fromUtf8("indexConfigAction")); indexConfigAction->setName("indexConfigAction"); queryPrefsAction = new QAction(RclMainBase); queryPrefsAction->setObjectName(QString::fromUtf8("queryPrefsAction")); queryPrefsAction->setName("queryPrefsAction"); extIdxAction = new QAction(RclMainBase); extIdxAction->setObjectName(QString::fromUtf8("extIdxAction")); extIdxAction->setName("extIdxAction"); widget = new QWidget(RclMainBase); widget->setObjectName(QString::fromUtf8("widget")); vboxLayout = new QVBoxLayout(widget); vboxLayout->setSpacing(2); vboxLayout->setMargin(4); vboxLayout->setObjectName(QString::fromUtf8("vboxLayout")); vboxLayout->setContentsMargins(0, 0, 0, 0); vboxLayout1 = new QVBoxLayout(); vboxLayout1->setSpacing(2); vboxLayout1->setMargin(2); vboxLayout1->setObjectName(QString::fromUtf8("vboxLayout1")); sSearch = new SSearch(widget); sSearch->setObjectName(QString::fromUtf8("sSearch")); QSizePolicy sizePolicy1(static_cast(7), static_cast(0)); sizePolicy1.setHorizontalStretch(0); sizePolicy1.setVerticalStretch(0); sizePolicy1.setHeightForWidth(sSearch->sizePolicy().hasHeightForWidth()); sSearch->setSizePolicy(sizePolicy1); vboxLayout1->addWidget(sSearch); catgBGRP = new Q3ButtonGroup(widget); catgBGRP->setObjectName(QString::fromUtf8("catgBGRP")); QSizePolicy sizePolicy2(static_cast(5), static_cast(0)); sizePolicy2.setHorizontalStretch(0); sizePolicy2.setVerticalStretch(0); sizePolicy2.setHeightForWidth(catgBGRP->sizePolicy().hasHeightForWidth()); catgBGRP->setSizePolicy(sizePolicy2); catgBGRP->setFrameShape(Q3GroupBox::GroupBoxPanel); catgBGRP->setFrameShadow(Q3GroupBox::Sunken); catgBGRP->setProperty("selectedId", QVariant(0)); allRDB = new QRadioButton(catgBGRP); allRDB->setObjectName(QString::fromUtf8("allRDB")); vboxLayout1->addWidget(catgBGRP); resList = new ResList(widget); resList->setObjectName(QString::fromUtf8("resList")); QSizePolicy sizePolicy3(static_cast(5), static_cast(5)); sizePolicy3.setHorizontalStretch(2); sizePolicy3.setVerticalStretch(0); sizePolicy3.setHeightForWidth(resList->sizePolicy().hasHeightForWidth()); resList->setSizePolicy(sizePolicy3); vboxLayout1->addWidget(resList); vboxLayout->addLayout(vboxLayout1); RclMainBase->setCentralWidget(widget); Toolbar = new Q3ToolBar(RclMainBase); Toolbar->setObjectName(QString::fromUtf8("Toolbar")); Toolbar1 = new Q3ToolBar(RclMainBase); Toolbar1->setObjectName(QString::fromUtf8("Toolbar1")); MenuBar = new QMenuBar(RclMainBase); MenuBar->setObjectName(QString::fromUtf8("MenuBar")); fileMenu = new QMenu(MenuBar); fileMenu->setObjectName(QString::fromUtf8("fileMenu")); toolsMenu = new QMenu(MenuBar); toolsMenu->setObjectName(QString::fromUtf8("toolsMenu")); preferencesMenu = new QMenu(MenuBar); preferencesMenu->setObjectName(QString::fromUtf8("preferencesMenu")); helpMenu = new QMenu(MenuBar); helpMenu->setObjectName(QString::fromUtf8("helpMenu")); Toolbar->addAction(toolsAdvanced_SearchAction); Toolbar->addAction(toolsSort_parametersAction); 
Toolbar->addAction(toolsDoc_HistoryAction); Toolbar->addAction(toolsSpellAction); Toolbar1->addAction(firstPageAction); Toolbar1->addAction(prevPageAction); Toolbar1->addAction(nextPageAction); MenuBar->addAction(fileMenu->menuAction()); MenuBar->addAction(toolsMenu->menuAction()); MenuBar->addAction(preferencesMenu->menuAction()); MenuBar->addSeparator(); MenuBar->addAction(helpMenu->menuAction()); fileMenu->addAction(fileToggleIndexingAction); fileMenu->addSeparator(); fileMenu->addAction(fileEraseDocHistoryAction); fileMenu->addAction(showMissingHelpers_Action); fileMenu->addSeparator(); fileMenu->addAction(fileExitAction); toolsMenu->addAction(toolsDoc_HistoryAction); toolsMenu->addAction(toolsAdvanced_SearchAction); toolsMenu->addAction(toolsSort_parametersAction); toolsMenu->addAction(toolsSpellAction); preferencesMenu->addAction(indexConfigAction); preferencesMenu->addSeparator(); preferencesMenu->addAction(queryPrefsAction); preferencesMenu->addAction(extIdxAction); preferencesMenu->addSeparator(); helpMenu->addAction(userManualAction); helpMenu->addAction(showMissingHelpers_Action); helpMenu->addSeparator(); helpMenu->addAction(helpAbout_RecollAction); retranslateUi(RclMainBase); QMetaObject::connectSlotsByName(RclMainBase); } // setupUi void retranslateUi(Q3MainWindow *RclMainBase) { RclMainBase->setWindowTitle(QApplication::translate("RclMainBase", "Recoll", 0, QApplication::UnicodeUTF8)); fileExitAction->setText(QApplication::translate("RclMainBase", "E&xit", 0, QApplication::UnicodeUTF8)); fileExitAction->setShortcut(QApplication::translate("RclMainBase", "Ctrl+Q", 0, QApplication::UnicodeUTF8)); fileToggleIndexingAction->setText(QApplication::translate("RclMainBase", "Update &index", 0, QApplication::UnicodeUTF8)); fileEraseDocHistoryAction->setText(QApplication::translate("RclMainBase", "&Erase document history", 0, QApplication::UnicodeUTF8)); showMissingHelpers_Action->setText(QApplication::translate("RclMainBase", "&Show missing helpers", 0, QApplication::UnicodeUTF8)); helpAbout_RecollAction->setText(QApplication::translate("RclMainBase", "&About Recoll", 0, QApplication::UnicodeUTF8)); userManualAction->setText(QApplication::translate("RclMainBase", "&User manual", 0, QApplication::UnicodeUTF8)); toolsDoc_HistoryAction->setText(QApplication::translate("RclMainBase", "Document &History", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP toolsDoc_HistoryAction->setToolTip(QApplication::translate("RclMainBase", "Document History", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP toolsAdvanced_SearchAction->setText(QApplication::translate("RclMainBase", "&Advanced Search", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP toolsAdvanced_SearchAction->setToolTip(QApplication::translate("RclMainBase", "Advanced/complex Search", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP toolsSort_parametersAction->setText(QApplication::translate("RclMainBase", "&Sort parameters", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP toolsSort_parametersAction->setToolTip(QApplication::translate("RclMainBase", "Sort parameters", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP toolsSpellAction->setText(QApplication::translate("RclMainBase", "Term &explorer", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP toolsSpellAction->setToolTip(QApplication::translate("RclMainBase", "Term explorer tool", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP nextPageAction->setIconText(QApplication::translate("RclMainBase", "Next page", 0, 
QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP nextPageAction->setToolTip(QApplication::translate("RclMainBase", "Next page of results", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP firstPageAction->setIconText(QApplication::translate("RclMainBase", "First page", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP firstPageAction->setToolTip(QApplication::translate("RclMainBase", "Go to first page of results", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP prevPageAction->setIconText(QApplication::translate("RclMainBase", "Previous page", 0, QApplication::UnicodeUTF8)); #ifndef QT_NO_TOOLTIP prevPageAction->setToolTip(QApplication::translate("RclMainBase", "Previous page of results", 0, QApplication::UnicodeUTF8)); #endif // QT_NO_TOOLTIP indexConfigAction->setText(QApplication::translate("RclMainBase", "&Indexing configuration", 0, QApplication::UnicodeUTF8)); queryPrefsAction->setText(QApplication::translate("RclMainBase", "&Query configuration", 0, QApplication::UnicodeUTF8)); extIdxAction->setIconText(QApplication::translate("RclMainBase", "External index dialog", 0, QApplication::UnicodeUTF8)); extIdxAction->setText(QApplication::translate("RclMainBase", "External index dialog", 0, QApplication::UnicodeUTF8)); catgBGRP->setTitle(QString()); allRDB->setText(QApplication::translate("RclMainBase", "All", 0, QApplication::UnicodeUTF8)); Toolbar->setLabel(QApplication::translate("RclMainBase", "Search tools", 0, QApplication::UnicodeUTF8)); Toolbar1->setLabel(QApplication::translate("RclMainBase", "Result list", 0, QApplication::UnicodeUTF8)); fileMenu->setTitle(QApplication::translate("RclMainBase", "&File", 0, QApplication::UnicodeUTF8)); toolsMenu->setTitle(QApplication::translate("RclMainBase", "&Tools", 0, QApplication::UnicodeUTF8)); preferencesMenu->setTitle(QApplication::translate("RclMainBase", "&Preferences", 0, QApplication::UnicodeUTF8)); helpMenu->setTitle(QApplication::translate("RclMainBase", "&Help", 0, QApplication::UnicodeUTF8)); } // retranslateUi }; namespace Ui { class RclMainBase: public Ui_RclMainBase {}; } // namespace Ui QT_END_NAMESPACE #endif // UI_RCLMAIN_H recoll-1.26.3/qtgui/confgui/0000755000175000017500000000000013570165410012651 500000000000000recoll-1.26.3/qtgui/confgui/confgui.cpp0000644000175000017500000007462713566424763014765 00000000000000/* Copyright (C) 2005-2016 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "confgui.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smallut.h" #ifdef ENABLE_XMLCONF #include "picoxml.h" #endif using namespace std; namespace confgui { static const int spacing = 3; // left,top,right, bottom static QMargins margin(4,3,4,3); ConfTabsW::ConfTabsW(QWidget *parent, const QString& title, ConfLinkFact *fact) : QDialog(parent), m_makelink(fact) { setWindowTitle(title); tabWidget = new QTabWidget; buttonBox = new QDialogButtonBox(QDialogButtonBox::Ok | QDialogButtonBox::Cancel); QVBoxLayout *mainLayout = new QVBoxLayout; mainLayout->setSpacing(spacing); mainLayout->setContentsMargins(margin); mainLayout->addWidget(tabWidget); mainLayout->addWidget(buttonBox); setLayout(mainLayout); resize(QSize(500, 400).expandedTo(minimumSizeHint())); connect(buttonBox, SIGNAL(accepted()), this, SLOT(acceptChanges())); connect(buttonBox, SIGNAL(rejected()), this, SLOT(rejectChanges())); } void ConfTabsW::hideButtons() { if (buttonBox) buttonBox->hide(); } void ConfTabsW::acceptChanges() { for (auto& entry : m_panels) { entry->storeValues(); } for (auto& entry : m_widgets) { entry->storeValues(); } emit sig_prefsChanged(); if (!buttonBox->isHidden()) close(); } void ConfTabsW::rejectChanges() { reloadPanels(); if (!buttonBox->isHidden()) close(); } void ConfTabsW::reloadPanels() { for (auto& entry : m_panels) { entry->loadValues(); } for (auto& entry : m_widgets) { entry->loadValues(); } } int ConfTabsW::addPanel(const QString& title) { ConfPanelW *w = new ConfPanelW(this); m_panels.push_back(w); return tabWidget->addTab(w, title); } int ConfTabsW::addForeignPanel(ConfPanelWIF* w, const QString& title) { m_widgets.push_back(w); QWidget *qw = dynamic_cast(w); if (qw == 0) { qDebug() << "Can't cast panel to QWidget"; abort(); } return tabWidget->addTab(qw, title); } void ConfTabsW::setCurrentIndex(int idx) { if (tabWidget) { tabWidget->setCurrentIndex(idx); } } QWidget *ConfTabsW::addBlurb(int tabindex, const QString& txt) { ConfPanelW *panel = (ConfPanelW*)tabWidget->widget(tabindex); if (panel == 0) { return 0; } QFrame *line = new QFrame(panel); line->setFrameShape(QFrame::HLine); line->setFrameShadow(QFrame::Sunken); panel->addWidget(line); QLabel *explain = new QLabel(panel); explain->setWordWrap(true); explain->setText(txt); panel->addWidget(explain); line = new QFrame(panel); line->setFrameShape(QFrame::HLine); line->setFrameShadow(QFrame::Sunken); panel->addWidget(line); return explain; } ConfParamW *ConfTabsW::addParam( int tabindex, ParamType tp, const QString& varname, const QString& label, const QString& tooltip, int ival, int maxval, const QStringList* sl) { ConfLink lnk = (*m_makelink)(varname); ConfPanelW *panel = (ConfPanelW*)tabWidget->widget(tabindex); if (panel == 0) { return 0; } ConfParamW *cp = 0; switch (tp) { case CFPT_BOOL: cp = new ConfParamBoolW(varname, this, lnk, label, tooltip, ival); break; case CFPT_INT: { size_t v = (size_t)sl; int v1 = (v & 0xffffffff); cp = new ConfParamIntW(varname, this, lnk, label, tooltip, ival, maxval, v1); break; } case CFPT_STR: cp = new ConfParamStrW(varname, this, lnk, label, tooltip); break; case CFPT_CSTR: cp = new ConfParamCStrW(varname, this, lnk, label, tooltip, *sl); break; case CFPT_FN: cp = new ConfParamFNW(varname, this, lnk, label, tooltip, ival); break; case CFPT_STRL: cp = new 
ConfParamSLW(varname, this, lnk, label, tooltip); break; case CFPT_DNL: cp = new ConfParamDNLW(varname, this, lnk, label, tooltip); break; case CFPT_CSTRL: cp = new ConfParamCSLW(varname, this, lnk, label, tooltip, *sl); break; } panel->addParam(cp); return cp; } ConfParamW *ConfTabsW::findParamW(const QString& varname) { for (const auto& panel : m_panels) { ConfParamW *w = panel->findParamW(varname); if (w) return w; } return nullptr; } void ConfTabsW::endOfList(int tabindex) { ConfPanelW *panel = (ConfPanelW*)tabWidget->widget(tabindex); if (nullptr == panel) return; panel->endOfList(); } bool ConfTabsW::enableLink(ConfParamW* boolw, ConfParamW* otherw, bool revert) { ConfParamBoolW *bw = dynamic_cast(boolw); if (bw == 0) { cerr << "ConfTabsW::enableLink: not a boolw\n"; return false; } otherw->setEnabled(revert ? !bw->m_cb->isChecked() : bw->m_cb->isChecked()); if (revert) { connect(bw->m_cb, SIGNAL(toggled(bool)), otherw, SLOT(setDisabled(bool))); } else { connect(bw->m_cb, SIGNAL(toggled(bool)), otherw, SLOT(setEnabled(bool))); } return true; } ConfPanelW::ConfPanelW(QWidget *parent) : QWidget(parent) { m_vboxlayout = new QVBoxLayout(this); m_vboxlayout->setSpacing(spacing); m_vboxlayout->setAlignment(Qt::AlignTop); m_vboxlayout->setContentsMargins(margin); } void ConfPanelW::addParam(ConfParamW *w) { m_vboxlayout->addWidget(w); m_params.push_back(w); } void ConfPanelW::addWidget(QWidget *w) { m_vboxlayout->addWidget(w); } ConfParamW *ConfPanelW::findParamW(const QString& varname) { for (const auto& param : m_params) { if (!varname.compare(param->getVarName())) { return param; } } return nullptr; } void ConfPanelW::endOfList() { m_vboxlayout->addStretch(2); } void ConfPanelW::storeValues() { for (auto& widgetp : m_params) { widgetp->storeValue(); } } void ConfPanelW::loadValues() { for (auto& widgetp : m_params) { widgetp->loadValue(); } } static QString myGetFileName(bool isdir, QString caption = QString(), bool filenosave = false); static QString myGetFileName(bool isdir, QString caption, bool filenosave) { QFileDialog dialog(0, caption); if (isdir) { dialog.setFileMode(QFileDialog::Directory); dialog.setOptions(QFileDialog::ShowDirsOnly); } else { dialog.setFileMode(QFileDialog::AnyFile); if (filenosave) { dialog.setAcceptMode(QFileDialog::AcceptOpen); } else { dialog.setAcceptMode(QFileDialog::AcceptSave); } } dialog.setViewMode(QFileDialog::List); QFlags flags = QDir::NoDotAndDotDot | QDir::Hidden; if (isdir) { flags |= QDir::Dirs; } else { flags |= QDir::Dirs | QDir::Files; } dialog.setFilter(flags); if (dialog.exec() == QDialog::Accepted) { return dialog.selectedFiles().value(0); } return QString(); } void ConfParamW::setValue(const QString& value) { if (m_fsencoding) { m_cflink->set(string((const char *)value.toLocal8Bit())); } else { m_cflink->set(string((const char *)value.toUtf8())); } } void ConfParamW::setValue(int value) { char buf[30]; sprintf(buf, "%d", value); m_cflink->set(string(buf)); } void ConfParamW::setValue(bool value) { char buf[30]; sprintf(buf, "%d", value); m_cflink->set(string(buf)); } extern void setSzPol(QWidget *w, QSizePolicy::Policy hpol, QSizePolicy::Policy vpol, int hstretch, int vstretch); void setSzPol(QWidget *w, QSizePolicy::Policy hpol, QSizePolicy::Policy vpol, int hstretch, int vstretch) { QSizePolicy policy(hpol, vpol); policy.setHorizontalStretch(hstretch); policy.setVerticalStretch(vstretch); policy.setHeightForWidth(w->sizePolicy().hasHeightForWidth()); w->setSizePolicy(policy); } bool ConfParamW::createCommon(const QString& lbltxt, 
const QString& tltptxt) { m_hl = new QHBoxLayout(this); m_hl->setSpacing(spacing); m_hl->setContentsMargins(margin); QLabel *tl = new QLabel(this); setSzPol(tl, QSizePolicy::Preferred, QSizePolicy::Fixed, 0, 0); tl->setText(lbltxt); tl->setToolTip(tltptxt); m_hl->addWidget(tl); return true; } ConfParamIntW::ConfParamIntW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, int minvalue, int maxvalue, int defaultvalue) : ConfParamW(varnm, parent, cflink), m_defaultvalue(defaultvalue) { if (!createCommon(lbltxt, tltptxt)) { return; } m_sb = new QSpinBox(this); m_sb->setMinimum(minvalue); m_sb->setMaximum(maxvalue); setSzPol(m_sb, QSizePolicy::Fixed, QSizePolicy::Fixed, 0, 0); m_hl->addWidget(m_sb); QFrame *fr = new QFrame(this); setSzPol(fr, QSizePolicy::Preferred, QSizePolicy::Fixed, 0, 0); m_hl->addWidget(fr); loadValue(); } void ConfParamIntW::storeValue() { if (m_origvalue != m_sb->value()) { setValue(m_sb->value()); } } void ConfParamIntW::loadValue() { string s; if (m_cflink->get(s)) { m_sb->setValue(m_origvalue = atoi(s.c_str())); } else { m_sb->setValue(m_origvalue = m_defaultvalue); } } void ConfParamIntW::setImmediate() { connect(m_sb, SIGNAL(valueChanged(int)), this, SLOT(setValue(int))); } ConfParamStrW::ConfParamStrW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt) : ConfParamW(varnm, parent, cflink) { if (!createCommon(lbltxt, tltptxt)) { return; } m_le = new QLineEdit(this); setSzPol(m_le, QSizePolicy::Preferred, QSizePolicy::Fixed, 1, 0); m_hl->addWidget(m_le); loadValue(); } void ConfParamStrW::storeValue() { if (m_origvalue.compare(m_le->text())) { setValue(m_le->text()); } } void ConfParamStrW::loadValue() { string s; m_cflink->get(s); if (m_fsencoding) { m_le->setText(m_origvalue = QString::fromLocal8Bit(s.c_str())); } else { m_le->setText(m_origvalue = QString::fromUtf8(s.c_str())); } } void ConfParamStrW::setImmediate() { connect(m_le, SIGNAL(textChanged(const QString&)), this, SLOT(setValue(const QString&))); } ConfParamCStrW::ConfParamCStrW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, const QStringList& sl) : ConfParamW(varnm, parent, cflink) { if (!createCommon(lbltxt, tltptxt)) { return; } m_cmb = new QComboBox(this); m_cmb->setEditable(false); m_cmb->insertItems(0, sl); setSzPol(m_cmb, QSizePolicy::Preferred, QSizePolicy::Fixed, 1, 0); m_hl->addWidget(m_cmb); loadValue(); } void ConfParamCStrW::setList(const QStringList& sl) { m_cmb->clear(); m_cmb->insertItems(0, sl); loadValue(); } void ConfParamCStrW::storeValue() { if (m_origvalue.compare(m_cmb->currentText())) { setValue(m_cmb->currentText()); } } void ConfParamCStrW::loadValue() { string s; m_cflink->get(s); QString cs; if (m_fsencoding) { cs = QString::fromLocal8Bit(s.c_str()); } else { cs = QString::fromUtf8(s.c_str()); } for (int i = 0; i < m_cmb->count(); i++) { if (!cs.compare(m_cmb->itemText(i))) { m_cmb->setCurrentIndex(i); break; } } m_origvalue = cs; } void ConfParamCStrW::setImmediate() { connect(m_cmb, SIGNAL(activated(const QString&)), this, SLOT(setValue(const QString&))); } ConfParamBoolW::ConfParamBoolW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, bool deflt) : ConfParamW(varnm, parent, cflink), m_dflt(deflt) { // No createCommon because the checkbox has a label m_hl = new QHBoxLayout(this); m_hl->setSpacing(spacing); m_hl->setContentsMargins(margin); m_cb = new 
QCheckBox(lbltxt, this); setSzPol(m_cb, QSizePolicy::Fixed, QSizePolicy::Fixed, 0, 0); m_cb->setToolTip(tltptxt); m_hl->addWidget(m_cb); QFrame *fr = new QFrame(this); setSzPol(fr, QSizePolicy::Preferred, QSizePolicy::Fixed, 1, 0); m_hl->addWidget(fr); loadValue(); } void ConfParamBoolW::storeValue() { if (m_origvalue != m_cb->isChecked()) { setValue(m_cb->isChecked()); } } void ConfParamBoolW::loadValue() { string s; if (!m_cflink->get(s)) { m_origvalue = m_dflt; } else { m_origvalue = stringToBool(s); } m_cb->setChecked(m_origvalue); } void ConfParamBoolW::setImmediate() { connect(m_cb, SIGNAL(toggled(bool)), this, SLOT(setValue(bool))); } ConfParamFNW::ConfParamFNW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, bool isdir) : ConfParamW(varnm, parent, cflink), m_isdir(isdir) { if (!createCommon(lbltxt, tltptxt)) { return; } m_fsencoding = true; m_le = new QLineEdit(this); m_le->setMinimumSize(QSize(150, 0)); setSzPol(m_le, QSizePolicy::Preferred, QSizePolicy::Fixed, 1, 0); m_hl->addWidget(m_le); m_pb = new QPushButton(this); QString text = tr("Choose"); m_pb->setText(text); int width = m_pb->fontMetrics().boundingRect(text).width() + 15; m_pb->setMaximumWidth(width); setSzPol(m_pb, QSizePolicy::Minimum, QSizePolicy::Fixed, 0, 0); m_hl->addWidget(m_pb); loadValue(); QObject::connect(m_pb, SIGNAL(clicked()), this, SLOT(showBrowserDialog())); } void ConfParamFNW::storeValue() { if (m_origvalue.compare(m_le->text())) { setValue(m_le->text()); } } void ConfParamFNW::loadValue() { string s; m_cflink->get(s); m_le->setText(m_origvalue = QString::fromLocal8Bit(s.c_str())); } void ConfParamFNW::showBrowserDialog() { QString s = myGetFileName(m_isdir); if (!s.isEmpty()) { m_le->setText(s); } } void ConfParamFNW::setImmediate() { connect(m_le, SIGNAL(textChanged(const QString&)), this, SLOT(setValue(const QString&))); } class SmallerListWidget: public QListWidget { public: SmallerListWidget(QWidget *parent) : QListWidget(parent) {} virtual QSize sizeHint() const { return QSize(150, 40); } }; ConfParamSLW::ConfParamSLW( const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt) : ConfParamW(varnm, parent, cflink) { // Can't use createCommon here cause we want the buttons below the label m_hl = new QHBoxLayout(this); m_hl->setSpacing(spacing); m_hl->setContentsMargins(margin); QVBoxLayout *vl1 = new QVBoxLayout(); vl1->setSpacing(spacing); vl1->setContentsMargins(margin); QHBoxLayout *hl1 = new QHBoxLayout(); hl1->setSpacing(spacing); hl1->setContentsMargins(margin); QLabel *tl = new QLabel(this); setSzPol(tl, QSizePolicy::Preferred, QSizePolicy::Fixed, 0, 0); tl->setText(lbltxt); tl->setToolTip(tltptxt); vl1->addWidget(tl); QPushButton *pbA = new QPushButton(this); QString text = tr("+"); pbA->setText(text); pbA->setToolTip(tr("Add entry")); int width = pbA->fontMetrics().boundingRect(text).width() + 15; pbA->setMaximumWidth(width); setSzPol(pbA, QSizePolicy::Minimum, QSizePolicy::Fixed, 0, 0); hl1->addWidget(pbA); QObject::connect(pbA, SIGNAL(clicked()), this, SLOT(showInputDialog())); QPushButton *pbD = new QPushButton(this); text = tr("-"); pbD->setText(text); pbD->setToolTip(tr("Delete selected entries")); width = pbD->fontMetrics().boundingRect(text).width() + 15; pbD->setMaximumWidth(width); setSzPol(pbD, QSizePolicy::Minimum, QSizePolicy::Fixed, 0, 0); hl1->addWidget(pbD); QObject::connect(pbD, SIGNAL(clicked()), this, SLOT(deleteSelected())); m_pbE = new QPushButton(this); text = tr("~"); 
m_pbE->setText(text); m_pbE->setToolTip(tr("Edit selected entries")); width = m_pbE->fontMetrics().boundingRect(text).width() + 15; m_pbE->setMaximumWidth(width); setSzPol(m_pbE, QSizePolicy::Minimum, QSizePolicy::Fixed, 0, 0); hl1->addWidget(m_pbE); QObject::connect(m_pbE, SIGNAL(clicked()), this, SLOT(editSelected())); m_pbE->hide(); vl1->addLayout(hl1); m_hl->addLayout(vl1); m_lb = new SmallerListWidget(this); m_lb->setSelectionMode(QAbstractItemView::ExtendedSelection); connect(m_lb, SIGNAL(currentTextChanged(const QString&)), this, SIGNAL(currentTextChanged(const QString&))); setSzPol(m_lb, QSizePolicy::Preferred, QSizePolicy::Preferred, 1, 1); m_hl->addWidget(m_lb); setSzPol(this, QSizePolicy::Preferred, QSizePolicy::Preferred, 1, 1); loadValue(); } void ConfParamSLW::setEditable(bool onoff) { if (onoff) { m_pbE->show(); } else { m_pbE->hide(); } } string ConfParamSLW::listToString() { vector ls; for (int i = 0; i < m_lb->count(); i++) { // General parameters are encoded as utf-8. File names as // local8bit There is no hope for 8bit file names anyway // except for luck: the original encoding is unknown. QString text = m_lb->item(i)->text(); if (m_fsencoding) { ls.push_back((const char *)(text.toLocal8Bit())); } else { ls.push_back((const char *)(text.toUtf8())); } } string s; stringsToString(ls, s); return s; } void ConfParamSLW::storeValue() { string s = listToString(); if (s.compare(m_origvalue)) { m_cflink->set(s); } } void ConfParamSLW::loadValue() { m_origvalue.clear(); m_cflink->get(m_origvalue); vector ls; stringToStrings(m_origvalue, ls); QStringList qls; for (const auto& str : ls) { if (m_fsencoding) { qls.push_back(QString::fromLocal8Bit(str.c_str())); } else { qls.push_back(QString::fromUtf8(str.c_str())); } } m_lb->clear(); m_lb->insertItems(0, qls); } void ConfParamSLW::showInputDialog() { bool ok; QString s = QInputDialog::getText(this, "", "", QLineEdit::Normal, "", &ok); if (!ok || s.isEmpty()) { return; } performInsert(s); } void ConfParamSLW::performInsert(const QString& s) { QList existing = m_lb->findItems(s, Qt::MatchFixedString | Qt::MatchCaseSensitive); if (!existing.empty()) { m_lb->setCurrentItem(existing[0]); return; } m_lb->insertItem(0, s); m_lb->sortItems(); existing = m_lb->findItems(s, Qt::MatchFixedString | Qt::MatchCaseSensitive); if (existing.empty()) { cerr << "Item not found after insertion!" << endl; return; } m_lb->setCurrentItem(existing[0], QItemSelectionModel::ClearAndSelect); if (m_immediate) { string nv = listToString(); m_cflink->set(nv); } } void ConfParamSLW::deleteSelected() { // We used to repeatedly go through the list and delete the first // found selected item (then restart from the beginning). But it // seems (probably depends on the qt version), that, when deleting // a selected item, qt will keep the selection active at the same // index (now containing the next item), so that we'd end up // deleting the whole list. 
// // Instead, we now build a list of indices, and delete it starting // from the top so as not to invalidate lower indices vector idxes; for (int i = 0; i < m_lb->count(); i++) { if (m_lb->item(i)->isSelected()) { idxes.push_back(i); } } for (vector::reverse_iterator it = idxes.rbegin(); it != idxes.rend(); it++) { QListWidgetItem *item = m_lb->takeItem(*it); emit entryDeleted(item->text()); delete item; } if (m_immediate) { string nv = listToString(); m_cflink->set(nv); } if (m_lb->count()) { m_lb->setCurrentRow(0, QItemSelectionModel::ClearAndSelect); } } void ConfParamSLW::editSelected() { for (int i = 0; i < m_lb->count(); i++) { if (m_lb->item(i)->isSelected()) { bool ok; QString s = QInputDialog::getText( this, "", "", QLineEdit::Normal, m_lb->item(i)->text(), &ok); if (ok && !s.isEmpty()) { m_lb->item(i)->setText(s); if (m_immediate) { string nv = listToString(); m_cflink->set(nv); } } } } } // "Add entry" dialog for a file name list void ConfParamDNLW::showInputDialog() { QString s = myGetFileName(true); if (s.isEmpty()) { return; } performInsert(s); } // "Add entry" dialog for a constrained string list void ConfParamCSLW::showInputDialog() { bool ok; QString s = QInputDialog::getItem(this, "", "", m_sl, 0, false, &ok); if (!ok || s.isEmpty()) { return; } performInsert(s); } #ifdef ENABLE_XMLCONF static QString u8s2qs(const std::string us) { return QString::fromUtf8(us.c_str()); } static const string& mapfind(const string& nm, const map& mp) { static string strnull; map::const_iterator it; it = mp.find(nm); if (it == mp.end()) { return strnull; } return it->second; } static string looksLikeAssign(const string& data) { //LOGDEB("looksLikeAssign. data: [" << data << "]"); vector toks; stringToTokens(data, toks, "\n\r\t "); if (toks.size() >= 2 && !toks[1].compare("=")) { return toks[0]; } return string(); } ConfTabsW *xmlToConfGUI(const string& xml, string& toptext, ConfLinkFact* lnkf, QWidget *parent) { //LOGDEB("xmlToConfGUI: [" << xml << "]"); class XMLToConfGUI : public PicoXMLParser { public: XMLToConfGUI(const string& x, ConfLinkFact *lnkf, QWidget *parent) : PicoXMLParser(x), m_lnkfact(lnkf), m_parent(parent), m_idx(0), m_hadTitle(false), m_hadGroup(false) { } virtual ~XMLToConfGUI() {} virtual void startElement(const string& tagname, const map& attrs) { if (!tagname.compare("var")) { m_curvar = mapfind("name", attrs); m_curvartp = mapfind("type", attrs); m_curvarvals = mapfind("values", attrs); //LOGDEB("Curvar: " << m_curvar); if (m_curvar.empty() || m_curvartp.empty()) { throw std::runtime_error( " with no name attribute or no type ! 
nm [" + m_curvar + "] tp [" + m_curvartp + "]"); } else { m_brief.clear(); m_descr.clear(); } } else if (!tagname.compare("filetitle") || !tagname.compare("grouptitle")) { m_other.clear(); } } virtual void endElement(const string& tagname) { if (!tagname.compare("var")) { if (!m_hadTitle) { m_w = new ConfTabsW(m_parent, "Teh title", m_lnkfact); m_hadTitle = true; } if (!m_hadGroup) { m_idx = m_w->addPanel("Group title"); m_hadGroup = true; } ConfTabsW::ParamType paramtype; if (!m_curvartp.compare("bool")) { paramtype = ConfTabsW::CFPT_BOOL; } else if (!m_curvartp.compare("int")) { paramtype = ConfTabsW::CFPT_INT; } else if (!m_curvartp.compare("string")) { paramtype = ConfTabsW::CFPT_STR; } else if (!m_curvartp.compare("cstr")) { paramtype = ConfTabsW::CFPT_CSTR; } else if (!m_curvartp.compare("cstrl")) { paramtype = ConfTabsW::CFPT_CSTRL; } else if (!m_curvartp.compare("fn")) { paramtype = ConfTabsW::CFPT_FN; } else if (!m_curvartp.compare("dfn")) { paramtype = ConfTabsW::CFPT_FN; } else if (!m_curvartp.compare("strl")) { paramtype = ConfTabsW::CFPT_STRL; } else if (!m_curvartp.compare("dnl")) { paramtype = ConfTabsW::CFPT_DNL; } else { throw std::runtime_error("Bad type " + m_curvartp + " for " + m_curvar); } rtrimstring(m_brief, " ."); switch (paramtype) { case ConfTabsW::CFPT_BOOL: { int def = atoi(m_curvarvals.c_str()); m_w->addParam(m_idx, paramtype, u8s2qs(m_curvar), u8s2qs(m_brief), u8s2qs(m_descr), def); break; } case ConfTabsW::CFPT_INT: { vector vals; stringToTokens(m_curvarvals, vals); int min = 0, max = 0, def = 0; if (vals.size() >= 3) { min = atoi(vals[0].c_str()); max = atoi(vals[1].c_str()); def = atoi(vals[2].c_str()); } QStringList *sldef = 0; sldef = (QStringList*)(((char*)sldef) + def); m_w->addParam(m_idx, paramtype, u8s2qs(m_curvar), u8s2qs(m_brief), u8s2qs(m_descr), min, max, sldef); break; } case ConfTabsW::CFPT_CSTR: case ConfTabsW::CFPT_CSTRL: { vector cstrl; stringToTokens(neutchars(m_curvarvals, "\n\r"), cstrl); QStringList qstrl; for (unsigned int i = 0; i < cstrl.size(); i++) { qstrl.push_back(u8s2qs(cstrl[i])); } m_w->addParam(m_idx, paramtype, u8s2qs(m_curvar), u8s2qs(m_brief), u8s2qs(m_descr), 0, 0, &qstrl); break; } default: m_w->addParam(m_idx, paramtype, u8s2qs(m_curvar), u8s2qs(m_brief), u8s2qs(m_descr)); } } else if (!tagname.compare("filetitle")) { m_w = new ConfTabsW(m_parent, u8s2qs(m_other), m_lnkfact); m_hadTitle = true; m_other.clear(); } else if (!tagname.compare("grouptitle")) { if (!m_hadTitle) { m_w = new ConfTabsW(m_parent, "Teh title", m_lnkfact); m_hadTitle = true; } // Get rid of "parameters" in the title, it's not interesting // and this makes our tab headers smaller. 
string ps{"parameters"}; string::size_type pos = m_other.find(ps); if (pos != string::npos) { m_other = m_other.replace(pos, ps.size(), ""); } m_idx = m_w->addPanel(u8s2qs(m_other)); m_hadGroup = true; m_other.clear(); } else if (!tagname.compare("descr")) { } else if (!tagname.compare("brief")) { m_brief = neutchars(m_brief, "\n\r"); } } virtual void characterData(const string& data) { if (!tagStack().back().compare("brief")) { m_brief += data; } else if (!tagStack().back().compare("descr")) { m_descr += data; } else if (!tagStack().back().compare("filetitle") || !tagStack().back().compare("grouptitle")) { // We don't want \n in there m_other += neutchars(data, "\n\r"); m_other += " "; } else if (!tagStack().back().compare("confcomments")) { string nvarname = looksLikeAssign(data); if (!nvarname.empty() && nvarname.compare(m_curvar)) { cerr << "Var assigned [" << nvarname << "] mismatch " "with current variable [" << m_curvar << "]\n"; } m_toptext += data; } } ConfTabsW *m_w; ConfLinkFact *m_lnkfact; QWidget *m_parent; int m_idx; string m_curvar; string m_curvartp; string m_curvarvals; string m_brief; string m_descr; string m_other; string m_toptext; bool m_hadTitle; bool m_hadGroup; }; XMLToConfGUI parser(xml, lnkf, parent); try { if (!parser.parse()) { cerr << "Parse failed: " << parser.getReason() << endl; return 0; } } catch (const std::runtime_error& e) { cerr << e.what() << endl; return 0; } toptext = parser.m_toptext; return parser.m_w; } #endif /* ENABLE_XMLCONF */ } // Namespace confgui recoll-1.26.3/qtgui/confgui/confguiindex.cpp0000644000175000017500000006047013566450615015777 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using std::vector; using std::set; using std::string; #include "recoll.h" #include "confguiindex.h" #include "smallut.h" #include "log.h" #include "rcldb.h" #include "execmd.h" #include "rclconfig.h" static const int spacing = 3; static const int margin = 3; using namespace confgui; /* Link class for ConfTree. Has a subkey pointer member which makes it easy * to change the current subkey for multiple instances. 
*/ class ConfLinkRclRep : public ConfLinkRep { public: ConfLinkRclRep(ConfNull **conf, const string& nm, string *sk = 0) : m_conf(conf), m_nm(nm), m_sk(sk) /* KEEP THE POINTER, shared data */ {} virtual ~ConfLinkRclRep() {} virtual bool set(const string& val) { if (!m_conf || !*m_conf) return false; LOGDEB("ConfLinkRclRep: set " << m_nm << " -> " << val << " sk " << getSk() << std::endl); bool ret = (*m_conf)->set(m_nm, val, getSk()); if (!ret) LOGERR("Value set failed\n" ); return ret; } virtual bool get(string& val) { if (!m_conf || !*m_conf) return false; bool ret = (*m_conf)->get(m_nm, val, getSk()); LOGDEB("ConfLinkRcl::get: [" << m_nm << "] sk [" << getSk() << "] -> [" << (ret ? val : "no value") << "]\n"); return ret; } private: string getSk() { return m_sk ? *m_sk : string(); } ConfNull **m_conf; const string m_nm; const string *m_sk; }; /* Special link for skippedNames and noContentSuffixes which are computed as set differences */ typedef std::function()> RclConfVecValueGetter; class ConfLinkPlusMinus : public ConfLinkRep { public: ConfLinkPlusMinus(RclConfig *rclconf, ConfNull **conf, const string& basename, RclConfVecValueGetter getter, string *sk = 0) : m_rclconf(rclconf), m_conf(conf), m_basename(basename), m_getter(getter), m_sk(sk) /* KEEP THE POINTER, shared data */ { } virtual ~ConfLinkPlusMinus() {} virtual bool set(const string& snval) { if (!m_conf || !*m_conf || !m_rclconf) return false; string sbase; (*m_conf)->get(m_basename, sbase, getSk()); std::set nval; stringToStrings(snval, nval); string splus, sminus; RclConfig::setPlusMinus(sbase, nval, splus, sminus); LOGDEB1("ConfLinkPlusMinus: base [" << sbase << "] nvalue [" << snval << "] splus [" << splus << "] sminus [" << sminus << "]\n"); if (!(*m_conf)->set(m_basename + "-", sminus, getSk())) { return false; } if (!(*m_conf)->set(m_basename + "+", splus, getSk())) { return false; } return true; } virtual bool get(string& val) { LOGDEB("ConfLinPlusMinus::get [" << m_basename << "]\n"); if (!m_conf || !*m_conf || !m_rclconf) return false; m_rclconf->setKeyDir(getSk()); vector vval = m_getter(); val = stringsToString(vval); LOGDEB1("ConfLinkPlusMinus: " << m_basename << " -> " << val << "\n"); return true; } private: string getSk() { return m_sk ? 
*m_sk : string(); } RclConfig *m_rclconf; ConfNull **m_conf; string m_basename; RclConfVecValueGetter m_getter; const string *m_sk; }; class MyConfLinkFactRCL : public ConfLinkFact { public: MyConfLinkFactRCL() {} MyConfLinkFactRCL(ConfNull **conf, string *sk = 0) : m_conf(conf), m_sk(sk) /* KEEP THE POINTER, shared data */ {} virtual ConfLink operator()(const QString& nm) { ConfLinkRep *lnk = new ConfLinkRclRep(m_conf, qs2utf8s(nm), m_sk); return ConfLink(lnk); } ConfNull **m_conf{nullptr}; string *m_sk{nullptr}; }; string sknull; static MyConfLinkFactRCL conflinkfactory; void ConfIndexW::showPrefs(bool modal) { delete m_conf; if ((m_conf = m_rclconf->cloneMainConfig()) == 0) { return; } m_conf->holdWrites(true); if (nullptr == m_w) { QString title = u8s2qs("Recoll - Index Settings: "); title += QString::fromLocal8Bit(m_rclconf->getConfDir().c_str()); conflinkfactory = MyConfLinkFactRCL(&m_conf, &sknull); if (nullptr == (m_w = new ConfTabsW(this, title, &conflinkfactory))) { return; } connect(m_w, SIGNAL(sig_prefsChanged()), this, SLOT(acceptChanges())); initPanels(); } else { m_w->hide(); } m_w->reloadPanels(); if (modal) { m_w->exec(); m_w->setModal(false); } else { m_w->show(); } } void ConfIndexW::acceptChanges() { LOGDEB("ConfIndexW::acceptChanges()\n" ); if (!m_conf) { LOGERR("ConfIndexW::acceptChanges: no config\n" ); return; } if (!m_conf->holdWrites(false)) { QMessageBox::critical(0, "Recoll", tr("Can't write configuration file")); } // Delete local copy and update the main one from the file delete m_conf; m_conf = 0; m_rclconf->updateMainConfig(); } void ConfIndexW::initPanels() { int idx = m_w->addPanel(tr("Global parameters")); setupTopPanel(idx); idx = m_w->addForeignPanel( new ConfSubPanelW(m_w, &m_conf, m_rclconf), tr("Local parameters")); idx = m_w->addPanel("Web history"); setupWebHistoryPanel(idx); idx = m_w->addPanel(tr("Search parameters")); setupSearchPanel(idx); } bool ConfIndexW::setupTopPanel(int idx) { m_w->addParam(idx, ConfTabsW::CFPT_DNL, "topdirs", tr("Top directories"), tr("The list of directories where recursive " "indexing starts. Default: your home.")); ConfParamW *cparam = m_w->addParam( idx, ConfTabsW::CFPT_DNL, "skippedPaths", tr("Skipped paths"), tr("These are pathnames of directories which indexing " "will not enter.
Path elements may contain wildcards. " "The entries must match the paths seen by the indexer " "(e.g.: if topdirs includes '/home/me' and '/home' is " "actually a link to '/usr/home', a correct skippedPath entry " "would be '/home/me/tmp*', not '/usr/home/me/tmp*')")); cparam->setFsEncoding(true); ((confgui::ConfParamSLW*)cparam)->setEditable(true); if (m_stemlangs.empty()) { vector cstemlangs = Rcl::Db::getStemmerNames(); for (const auto &clang : cstemlangs) { m_stemlangs.push_back(u8s2qs(clang)); } } m_w->addParam(idx, ConfTabsW::CFPT_CSTRL, "indexstemminglanguages", tr("Stemming languages"), tr("The languages for which stemming expansion
" "dictionaries will be built."), 0, 0, &m_stemlangs); m_w->addParam(idx, ConfTabsW::CFPT_FN, "logfilename", tr("Log file name"), tr("The file where the messages will be written.
" "Use 'stderr' for terminal output"), 0); m_w->addParam( idx, ConfTabsW::CFPT_INT, "loglevel", tr("Log verbosity level"), tr("This value adjusts the amount of messages,
from only " "errors to a lot of debugging data."), 0, 6); m_w->addParam(idx, ConfTabsW::CFPT_INT, "idxflushmb", tr("Index flush megabytes interval"), tr("This value adjust the amount of " "data which is indexed between flushes to disk.
" "This helps control the indexer memory usage. " "Default 10MB "), 0, 1000); m_w->addParam(idx, ConfTabsW::CFPT_INT, "maxfsoccuppc", tr("Disk full threshold to stop indexing
" "(e.g. 90%, 0 means no limit)"), tr("This is the percentage of disk usage " "- total disk usage, not index size - at which " "indexing will fail and stop.
" "The default value of 0 removes any limit."), 0, 100); ConfParamW *bparam = m_w->addParam( idx, ConfTabsW::CFPT_BOOL, "noaspell", tr("No aspell usage"), tr("Disables use of aspell to generate spelling " "approximation in the term explorer tool.
" "Useful if aspell is absent or does not work. ")); cparam = m_w->addParam( idx, ConfTabsW::CFPT_STR, "aspellLanguage", tr("Aspell language"), tr("The language for the aspell dictionary. " "This should look like 'en' or 'fr' ...
" "If this value is not set, the NLS environment " "will be used to compute it, which usually works. " "To get an idea of what is installed on your system, " "type 'aspell config' and look for .dat files inside " "the 'data-dir' directory. ")); m_w->enableLink(bparam, cparam, true); m_w->addParam( idx, ConfTabsW::CFPT_FN, "dbdir", tr("Database directory name"), tr("The name for a directory where to store the index
" "A non-absolute path is taken relative to the " "configuration directory. The default is 'xapiandb'."), true); m_w->addParam(idx, ConfTabsW::CFPT_STR, "unac_except_trans", tr("Unac exceptions"), tr("

These are exceptions to the unac mechanism " "which, by default, removes all diacritics, " "and performs canonic decomposition. You can override " "unaccenting for some characters, depending on your " "language, and specify additional decompositions, " "e.g. for ligatures. In each space-separated entry, " "the first character is the source one, and the rest " "is the translation." )); m_w->endOfList(idx); return true; } bool ConfIndexW::setupWebHistoryPanel(int idx) { ConfParamW *bparam = m_w->addParam( idx, ConfTabsW::CFPT_BOOL, "processwebqueue", tr("Process the WEB history queue"), tr("Enables indexing Firefox visited pages.
" "(you need also install the Firefox Recoll plugin)")); ConfParamW *cparam = m_w->addParam( idx, ConfTabsW::CFPT_FN, "webcachedir", tr("Web page store directory name"), tr("The name for a directory where to store the copies " "of visited web pages.
" "A non-absolute path is taken relative to the " "configuration directory."), 1); m_w->enableLink(bparam, cparam); cparam = m_w->addParam( idx, ConfTabsW::CFPT_INT, "webcachemaxmbs", tr("Max. size for the web store (MB)"), tr("Entries will be recycled once the size is reached." "
" "Only increasing the size really makes sense because " "reducing the value will not truncate an existing " "file (only waste space at the end)." ), -1, 1000*1000); // Max 1TB... m_w->enableLink(bparam, cparam); m_w->endOfList(idx); return true; } bool ConfIndexW::setupSearchPanel(int idx) { if (!o_index_stripchars) { m_w->addParam(idx, ConfTabsW::CFPT_BOOL, "autodiacsens", tr("Automatic diacritics sensitivity"), tr("

Automatically trigger diacritics sensitivity " "if the search term has accented characters " "(not in unac_except_trans). Else you need to " "use the query language and the D " "modifier to specify diacritics sensitivity.")); m_w->addParam(idx, ConfTabsW::CFPT_BOOL, "autocasesens", tr("Automatic character case sensitivity"), tr("

Automatically trigger character case " "sensitivity if the entry has upper-case " "characters in any but the first position. " "Else you need to use the query language and " "the C modifier to specify character-case " "sensitivity.")); } m_w->addParam(idx, ConfTabsW::CFPT_INT, "maxTermExpand", tr("Maximum term expansion count"), tr("

Maximum expansion count for a single term " "(e.g.: when using wildcards). The default " "of 10 000 is reasonable and will avoid " "queries that appear frozen while the engine is " "walking the term list."), 0, 100000); m_w->addParam(idx, ConfTabsW::CFPT_INT, "maxXapianClauses", tr("Maximum Xapian clauses count"), tr("

Maximum number of elementary clauses we " "add to a single Xapian query. In some cases, " "the result of term expansion can be " "multiplicative, and we want to avoid using " "excessive memory. The default of 100 000 " "should be both high enough in most cases " "and compatible with current typical hardware " "configurations."), 0, 1000000); m_w->endOfList(idx); return true; } ConfSubPanelW::ConfSubPanelW(QWidget *parent, ConfNull **config, RclConfig *rclconf) : QWidget(parent), m_config(config) { QVBoxLayout *vboxLayout = new QVBoxLayout(this); vboxLayout->setSpacing(spacing); vboxLayout->setMargin(margin); m_subdirs = new ConfParamDNLW( "bogus00", this, ConfLink(new confgui::ConfLinkNullRep()), QObject::tr("Customised subtrees"), QObject::tr("The list of subdirectories in the indexed " "hierarchy
where some parameters need " "to be redefined. Default: empty.")); m_subdirs->getListBox()->setSelectionMode( QAbstractItemView::SingleSelection); connect(m_subdirs->getListBox(), SIGNAL(currentItemChanged(QListWidgetItem *, QListWidgetItem *)), this, SLOT(subDirChanged(QListWidgetItem *, QListWidgetItem *))); connect(m_subdirs, SIGNAL(entryDeleted(QString)), this, SLOT(subDirDeleted(QString))); // We only retrieve the subkeys from the user's config (shallow), // no use to confuse the user by showing the subtrees which are // customized in the system config like .thunderbird or // .purple. This doesn't prevent them to add and customize them // further. vector allkeydirs = (*config)->getSubKeys(true); QStringList qls; for (const auto& dir: allkeydirs) { qls.push_back(u8s2qs(dir)); } m_subdirs->getListBox()->insertItems(0, qls); vboxLayout->addWidget(m_subdirs); QFrame *line2 = new QFrame(this); line2->setFrameShape(QFrame::HLine); line2->setFrameShadow(QFrame::Sunken); vboxLayout->addWidget(line2); QLabel *explain = new QLabel(this); explain->setWordWrap(true); explain->setText( QObject::tr( "The parameters that follow are set either at the " "top level, if nothing " "or an empty line is selected in the listbox above, " "or for the selected subdirectory. " "You can add or remove directories by clicking " "the +/- buttons.")); vboxLayout->addWidget(explain); m_groupbox = new QGroupBox(this); setSzPol(m_groupbox, QSizePolicy::Preferred, QSizePolicy::Preferred, 1, 3); QGridLayout *gl1 = new QGridLayout(m_groupbox); gl1->setSpacing(spacing); gl1->setMargin(margin); int gridy = 0; ConfParamSLW *eskn = new ConfParamSLW( "skippedNames", m_groupbox, ConfLink(new ConfLinkPlusMinus( rclconf, config, "skippedNames", std::bind(&RclConfig::getSkippedNames, rclconf), &m_sk)), QObject::tr("Skipped names"), QObject::tr("These are patterns for file or directory " " names which should not be indexed.")); eskn->setFsEncoding(true); eskn->setImmediate(); m_widgets.push_back(eskn); gl1->addWidget(eskn, gridy, 0); vector amimes = rclconf->getAllMimeTypes(); QStringList amimesq; for (const auto& mime: amimes) { amimesq.push_back(u8s2qs(mime)); } ConfParamCSLW *eincm = new ConfParamCSLW( "indexedmimetypes", m_groupbox, ConfLink(new ConfLinkRclRep(config, "indexedmimetypes", &m_sk)), tr("Only mime types"), tr("An exclusive list of indexed mime types.
Nothing " "else will be indexed. Normally empty and inactive"), amimesq); eincm->setImmediate(); m_widgets.push_back(eincm); gl1->addWidget(eincm, gridy++, 1); ConfParamCSLW *eexcm = new ConfParamCSLW( "excludedmimetypes", m_groupbox, ConfLink(new ConfLinkRclRep(config, "excludedmimetypes", &m_sk)), tr("Exclude mime types"), tr("Mime types not to be indexed"), amimesq); eexcm->setImmediate(); m_widgets.push_back(eexcm); gl1->addWidget(eexcm, gridy, 0); ConfParamSLW *encs = new ConfParamSLW( "noContentSuffixes", m_groupbox, ConfLink(new ConfLinkPlusMinus( rclconf, config, "noContentSuffixes", std::bind(&RclConfig::getStopSuffixes, rclconf), &m_sk)), QObject::tr("Ignored endings"), QObject::tr("These are file name endings for files which will be " "indexed by name only \n(no MIME type identification " "attempt, no decompression, no content indexing).")); encs->setImmediate(); encs->setFsEncoding(true); m_widgets.push_back(encs); gl1->addWidget(encs, gridy++, 1); vector args; args.push_back("-l"); ExecCmd ex; string icout; string cmd = "iconv"; int status = ex.doexec(cmd, args, 0, &icout); if (status) { LOGERR("Can't get list of charsets from 'iconv -l'"); } icout = neutchars(icout, ","); vector ccsets; stringToStrings(icout, ccsets); QStringList charsets; charsets.push_back(""); for (const auto& charset : ccsets) { charsets.push_back(u8s2qs(charset)); } ConfParamCStrW *e21 = new ConfParamCStrW( "defaultcharset", m_groupbox, ConfLink(new ConfLinkRclRep(config, "defaultcharset", &m_sk)), QObject::tr("Default
character set"), QObject::tr("Character set used for reading files " "which do not identify the character set " "internally, for example pure text files.
" "The default value is empty, " "and the value from the NLS environnement is used." ), charsets); e21->setImmediate(); m_widgets.push_back(e21); gl1->addWidget(e21, gridy++, 0); ConfParamBoolW *e3 = new ConfParamBoolW( "followLinks", m_groupbox, ConfLink(new ConfLinkRclRep(config, "followLinks", &m_sk)), QObject::tr("Follow symbolic links"), QObject::tr("Follow symbolic links while " "indexing. The default is no, " "to avoid duplicate indexing")); e3->setImmediate(); m_widgets.push_back(e3); gl1->addWidget(e3, gridy, 0); ConfParamBoolW *eafln = new ConfParamBoolW( "indexallfilenames", m_groupbox, ConfLink(new ConfLinkRclRep(config, "indexallfilenames", &m_sk)), QObject::tr("Index all file names"), QObject::tr("Index the names of files for which the contents " "cannot be identified or processed (no or " "unsupported mime type). Default true")); eafln->setImmediate(); m_widgets.push_back(eafln); gl1->addWidget(eafln, gridy++, 1); ConfParamIntW *ezfmaxkbs = new ConfParamIntW( "compressedfilemaxkbs", m_groupbox, ConfLink(new ConfLinkRclRep(config, "compressedfilemaxkbs", &m_sk)), tr("Max. compressed file size (KB)"), tr("This value sets a threshold beyond which compressed" "files will not be processed. Set to -1 for no " "limit, to 0 for no decompression ever."), -1, 1000000, -1); ezfmaxkbs->setImmediate(); m_widgets.push_back(ezfmaxkbs); gl1->addWidget(ezfmaxkbs, gridy, 0); ConfParamIntW *etxtmaxmbs = new ConfParamIntW( "textfilemaxmbs", m_groupbox, ConfLink(new ConfLinkRclRep(config, "textfilemaxmbs", &m_sk)), tr("Max. text file size (MB)"), tr("This value sets a threshold beyond which text " "files will not be processed. Set to -1 for no " "limit. \nThis is for excluding monster " "log files from the index."), -1, 1000000); etxtmaxmbs->setImmediate(); m_widgets.push_back(etxtmaxmbs); gl1->addWidget(etxtmaxmbs, gridy++, 1); ConfParamIntW *etxtpagekbs = new ConfParamIntW( "textfilepagekbs", m_groupbox, ConfLink(new ConfLinkRclRep(config, "textfilepagekbs", &m_sk)), tr("Text file page size (KB)"), tr("If this value is set (not equal to -1), text " "files will be split in chunks of this size for " "indexing.\nThis will help searching very big text " " files (ie: log files)."), -1, 1000000); etxtpagekbs->setImmediate(); m_widgets.push_back(etxtpagekbs); gl1->addWidget(etxtpagekbs, gridy, 0); ConfParamIntW *efiltmaxsecs = new ConfParamIntW( "filtermaxseconds", m_groupbox, ConfLink(new ConfLinkRclRep(config, "filtermaxseconds", &m_sk)), tr("Max. filter exec. time (s)"), tr("External filters working longer than this will be " "aborted. This is for the rare case (ie: postscript) " "where a document could cause a filter to loop. 
" "Set to -1 for no limit.\n"), -1, 10000); efiltmaxsecs->setImmediate(); m_widgets.push_back(efiltmaxsecs); gl1->addWidget(efiltmaxsecs, gridy++, 1); vboxLayout->addWidget(m_groupbox); subDirChanged(0, 0); LOGDEB("ConfSubPanelW::ConfSubPanelW: done\n"); } void ConfSubPanelW::loadValues() { LOGDEB("ConfSubPanelW::loadValues\n"); for (auto widget : m_widgets) { widget->loadValue(); } LOGDEB("ConfSubPanelW::loadValues done\n"); } void ConfSubPanelW::storeValues() { for (auto widget : m_widgets) { widget->storeValue(); } } void ConfSubPanelW::subDirChanged(QListWidgetItem *current, QListWidgetItem *) { LOGDEB("ConfSubPanelW::subDirChanged\n"); if (current == 0 || current->text() == "") { m_sk = ""; m_groupbox->setTitle(tr("Global")); } else { m_sk = qs2utf8s(current->text()); m_groupbox->setTitle(current->text()); } LOGDEB("ConfSubPanelW::subDirChanged: now [" << m_sk << "]\n"); loadValues(); LOGDEB("ConfSubPanelW::subDirChanged: done\n"); } void ConfSubPanelW::subDirDeleted(QString sbd) { LOGDEB("ConfSubPanelW::subDirDeleted(" << qs2utf8s(sbd) << ")\n"); if (sbd == "") { // Can't do this, have to reinsert it QTimer::singleShot(0, this, SLOT(restoreEmpty())); return; } // Have to delete all entries for submap (*m_config)->eraseKey(qs2utf8s(sbd)); } void ConfSubPanelW::restoreEmpty() { LOGDEB("ConfSubPanelW::restoreEmpty()\n"); m_subdirs->getListBox()->insertItem(0, ""); } recoll-1.26.3/qtgui/confgui/confgui.h0000644000175000017500000003724113566424763014421 00000000000000/* Copyright (C) 2007-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published by * the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _confgui_h_included_ #define _confgui_h_included_ /** * Utilities for a configuration/preferences settings user interface. * * This file declares a number of data input Qt widgets (virtual base: * ConfParamW), with a well defined virtual interface to configuration storage, * which may be QSettings or something else (e.g. conftree). * * Subclasses are defined for entering different kind of data, e.g. a string, * a file name, an integer, etc. * * Each GUI object is linked to the configuration data through * a "link" object which knows the details of interacting with the actual * configuration data, like the parameter name, the actual * configuration interface, etc. * * The link object is set when the input widget is created and cannot be * changed. * * The link object get() methods are called for reading the initial data. * * The set() methods for all the objects are normally called when the * user clicks "Accept", only if the current value() differs from the * value obtained by get() when the object was initialized. This can * be used to avoid cluttering the output with values which are * unmodified from the defaults. 
* * The setImmediate() method can be called on the individial controls * to ensure that set() is inconditionnaly called whenever the user * changes the related value. This can be especially useful if the * configuration state can't be fully represented in the GUI (for * example if the same parameter name can exist in different sections * depending on the value of another parameter). It allows using local * storage for the values, and flushing, for example, when * sig_prefsChanged() is emitted after the user clicks accept. * * The file also defines a multi-tabbed dialog container for the * parameter objects, with simple interface methods to add tabs and add * configuration elements to them. * * Some of the tab widgets can be defined as "foreign", with specific internals. * They just need to implement a loadValues() to be called at * initialisation and a storeValues(), called when the user commits * the changes. */ #include #include #include #include #include #include #include #include class QCheckBox; class QComboBox; class QDialogButtonBox; class QHBoxLayout; class QLineEdit; class QListWidget; class QPushButton; class QSpinBox; class QTabWidget; class QVBoxLayout; namespace confgui { /** Interface between the GUI widget and the config storage mechanism: */ class ConfLinkRep { public: virtual ~ConfLinkRep() {} virtual bool set(const std::string& val) = 0; virtual bool get(std::string& val) = 0; }; typedef std::shared_ptr ConfLink; // May be used to store/manage data which has no direct representation // in the stored configuration. class ConfLinkNullRep : public ConfLinkRep { public: virtual ~ConfLinkNullRep() {} virtual bool set(const std::string&) { return true; } virtual bool get(std::string& val) {val = ""; return true;} }; /** Link maker class. Will be called back by addParam() to create the link */ class ConfLinkFact { public: virtual ~ConfLinkFact() {} virtual ConfLink operator()(const QString& nm) = 0; }; /** Interface for "foreign" panels. The object must also be a QWidget, which * we don't express by inheriting here to avoid qt issues */ class ConfPanelWIF { public: virtual ~ConfPanelWIF() {} virtual void storeValues() = 0; virtual void loadValues() = 0; }; class ConfPanelW; class ConfParamW; /** The top level widget has tabs, each tab/panel has multiple widgets * for setting parameter values */ class ConfTabsW : public QDialog { Q_OBJECT; public: ConfTabsW(QWidget *parent, const QString& title, ConfLinkFact *linkfact); enum ParamType {CFPT_BOOL, CFPT_INT, CFPT_STR, CFPT_CSTR, // Constrained string: from list CFPT_FN, // File/directory CFPT_STRL, CFPT_DNL, CFPT_CSTRL // lists of the same }; /** Add tab and return its identifier / index */ int addPanel(const QString& title); /** Add foreign tab where we only know to call loadvalues/storevalues. * The object has to derive from QWidget */ int addForeignPanel(ConfPanelWIF* w, const QString& title); /** Add parameter setter to specified tab */ ConfParamW *addParam( int tabindex, ParamType tp, const QString& varname, const QString& label, const QString& tooltip, int isdirorminval = 0, /* Dep. on type: directory flag or min value */ int maxval = 0, const QStringList* sl = 0); /** Add explanatory text between 2 horizontal lines */ QWidget *addBlurb(int tabindex, const QString& txt); /** Enable link between bool value and another parameter: the control will * be enabled depending on the boolean value (with possible inversion). 
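 * A typical call sequence (sketch only, with hypothetical variable names;
 * the Recoll index configuration code does the equivalent for its
 * noaspell / aspellLanguage pair):
 *
 *   ConfParamW *b = tabs->addParam(idx, ConfTabsW::CFPT_BOOL, "noaspell",
 *                                  boollabel, booltip);
 *   ConfParamW *s = tabs->addParam(idx, ConfTabsW::CFPT_STR, "aspellLanguage",
 *                                  strlabel, strtip);
 *   tabs->enableLink(b, s, true); // revert=true: entry enabled when box unchecked
 *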
Can * be called multiple times for the same bool to enable/disable * several controls */ bool enableLink(ConfParamW* boolw, ConfParamW* otherw, bool revert = false); /** Call this when you are done filling up a tab */ void endOfList(int tabindex); /** Find param widget associated with given variable name */ ConfParamW *findParamW(const QString& varname); void hideButtons(); public slots: void acceptChanges(); void rejectChanges(); void reloadPanels(); void setCurrentIndex(int); signals: /** This is emitted when acceptChanges() is called, after the * values have been stored */ void sig_prefsChanged(); private: ConfLinkFact *m_makelink{nullptr}; // All ConfPanelW managed panels. Each has a load/store interface // and an internal list of controls std::vector m_panels; // "Foreign" panels. Just implement load/store std::vector m_widgets; QTabWidget *tabWidget{nullptr}; QDialogButtonBox *buttonBox{nullptr}; }; ///////////////////////////////////////////////// // The rest of the class definitions are only useful if you need to // access a specific element for customisation (use findParamW() and a // dynamic cast). /** A panel/tab contains multiple controls for parameters */ class ConfPanelW : public QWidget { Q_OBJECT public: ConfPanelW(QWidget *parent); void addParam(ConfParamW *w); void addWidget(QWidget *w); void storeValues(); void loadValues(); void endOfList(); /** Find param widget associated with given variable name */ ConfParamW *findParamW(const QString& varname); private: QVBoxLayout *m_vboxlayout; std::vector m_params; }; /** Config panel element: manages one configuration * parameter. Subclassed for specific parameter types. */ class ConfParamW : public QWidget { Q_OBJECT public: ConfParamW(const QString& varnm, QWidget *parent, ConfLink cflink) : QWidget(parent), m_varname(varnm), m_cflink(cflink), m_fsencoding(false) { } virtual void loadValue() = 0; // Call setValue() each time the control changes, instead of on accept. virtual void setImmediate() = 0; virtual void setFsEncoding(bool onoff) { m_fsencoding = onoff; } const QString& getVarName() { return m_varname; } public slots: virtual void setEnabled(bool) = 0; virtual void storeValue() = 0; protected slots: void setValue(const QString& newvalue); void setValue(int newvalue); void setValue(bool newvalue); protected: QString m_varname; ConfLink m_cflink; QHBoxLayout *m_hl; // File names are encoded as local8bit in the config files. Other // are encoded as utf-8 bool m_fsencoding; virtual bool createCommon(const QString& lbltxt, const QString& tltptxt); }; //////// Widgets for setting the different types of configuration parameters: /** Boolean */ class ConfParamBoolW : public ConfParamW { Q_OBJECT public: ConfParamBoolW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, bool deflt = false); virtual void loadValue(); virtual void storeValue(); virtual void setImmediate(); public slots: virtual void setEnabled(bool i) { if (m_cb) { ((QWidget*)m_cb)->setEnabled(i); } } public: QCheckBox *m_cb; bool m_dflt; bool m_origvalue; }; // Int class ConfParamIntW : public ConfParamW { Q_OBJECT public: // The default value is only used if none exists in the sample // configuration file. Defaults are normally set in there. 
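    // Creation is normally done through the container; e.g. the following
    // sketch (using the loglevel bounds which appear in the Recoll index
    // configuration GUI) yields a ConfParamIntW with minvalue=0, maxvalue=6:
    //   tabs->addParam(tabidx, ConfTabsW::CFPT_INT, "loglevel",
    //                  label, tooltip, 0, 6);
    // 'tabs' and 'tabidx' are hypothetical names for a ConfTabsW instance
    // and a panel index returned by addPanel().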
ConfParamIntW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, int minvalue = INT_MIN, int maxvalue = INT_MAX, int defaultvalue = 0); virtual void loadValue(); virtual void storeValue(); virtual void setImmediate(); public slots: virtual void setEnabled(bool i) { if (m_sb) { ((QWidget*)m_sb)->setEnabled(i); } } protected: QSpinBox *m_sb; int m_defaultvalue; int m_origvalue; }; // Arbitrary string class ConfParamStrW : public ConfParamW { Q_OBJECT public: ConfParamStrW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt); virtual void loadValue(); virtual void storeValue(); virtual void setImmediate(); public slots: virtual void setEnabled(bool i) { if (m_le) { ((QWidget*)m_le)->setEnabled(i); } } protected: QLineEdit *m_le; QString m_origvalue; }; // Constrained string: choose from list class ConfParamCStrW : public ConfParamW { Q_OBJECT public: ConfParamCStrW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, const QStringList& sl); virtual void loadValue(); virtual void storeValue(); virtual void setList(const QStringList& sl); virtual void setImmediate(); public slots: virtual void setEnabled(bool i) { if (m_cmb) { ((QWidget*)m_cmb)->setEnabled(i); } } protected: QComboBox *m_cmb; QString m_origvalue; }; // File name class ConfParamFNW : public ConfParamW { Q_OBJECT public: ConfParamFNW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, bool isdir = false); virtual void loadValue(); virtual void storeValue(); virtual void setImmediate(); protected slots: void showBrowserDialog(); public slots: virtual void setEnabled(bool i) { if (m_le) { ((QWidget*)m_le)->setEnabled(i); } if (m_pb) { ((QWidget*)m_pb)->setEnabled(i); } } protected: QLineEdit *m_le; QPushButton *m_pb; bool m_isdir; QString m_origvalue; }; // String list class ConfParamSLW : public ConfParamW { Q_OBJECT public: ConfParamSLW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt); virtual void loadValue(); virtual void storeValue(); QListWidget *getListBox() { return m_lb; } virtual void setEditable(bool onoff); virtual void setImmediate() { m_immediate = true; } public slots: virtual void setEnabled(bool i) { if (m_lb) { ((QWidget*)m_lb)->setEnabled(i); } } protected slots: virtual void showInputDialog(); void deleteSelected(); void editSelected(); void performInsert(const QString&); signals: void entryDeleted(QString); void currentTextChanged(const QString&); protected: QListWidget *m_lb; std::string listToString(); std::string m_origvalue; QPushButton *m_pbE; bool m_immediate{false}; }; // Dir name list class ConfParamDNLW : public ConfParamSLW { Q_OBJECT public: ConfParamDNLW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt) : ConfParamSLW(varnm, parent, cflink, lbltxt, tltptxt) { m_fsencoding = true; } protected slots: virtual void showInputDialog(); }; // Constrained string list (chose from predefined) class ConfParamCSLW : public ConfParamSLW { Q_OBJECT public: ConfParamCSLW(const QString& varnm, QWidget *parent, ConfLink cflink, const QString& lbltxt, const QString& tltptxt, const QStringList& sl) : ConfParamSLW(varnm, parent, cflink, lbltxt, tltptxt), m_sl(sl) { } protected slots: virtual void showInputDialog(); protected: const QStringList m_sl; }; extern void setSzPol(QWidget *w, QSizePolicy::Policy hpol, 
QSizePolicy::Policy vpol, int hstretch, int vstretch); #ifdef ENABLE_XMLCONF /** * Interpret an XML string and create a configuration interface. XML sample: * * * Configuration file parameters for upmpdcli * MPD parameters * * Host MPD runs on. * Defaults to localhost. This can also be specified as -h * * mpdhost = default-host * * IP port used by MPD. * Can also be specified as -p port. Defaults to the... * * mpdport = defport * * Set if we own the MPD queue. * If this is set (on by default), we own the MPD... * * ownqueue = * * * creates a panel in which the following are set. * The attributes should be self-explanatory. "values" * is used for different things depending on the var type * (min/max, default, str list). Check the code about this. * type values: "bool" "int" "string" "cstr" "cstrl" "fn" "dfn" "strl" "dnl" * * The XML would typically exist as comments inside a reference configuration * file (ConfSimple can extract such comments). * * This means that the reference configuration file can generate both * the documentation and the GUI interface. * * @param xml the input xml * @param[output] toptxt the top level XML text (text not inside , * normally commented variable assignments). This will be evaluated * as a config for default values. * @lnkf factory to create the objects which link the GUI to the * storage mechanism. */ extern ConfTabsW *xmlToConfGUI(const std::string& xml, std::string& toptxt, ConfLinkFact* lnkf, QWidget *parent); #endif } #endif /* _confgui_h_included_ */ recoll-1.26.3/qtgui/confgui/confguiindex.h0000644000175000017500000000473713566424763015455 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _confguiindex_h_included_ #define _confguiindex_h_included_ /** * Classes to handle the gui for the indexing configuration. These group * confgui elements, linked to configuration parameters, into panels. 
*/ #include #include #include #include #include #include #include #include #include #include #include "confgui.h" class ConfNull; class RclConfig; class ConfIndexW : public QWidget { Q_OBJECT public: ConfIndexW(QWidget *parent, RclConfig *config) : m_parent(parent), m_rclconf(config) {} public slots: void showPrefs(bool modal); void acceptChanges(); QWidget *getDialog() {return m_w;} private: void initPanels(); bool setupTopPanel(int idx); bool setupWebHistoryPanel(int idx); bool setupSearchPanel(int idx); QWidget *m_parent; RclConfig *m_rclconf; ConfNull *m_conf{nullptr}; confgui::ConfTabsW *m_w{nullptr}; QStringList m_stemlangs; }; /** A special panel for parameters which may change in subdirectories: */ class ConfSubPanelW : public QWidget, public confgui::ConfPanelWIF { Q_OBJECT; public: ConfSubPanelW(QWidget *parent, ConfNull **config, RclConfig *rclconf); virtual void storeValues(); virtual void loadValues(); private slots: void subDirChanged(QListWidgetItem *, QListWidgetItem *); void subDirDeleted(QString); void restoreEmpty(); private: std::string m_sk; ConfNull **m_config; confgui::ConfParamDNLW *m_subdirs; std::vector m_widgets; QGroupBox *m_groupbox; }; #endif /* _confguiindex_h_included_ */ recoll-1.26.3/qtgui/viewaction.ui0000644000175000017500000001356713303776057013673 00000000000000 ViewActionBase 0 0 635 726 Native Viewers Select one or several mime types then use the controls in the bottom frame to change how they are processed. false Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed QFrame::StyledPanel QFrame::Sunken QAbstractItemView::NoEditTriggers QAbstractItemView::ExtendedSelection QAbstractItemView::SelectRows true true 2 true true 150 true true false Recoll action: 1 0 QFrame::Box QFrame::Raised current value Qt::PlainText Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse Select same QFrame::Box QFrame::Plain <b>New Values:</b> Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Qt::Horizontal 40 20 Close recoll-1.26.3/qtgui/advsearch_w.cpp0000644000175000017500000004233713533651561014150 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include "advsearch_w.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include using namespace std; #include "recoll.h" #include "rclconfig.h" #include "log.h" #include "searchdata.h" #include "guiutils.h" #include "rclhelp.h" static const int initclausetypes[] = {1, 3, 0, 2, 5}; static const unsigned int iclausescnt = sizeof(initclausetypes) / sizeof(int); static map cat_translations; static map cat_rtranslations; void AdvSearch::init() { (void)new HelpClient(this); HelpClient::installMap((const char *)objectName().toUtf8(), "RCL.SEARCH.GUI.COMPLEX"); // signals and slots connections connect(delFiltypPB, SIGNAL(clicked()), this, SLOT(delFiltypPB_clicked())); connect(searchPB, SIGNAL(clicked()), this, SLOT(runSearch())); connect(filterDatesCB, SIGNAL(toggled(bool)), this, SLOT(filterDatesCB_toggled(bool))); connect(filterSizesCB, SIGNAL(toggled(bool)), this, SLOT(filterSizesCB_toggled(bool))); connect(restrictFtCB, SIGNAL(toggled(bool)), this, SLOT(restrictFtCB_toggled(bool))); connect(restrictCtCB, SIGNAL(toggled(bool)), this, SLOT(restrictCtCB_toggled(bool))); connect(dismissPB, SIGNAL(clicked()), this, SLOT(close())); connect(browsePB, SIGNAL(clicked()), this, SLOT(browsePB_clicked())); connect(addFiltypPB, SIGNAL(clicked()), this, SLOT(addFiltypPB_clicked())); connect(delAFiltypPB, SIGNAL(clicked()), this, SLOT(delAFiltypPB_clicked())); connect(addAFiltypPB, SIGNAL(clicked()), this, SLOT(addAFiltypPB_clicked())); connect(saveFileTypesPB, SIGNAL(clicked()), this, SLOT(saveFileTypes())); connect(addClausePB, SIGNAL(clicked()), this, SLOT(addClause())); connect(delClausePB, SIGNAL(clicked()), this, SLOT(delClause())); new QShortcut(QKeySequence(Qt::Key_Up), this, SLOT(slotHistoryNext()));; new QShortcut(QKeySequence(Qt::Key_Down), this, SLOT(slotHistoryPrev())); conjunctCMB->insertItem(1, tr("All clauses")); conjunctCMB->insertItem(2, tr("Any clause")); // Create preconfigured clauses for (unsigned int i = 0; i < iclausescnt; i++) { addClause(initclausetypes[i], false); } // Tune initial state according to last saved { vector::iterator cit = m_clauseWins.begin(); unsigned int existing = m_clauseWins.size(); for (unsigned int i = 0; i < prefs.advSearchClauses.size(); i++) { if (i < existing) { (*cit)->tpChange(prefs.advSearchClauses[i]); cit++; } else { addClause(prefs.advSearchClauses[i], false); } } } (*m_clauseWins.begin())->wordsLE->setFocus(); // Initialize min/max mtime from extrem values in the index int minyear, maxyear; if (rcldb) { rcldb->maxYearSpan(&minyear, &maxyear); minDateDTE->setDisplayFormat("yyyy-MM-dd"); maxDateDTE->setDisplayFormat("yyyy-MM-dd"); minDateDTE->setDate(QDate(minyear, 1, 1)); maxDateDTE->setDate(QDate(maxyear, 12, 31)); } // Initialize lists of accepted and ignored mime types from config // and settings m_ignTypes = prefs.asearchIgnFilTyps; m_ignByCats = prefs.fileTypesByCats; restrictCtCB->setEnabled(false); restrictCtCB->setChecked(m_ignByCats); fillFileTypes(); subtreeCMB->insertItems(0, prefs.asearchSubdirHist); subtreeCMB->setEditText(""); // The clauseline frame is needed to force designer to accept a // vbox to englobe the base clauses grid and 'something else' (the // vbox is so that we can then insert SearchClauseWs), but we // don't want to see it. 
clauseline->close(); bool calpop = 0; minDateDTE->setCalendarPopup(calpop); maxDateDTE->setCalendarPopup(calpop); // Translations for known categories cat_translations[QString::fromUtf8("texts")] = tr("text"); cat_rtranslations[tr("texts")] = QString::fromUtf8("text"); cat_translations[QString::fromUtf8("spreadsheet")] = tr("spreadsheet"); cat_rtranslations[tr("spreadsheets")] = QString::fromUtf8("spreadsheet"); cat_translations[QString::fromUtf8("presentation")] = tr("presentation"); cat_rtranslations[tr("presentation")] =QString::fromUtf8("presentation"); cat_translations[QString::fromUtf8("media")] = tr("media"); cat_rtranslations[tr("media")] = QString::fromUtf8("media"); cat_translations[QString::fromUtf8("message")] = tr("message"); cat_rtranslations[tr("message")] = QString::fromUtf8("message"); cat_translations[QString::fromUtf8("other")] = tr("other"); cat_rtranslations[tr("other")] = QString::fromUtf8("other"); } void AdvSearch::saveCnf() { // Save my state prefs.advSearchClauses.clear(); for (const auto& clause : m_clauseWins) { prefs.advSearchClauses.push_back(clause->sTpCMB->currentIndex()); } } void AdvSearch::addClause(bool updsaved) { addClause(0, updsaved); } void AdvSearch::addClause(int tp, bool updsaved) { SearchClauseW *w = new SearchClauseW(clauseFRM); m_clauseWins.push_back(w); ((QVBoxLayout *)(clauseFRM->layout()))->addWidget(w); w->show(); w->tpChange(tp); if (m_clauseWins.size() > iclausescnt) { delClausePB->setEnabled(true); } else { delClausePB->setEnabled(false); } if (updsaved) { saveCnf(); } } void AdvSearch::delClause(bool updsaved) { if (m_clauseWins.size() <= iclausescnt) return; delete m_clauseWins.back(); m_clauseWins.pop_back(); if (m_clauseWins.size() > iclausescnt) { delClausePB->setEnabled(true); } else { delClausePB->setEnabled(false); } if (updsaved) { saveCnf(); } } void AdvSearch::delAFiltypPB_clicked() { yesFiltypsLB->selectAll(); delFiltypPB_clicked(); } // Move selected file types from the searched to the ignored box void AdvSearch::delFiltypPB_clicked() { QList items = yesFiltypsLB->selectedItems(); for (QList::iterator it = items.begin(); it != items.end(); it++) { int row = yesFiltypsLB->row(*it); QListWidgetItem *item = yesFiltypsLB->takeItem(row); noFiltypsLB->insertItem(0, item); } guiListsToIgnTypes(); } // Move selected file types from the ignored to the searched box void AdvSearch::addFiltypPB_clicked() { QList items = noFiltypsLB->selectedItems(); for (QList::iterator it = items.begin(); it != items.end(); it++) { int row = noFiltypsLB->row(*it); QListWidgetItem *item = noFiltypsLB->takeItem(row); yesFiltypsLB->insertItem(0, item); } guiListsToIgnTypes(); } // Compute list of ignored mime type from widget lists void AdvSearch::guiListsToIgnTypes() { yesFiltypsLB->sortItems(); noFiltypsLB->sortItems(); m_ignTypes.clear(); for (int i = 0; i < noFiltypsLB->count();i++) { QListWidgetItem *item = noFiltypsLB->item(i); m_ignTypes.append(item->text()); } } void AdvSearch::addAFiltypPB_clicked() { noFiltypsLB->selectAll(); addFiltypPB_clicked(); } // Activate file type selection void AdvSearch::restrictFtCB_toggled(bool on) { restrictCtCB->setEnabled(on); yesFiltypsLB->setEnabled(on); delFiltypPB->setEnabled(on); addFiltypPB->setEnabled(on); delAFiltypPB->setEnabled(on); addAFiltypPB->setEnabled(on); noFiltypsLB->setEnabled(on); saveFileTypesPB->setEnabled(on); } // Activate file type selection void AdvSearch::filterSizesCB_toggled(bool on) { minSizeLE->setEnabled(on); maxSizeLE->setEnabled(on); } // Activate file type selection void 
AdvSearch::filterDatesCB_toggled(bool on) { minDateDTE->setEnabled(on); maxDateDTE->setEnabled(on); } void AdvSearch::restrictCtCB_toggled(bool on) { m_ignByCats = on; // Only reset the list if we're enabled. Else this is init from prefs if (restrictCtCB->isEnabled()) m_ignTypes.clear(); fillFileTypes(); } void AdvSearch::fillFileTypes() { noFiltypsLB->clear(); yesFiltypsLB->clear(); noFiltypsLB->insertItems(0, m_ignTypes); QStringList ql; if (m_ignByCats == false) { vector types = theconfig->getAllMimeTypes(); rcldb->getAllDbMimeTypes(types); sort(types.begin(), types.end()); types.erase(unique(types.begin(), types.end()), types.end()); for (vector::iterator it = types.begin(); it != types.end(); it++) { QString qs = QString::fromUtf8(it->c_str()); if (m_ignTypes.indexOf(qs) < 0) ql.append(qs); } } else { vector cats; theconfig->getMimeCategories(cats); for (vector::const_iterator it = cats.begin(); it != cats.end(); it++) { map::const_iterator it1; QString cat; if ((it1 = cat_translations.find(QString::fromUtf8(it->c_str()))) != cat_translations.end()) { cat = it1->second; } else { cat = QString::fromUtf8(it->c_str()); } if (m_ignTypes.indexOf(cat) < 0) ql.append(cat); } } yesFiltypsLB->insertItems(0, ql); } // Save current set of ignored file types to prefs void AdvSearch::saveFileTypes() { prefs.asearchIgnFilTyps = m_ignTypes; prefs.fileTypesByCats = m_ignByCats; rwSettings(true); } void AdvSearch::browsePB_clicked() { QString dir = myGetFileName(true); #ifdef _WIN32 string s = qs2utf8s(dir); for (string::size_type i = 0; i < s.size(); i++) { if (s[i] == '\\') { s[i] = '/'; } } if (s.size() >= 2 && isalpha(s[0]) && s[1] == ':') { s.erase(1,1); s = string("/") + s; } dir = u8s2qs(s); #endif subtreeCMB->setEditText(dir); } size_t AdvSearch::stringToSize(QString qsize) { size_t size = size_t(-1); qsize.replace(QRegExp("[\\s]+"), ""); if (!qsize.isEmpty()) { string csize(qs2utf8s(qsize)); char *cp; size = strtoll(csize.c_str(), &cp, 10); if (*cp != 0) { switch (*cp) { case 'k': case 'K': size *= 1E3;break; case 'm': case 'M': size *= 1E6;break; case 'g': case 'G': size *= 1E9;break; case 't': case 'T': size *= 1E12;break; default: QMessageBox::warning(0, "Recoll", tr("Bad multiplier suffix in size filter")); size = size_t(-1); } } } return size; } using namespace Rcl; void AdvSearch::runSearch() { string stemLang = prefs.stemlang(); std::shared_ptr sdata(new SearchData(conjunctCMB->currentIndex() == 0 ? 
SCLT_AND : SCLT_OR, stemLang)); bool hasclause = false; for (vector::iterator it = m_clauseWins.begin(); it != m_clauseWins.end(); it++) { SearchDataClause *cl; if ((cl = (*it)->getClause())) { sdata->addClause(cl); hasclause = true; } } if (!hasclause) return; if (restrictFtCB->isChecked() && noFiltypsLB->count() > 0) { for (int i = 0; i < yesFiltypsLB->count(); i++) { if (restrictCtCB->isChecked()) { QString qcat = yesFiltypsLB->item(i)->text(); map::const_iterator qit; string cat; if ((qit = cat_rtranslations.find(qcat)) != cat_rtranslations.end()) { cat = qs2utf8s(qit->second); } else { cat = qs2utf8s(qcat); } vector types; theconfig->getMimeCatTypes(cat, types); for (vector::const_iterator it = types.begin(); it != types.end(); it++) { sdata->addFiletype(*it); } } else { sdata->addFiletype(qs2utf8s(yesFiltypsLB->item(i)->text())); } } } if (filterDatesCB->isChecked()) { QDate mindate = minDateDTE->date(); QDate maxdate = maxDateDTE->date(); DateInterval di; di.y1 = mindate.year(); di.m1 = mindate.month(); di.d1 = mindate.day(); di.y2 = maxdate.year(); di.m2 = maxdate.month(); di.d2 = maxdate.day(); sdata->setDateSpan(&di); } if (filterSizesCB->isChecked()) { size_t size = stringToSize(minSizeLE->text()); sdata->setMinSize(size); size = stringToSize(maxSizeLE->text()); sdata->setMaxSize(size); } if (!subtreeCMB->currentText().isEmpty()) { QString current = subtreeCMB->currentText(); Rcl::SearchDataClausePath *pathclause = new Rcl::SearchDataClausePath((const char*)current.toLocal8Bit(), direxclCB->isChecked()); if (sdata->getTp() == SCLT_AND) { sdata->addClause(pathclause); } else { std::shared_ptr nsdata(new SearchData(SCLT_AND, stemLang)); nsdata->addClause(new Rcl::SearchDataClauseSub(sdata)); nsdata->addClause(pathclause); sdata = nsdata; } // Keep history clean and sorted. Maybe there would be a // simpler way to do this list entries; for (int i = 0; i < subtreeCMB->count(); i++) { entries.push_back(subtreeCMB->itemText(i)); } entries.push_back(subtreeCMB->currentText()); entries.sort(); entries.unique(); LOGDEB("Subtree list now has " << (entries.size()) << " entries\n" ); subtreeCMB->clear(); for (list::iterator it = entries.begin(); it != entries.end(); it++) { subtreeCMB->addItem(*it); } subtreeCMB->setCurrentIndex(subtreeCMB->findText(current)); prefs.asearchSubdirHist.clear(); for (int index = 0; index < subtreeCMB->count(); index++) prefs.asearchSubdirHist.push_back(subtreeCMB->itemText(index)); } saveCnf(); g_advshistory && g_advshistory->push(sdata); emit setDescription(""); emit startSearch(sdata, false); } // Set up fields from existing search data, which must be compatible // with what we can do... void AdvSearch::fromSearch(std::shared_ptr sdata) { if (sdata->m_tp == SCLT_OR) conjunctCMB->setCurrentIndex(1); else conjunctCMB->setCurrentIndex(0); while (sdata->m_query.size() > m_clauseWins.size()) { addClause(); } subtreeCMB->setEditText(""); direxclCB->setChecked(0); for (unsigned int i = 0; i < sdata->m_query.size(); i++) { // Set fields from clause if (sdata->m_query[i]->getTp() == SCLT_SUB) { LOGERR("AdvSearch::fromSearch: SUB clause found !\n" ); continue; } if (sdata->m_query[i]->getTp() == SCLT_PATH) { SearchDataClausePath *cs = dynamic_cast(sdata->m_query[i]); // We can only use one such clause. There should be only one too // if this is sfrom aved search data. 
QString qdir = QString::fromLocal8Bit(cs->gettext().c_str()); subtreeCMB->setEditText(qdir); direxclCB->setChecked(cs->getexclude()); continue; } SearchDataClauseSimple *cs = dynamic_cast(sdata->m_query[i]); m_clauseWins[i]->setFromClause(cs); } for (unsigned int i = sdata->m_query.size(); i < m_clauseWins.size(); i++) { m_clauseWins[i]->clear(); } restrictCtCB->setChecked(0); if (!sdata->m_filetypes.empty()) { restrictFtCB_toggled(1); delAFiltypPB_clicked(); for (unsigned int i = 0; i < sdata->m_filetypes.size(); i++) { QString ft = QString::fromUtf8(sdata->m_filetypes[i].c_str()); QList lst = noFiltypsLB->findItems(ft, Qt::MatchExactly); if (!lst.isEmpty()) { int row = noFiltypsLB->row(lst[0]); QListWidgetItem *item = noFiltypsLB->takeItem(row); yesFiltypsLB->insertItem(0, item); } } yesFiltypsLB->sortItems(); } else { addAFiltypPB_clicked(); restrictFtCB_toggled(0); } if (sdata->m_haveDates) { filterDatesCB->setChecked(1); DateInterval &di(sdata->m_dates); QDate mindate(di.y1, di.m1, di.d1); QDate maxdate(di.y2, di.m2, di.d2); minDateDTE->setDate(mindate); maxDateDTE->setDate(maxdate); } else { filterDatesCB->setChecked(0); QDate date; minDateDTE->setDate(date); maxDateDTE->setDate(date); } if (sdata->m_maxSize != (size_t)-1 || sdata->m_minSize != (size_t)-1) { filterSizesCB->setChecked(1); QString sz; if (sdata->m_minSize != (size_t)-1) { sz.setNum(sdata->m_minSize); minSizeLE->setText(sz); } else { minSizeLE->setText(""); } if (sdata->m_maxSize != (size_t)-1) { sz.setNum(sdata->m_maxSize); maxSizeLE->setText(sz); } else { maxSizeLE->setText(""); } } else { filterSizesCB->setChecked(0); minSizeLE->setText(""); maxSizeLE->setText(""); } } void AdvSearch::slotHistoryNext() { if (g_advshistory == 0) return; std::shared_ptr sd = g_advshistory->getnewer(); if (!sd) return; fromSearch(sd); } void AdvSearch::slotHistoryPrev() { if (g_advshistory == 0) return; std::shared_ptr sd = g_advshistory->getolder(); if (!sd) return; fromSearch(sd); } recoll-1.26.3/qtgui/rclmain_w.cpp0000644000175000017500000010745613566450615013644 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "recoll.h" #include "log.h" #include "mimehandler.h" #include "pathut.h" #include "smallut.h" #include "advsearch_w.h" #include "sortseq.h" #include "uiprefs_w.h" #include "guiutils.h" #include "reslist.h" #include "ssearch_w.h" #include "internfile.h" #include "docseqdb.h" #include "docseqhist.h" #include "docseqdocs.h" #include "restable.h" #include "firstidx.h" #include "indexer.h" #include "rclzg.h" #include "snippets_w.h" #include "fragbuts.h" #include "systray.h" #include "rclmain_w.h" #include "rclhelp.h" #include "moc_rclmain_w.cpp" using std::pair; QString g_stringAllStem, g_stringNoStem; static Qt::ToolBarArea int2area(int in) { switch (in) { case Qt::LeftToolBarArea: return Qt::LeftToolBarArea; case Qt::RightToolBarArea: return Qt::RightToolBarArea; case Qt::BottomToolBarArea: return Qt::BottomToolBarArea; case Qt::TopToolBarArea: default: return Qt::TopToolBarArea; } } static QString configToTitle() { string confdir = path_getsimple(theconfig->getConfDir()); // Lower-case version. This only works with the ascii part, but // that's ok even if there are non-ascii chars in there, because // we further operate only on ascii substrings. string lconfdir = stringtolower((const string&)confdir); if (!lconfdir.empty() && lconfdir[0] == '.') { lconfdir = lconfdir.substr(1); confdir = confdir.substr(1); } string::size_type pos = lconfdir.find("recoll"); if (pos != string::npos) { lconfdir = lconfdir.substr(0, pos) + lconfdir.substr(pos+6); confdir = confdir.substr(0, pos) + confdir.substr(pos+6); } if (!confdir.empty()) { switch (confdir[0]) { case '.': case '-': case '_': confdir = confdir.substr(1); break; default: break; } } if (confdir.empty()) { confdir = "Recoll"; } else { confdir = string("Recoll - ") + confdir; } return QString::fromUtf8(confdir.c_str()); } void RclMain::init() { // This is just to get the common catg strings into the message file static const char* catg_strings[] = { QT_TR_NOOP("All"), QT_TR_NOOP("media"), QT_TR_NOOP("message"), QT_TR_NOOP("other"), QT_TR_NOOP("presentation"), QT_TR_NOOP("spreadsheet"), QT_TR_NOOP("text"), QT_TR_NOOP("sorted"), QT_TR_NOOP("filtered") }; setWindowTitle(configToTitle()); DocSequence::set_translations((const char *)tr("sorted").toUtf8(), (const char *)tr("filtered").toUtf8()); periodictimer = new QTimer(this); // idxstatus file. Make sure it exists before trying to watch it // (case where we're started on an older index, or if the status // file was deleted since indexing) QString idxfn = QString::fromLocal8Bit(theconfig->getIdxStatusFile().c_str()); QFile qf(idxfn); qf.open(QIODevice::ReadWrite); qf.setPermissions(QFile::ReadOwner|QFile::WriteOwner); qf.close(); m_watcher.addPath(idxfn); // At least some versions of qt4 don't display the status bar if // it's not created here. 
(void)statusBar(); (void)new HelpClient(this); HelpClient::installMap((const char *)this->objectName().toUtf8(), "RCL.SEARCH.GUI.SIMPLE"); // Set the focus to the search terms entry: sSearch->takeFocus(); enbSynAction->setDisabled(prefs.synFile.isEmpty()); enbSynAction->setChecked(prefs.synFileEnable); // Stemming language menu g_stringNoStem = tr("(no stemming)"); g_stringAllStem = tr("(all languages)"); m_idNoStem = preferencesMenu->addAction(g_stringNoStem); m_idNoStem->setCheckable(true); m_stemLangToId[g_stringNoStem] = m_idNoStem; m_idAllStem = preferencesMenu->addAction(g_stringAllStem); m_idAllStem->setCheckable(true); m_stemLangToId[g_stringAllStem] = m_idAllStem; // Can't get the stemming languages from the db at this stage as // db not open yet (the case where it does not even exist makes // things complicated). So get the languages from the config // instead vector langs; if (!getStemLangs(langs)) { QMessageBox::warning(0, "Recoll", tr("error retrieving stemming languages")); } QAction *curid = prefs.queryStemLang == "ALL" ? m_idAllStem : m_idNoStem; QAction *id; for (vector::const_iterator it = langs.begin(); it != langs.end(); it++) { QString qlang = QString::fromUtf8(it->c_str(), it->length()); id = preferencesMenu->addAction(qlang); id->setCheckable(true); m_stemLangToId[qlang] = id; if (prefs.queryStemLang == qlang) { curid = id; } } curid->setChecked(true); m_toolsTB = new QToolBar(tr("Tools"), this); m_toolsTB->setObjectName(QString::fromUtf8("m_toolsTB")); m_toolsTB->addAction(toolsAdvanced_SearchAction); m_toolsTB->addAction(toolsDoc_HistoryAction); m_toolsTB->addAction(toolsSpellAction); m_toolsTB->addAction(actionQuery_Fragments); this->addToolBar(int2area(prefs.toolArea), m_toolsTB); m_resTB = new QToolBar(tr("Results"), this); m_resTB->setObjectName(QString::fromUtf8("m_resTB")); this->addToolBar(int2area(prefs.resArea), m_resTB); // Document filter buttons and combobox // Combobox version of the document filter control m_filtCMB = new QComboBox(m_resTB); m_filtCMB->setEditable(false); m_filtCMB->addItem(tr("All")); m_filtCMB->setToolTip(tr("Document filter")); // Buttons version of the document filter control m_filtFRM = new QFrame(this); m_filtFRM->setObjectName(QString::fromUtf8("m_filtFRM")); QSizePolicy sizePolicy2(QSizePolicy::Preferred, QSizePolicy::Maximum); sizePolicy2.setHorizontalStretch(0); sizePolicy2.setVerticalStretch(0); sizePolicy2.setHeightForWidth(m_filtFRM->sizePolicy().hasHeightForWidth()); m_filtFRM->setSizePolicy(sizePolicy2); QHBoxLayout *bgrphbox = new QHBoxLayout(m_filtFRM); m_filtBGRP = new QButtonGroup(m_filtFRM); QRadioButton *allRDB = new QRadioButton(m_filtFRM); verticalLayout->insertWidget(1, m_filtFRM); allRDB->setObjectName(QString::fromUtf8("allRDB")); allRDB->setGeometry(QRect(0, 0, 45, 20)); allRDB->setText(tr("All")); bgrphbox->addWidget(allRDB); int bgrpid = 0; m_filtBGRP->addButton(allRDB, bgrpid++); allRDB->setChecked(true); // Menu version of the document filter control m_filtMN = new QMenu(MenuBar); m_filtMN->setObjectName(QString::fromUtf8("m_filtMN")); MenuBar->insertMenu(helpMenu->menuAction(), m_filtMN); m_filtMN->setTitle("F&ilter"); QActionGroup *fltag = new QActionGroup(this); fltag->setExclusive(true); QAction *act = fltag->addAction(tr("All")); m_filtMN->addAction(act); act->setCheckable(true); act->setData((int)0); // Go through the filter list and setup buttons and combobox vector cats; theconfig->getGuiFilterNames(cats); m_catgbutvec.push_back(catg_strings[0]); for (vector::const_iterator it = cats.begin(); it 
!= cats.end(); it++) { QRadioButton *but = new QRadioButton(m_filtFRM); QString catgnm = QString::fromUtf8(it->c_str(), it->length()); m_catgbutvec.push_back(*it); // We strip text before the first colon before setting the button name. // This is so that the user can decide the order of buttons by naming // the filter,ie, a:media b:messages etc. QString but_txt = catgnm; int colon = catgnm.indexOf(':'); if (colon != -1) { but_txt = catgnm.right(catgnm.size()-(colon+1)); } but->setText(tr(but_txt.toUtf8())); m_filtCMB->addItem(tr(but_txt.toUtf8())); bgrphbox->addWidget(but); m_filtBGRP->addButton(but, bgrpid++); QAction *act = fltag->addAction(tr(but_txt.toUtf8())); m_filtMN->addAction(act); act->setCheckable(true); act->setData((int)(m_catgbutvec.size()-1)); m_filtMN->connect(m_filtMN, SIGNAL(triggered(QAction *)), this, SLOT(catgFilter(QAction *))); } m_filtFRM->setLayout(bgrphbox); connect(m_filtBGRP, SIGNAL(buttonClicked(int)),this, SLOT(catgFilter(int))); connect(m_filtCMB, SIGNAL(activated(int)), this, SLOT(catgFilter(int))); restable = new ResTable(this); verticalLayout->insertWidget(2, restable); actionShowResultsAsTable->setChecked(prefs.showResultsAsTable); on_actionShowResultsAsTable_toggled(prefs.showResultsAsTable); // A shortcut to get the focus back to the search entry. QKeySequence seq("Ctrl+Shift+s"); QShortcut *sc = new QShortcut(seq, this); connect(sc, SIGNAL (activated()), sSearch, SLOT (takeFocus())); QKeySequence seql("Ctrl+l"); sc = new QShortcut(seql, this); connect(sc, SIGNAL (activated()), sSearch, SLOT (takeFocus())); connect(&m_watcher, SIGNAL(fileChanged(QString)), this, SLOT(updateIdxStatus())); connect(sSearch, SIGNAL(startSearch(std::shared_ptr, bool)), this, SLOT(startSearch(std::shared_ptr, bool))); connect(sSearch, SIGNAL(setDescription(QString)), this, SLOT(onSetDescription(QString))); connect(sSearch, SIGNAL(clearSearch()), this, SLOT(resetSearch())); connect(preferencesMenu, SIGNAL(triggered(QAction*)), this, SLOT(setStemLang(QAction*))); connect(preferencesMenu, SIGNAL(aboutToShow()), this, SLOT(adjustPrefsMenu())); connect(fileExitAction, SIGNAL(triggered() ), this, SLOT(fileExit() ) ); connect(fileToggleIndexingAction, SIGNAL(triggered()), this, SLOT(toggleIndexing())); #ifndef _WIN32 fileMenu->insertAction(fileRebuildIndexAction, fileBumpIndexingAction); connect(fileBumpIndexingAction, SIGNAL(triggered()), this, SLOT(bumpIndexing())); #endif connect(fileRebuildIndexAction, SIGNAL(triggered()), this, SLOT(rebuildIndex())); connect(fileEraseDocHistoryAction, SIGNAL(triggered()), this, SLOT(eraseDocHistory())); connect(fileEraseSearchHistoryAction, SIGNAL(triggered()), this, SLOT(eraseSearchHistory())); connect(actionSave_last_query, SIGNAL(triggered()), this, SLOT(saveLastQuery())); connect(actionLoad_saved_query, SIGNAL(triggered()), this, SLOT(loadSavedQuery())); connect(actionShow_index_statistics, SIGNAL(triggered()), this, SLOT(showIndexStatistics())); connect(helpAbout_RecollAction, SIGNAL(triggered()), this, SLOT(showAboutDialog())); connect(showMissingHelpers_Action, SIGNAL(triggered()), this, SLOT(showMissingHelpers())); connect(showActiveTypes_Action, SIGNAL(triggered()), this, SLOT(showActiveTypes())); connect(userManualAction, SIGNAL(triggered()), this, SLOT(startManual())); connect(toolsDoc_HistoryAction, SIGNAL(triggered()), this, SLOT(showDocHistory())); connect(toolsAdvanced_SearchAction, SIGNAL(triggered()), this, SLOT(showAdvSearchDialog())); connect(toolsSpellAction, SIGNAL(triggered()), this, SLOT(showSpellDialog())); 
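// A minimal sketch of the menu flavour of the document-filter control built
// above: an exclusive QActionGroup whose checkable actions store their filter
// index in data(), dispatched from a single triggered(QAction*) handler.
// makeFilterMenu and onFilterChosen are illustrative names, not Recoll APIs.
#include <QAction>
#include <QActionGroup>
#include <QMenu>
#include <QStringList>
#include <functional>

static QMenu *makeFilterMenu(QWidget *parent, const QStringList& labels,
                             std::function<void(int)> onFilterChosen)
{
    QMenu *menu = new QMenu(parent);
    QActionGroup *group = new QActionGroup(menu);
    group->setExclusive(true);
    for (int i = 0; i < labels.size(); i++) {
        QAction *act = group->addAction(labels[i]);
        act->setCheckable(true);
        act->setData(i);                 // Remember which filter this entry selects
        menu->addAction(act);
    }
    QObject::connect(group, &QActionGroup::triggered,
                     [onFilterChosen](QAction *act) {
                         onFilterChosen(act->data().toInt());
                     });
    return menu;
}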
connect(actionWebcache_Editor, SIGNAL(triggered()), this, SLOT(showWebcacheDialog())); connect(actionQuery_Fragments, SIGNAL(triggered()), this, SLOT(showFragButs())); connect(actionSpecial_Indexing, SIGNAL(triggered()), this, SLOT(showSpecIdx())); connect(indexConfigAction, SIGNAL(triggered()), this, SLOT(showIndexConfig())); connect(indexScheduleAction, SIGNAL(triggered()), this, SLOT(showIndexSched())); connect(queryPrefsAction, SIGNAL(triggered()), this, SLOT(showUIPrefs())); connect(extIdxAction, SIGNAL(triggered()), this, SLOT(showExtIdxDialog())); connect(enbSynAction, SIGNAL(toggled(bool)), this, SLOT(setSynEnabled(bool))); connect(toggleFullScreenAction, SIGNAL(triggered()), this, SLOT(toggleFullScreen())); connect(actionShowQueryDetails, SIGNAL(triggered()), reslist, SLOT(showQueryDetails())); connect(periodictimer, SIGNAL(timeout()), this, SLOT(periodic100())); restable->setRclMain(this, true); connect(actionSaveResultsAsCSV, SIGNAL(triggered()), restable, SLOT(saveAsCSV())); connect(this, SIGNAL(docSourceChanged(std::shared_ptr)), restable, SLOT(setDocSource(std::shared_ptr))); connect(this, SIGNAL(searchReset()), restable, SLOT(resetSource())); connect(this, SIGNAL(resultsReady()), restable, SLOT(readDocSource())); connect(this, SIGNAL(sortDataChanged(DocSeqSortSpec)), restable, SLOT(onSortDataChanged(DocSeqSortSpec))); connect(restable->getModel(), SIGNAL(sortDataChanged(DocSeqSortSpec)), this, SLOT(onSortDataChanged(DocSeqSortSpec))); connect(restable, SIGNAL(docPreviewClicked(int, Rcl::Doc, int)), this, SLOT(startPreview(int, Rcl::Doc, int))); connect(restable, SIGNAL(docExpand(Rcl::Doc)), this, SLOT(docExpand(Rcl::Doc))); connect(restable, SIGNAL(showSubDocs(Rcl::Doc)), this, SLOT(showSubDocs(Rcl::Doc))); connect(restable, SIGNAL(openWithRequested(Rcl::Doc, string)), this, SLOT(openWith(Rcl::Doc, string))); reslist->setRclMain(this, true); connect(this, SIGNAL(docSourceChanged(std::shared_ptr)), reslist, SLOT(setDocSource(std::shared_ptr))); connect(firstPageAction, SIGNAL(triggered()), reslist, SLOT(resultPageFirst())); connect(prevPageAction, SIGNAL(triggered()), reslist, SLOT(resPageUpOrBack())); connect(nextPageAction, SIGNAL(triggered()), reslist, SLOT(resPageDownOrNext())); connect(this, SIGNAL(searchReset()), reslist, SLOT(resetList())); connect(this, SIGNAL(resultsReady()), reslist, SLOT(readDocSource())); connect(reslist, SIGNAL(hasResults(int)), this, SLOT(resultCount(int))); connect(reslist, SIGNAL(wordSelect(QString)), sSearch, SLOT(addTerm(QString))); connect(reslist, SIGNAL(wordReplace(const QString&, const QString&)), sSearch, SLOT(onWordReplace(const QString&, const QString&))); connect(reslist, SIGNAL(nextPageAvailable(bool)), this, SLOT(enableNextPage(bool))); connect(reslist, SIGNAL(prevPageAvailable(bool)), this, SLOT(enablePrevPage(bool))); connect(reslist, SIGNAL(docExpand(Rcl::Doc)), this, SLOT(docExpand(Rcl::Doc))); connect(reslist, SIGNAL(showSnippets(Rcl::Doc)), this, SLOT(showSnippets(Rcl::Doc))); connect(reslist, SIGNAL(showSubDocs(Rcl::Doc)), this, SLOT(showSubDocs(Rcl::Doc))); connect(reslist, SIGNAL(docSaveToFileClicked(Rcl::Doc)), this, SLOT(saveDocToFile(Rcl::Doc))); connect(reslist, SIGNAL(editRequested(Rcl::Doc)), this, SLOT(startNativeViewer(Rcl::Doc))); connect(reslist, SIGNAL(openWithRequested(Rcl::Doc, string)), this, SLOT(openWith(Rcl::Doc, string))); connect(reslist, SIGNAL(docPreviewClicked(int, Rcl::Doc, int)), this, SLOT(startPreview(int, Rcl::Doc, int))); connect(reslist, SIGNAL(previewRequested(Rcl::Doc)), this, 
SLOT(startPreview(Rcl::Doc))); setFilterCtlStyle(prefs.filterCtlStyle); if (prefs.keepSort && prefs.sortActive) { m_sortspec.field = (const char *)prefs.sortField.toUtf8(); m_sortspec.desc = prefs.sortDesc; onSortDataChanged(m_sortspec); emit sortDataChanged(m_sortspec); } enableTrayIcon(prefs.showTrayIcon); fileRebuildIndexAction->setEnabled(false); fileToggleIndexingAction->setEnabled(false); fileRetryFailedAction->setEnabled(false); // Start timer on a slow period (used for checking ^C). Will be // speeded up during indexing periodictimer->start(1000); } void RclMain::enableTrayIcon(bool on) { on = on && QSystemTrayIcon::isSystemTrayAvailable(); if (on) { if (nullptr == m_trayicon) { m_trayicon = new RclTrayIcon(this, QIcon(QString(":/images/recoll.png"))); } m_trayicon->show(); } else { delete m_trayicon; m_trayicon = 0; } } void RclMain::setSynEnabled(bool on) { prefs.synFileEnable = on; if (uiprefs) uiprefs->synFileCB->setChecked(prefs.synFileEnable); } void RclMain::resultCount(int n) { actionSortByDateAsc->setEnabled(n>0); actionSortByDateDesc->setEnabled(n>0); } void RclMain::setFilterCtlStyle(int stl) { switch (stl) { case PrefsPack::FCS_MN: setupResTB(false); m_filtFRM->setVisible(false); m_filtMN->menuAction()->setVisible(true); break; case PrefsPack::FCS_CMB: setupResTB(true); m_filtFRM->setVisible(false); m_filtMN->menuAction()->setVisible(false); break; case PrefsPack::FCS_BT: default: setupResTB(false); m_filtFRM->setVisible(true); m_filtMN->menuAction()->setVisible(false); } } // Set up the "results" toolbox, adding the filter combobox or not depending // on config option void RclMain::setupResTB(bool combo) { m_resTB->clear(); m_resTB->addAction(firstPageAction); m_resTB->addAction(prevPageAction); m_resTB->addAction(nextPageAction); m_resTB->addSeparator(); m_resTB->addAction(actionSortByDateAsc); m_resTB->addAction(actionSortByDateDesc); if (combo) { m_resTB->addSeparator(); m_filtCMB->show(); m_resTB->addWidget(m_filtCMB); } else { m_filtCMB->hide(); } m_resTB->addSeparator(); m_resTB->addAction(actionShowResultsAsTable); } // This is called by a timer right after we come up. Try to open // the database and talk to the user if we can't void RclMain::initDbOpen() { bool nodb = false; string reason; bool maindberror; if (!maybeOpenDb(reason, true, &maindberror)) { nodb = true; if (maindberror) { FirstIdxDialog fidia(this); connect(fidia.idxconfCLB, SIGNAL(clicked()), this, SLOT(execIndexConfig())); connect(fidia.idxschedCLB, SIGNAL(clicked()), this, SLOT(execIndexSched())); connect(fidia.runidxPB, SIGNAL(clicked()), this, SLOT(rebuildIndex())); fidia.exec(); // Don't open adv search or run cmd line search in this case. return; } else { QMessageBox::warning(0, "Recoll", tr("Could not open external index. Db not open. Check external indexes list.")); } } if (prefs.startWithAdvSearchOpen) showAdvSearchDialog(); // If we have something in the search entry, it comes from a // command line argument if (!nodb && sSearch->hasSearchString()) QTimer::singleShot(0, sSearch, SLOT(startSimpleSearch())); if (!m_urltoview.isEmpty()) viewUrl(); } void RclMain::setStemLang(QAction *id) { LOGDEB("RclMain::setStemLang(" << id << ")\n"); // Check that the menu entry is for a stemming language change // (might also be "show prefs" etc. 
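// A minimal sketch of the single-checked-action bookkeeping used by
// setStemLang() below: every action registered in the map is unchecked, then
// only the requested one is checked. The map type mirrors m_stemLangToId;
// checkOnly is an illustrative helper name, not a Recoll function.
#include <QAction>
#include <QString>
#include <map>

static void checkOnly(std::map<QString, QAction*>& actions, QAction *wanted)
{
    for (auto& entry : actions)
        entry.second->setChecked(false);   // Clear every entry first
    if (wanted)
        wanted->setChecked(true);          // Then mark the selected language
}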
bool isLangId = false; for (map::const_iterator it = m_stemLangToId.begin(); it != m_stemLangToId.end(); it++) { if (id == it->second) isLangId = true; } if (!isLangId) return; // Set the "checked" item state for lang entries for (map::const_iterator it = m_stemLangToId.begin(); it != m_stemLangToId.end(); it++) { (it->second)->setChecked(false); } id->setChecked(true); // Retrieve language value (also handle special cases), set prefs, // notify that we changed QString lang; if (id == m_idNoStem) { lang = ""; } else if (id == m_idAllStem) { lang = "ALL"; } else { lang = id->text(); } prefs.queryStemLang = lang; LOGDEB("RclMain::setStemLang(" << id << "): lang [" << qs2utf8s(prefs.queryStemLang) << "]\n"); rwSettings(true); emit stemLangChanged(lang); } // Set the checked stemming language item before showing the prefs menu void RclMain::setStemLang(const QString& lang) { LOGDEB("RclMain::setStemLang(" << qs2utf8s(lang) << ")\n"); QAction *id; if (lang == "") { id = m_idNoStem; } else if (lang == "ALL") { id = m_idAllStem; } else { map::iterator it = m_stemLangToId.find(lang); if (it == m_stemLangToId.end()) return; id = it->second; } for (map::const_iterator it = m_stemLangToId.begin(); it != m_stemLangToId.end(); it++) { (it->second)->setChecked(false); } id->setChecked(true); } // Prefs menu about to show void RclMain::adjustPrefsMenu() { setStemLang(prefs.queryStemLang); } void RclMain::showTrayMessage(const QString& text) { if (m_trayicon && prefs.trayMessages) m_trayicon->showMessage("Recoll", text, QSystemTrayIcon::Information, 1000); } void RclMain::closeEvent(QCloseEvent *ev) { LOGDEB("RclMain::closeEvent\n"); if (isFullScreen()) { prefs.showmode = PrefsPack::SHOW_FULL; } else if (isMaximized()) { prefs.showmode = PrefsPack::SHOW_MAX; } else { prefs.showmode = PrefsPack::SHOW_NORMAL; } if (prefs.closeToTray && m_trayicon && m_trayicon->isVisible()) { hide(); ev->ignore(); } else { fileExit(); } } void RclMain::fileExit() { LOGDEB("RclMain: fileExit\n"); // Have to do this both in closeEvent (for close to tray) and fileExit // (^Q, doesnt go through closeEvent) if (isFullScreen()) { prefs.showmode = PrefsPack::SHOW_FULL; } else if (isMaximized()) { prefs.showmode = PrefsPack::SHOW_MAX; } else { prefs.showmode = PrefsPack::SHOW_NORMAL; } if (m_trayicon) { m_trayicon->setVisible(false); } // Don't save geometry if we're currently fullscreened if (!isFullScreen() && !isMaximized()) { prefs.mainwidth = width(); prefs.mainheight = height(); } prefs.toolArea = toolBarArea(m_toolsTB); prefs.resArea = toolBarArea(m_resTB); restable->saveColState(); if (prefs.ssearchTypSav) { prefs.ssearchTyp = sSearch->searchTypCMB->currentIndex(); } rwSettings(true); // We should do the right thing and let exit() call all the // cleanup handlers. But we have few persistent resources and qt // exit is a great source of crashes and pita. So do our own // cleanup: deleteAllTempFiles(); // and scram out _Exit(0); } // Start a db query and set the reslist docsource void RclMain::startSearch(std::shared_ptr sdata, bool issimple) { LOGDEB("RclMain::startSearch. Indexing " << (m_idxproc?"on":"off") << " Active " << m_queryActive << "\n"); if (m_queryActive) { LOGDEB("startSearch: already active\n"); return; } m_queryActive = true; restable->setEnabled(false); m_source = std::shared_ptr(); m_searchIsSimple = issimple; // The db may have been closed at the end of indexing string reason; // If indexing is being performed, we reopen the db at each query. 
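// A minimal, self-contained sketch of the close-to-tray behaviour implemented
// by RclMain::closeEvent() further up: when a tray icon is visible and the
// corresponding preference is set, the window is hidden and the close event
// ignored instead of quitting. TrayAwareWindow and its members are
// illustrative assumptions, not Recoll classes.
#include <QCloseEvent>
#include <QMainWindow>
#include <QSystemTrayIcon>

class TrayAwareWindow : public QMainWindow {
protected:
    QSystemTrayIcon *m_tray{nullptr};
    bool m_closeToTray{false};
    void closeEvent(QCloseEvent *ev) override {
        if (m_closeToTray && m_tray && m_tray->isVisible()) {
            hide();          // Keep running, reachable from the tray icon
            ev->ignore();    // Cancel the close
        } else {
            ev->accept();    // Normal termination path
        }
    }
};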
if (!maybeOpenDb(reason, m_idxproc != 0)) { QMessageBox::critical(0, "Recoll", QString(reason.c_str())); m_queryActive = false; restable->setEnabled(true); return; } if (prefs.synFileEnable && !prefs.synFile.isEmpty()) { string sf = (const char *)prefs.synFile.toLocal8Bit(); if (!rcldb->setSynGroupsFile(sf)) { QMessageBox::warning(0, "Recoll", tr("Can't set synonyms file (parse error?)")); return; } } else { rcldb->setSynGroupsFile(""); } Rcl::Query *query = new Rcl::Query(rcldb.get()); query->setCollapseDuplicates(prefs.collapseDuplicates); curPreview = 0; DocSequenceDb *src = new DocSequenceDb(rcldb, std::shared_ptr(query), string(tr("Query results").toUtf8()), sdata); src->setAbstractParams(prefs.queryBuildAbstract, prefs.queryReplaceAbstract); m_source = std::shared_ptr(src); m_source->setSortSpec(m_sortspec); m_source->setFiltSpec(m_filtspec); emit docSourceChanged(m_source); emit sortDataChanged(m_sortspec); initiateQuery(); } class QueryThread : public QThread { std::shared_ptr m_source; public: QueryThread(std::shared_ptr source) : m_source(source) { } ~QueryThread() { } virtual void run() { cnt = m_source->getResCnt(); } int cnt; }; void RclMain::initiateQuery() { if (!m_source) return; QApplication::setOverrideCursor(QCursor(Qt::WaitCursor)); QueryThread qthr(m_source); qthr.start(); QProgressDialog progress(this); progress.setLabelText(tr("Query in progress.
" "Due to limitations of the indexing library,
" "cancelling will exit the program")); progress.setWindowModality(Qt::WindowModal); progress.setRange(0,0); // For some reason setMinimumDuration() does not seem to work with // a busy dialog (range 0,0) Have to call progress.show() inside // the loop. // progress.setMinimumDuration(2000); // Also the multiple processEvents() seem to improve the responsiveness?? for (int i = 0;;i++) { qApp->processEvents(); if (qthr.wait(100)) { break; } if (i == 20) progress.show(); qApp->processEvents(); if (progress.wasCanceled()) { // Just get out of there asap. exit(1); } qApp->processEvents(); } int cnt = qthr.cnt; QString msg; if (cnt > 0) { QString str; msg = tr("Result count (est.)") + ": " + str.setNum(cnt); } else { msg = tr("No results found"); } statusBar()->showMessage(msg, 0); QApplication::restoreOverrideCursor(); m_queryActive = false; restable->setEnabled(true); emit(resultsReady()); } void RclMain::resetSearch() { m_source = std::shared_ptr(); emit searchReset(); } void RclMain::onSortCtlChanged() { if (m_sortspecnochange) return; LOGDEB("RclMain::onSortCtlChanged()\n"); m_sortspec.reset(); if (actionSortByDateAsc->isChecked()) { m_sortspec.field = "mtime"; m_sortspec.desc = false; prefs.sortActive = true; prefs.sortDesc = false; prefs.sortField = "mtime"; } else if (actionSortByDateDesc->isChecked()) { m_sortspec.field = "mtime"; m_sortspec.desc = true; prefs.sortActive = true; prefs.sortDesc = true; prefs.sortField = "mtime"; } else { prefs.sortActive = prefs.sortDesc = false; prefs.sortField = ""; } if (m_source) m_source->setSortSpec(m_sortspec); emit sortDataChanged(m_sortspec); initiateQuery(); } void RclMain::onSortDataChanged(DocSeqSortSpec spec) { LOGDEB("RclMain::onSortDataChanged\n"); m_sortspecnochange = true; if (spec.field.compare("mtime")) { actionSortByDateDesc->setChecked(false); actionSortByDateAsc->setChecked(false); } else { actionSortByDateDesc->setChecked(spec.desc); actionSortByDateAsc->setChecked(!spec.desc); } m_sortspecnochange = false; if (m_source) m_source->setSortSpec(spec); m_sortspec = spec; prefs.sortField = QString::fromUtf8(spec.field.c_str()); prefs.sortDesc = spec.desc; prefs.sortActive = !spec.field.empty(); initiateQuery(); } void RclMain::on_actionShowResultsAsTable_toggled(bool on) { LOGDEB("RclMain::on_actionShowResultsAsTable_toggled(" << on << ")\n"); prefs.showResultsAsTable = on; displayingTable = on; restable->setVisible(on); reslist->setVisible(!on); actionSaveResultsAsCSV->setEnabled(on); static QShortcut tablefocseq(QKeySequence("Ctrl+r"), this); if (!on) { int docnum = restable->getDetailDocNumOrTopRow(); if (docnum >= 0) { reslist->resultPageFor(docnum); } disconnect(&tablefocseq, SIGNAL(activated()), restable, SLOT(takeFocus())); sSearch->takeFocus(); } else { int docnum = reslist->pageFirstDocNum(); if (docnum >= 0) { restable->makeRowVisible(docnum); } nextPageAction->setEnabled(false); prevPageAction->setEnabled(false); firstPageAction->setEnabled(false); connect(&tablefocseq, SIGNAL(activated()), restable, SLOT(takeFocus())); } } void RclMain::on_actionSortByDateAsc_toggled(bool on) { LOGDEB("RclMain::on_actionSortByDateAsc_toggled(" << on << ")\n"); if (on) { if (actionSortByDateDesc->isChecked()) { actionSortByDateDesc->setChecked(false); // Let our buddy work. return; } } onSortCtlChanged(); } void RclMain::on_actionSortByDateDesc_toggled(bool on) { LOGDEB("RclMain::on_actionSortByDateDesc_toggled(" << on << ")\n"); if (on) { if (actionSortByDateAsc->isChecked()) { actionSortByDateAsc->setChecked(false); // Let our buddy work. 
return; } } onSortCtlChanged(); } void RclMain::saveDocToFile(Rcl::Doc doc) { QString s = QFileDialog::getSaveFileName(this, //parent tr("Save file"), QString::fromLocal8Bit(path_home().c_str()) ); string tofile((const char *)s.toLocal8Bit()); TempFile temp; // not used because tofile is set. if (!FileInterner::idocToFile(temp, tofile, theconfig, doc)) { QMessageBox::warning(0, "Recoll", tr("Cannot extract document or create " "temporary file")); return; } } void RclMain::showSubDocs(Rcl::Doc doc) { LOGDEB("RclMain::showSubDocs\n"); string reason; if (!maybeOpenDb(reason)) { QMessageBox::critical(0, "Recoll", QString(reason.c_str())); return; } vector docs; if (!rcldb->getSubDocs(doc, docs)) { QMessageBox::warning(0, "Recoll", QString("Can't get subdocs")); return; } DocSequenceDocs *src = new DocSequenceDocs(rcldb, docs, qs2utf8s(tr("Sub-documents and attachments"))); src->setDescription(qs2utf8s(tr("Sub-documents and attachments"))); std::shared_ptr source(new DocSource(theconfig, std::shared_ptr(src))); ResTable *res = new ResTable(); res->setRclMain(this, false); res->setDocSource(source); res->readDocSource(); res->show(); } // Search for document 'like' the selected one. We ask rcldb/xapian to find // significant terms, and add them to the simple search entry. void RclMain::docExpand(Rcl::Doc doc) { LOGDEB("RclMain::docExpand()\n"); if (!rcldb) return; list terms; terms = m_source->expand(doc); if (terms.empty()) { LOGDEB("RclMain::docExpand: no terms\n"); return; } // Do we keep the original query. I think we'd better not. // rcldb->expand is set to keep the original query terms instead. QString text;// = sSearch->queryText->currentText(); for (list::iterator it = terms.begin(); it != terms.end(); it++) { text += QString::fromLatin1(" \"") + QString::fromUtf8((*it).c_str()) + QString::fromLatin1("\""); } // We need to insert item here, its not auto-done like when the user types // CR sSearch->setSearchString(text); sSearch->setAnyTermMode(); sSearch->startSimpleSearch(); } void RclMain::showDocHistory() { LOGDEB("RclMain::showDocHistory\n"); resetSearch(); curPreview = 0; string reason; if (!maybeOpenDb(reason)) { QMessageBox::critical(0, "Recoll", QString(reason.c_str())); return; } // Construct a bogus SearchData structure std::shared_ptrsearchdata = std::shared_ptr(new Rcl::SearchData(Rcl::SCLT_AND, cstr_null)); searchdata->setDescription((const char *)tr("History data").toUtf8()); // If you change the title, also change it in eraseDocHistory() DocSequenceHistory *src = new DocSequenceHistory(rcldb, g_dynconf, string(tr("Document history").toUtf8())); src->setDescription((const char *)tr("History data").toUtf8()); DocSource *source = new DocSource(theconfig, std::shared_ptr(src)); m_source = std::shared_ptr(source); m_source->setSortSpec(m_sortspec); m_source->setFiltSpec(m_filtspec); emit docSourceChanged(m_source); emit sortDataChanged(m_sortspec); initiateQuery(); } // Erase all memory of documents viewed void RclMain::eraseDocHistory() { // Clear file storage if (g_dynconf) g_dynconf->eraseAll(docHistSubKey); // Clear possibly displayed history if (reslist->displayingHistory()) { showDocHistory(); } } void RclMain::eraseSearchHistory() { prefs.ssearchHistory.clear(); if (sSearch) sSearch->clearAll(); if (g_advshistory) g_advshistory->clear(); } // Called when the uiprefs dialog is ok'd void RclMain::setUIPrefs() { if (!uiprefs) return; LOGDEB("Recollmain::setUIPrefs\n"); reslist->setFont(); sSearch->setPrefs(); enbSynAction->setDisabled(prefs.synFile.isEmpty()); 
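// A minimal sketch of the string-building step used by docExpand() above:
// each expansion term is double-quoted before being appended to the search
// text, so that multi-word terms survive query parsing. quoteTerms is an
// illustrative helper name, not a Recoll function.
#include <QString>
#include <QStringList>
#include <list>
#include <string>

static QString quoteTerms(const std::list<std::string>& terms)
{
    QStringList parts;
    for (const auto& term : terms) {
        // Surround each term with double quotes: "term"
        parts.push_back(QString::fromLatin1("\"") +
                        QString::fromUtf8(term.c_str()) +
                        QString::fromLatin1("\""));
    }
    return parts.join(QChar(' '));
}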
enbSynAction->setChecked(prefs.synFileEnable); } void RclMain::enableNextPage(bool yesno) { if (!displayingTable) nextPageAction->setEnabled(yesno); } void RclMain::enablePrevPage(bool yesno) { if (!displayingTable) { prevPageAction->setEnabled(yesno); firstPageAction->setEnabled(yesno); } } void RclMain::onSetDescription(QString desc) { m_queryDescription = desc; } QString RclMain::getQueryDescription() { if (!m_source) return ""; return m_queryDescription.isEmpty() ? u8s2qs(m_source->getDescription()) : m_queryDescription; } // Set filter, action style void RclMain::catgFilter(QAction *act) { int id = act->data().toInt(); catgFilter(id); } // User pressed a filter button: set filter params in reslist void RclMain::catgFilter(int id) { LOGDEB("RclMain::catgFilter: id " << id << "\n"); if (id < 0 || id >= int(m_catgbutvec.size())) return; switch (prefs.filterCtlStyle) { case PrefsPack::FCS_MN: m_filtCMB->setCurrentIndex(id); m_filtBGRP->buttons()[id]->setChecked(true); break; case PrefsPack::FCS_CMB: m_filtBGRP->buttons()[id]->setChecked(true); m_filtMN->actions()[id]->setChecked(true); break; case PrefsPack::FCS_BT: default: m_filtCMB->setCurrentIndex(id); m_filtMN->actions()[id]->setChecked(true); } m_catgbutvecidx = id; setFiltSpec(); } void RclMain::setFiltSpec() { m_filtspec.reset(); // "Category" buttons if (m_catgbutvecidx != 0) { string catg = m_catgbutvec[m_catgbutvecidx]; string frag; theconfig->getGuiFilter(catg, frag); m_filtspec.orCrit(DocSeqFiltSpec::DSFS_QLANG, frag); } // Fragments from the fragbuts buttonbox tool if (fragbuts) { vector frags; fragbuts->getfrags(frags); for (vector::const_iterator it = frags.begin(); it != frags.end(); it++) { m_filtspec.orCrit(DocSeqFiltSpec::DSFS_QLANG, *it); } } if (m_source) m_source->setFiltSpec(m_filtspec); initiateQuery(); } void RclMain::onFragmentsChanged() { setFiltSpec(); } void RclMain::toggleFullScreen() { if (isFullScreen()) showNormal(); else showFullScreen(); } void RclMain::showEvent(QShowEvent *ev) { sSearch->takeFocus(); QMainWindow::showEvent(ev); } void RclMain::applyStyleSheet() { ::applyStyleSheet(prefs.qssFile); } recoll-1.26.3/qtgui/advshist.h0000644000175000017500000000402213566424763013151 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _ADVSHIST_H_INCLUDED_ #define _ADVSHIST_H_INCLUDED_ #include "autoconfig.h" #include #include "recoll.h" #include #include "searchdata.h" /** Advanced search history. * * We store previous searches using the "dynconf" mechanism, as string * entries under the "advSearchHist" key. The strings are generated by * translating the SearchData structure to XML, which is done by * calling SearchData::asXML(). 
* When reading, we use a QXmlSimpleReader and QXmlDefaultHandler to * turn the XML back into a SearchData object, which is then passed to * the advanced search object fromSearch() method to rebuild the * window state. * * XML generation is performed by ../rcldb/searchdataxml.cpp. * See xmltosd.h for a schema description */ class AdvSearchHist { public: AdvSearchHist(); ~AdvSearchHist(); // Add entry bool push(std::shared_ptr); // Get latest. does not change state std::shared_ptr getnewest(); // Cursor std::shared_ptr getolder(); std::shared_ptr getnewer(); void clear(); private: bool read(); int m_current{-1}; std::vector > m_entries; }; #endif // _ADVSHIST_H_INCLUDED_ recoll-1.26.3/qtgui/snippets_w.h0000644000175000017500000000474013566424763013526 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SNIPPETS_W_H_INCLUDED_ #define _SNIPPETS_W_H_INCLUDED_ #include "autoconfig.h" #include #include #include "rcldoc.h" #include "docseq.h" #include "rclmain_w.h" #include "ui_snippets.h" class SnippetsW : public QWidget, public Ui::Snippets { Q_OBJECT public: SnippetsW(Rcl::Doc doc, std::shared_ptr source, QWidget* parent = 0) : QWidget(parent) { setupUi((QDialog*)this); init(); onSetDoc(doc, source); } public slots: virtual void onLinkClicked(const QUrl &); virtual void onSetDoc(Rcl::Doc doc, std::shared_ptr source); virtual void createPopupMenu(const QPoint& pos); protected slots: virtual void slotEditFind(); virtual void slotEditFindNext(); virtual void slotEditFindPrevious(); virtual void slotSearchTextChanged(const QString&); virtual void reloadByRelevance(); virtual void reloadByPage(); signals: void startNativeViewer(Rcl::Doc, int pagenum, QString term); private: void init(); std::shared_ptr m_source; Rcl::Doc m_doc; bool m_sortingByPage; }; #ifdef USING_WEBENGINE #include // Subclass the page to hijack the link clicks class SnipWebPage: public QWebEnginePage { Q_OBJECT public: SnipWebPage(SnippetsW *parent) : QWebEnginePage((QWidget *)parent), m_parent(parent) {} protected: virtual bool acceptNavigationRequest(const QUrl& url, NavigationType, bool) { m_parent->onLinkClicked(url); return false; } private: SnippetsW *m_parent; }; #endif #endif /* _SNIPPETS_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/advsearch.ui0000644000175000017500000004654213303776057013462 00000000000000 AdvSearchBase 0 0 544 441 Advanced search true 0 Find 2 0 0 All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. 
QFrame::NoFrame QFrame::Plain Search for <br>documents<br>satisfying: false 4 8 0 All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Qt::Vertical QSizePolicy::Expanding 0 0 0 0 Delete clause false 0 0 Add clause false QFrame::StyledPanel QFrame::Plain 1 0 QFrame::HLine QFrame::Plain Filter 1 0 Check this to enable filtering on dates Filter dates From false To false QFrame::HLine QFrame::Sunken 1 0 Check this to enable filtering on sizes Filter sizes Minimum size. You can use k/K,m/M,g/G as multipliers Min. Size false Maximum size. You can use k/K,m/M,g/G as multipliers Max. Size false QFrame::HLine QFrame::Sunken 1 0 Check this to enable filtering on file types Restrict file types false 1 0 Check this to use file categories instead of raw mime types By categories false Save as default false 0 Searched file types false false 200 20 QAbstractItemView::ExtendedSelection 0 false All ----> false false Sel -----> false false <----- Sel false false <----- All false 0 Ignored file types false false 200 20 QAbstractItemView::ExtendedSelection QFrame::HLine QFrame::Sunken 8 0 300 0 Enter top directory for search true 20 QComboBox::NoInsert false Browse false Restrict results to files in subtree: false Invert Start Search Close false recoll-1.26.3/qtgui/rclm_preview.cpp0000644000175000017500000002272713566714503014363 00000000000000/* Copyright (C) 2005-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include "log.h" #include "internfile.h" #include "rclzg.h" #include "rclmain_w.h" static const QKeySequence quitKeySeq("Ctrl+q"); // If a preview (toplevel) window gets closed by the user, we need to // clean up because there is no way to reopen it. And check the case // where the current one is closed void RclMain::previewClosed(Preview *w) { LOGDEB("RclMain::previewClosed(" << w << ")\n"); if (w == curPreview) { LOGDEB("Active preview closed\n"); curPreview = 0; } else { LOGDEB("Old preview closed\n"); } } // Document up to date check. The main problem we try to solve is // displaying the wrong message from a compacted mail folder. // // Also we should re-run the query after updating the index because // the ipaths may be wrong in the current result list. For now, the // user does this by clicking search again once the indexing is done // // We only do this for the main index, else jump and prey (cant update // anyway, even the makesig() call might not make sense for our base // config) bool RclMain::containerUpToDate(Rcl::Doc& doc) { static bool ignore_out_of_date_preview = false; // If ipath is empty, we decide we don't care. 
Also, we need an index, if (ignore_out_of_date_preview || doc.ipath.empty() || rcldb == 0) return true; string udi; doc.getmeta(Rcl::Doc::keyudi, &udi); if (udi.empty()) { // Whatever... return true; } string sig; if (!FileInterner::makesig(theconfig, doc, sig)) { QMessageBox::warning(0, "Recoll", tr("Can't access file: ") + QString::fromLocal8Bit(doc.url.c_str())); // Let's try the preview anyway... return true; } if (!rcldb->needUpdate(udi, sig)) { // Alles ist in ordnung return true; } // Top level (container) document, for checking for indexing error string ctsig = "+"; Rcl::Doc ctdoc; if (rcldb->getContainerDoc(doc, ctdoc)) { ctdoc.getmeta(Rcl::Doc::keysig, &ctsig); } // We can only run indexing on the main index (dbidx 0) bool ismainidx = rcldb->fromMainIndex(doc); // Indexer already running? bool ixnotact = (m_indexerState == IXST_NOTRUNNING); QString msg = tr("Index not up to date for this file.
"); if (ctsig.back() == '+') { msg += tr("Also, it seems that the last index update for the file " "failed.
"); } if (ixnotact && ismainidx) { msg += tr("Click Ok to try to update the " "index for this file. You will need to " "run the query again when indexing is done.
"); } else if (ismainidx) { msg += tr("The indexer is running so things should " "improve when it's done. "); } else if (ixnotact) { // Not main index msg += tr("The document belongs to an external index " "which I can't update. "); } msg += tr("Click Cancel to return to the list.
" "Click Ignore to show the preview anyway (and remember for " "this session). There is a risk of showing the wrong entry.
"); QMessageBox::StandardButtons bts = QMessageBox::Ignore | QMessageBox::Cancel; if (ixnotact &&ismainidx) bts |= QMessageBox::Ok; int rep = QMessageBox::warning(0, tr("Warning"), msg, bts, (ixnotact && ismainidx) ? QMessageBox::Cancel : QMessageBox::NoButton); if (m_indexerState == IXST_NOTRUNNING && rep == QMessageBox::Ok) { LOGDEB("Requesting index update for " << doc.url << "\n"); vector docs(1, doc); updateIdxForDocs(docs); } if (rep == QMessageBox::Ignore) { ignore_out_of_date_preview = true; return true; } else { return false; } } /** * Open a preview window for a given document, or load it into new tab of * existing window. * * @param docnum db query index * @param mod keyboards modifiers like ControlButton, ShiftButton */ void RclMain::startPreview(int docnum, Rcl::Doc doc, int mod) { LOGDEB("startPreview(" << docnum << ", doc, " << mod << ")\n"); if (!containerUpToDate(doc)) return; // Do the zeitgeist thing zg_send_event(ZGSEND_PREVIEW, doc); if (mod & Qt::ShiftModifier) { // User wants new preview window curPreview = 0; } if (curPreview == 0) { HighlightData hdata; m_source->getTerms(hdata); curPreview = new Preview(this, reslist->listId(), hdata); if (curPreview == 0) { QMessageBox::warning(0, tr("Warning"), tr("Can't create preview window"), QMessageBox::Ok, QMessageBox::NoButton); return; } connect(new QShortcut(quitKeySeq, curPreview), SIGNAL (activated()), this, SLOT (fileExit())); connect(curPreview, SIGNAL(previewClosed(Preview *)), this, SLOT(previewClosed(Preview *))); connect(curPreview, SIGNAL(wordSelect(QString)), sSearch, SLOT(addTerm(QString))); connect(curPreview, SIGNAL(showNext(Preview *, int, int)), this, SLOT(previewNextInTab(Preview *, int, int))); connect(curPreview, SIGNAL(showPrev(Preview *, int, int)), this, SLOT(previewPrevInTab(Preview *, int, int))); connect(curPreview, SIGNAL(previewExposed(Preview *, int, int)), this, SLOT(previewExposed(Preview *, int, int))); connect(curPreview, SIGNAL(saveDocToFile(Rcl::Doc)), this, SLOT(saveDocToFile(Rcl::Doc))); connect(curPreview, SIGNAL(editRequested(Rcl::Doc)), this, SLOT(startNativeViewer(Rcl::Doc))); curPreview->setWindowTitle(getQueryDescription()); curPreview->show(); } curPreview->makeDocCurrent(doc, docnum); } /** * Open a preview window for a given document, no linking to result list * * This is used to show ie parent documents, which have no corresponding * entry in the result list. 
* */ void RclMain::startPreview(Rcl::Doc doc) { Preview *preview = new Preview(this, 0, HighlightData()); if (preview == 0) { QMessageBox::warning(0, tr("Warning"), tr("Can't create preview window"), QMessageBox::Ok, QMessageBox::NoButton); return; } connect(new QShortcut(quitKeySeq, preview), SIGNAL (activated()), this, SLOT (fileExit())); connect(preview, SIGNAL(wordSelect(QString)), sSearch, SLOT(addTerm(QString))); // Do the zeitgeist thing zg_send_event(ZGSEND_PREVIEW, doc); preview->show(); preview->makeDocCurrent(doc, 0); } // Show next document from result list in current preview tab void RclMain::previewNextInTab(Preview * w, int sid, int docnum) { previewPrevOrNextInTab(w, sid, docnum, true); } // Show previous document from result list in current preview tab void RclMain::previewPrevInTab(Preview * w, int sid, int docnum) { previewPrevOrNextInTab(w, sid, docnum, false); } // Combined next/prev from result list in current preview tab void RclMain::previewPrevOrNextInTab(Preview * w, int sid, int docnum, bool nxt) { LOGDEB("RclMain::previewNextInTab sid " << sid << " docnum " << docnum << ", listId " << reslist->listId() << "\n"); if (w == 0) // ?? return; if (sid != reslist->listId()) { QMessageBox::warning(0, "Recoll", tr("This search is not active any more")); return; } if (nxt) docnum++; else docnum--; if (docnum < 0 || !m_source || docnum >= m_source->getResCnt()) { if (!prefs.noBeeps) { LOGDEB("Beeping\n"); QApplication::beep(); } else { LOGDEB("Not beeping because nobeep is set\n"); } return; } Rcl::Doc doc; if (!reslist->getDoc(docnum, doc)) { QMessageBox::warning(0, "Recoll", tr("Cannot retrieve document info from database")); return; } w->makeDocCurrent(doc, docnum, true); } // Preview tab exposed: if the preview comes from the currently // displayed result list, tell reslist (to color the paragraph) void RclMain::previewExposed(Preview *, int sid, int docnum) { LOGDEB2("RclMain::previewExposed: sid " << sid << " docnum " << docnum << ", m_sid " << reslist->listId() << "\n"); if (sid != reslist->listId()) { return; } reslist->previewExposed(docnum); } recoll-1.26.3/qtgui/main.cpp0000644000175000017500000003666213567750145012617 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "rcldb.h" #include "rclconfig.h" #include "pathut.h" #include "recoll.h" #include "smallut.h" #include "rclinit.h" #include "log.h" #include "rclmain_w.h" #include "ssearch_w.h" #include "guiutils.h" #include "smallut.h" #include "readfile.h" #include "uncomp.h" #include "recollq.h" extern RclConfig *theconfig; std::mutex thetempfileslock; // Use a list not a vector so that contained objects have stable // addresses when extending. 
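// A minimal sketch of the registry idiom described in the comment just
// above: a std::list keeps element addresses stable across push_back(),
// unlike std::vector, and a mutex serializes access from the GUI and worker
// code. Registry and Entry are illustrative names, not the Recoll types.
#include <list>
#include <mutex>
#include <string>

struct Entry {
    std::string filename;
};

class Registry {
public:
    // Returns a pointer which stays valid for the life of the registry,
    // because std::list never relocates its elements when it grows.
    Entry *remember(const Entry& e) {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_entries.push_back(e);
        return &m_entries.back();
    }
    void clear() {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_entries.clear();
    }
private:
    std::mutex m_mutex;
    std::list<Entry> m_entries;
};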
static list o_tempfiles; /* Keep an array of temporary files for deletion at exit. It happens that we erase some of them before exiting (ie: when closing a preview tab), we don't reuse the array holes for now */ TempFile *rememberTempFile(TempFile temp) { std::unique_lock locker(thetempfileslock); o_tempfiles.push_back(temp); return &o_tempfiles.back(); } void forgetTempFile(string &fn) { if (fn.empty()) return; std::unique_lock locker(thetempfileslock); for (auto& entry : o_tempfiles) { if (entry.ok() && !fn.compare(entry.filename())) { entry = TempFile(); } } fn.erase(); } void deleteAllTempFiles() { std::unique_lock locker(thetempfileslock); o_tempfiles.clear(); Uncomp::clearcache(); } std::shared_ptr rcldb; int recollNeedsExit; RclMain *mainWindow; void startManual(const string& helpindex) { if (mainWindow) mainWindow->startManual(helpindex); } bool maybeOpenDb(string &reason, bool force, bool *maindberror) { LOGDEB2("maybeOpenDb: force " << force << "\n"); if (force) { rcldb = std::shared_ptr(new Rcl::Db(theconfig)); } rcldb->rmQueryDb(""); for (const auto& dbdir : prefs.activeExtraDbs) { LOGDEB("main: adding [" << dbdir << "]\n"); rcldb->addQueryDb(dbdir); } Rcl::Db::OpenError error; if (!rcldb->isopen() && !rcldb->open(Rcl::Db::DbRO, &error)) { reason = "Could not open database"; if (maindberror) { reason += " in " + theconfig->getDbDir() + " wait for indexing to complete?"; *maindberror = (error == Rcl::Db::DbOpenMainDb) ? true : false; } return false; } rcldb->setAbstractParams(-1, prefs.syntAbsLen, prefs.syntAbsCtx); return true; } // Retrieve the list currently active stemming languages. We try to // get this from the db, as some may have been added from recollindex // without changing the config. If this fails, use the config. This is // used for setting up choice menus, not updating the configuration. bool getStemLangs(vector& vlangs) { // Try from db string reason; if (maybeOpenDb(reason)) { vlangs = rcldb->getStemLangs(); LOGDEB0("getStemLangs: from index: " << stringsToString(vlangs) <<"\n"); return true; } else { // Cant get the langs from the index. Maybe it just does not // exist yet. So get them from the config string slangs; if (theconfig->getConfParam("indexstemminglanguages", slangs)) { stringToStrings(slangs, vlangs); return true; } return false; } } // This is never called because we _Exit() in rclmain_w.cpp static void recollCleanup() { LOGDEB2("recollCleanup: closing database\n" ); rcldb.reset(); deleteZ(theconfig); deleteAllTempFiles(); LOGDEB2("recollCleanup: done\n" ); } void applyStyleSheet(const QString& ssfname) { const char *cfname = (const char *)ssfname.toLocal8Bit(); LOGDEB0("Applying style sheet: [" << (cfname) << "]\n" ); if (cfname && *cfname) { string stylesheet; file_to_string(cfname, stylesheet); qApp->setStyleSheet(QString::fromUtf8(stylesheet.c_str())); } else { qApp->setStyleSheet(QString()); } } extern void qInitImages_recoll(); static const char *thisprog; // BEWARE COMPATIBILITY WITH recollq OPTIONS letters static int op_flags; #define OPT_a 0x1 #define OPT_c 0x2 #define OPT_f 0x4 #define OPT_h 0x8 #define OPT_L 0x10 #define OPT_l 0x20 #define OPT_o 0x40 #define OPT_q 0x80 #define OPT_t 0x100 #define OPT_v 0x200 #define OPT_w 0x400 static const char usage [] = "\n" "recoll [-h] [-c ] [-q query]\n" " -h : Print help and exit\n" " -c : specify config directory, overriding $RECOLL_CONFDIR\n" " -L : force language for GUI messages (e.g. -L fr)\n" " [-o|l|f|a] [-t] -q 'query' : search query to be executed as if entered\n" " into simple search. 
The default is to interpret the argument as a \n" " query language string (but see modifier options)\n" " In most cases, the query string should be quoted with single-quotes to\n" " avoid shell interpretation\n" " -a : the query will be interpreted as an AND query.\n" " -o : the query will be interpreted as an OR query.\n" " -f : the query will be interpreted as a filename search\n" " -l : the query will be interpreted as a query language string (default)\n" " -t : terminal display: no gui. Results go to stdout. MUST be given\n" " explicitly as -t (not ie, -at), and -q MUST\n" " be last on the command line if this is used.\n" " Use -t -h to see the additional non-gui options\n" " -w : open minimized\n" "recoll -v : print version\n" "recoll \n" " This is used to open a recoll url (including an ipath), and called\n" " typically from another search interface like the Unity Dash\n" ; static void Usage(void) { FILE *fp = (op_flags & OPT_h) ? stdout : stderr; fprintf(fp, "%s\n", Rcl::version_string().c_str()); fprintf(fp, "%s: Usage: %s", thisprog, usage); exit((op_flags & OPT_h)==0); } int main(int argc, char **argv) { // if we are named recollq or option "-t" is present at all, we // don't do the GUI thing and pass the whole to recollq for // command line / pipe usage. if (!strcmp(argv[0], "recollq")) exit(recollq(&theconfig, argc, argv)); for (int i = 0; i < argc; i++) { if (!strcmp(argv[i], "-t")) { exit(recollq(&theconfig, argc, argv)); } } #ifdef USING_WEBENGINE // This is necessary for allowing webengine to load local resources (icons) // It is not an issue because we never access remote sites. char arg_disable_web_security[] = "--disable-web-security"; int appargc = argc + 1; char** appargv = new char*[appargc+1]; for(int i = 0; i < argc; i++) { appargv[i] = argv[i]; } appargv[argc] = arg_disable_web_security; appargv[argc+1] = nullptr; QApplication app(appargc, appargv); #else QApplication app(argc, argv); #endif QCoreApplication::setOrganizationName("Recoll.org"); QCoreApplication::setApplicationName("recoll"); string a_config; string a_lang; string question; string urltoview; // Avoid disturbing argc and argv. Especially, setting argc to 0 // prevents WM_CLASS to be set from argv[0] (it appears that qt // keeps a ref to argc, and that it is used at exec() time to set // WM_CLASS from argv[0]). Curiously, it seems that the argv // pointer can be modified without consequences, but we use a copy // to play it safe int myargc = argc; char **myargv = argv; thisprog = myargv[0]; myargc--; myargv++; while (myargc > 0 && **myargv == '-') { (*myargv)++; if (!(**myargv)) Usage(); while (**myargv) switch (*(*myargv)++) { case 'a': op_flags |= OPT_a; break; case 'c': op_flags |= OPT_c; if (myargc < 2) Usage(); a_config = *(++myargv); myargc--; goto b1; case 'f': op_flags |= OPT_f; break; case 'h': op_flags |= OPT_h; Usage();break; case 'L': op_flags |= OPT_L; if (myargc < 2) Usage(); a_lang = *(++myargv); myargc--; goto b1; case 'l': op_flags |= OPT_l; break; case 'o': op_flags |= OPT_o; break; case 'q': op_flags |= OPT_q; if (myargc < 2) Usage(); question = *(++myargv); myargc--; goto b1; case 't': op_flags |= OPT_t; break; case 'v': op_flags |= OPT_v; fprintf(stdout, "%s\n", Rcl::version_string().c_str()); return 0; case 'w': op_flags |= OPT_w; break; default: Usage(); } b1: myargc--; myargv++; } // If -q was given, all remaining non-option args are concatenated // to the query. 
This is for the common case recoll -q x y z to // avoid needing quoting "x y z" if (op_flags & OPT_q) while (myargc > 0) { question += " "; question += *myargv++; myargc--; } // Else the remaining argument should be an URL to be opened if (myargc == 1) { urltoview = *myargv++;myargc--; if (urltoview.compare(0, 7, cstr_fileu)) { Usage(); } } else if (myargc > 0) Usage(); string reason; theconfig = recollinit(0, recollCleanup, 0, reason, &a_config); if (!theconfig || !theconfig->ok()) { QString msg = "Configuration problem: "; msg += QString::fromUtf8(reason.c_str()); QMessageBox::critical(0, "Recoll", msg); exit(1); } // fprintf(stderr, "recollinit done\n"); // Translations for Qt standard widgets QString slang; if (op_flags & OPT_L) { slang = u8s2qs(a_lang); } else { slang = QLocale::system().name().left(2); } QTranslator qt_trans(0); qt_trans.load(QString("qt_%1").arg(slang), QLibraryInfo::location(QLibraryInfo::TranslationsPath)); app.installTranslator(&qt_trans); // Translations for Recoll string translatdir = path_cat(theconfig->getDatadir(), "translations"); QTranslator translator(0); translator.load( QString("recoll_") + slang, translatdir.c_str() ); app.installTranslator( &translator ); // fprintf(stderr, "Translations installed\n"); string historyfile = path_cat(theconfig->getConfDir(), "history"); g_dynconf = new RclDynConf(historyfile); if (!g_dynconf || !g_dynconf->ok()) { QString msg = app.translate ("Main", "\"history\" file is damaged, please check " "or remove it: ") + QString::fromLocal8Bit(historyfile.c_str()); QMessageBox::critical(0, "Recoll", msg); exit(1); } g_advshistory = new AdvSearchHist; // fprintf(stderr, "History done\n"); rwSettings(false); // fprintf(stderr, "Settings done\n"); if (!prefs.qssFile.isEmpty()) { applyStyleSheet(prefs.qssFile); } QIcon icon; icon.addFile(QString::fromUtf8(":/images/recoll.png")); app.setWindowIcon(icon); // Create main window and set its size to previous session's RclMain w; mainWindow = &w; if (prefs.mainwidth > 100) { QSize s(prefs.mainwidth, prefs.mainheight); mainWindow->resize(s); } string dbdir = theconfig->getDbDir(); if (dbdir.empty()) { QMessageBox::critical( 0, "Recoll", app.translate("Main", "No db directory in configuration")); exit(1); } maybeOpenDb(reason); if (op_flags & OPT_w) { mainWindow->showMinimized(); } else { switch (prefs.showmode) { case PrefsPack::SHOW_NORMAL: mainWindow->show(); break; case PrefsPack::SHOW_MAX: mainWindow->showMaximized(); break; case PrefsPack::SHOW_FULL: mainWindow->showFullScreen(); break; } } QTimer::singleShot(0, mainWindow, SLOT(initDbOpen())); // Connect exit handlers etc.. Beware, apparently this must come // after mainWindow->show()? 
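// A minimal sketch of the translation setup performed earlier in main(): the
// Qt base catalog for the current locale and an application catalog found in
// a data directory are loaded and installed on the QApplication. The
// "myapp_" prefix and the datadir argument are illustrative assumptions.
#include <QApplication>
#include <QLibraryInfo>
#include <QLocale>
#include <QTranslator>

static void installTranslations(QApplication& app, const QString& datadir)
{
    QString lang = QLocale::system().name().left(2);   // e.g. "fr"
    // The translators must outlive the call, hence the statics.
    static QTranslator qtTrans;
    qtTrans.load(QString("qt_%1").arg(lang),
                 QLibraryInfo::location(QLibraryInfo::TranslationsPath));
    app.installTranslator(&qtTrans);
    static QTranslator appTrans;
    appTrans.load(QString("myapp_") + lang, datadir);
    app.installTranslator(&appTrans);
}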
app.connect(&app, SIGNAL(lastWindowClosed()), &app, SLOT(quit())); app.connect(&app, SIGNAL(aboutToQuit()), mainWindow, SLOT(close())); mainWindow->sSearch->searchTypCMB->setCurrentIndex(prefs.ssearchTyp); mainWindow->sSearch->searchTypeChanged(prefs.ssearchTyp); if (op_flags & OPT_q) { SSearch::SSearchType stype; if (op_flags & OPT_o) { stype = SSearch::SST_ANY; } else if (op_flags & OPT_f) { stype = SSearch::SST_FNM; } else if (op_flags & OPT_a) { stype = SSearch::SST_ALL; } else { stype = SSearch::SST_LANG; } mainWindow->sSearch->searchTypCMB->setCurrentIndex(int(stype)); mainWindow-> sSearch->setSearchString(QString::fromLocal8Bit(question.c_str())); } else if (!urltoview.empty()) { LOGDEB("MAIN: got urltoview [" << (urltoview) << "]\n" ); mainWindow->setUrlToView(QString::fromLocal8Bit(urltoview.c_str())); } return app.exec(); } QString myGetFileName(bool isdir, QString caption, bool filenosave, QString dirloc, QString dfltnm) { LOGDEB1("myFileDialog: isdir " << isdir << "\n"); QFileDialog dialog(0, caption); #ifdef _WIN32 // The default initial directory on WIndows is the Recoll install, // which is not appropriate. Change it, only for the first call // (next will start with the previous selection). static bool first{true}; if (first) { first = false; // See https://doc.qt.io/qt-5/qfiledialog.html#setDirectoryUrl // about the clsid magic (this one points to the desktop). dialog.setDirectoryUrl( QUrl("clsid:B4BFCC3A-DB2C-424C-B029-7FE99A87C641")); } #endif if (!dirloc.isEmpty()) { dialog.setDirectory(dirloc); } if (!dfltnm.isEmpty()) { dialog.selectFile(dfltnm); } if (isdir) { dialog.setFileMode(QFileDialog::Directory); dialog.setOptions(QFileDialog::ShowDirsOnly); } else { dialog.setFileMode(QFileDialog::AnyFile); if (filenosave) dialog.setAcceptMode(QFileDialog::AcceptOpen); else dialog.setAcceptMode(QFileDialog::AcceptSave); } dialog.setViewMode(QFileDialog::List); QFlags flags = QDir::NoDotAndDotDot | QDir::Hidden; if (isdir) flags |= QDir::Dirs; else flags |= QDir::Dirs | QDir::Files; dialog.setFilter(flags); if (dialog.exec() == QDialog::Accepted) { return dialog.selectedFiles().value(0); } return QString(); } recoll-1.26.3/qtgui/webcache.ui0000644000175000017500000000236013303776057013251 00000000000000 Webcache 0 0 400 300 Webcache editor Search regexp searchLE QAbstractItemView::NoEditTriggers true false recoll-1.26.3/qtgui/systray.cpp0000644000175000017500000000413213533651561013367 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include #include #include "systray.h" #include "rclmain_w.h" #include "log.h" void RclTrayIcon::init() { QAction *restoreAction = new QAction(tr("Restore"), this); QAction *quitAction = new QAction(tr("Quit"), this); connect(restoreAction, SIGNAL(triggered()), this, SLOT(onRestore())); connect(quitAction, SIGNAL(triggered()), m_mainw, SLOT(fileExit())); QMenu *trayIconMenu = new QMenu(0); trayIconMenu->addAction(restoreAction); trayIconMenu->addAction(quitAction); setContextMenu(trayIconMenu); connect(this, SIGNAL(activated(QSystemTrayIcon::ActivationReason)), this, SLOT(onActivated(QSystemTrayIcon::ActivationReason))); } void RclTrayIcon::onRestore() { // Hide and show to restore on current desktop m_mainw->hide(); switch (prefs.showmode) { case PrefsPack::SHOW_NORMAL: m_mainw->show(); break; case PrefsPack::SHOW_MAX: m_mainw->showMaximized(); break; case PrefsPack::SHOW_FULL: m_mainw->showFullScreen(); break; } } void RclTrayIcon::onActivated(QSystemTrayIcon::ActivationReason reason) { LOGDEB("RclTrayIcon::onActivated: reason " << reason << std::endl); switch (reason) { case QSystemTrayIcon::DoubleClick: case QSystemTrayIcon::Trigger: case QSystemTrayIcon::MiddleClick: onRestore(); break; default: return; } } recoll-1.26.3/qtgui/rclzg.cpp0000644000175000017500000000517313533651561013000 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifdef USE_ZEITGEIST #include "autoconfig.h" #include "rclzg.h" #include "log.h" #include "pathut.h" #include #include #include #include #include #include #include // Can't see no reason why our logger couldn' static QtZeitgeist::Log zglogger; void zg_send_event(ZgSendType, const Rcl::Doc& doc) { static int needinit = 1; if (needinit) { QtZeitgeist::init(); needinit = 0; } // The subject is about the document QtZeitgeist::DataModel::Subject subject; subject.setUri(QString::fromLocal8Bit(doc.url.c_str())); // TODO: refine these subject.setInterpretation(QtZeitgeist::Interpretation::Subject::NFODocument); if (doc.ipath.empty()) subject.setManifestation(QtZeitgeist::Manifestation::Subject::NFOFileDataObject); else subject.setManifestation(QtZeitgeist::Manifestation::Subject::NFOEmbeddedFileDataObject); subject.setOrigin(QString::fromLocal8Bit(path_getfather(doc.url).c_str())); subject.setMimeType(doc.mimetype.c_str()); string titleOrFilename; doc.getmeta(Rcl::Doc::keytt, &titleOrFilename); if (titleOrFilename.empty()) { doc.getmeta(Rcl::Doc::keyfn, &titleOrFilename); } subject.setText(QString::fromUtf8(titleOrFilename.c_str())); QtZeitgeist::DataModel::Event event; event.setTimestamp(QDateTime::currentDateTime()); event.addSubject(subject); event.setInterpretation(QtZeitgeist::Interpretation::Event::ZGAccessEvent); event.setManifestation(QtZeitgeist::Manifestation::Event::ZGUserActivity); event.setActor("app://recoll.desktop"); QtZeitgeist::DataModel::EventList events; events.push_back(event); LOGDEB("zg_send_event, sending for " << (doc.mimetype) << " " << (doc.url) << "\n" ); zglogger.insertEvents(events); } #endif recoll-1.26.3/qtgui/rclmain.ui0000644000175000017500000004075513347664027013150 00000000000000 RclMainBase 0 0 800 600 0 0 Recoll 0 4 2 4 2 0 0 2 0 0 0 800 25 &File &View &Tools &Preferences &Help &Results E&xit Ctrl+Q fileExitAction Update &index fileToggleIndexingAction Trigger incremental pass fileBumpIndexingAction false &Rebuild index fileRebuildIndexAction &Erase document history fileEraseDocHistoryAction &Erase search history fileEraseSearchHistoryAction Missing &helpers showMissingHelpers_Action Indexed &MIME types showActiveTypes_Action &About Recoll helpAbout_RecollAction &User manual userManualAction :/images/history.png:/images/history.png Document &History Document History toolsDoc_HistoryAction :/images/asearch.png:/images/asearch.png &Advanced Search Advanced/complex Search toolsAdvanced_SearchAction &Sort parameters Sort parameters toolsSort_parametersAction :/images/spell.png:/images/spell.png Term &explorer Term explorer tool toolsSpellAction false :/images/nextpage.png:/images/nextpage.png Next page Next page of results PgDown nextPageAction false :/images/firstpage.png:/images/firstpage.png First page Go to first page of results Shift+PgUp firstPageAction false :/images/prevpage.png:/images/prevpage.png Previous page Previous page of results PgUp prevPageAction &Index configuration indexConfigAction Indexing &schedule indexScheduleAction &GUI configuration queryPrefsAction E&xternal index dialog External index dialog extIdxAction true Enable synonyms Enable synonyms enbSynAction &Full Screen Full Screen F11 toggleFullScreenAction true false :/images/up.png:/images/up.png Sort by date, oldest first Sort by dates from oldest to newest true false :/images/down.png:/images/down.png Sort by date, newest first Sort by dates from newest to oldest Show Query Details true :/images/table.png:/images/table.png Show as table Show results in a spreadsheet-like table 
Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page :/images/code-block.png:/images/code-block.png Query Fragments true With failed files retrying Next update will retry previously failed files fileToggleIndexingAction Save last query Load saved query Special Indexing Indexing with special options Index &statistics Webcache Editor SSearch QWidget

ssearch_w.h
ResList QWidget
reslist.h
ssearch_w.h reslist.h recoll-1.26.3/qtgui/advshist.cpp0000644000175000017500000000511013566424763013503 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "advshist.h" #include "guiutils.h" #include "log.h" #include "xmltosd.h" using namespace std; using namespace Rcl; AdvSearchHist::AdvSearchHist() { read(); } AdvSearchHist::~AdvSearchHist() { for (auto& entry : m_entries) { entry.reset(); } } std::shared_ptr AdvSearchHist::getnewest() { if (m_entries.empty()) return std::shared_ptr(); return m_entries[0]; } std::shared_ptr AdvSearchHist::getolder() { m_current++; if (m_current >= int(m_entries.size())) { m_current--; return std::shared_ptr(); } return m_entries[m_current]; } std::shared_ptr AdvSearchHist::getnewer() { if (m_current == -1 || m_current == 0 || m_entries.empty()) return std::shared_ptr(); return m_entries[--m_current]; } bool AdvSearchHist::push(std::shared_ptr sd) { m_entries.insert(m_entries.begin(), sd); if (m_current != -1) m_current++; string xml = sd->asXML(); // dynconf interprets <= 0 as unlimited size, but we want 0 to // disable saving history if (prefs.historysize != 0) { g_dynconf->enterString(advSearchHistSk, xml, prefs.historysize); } return true; } bool AdvSearchHist::read() { if (!g_dynconf) return false; // getStringEntries() return the entries in order (lower key // first), but we want most recent first, so revert vector lxml = g_dynconf->getStringEntries(advSearchHistSk); for (auto it = lxml.rbegin(); it != lxml.rend(); it++) { std::shared_ptr sd = xmlToSearchData(*it); if (sd) m_entries.push_back(sd); } return true; } void AdvSearchHist::clear() { g_dynconf->eraseAll(advSearchHistSk); } recoll-1.26.3/qtgui/systray.h0000644000175000017500000000240013533651561013030 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _SYSTRAY_H_INCLUDED_ #define _SYSTRAY_H_INCLUDED_ #include #include class RclMain; class RclTrayIcon : public QSystemTrayIcon { Q_OBJECT public: RclTrayIcon(RclMain *mainw, const QIcon& icon, QObject* parent = 0) : QSystemTrayIcon(icon, parent), m_mainw(mainw) { init(); } public slots: void onRestore(); void onActivated(QSystemTrayIcon::ActivationReason reason); private: void init(); RclMain *m_mainw; }; #endif /* _SYSTRAY_H_INCLUDED_ */ recoll-1.26.3/qtgui/searchclause_w.cpp0000644000175000017500000001345513533651561014651 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "recoll.h" #include "log.h" #include "searchclause_w.h" #include #include #include #include #include #include #include using namespace Rcl; /* * Constructs a SearchClauseW as a child of 'parent', with the * name 'name' and widget flags set to 'f'. */ SearchClauseW::SearchClauseW(QWidget* parent) : QWidget(parent) { QHBoxLayout* hLayout = new QHBoxLayout(this); sTpCMB = new QComboBox(this); sTpCMB->setEditable(false); hLayout->addWidget(sTpCMB); fldCMB = new QComboBox(this); fldCMB->setEditable(false); hLayout->addWidget(fldCMB); proxSlackSB = new QSpinBox(this); hLayout->addWidget(proxSlackSB); wordsLE = new QLineEdit(this); wordsLE->setMinimumSize(QSize(190, 0)); hLayout->addWidget(wordsLE); languageChange(); resize(QSize(0, 0).expandedTo(minimumSizeHint())); connect(sTpCMB, SIGNAL(activated(int)), this, SLOT(tpChange(int))); } /* * Destroys the object and frees any allocated resources */ SearchClauseW::~SearchClauseW() { // no need to delete child widgets, Qt does it all for us } /* * Sets the strings of the subwidgets using the current * language. 
*/ void SearchClauseW::languageChange() { sTpCMB->clear(); sTpCMB->addItem(tr("Any")); // 0 sTpCMB->addItem(tr("All")); //1 sTpCMB->addItem(tr("None"));//2 sTpCMB->addItem(tr("Phrase"));//3 sTpCMB->addItem(tr("Proximity"));//4 sTpCMB->addItem(tr("File name"));//5 // sTpCMB->insertItem(tr("Complex clause"));//6 fldCMB->addItem(tr("No field")); if (theconfig) { set fields = theconfig->getIndexedFields(); for (set::const_iterator it = fields.begin(); it != fields.end(); it++) { // Some fields don't make sense here if (it->compare("filename")) { fldCMB->addItem(QString::fromUtf8(it->c_str())); } } } // Ensure that the spinbox will be enabled/disabled depending on // combobox state tpChange(0); sTpCMB->setToolTip(tr("Select the type of query that will be performed with the words")); proxSlackSB->setToolTip(tr("Number of additional words that may be interspersed with the chosen ones")); } // Translate my window state into an Rcl search clause SearchDataClause *SearchClauseW::getClause() { if (wordsLE->text().isEmpty()) return 0; string field; if (fldCMB->currentIndex() != 0) { field = (const char *)fldCMB->currentText().toUtf8(); } string text = (const char *)wordsLE->text().toUtf8(); switch (sTpCMB->currentIndex()) { case 0: return new SearchDataClauseSimple(SCLT_OR, text, field); case 1: return new SearchDataClauseSimple(SCLT_AND, text, field); case 2: { SearchDataClauseSimple *cl = new SearchDataClauseSimple(SCLT_OR, text, field); cl->setexclude(true); return cl; } case 3: return new SearchDataClauseDist(SCLT_PHRASE, text, proxSlackSB->value(), field); case 4: return new SearchDataClauseDist(SCLT_NEAR, text, proxSlackSB->value(), field); case 5: return new SearchDataClauseFilename(text); case 6: default: return 0; } } void SearchClauseW::setFromClause(SearchDataClauseSimple *cl) { LOGDEB("SearchClauseW::setFromClause\n" ); switch(cl->getTp()) { case SCLT_OR: if (cl->getexclude()) tpChange(2); else tpChange(0); break; case SCLT_AND: tpChange(1); break; case SCLT_PHRASE: tpChange(3); break; case SCLT_NEAR: tpChange(4); break; case SCLT_FILENAME: tpChange(5); break; default: return; } LOGDEB("SearchClauseW::setFromClause: calling erase\n" ); clear(); QString text = QString::fromUtf8(cl->gettext().c_str()); QString field = QString::fromUtf8(cl->getfield().c_str()); switch(cl->getTp()) { case SCLT_OR: case SCLT_AND: case SCLT_PHRASE: case SCLT_NEAR: if (!field.isEmpty()) { int idx = fldCMB->findText(field); if (field >= 0) { fldCMB->setCurrentIndex(idx); } else { fldCMB->setEditText(field); } } /* FALLTHROUGH */ case SCLT_FILENAME: wordsLE->setText(text); break; default: break; } switch(cl->getTp()) { case SCLT_PHRASE: case SCLT_NEAR: { SearchDataClauseDist *cls = dynamic_cast(cl); proxSlackSB->setValue(cls->getslack()); } break; default: break; } } void SearchClauseW::clear() { wordsLE->setText(""); fldCMB->setCurrentIndex(0); proxSlackSB->setValue(0); } // Handle combobox change: may need to enable/disable the distance // spinbox and field spec void SearchClauseW::tpChange(int index) { if (index < 0 || index > 5) return; if (sTpCMB->currentIndex() != index) sTpCMB->setCurrentIndex(index); switch (index) { case 3: case 4: proxSlackSB->show(); proxSlackSB->setEnabled(true); if (index == 4) proxSlackSB->setValue(10); else proxSlackSB->setValue(0); break; default: proxSlackSB->close(); } if (index == 5) { fldCMB->close(); } else { fldCMB->show(); } } recoll-1.26.3/qtgui/ssearch_w.h0000644000175000017500000000751113566424763013310 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free 
software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SSEARCH_W_H_INCLUDED_ #define _SSEARCH_W_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include #include #include class QTimer; #include "recoll.h" #include "searchdata.h" #include #include "ui_ssearchb.h" struct SSearchDef; class SSearch; class QCompleter; class RclCompleterModel : public QAbstractListModel { Q_OBJECT public: RclCompleterModel(SSearch *parent = 0) : QAbstractListModel((QWidget*)parent), m_parent(parent) { init(); } int rowCount(const QModelIndex &parent = QModelIndex()) const override; QVariant data(const QModelIndex &index, int role = Qt::DisplayRole) const override; public slots: virtual void onPartialWord(int, const QString&, const QString&); private: void init(); vector currentlist; int firstfromindex; QPixmap clockPixmap; QPixmap interroPixmap; SSearch *m_parent{nullptr}; }; class SSearch : public QWidget, public Ui::SSearchBase { Q_OBJECT public: // The values MUST NOT change, there are assumptions about them in // different parts of the code enum SSearchType {SST_ANY = 0, SST_ALL = 1, SST_FNM = 2, SST_LANG = 3}; SSearch(QWidget* parent = 0, const char * = 0) : QWidget(parent) { setupUi(this); init(); } virtual void init(); virtual void setAnyTermMode(); virtual bool hasSearchString(); virtual void setPrefs(); // Return last performed search as XML text. 
virtual std::string asXML(); // Restore ssearch UI from saved search virtual bool fromXML(const SSearchDef& fxml); virtual QString currentText(); virtual bool eventFilter(QObject *target, QEvent *event); public slots: virtual void searchTypeChanged(int); virtual void setSearchString(const QString& text); virtual void startSimpleSearch(); virtual void addTerm(QString); virtual void onWordReplace(const QString&, const QString&); virtual void takeFocus(); // Forget current entry and any state (history) virtual void clearAll(); private slots: virtual void searchTextChanged(const QString&); virtual void searchTextEdited(const QString&); virtual void onCompletionActivated(const QString&); virtual void restoreText(); virtual void onHistoryClicked(); virtual void onCompleterShown(); signals: void startSearch(std::shared_ptr, bool); void setDescription(QString); void clearSearch(); void partialWord(int, const QString& text, const QString &partial); private: int getPartialWord(QString& word); bool startSimpleSearch(const string& q, int maxexp = -1); RclCompleterModel *m_completermodel{nullptr}; QCompleter *m_completer{nullptr}; /* We save multiword entries because the completer replaces them with the completion */ QString m_savedEditText; /* Saved xml version of the search, as we start it */ std::string m_xml; }; #endif /* _SSEARCH_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/rclm_wins.cpp0000644000175000017500000003312613566424763013663 00000000000000/* Copyright (C) 2005-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include "log.h" #include "internfile.h" #include "listdialog.h" #include "confgui/confguiindex.h" #include "idxsched.h" #ifdef _WIN32 #include "winschedtool.h" #else #include "crontool.h" #include "rtitool.h" #endif #include "snippets_w.h" #include "fragbuts.h" #include "specialindex.h" #include "rclmain_w.h" #include "webcache.h" #include "restable.h" using namespace std; static const QKeySequence quitKeySeq("Ctrl+q"); static const QKeySequence closeKeySeq("Ctrl+w"); // Open advanced search dialog. void RclMain::showAdvSearchDialog() { if (asearchform == 0) { asearchform = new AdvSearch(0); if (asearchform == 0) { return; } connect(new QShortcut(quitKeySeq, asearchform), SIGNAL (activated()), this, SLOT (fileExit())); connect(asearchform, SIGNAL(startSearch(std::shared_ptr, bool)), this, SLOT(startSearch(std::shared_ptr, bool))); connect(asearchform, SIGNAL(setDescription(QString)), this, SLOT(onSetDescription(QString))); asearchform->show(); } else { // Close and reopen, in hope that makes us visible... 
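        // The close()/show() pair below recreates the window so that it
        // shows up on the current desktop. A lighter-weight alternative,
        // given here only as an untested sketch (raising behaviour depends
        // on the window manager), would be to keep the window and simply
        // bring it to the front:
        //
        //     asearchform->raise();
        //     asearchform->activateWindow();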
asearchform->close(); asearchform->show(); } } void RclMain::showSpellDialog() { if (spellform == 0) { spellform = new SpellW(0); connect(new QShortcut(quitKeySeq, spellform), SIGNAL (activated()), this, SLOT (fileExit())); connect(spellform, SIGNAL(wordSelect(QString)), sSearch, SLOT(addTerm(QString))); spellform->show(); } else { // Close and reopen, in hope that makes us visible... spellform->close(); spellform->show(); } } void RclMain::showWebcacheDialog() { switch (indexerState()) { case RclMain::IXST_UNKNOWN: QMessageBox::warning(0, "Recoll", tr("Unknown indexer state. " "Can't access webcache file.")); return; case RclMain::IXST_RUNNINGMINE: case RclMain::IXST_RUNNINGNOTMINE: QMessageBox::warning(0, "Recoll", tr("Indexer is running. " "Can't access webcache file.")); return; case RclMain::IXST_NOTRUNNING: break; } if (!m_pidfile) { m_pidfile = new Pidfile(theconfig->getPidfile()); if (m_pidfile->open() != 0) { deleteZ(m_pidfile); return; } if (m_pidfile->write_pid() != 0) { deleteZ(m_pidfile); return; } } if (webcache == 0) { webcache = new WebcacheEdit(this); webcache->setAttribute(Qt::WA_DeleteOnClose); connect(new QShortcut(quitKeySeq, webcache), SIGNAL (activated()), this, SLOT (fileExit())); connect(webcache, SIGNAL(destroyed(QObject*)), this, SLOT(onWebcacheDestroyed(QObject*)) ); webcache->show(); } } void RclMain::onWebcacheDestroyed(QObject *) { deleteZ(m_pidfile); webcache = 0; } void RclMain::showIndexStatistics() { showSpellDialog(); if (spellform == 0) return; spellform->setMode(SpellW::TYPECMB_STATS); } void RclMain::showFragButs() { if (fragbuts && fragbuts->isStale(0)) { deleteZ(fragbuts); } if (fragbuts == 0) { fragbuts = new FragButs(0); if (fragbuts->ok()) { fragbuts->show(); connect(fragbuts, SIGNAL(fragmentsChanged()), this, SLOT(onFragmentsChanged())); } else { deleteZ(fragbuts); } } else { // Close and reopen, in hope that makes us visible... fragbuts->close(); fragbuts->show(); } } void RclMain::showSpecIdx() { if (specidx == 0) { specidx = new SpecIdxW(0); connect(specidx, SIGNAL(accepted()), this, SLOT(specialIndex())); specidx->show(); } else { // Close and reopen, in hope that makes us visible... specidx->close(); specidx->show(); } } void RclMain::showIndexConfig() { showIndexConfig(false); } void RclMain::execIndexConfig() { showIndexConfig(true); } void RclMain::showIndexConfig(bool modal) { LOGDEB("showIndexConfig()\n" ); bool created{false}; if (indexConfig == 0) { created = true; indexConfig = new ConfIndexW(0, theconfig); } indexConfig->showPrefs(modal); if (created) { connect(new QShortcut(quitKeySeq, indexConfig->getDialog()), SIGNAL (activated()), this, SLOT (fileExit())); } } void RclMain::showIndexSched() { showIndexSched(false); } void RclMain::execIndexSched() { showIndexSched(true); } void RclMain::showIndexSched(bool modal) { LOGDEB("showIndexSched()\n" ); if (indexSched == 0) { indexSched = new IdxSchedW(this); connect(new QShortcut(quitKeySeq, indexSched), SIGNAL (activated()), this, SLOT (fileExit())); #ifdef _WIN32 indexSched->cronCLB->setText(tr("Batch scheduling")); indexSched->cronCLB->setDescription( tr("The tool will let you decide at what time indexing should run. 
" " It uses the Windows task scheduler.")); indexSched->mainExplainLBL->hide(); indexSched->rtidxCLB->hide(); #endif connect(indexSched->cronCLB, SIGNAL(clicked()), this, SLOT(execCronTool())); if (theconfig && theconfig->isDefaultConfig()) { #ifdef RCL_MONITOR connect(indexSched->rtidxCLB, SIGNAL(clicked()), this, SLOT(execRTITool())); #else indexSched->rtidxCLB->setEnabled(false); indexSched->rtidxCLB->setToolTip(tr("Disabled because the real time indexer was not compiled in.")); #endif } else { indexSched->rtidxCLB->setEnabled(false); indexSched->rtidxCLB->setToolTip(tr("This configuration tool only works for the main index.")); } } else { // Close and reopen, in hope that makes us visible... indexSched->close(); } if (modal) { indexSched->exec(); indexSched->setModal(false); } else { indexSched->show(); } } void RclMain::showCronTool() { showCronTool(false); } void RclMain::execCronTool() { showCronTool(true); } void RclMain::showCronTool(bool modal) { LOGDEB("showCronTool()\n" ); if (cronTool == 0) { #ifdef _WIN32 cronTool = new WinSchedToolW(0); #else cronTool = new CronToolW(0); #endif connect(new QShortcut(quitKeySeq, cronTool), SIGNAL (activated()), this, SLOT (fileExit())); } else { // Close and reopen, in hope that makes us visible... cronTool->close(); } if (modal) { cronTool->exec(); cronTool->setModal(false); } else { cronTool->show(); } } void RclMain::showRTITool() { showRTITool(false); } void RclMain::execRTITool() { showRTITool(true); } void RclMain::showRTITool(bool modal) { #ifndef _WIN32 LOGDEB("showRTITool()\n" ); if (rtiTool == 0) { rtiTool = new RTIToolW(0); connect(new QShortcut(quitKeySeq, rtiTool), SIGNAL (activated()), this, SLOT (fileExit())); } else { // Close and reopen, in hope that makes us visible... rtiTool->close(); } if (modal) { rtiTool->exec(); rtiTool->setModal(false); } else { rtiTool->show(); } #endif } void RclMain::showUIPrefs() { if (uiprefs == 0) { uiprefs = new UIPrefsDialog(this); connect(new QShortcut(quitKeySeq, uiprefs), SIGNAL (activated()), this, SLOT (fileExit())); connect(uiprefs, SIGNAL(uiprefsDone()), this, SLOT(setUIPrefs())); connect(this, SIGNAL(stemLangChanged(const QString&)), uiprefs, SLOT(setStemLang(const QString&))); } else { // Close and reopen, in hope that makes us visible... uiprefs->close(); rwSettings(false); uiprefs->setFromPrefs(); } uiprefs->show(); } void RclMain::showExtIdxDialog() { showUIPrefs(); uiprefs->tabWidget->setCurrentIndex(3); } void RclMain::showAboutDialog() { string vstring = Rcl::version_string() + string("
www.recoll.org") + string("
www.xapian.org"); QMessageBox::information(this, tr("About Recoll"), vstring.c_str()); } void RclMain::showMissingHelpers() { string miss; if (!theconfig->getMissingHelperDesc(miss)) { QMessageBox::information( this, "", tr("No information: initial indexing not yet performed.")); return; } QString msg = QString::fromUtf8("

") + tr("External applications/commands needed for your file types " "and not found, as stored by the last indexing pass in "); msg += ""; msg += QString::fromLocal8Bit(theconfig->getConfDir().c_str()); msg += "/missing:

\n";
    if (!miss.empty()) {
        msg += QString::fromUtf8(miss.c_str());
    } else {
        msg += tr("No helpers found missing");
    }
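    // The list above only reflects what the last indexing pass recorded.
    // As a minimal sketch (the helper name and the two-argument
    // ExecCmd::which() overload are assumptions here, and execmd.h would
    // need to be included), a single external command could be re-probed
    // on the current PATH like this:
    //
    //     std::string exepath;
    //     if (ExecCmd::which("antiword", exepath)) {
    //         LOGDEB("helper now found at " << exepath << "\n");
    //     }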
    msg += "
"; QMessageBox::information(this, tr("Missing helper programs"), msg); } void RclMain::showActiveTypes() { string reason; bool maindberror; if (!maybeOpenDb(reason, true, &maindberror)) { QMessageBox::warning(0, tr("Error"), u8s2qs(reason), QMessageBox::Ok, QMessageBox::NoButton); return; } // All mime types in index. vector vdbtypes; if (!rcldb->getAllDbMimeTypes(vdbtypes)) { QMessageBox::warning(0, tr("Error"), tr("Index query error"), QMessageBox::Ok, QMessageBox::NoButton); return; } set mtypesfromdb; mtypesfromdb.insert(vdbtypes.begin(), vdbtypes.end()); // All types listed in mimeconf: vector mtypesfromconfig = theconfig->getAllMimeTypes(); // Intersect file system types with config types (those not in the // config can be indexed by name, not by content) set mtypesfromdbconf; for (vector::const_iterator it = mtypesfromconfig.begin(); it != mtypesfromconfig.end(); it++) { if (mtypesfromdb.find(*it) != mtypesfromdb.end()) mtypesfromdbconf.insert(*it); } // Substract the types for missing helpers (the docs are indexed // by name only): string miss; if (theconfig->getMissingHelperDesc(miss) && !miss.empty()) { FIMissingStore st(miss); map >::const_iterator it; for (it = st.m_typesForMissing.begin(); it != st.m_typesForMissing.end(); it++) { set::const_iterator it1; for (it1 = it->second.begin(); it1 != it->second.end(); it1++) { set::iterator it2 = mtypesfromdbconf.find(*it1); if (it2 != mtypesfromdbconf.end()) mtypesfromdbconf.erase(it2); } } } ListDialog dialog; dialog.setWindowTitle(tr("Indexed MIME Types")); // Turn the result into a string and display dialog.groupBox->setTitle(tr("Content has been indexed for these MIME types:")); // We replace the list with an editor so that the user can copy/paste delete dialog.listWidget; QTextEdit *editor = new QTextEdit(dialog.groupBox); editor->setReadOnly(true); dialog.horizontalLayout->addWidget(editor); if (mtypesfromdbconf.empty()) { editor->append(tr("Types list empty: maybe wait for indexing to " "progress?")); } else { for (set::const_iterator it = mtypesfromdbconf.begin(); it != mtypesfromdbconf.end(); it++) { editor->append(QString::fromUtf8(it->c_str())); } } editor->moveCursor(QTextCursor::Start); editor->ensureCursorVisible(); dialog.exec(); } void RclMain::newDupsW(const Rcl::Doc, const vector dups) { ListDialog dialog; dialog.setWindowTitle(tr("Duplicate documents")); dialog.groupBox->setTitle(tr("These Urls ( | ipath) share the same" " content:")); // We replace the list with an editor so that the user can copy/paste delete dialog.listWidget; QTextEdit *editor = new QTextEdit(dialog.groupBox); editor->setReadOnly(true); dialog.horizontalLayout->addWidget(editor); for (vector::const_iterator it = dups.begin(); it != dups.end(); it++) { if (it->ipath.empty()) editor->append(QString::fromLocal8Bit(it->url.c_str())); else editor->append(QString::fromLocal8Bit(it->url.c_str()) + " | " + QString::fromUtf8(it->ipath.c_str())); } editor->moveCursor(QTextCursor::Start); editor->ensureCursorVisible(); dialog.exec(); } void RclMain::showSnippets(Rcl::Doc doc) { if (!m_snippets) { m_snippets = new SnippetsW(doc, m_source); connect(m_snippets, SIGNAL(startNativeViewer(Rcl::Doc, int, QString)), this, SLOT(startNativeViewer(Rcl::Doc, int, QString))); connect(new QShortcut(quitKeySeq, m_snippets), SIGNAL (activated()), this, SLOT (fileExit())); connect(new QShortcut(closeKeySeq, m_snippets), SIGNAL (activated()), m_snippets, SLOT (close())); if (restable) { connect( restable, SIGNAL(detailDocChanged(Rcl::Doc, std::shared_ptr)), m_snippets, 
SLOT(onSetDoc(Rcl::Doc, std::shared_ptr))); } } else { m_snippets->onSetDoc(doc, m_source); } m_snippets->show(); } recoll-1.26.3/qtgui/recoll.qrc0000644000175000017500000000113313533651561013132 00000000000000 images/asearch.png images/cancel.png images/close.png images/code-block.png images/history.png images/nextpage.png images/prevpage.png images/firstpage.png images/sortparms.png images/spell.png images/table.png images/up.png images/down.png images/recoll.png images/interro.png images/clock.png recoll-1.26.3/qtgui/recoll.h0000644000175000017500000000431213533651561012576 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RECOLL_H_INCLUDED_ #define _RECOLL_H_INCLUDED_ #include #include #include "rclconfig.h" #include "rcldb.h" #include "rclutil.h" #include // Misc declarations in need of sharing between the UI files // Open the database if needed. We now force a close/open by default extern bool maybeOpenDb(std::string &reason, bool force = true, bool *maindberror = 0); /** Retrieve configured stemming languages */ bool getStemLangs(vector& langs); extern RclConfig *theconfig; extern TempFile *rememberTempFile(TempFile); extern void forgetTempFile(string &fn); extern void deleteAllTempFiles(); extern std::shared_ptr rcldb; extern int recollNeedsExit; extern void startManual(const string& helpindex); extern void applyStyleSheet(const QString&); inline std::string qs2utf8s(const QString& qs) { return std::string((const char *)qs.toUtf8()); } inline std::string qs2u8s(const QString& qs) { return std::string((const char *)qs.toUtf8()); } inline QString u8s2qs(const std::string us) { return QString::fromUtf8(us.c_str()); } /** Specialized version of the qt file dialog. Can't use getOpenFile() etc. cause they hide dot files... */ extern QString myGetFileName(bool isdir, QString caption = QString(), bool filenosave = false, QString dirlocation = QString(), QString dlftnm = QString() ); #endif /* _RECOLL_H_INCLUDED_ */ recoll-1.26.3/qtgui/guiutils.h0000644000175000017500000001266613566424763013206 00000000000000/* Copyright (C) 2005 Jean-Francois Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _GUIUTILS_H_INCLUDED_ #define _GUIUTILS_H_INCLUDED_ #include #include #include #include #include #include "dynconf.h" extern RclDynConf *g_dynconf; #include "advshist.h" extern AdvSearchHist *g_advshistory; using std::string; using std::list; using std::vector; /** Holder for preferences (gets saved to user Qt prefs) */ class PrefsPack { public: // Simple search entry behaviour bool ssearchNoComplete; bool ssearchStartOnComplete; // Decide if we display the doc category filter control as a // toolbar+combobox or as a button group under simple search enum FilterCtlStyle {FCS_BT, FCS_CMB, FCS_MN}; int filterCtlStyle; int respagesize{8}; int historysize{0}; int maxhltextmbs; QString reslistfontfamily; // Not saved in prefs for now. Computed from qt defaults and used to // set main character color for webkit/textbrowser reslist and // snippets window. QString fontcolor; QString qtermstyle; // CSS style for query terms in reslist and other places int reslistfontsize; // Result list format string QString reslistformat; string creslistformat; QString reslistheadertext; // Date strftime format QString reslistdateformat; string creslistdateformat; QString qssFile; QString snipCssFile; QString queryStemLang; int mainwidth; int mainheight; enum ShowMode {SHOW_NORMAL, SHOW_MAX, SHOW_FULL}; int showmode{SHOW_NORMAL}; int pvwidth; // Preview window geom int pvheight; int toolArea; // Area for "tools" toolbar int resArea; // Area for "results" toolbar bool ssearchTypSav; // Remember last search mode (else always // start with same) int ssearchTyp{0}; // Use single app (default: xdg-open), instead of per-mime settings bool useDesktopOpen; // Remember sort state between invocations ? bool keepSort; QString sortField; bool sortActive; bool sortDesc; // Abstract preferences. Building abstracts can slow result display bool queryBuildAbstract{true}; bool queryReplaceAbstract{false}; // Synthetized abstract length (chars) and word context size (words) int syntAbsLen; int syntAbsCtx; // Abstract snippet separator QString abssep; // Snippets window max list size int snipwMaxLength; // Snippets window sort by page (dflt: by weight) bool snipwSortByPage; bool startWithAdvSearchOpen{false}; // Try to display html if it exists in the internfile stack. bool previewHtml; bool previewActiveLinks; // Use
    // <pre> tag to display highlighted text/plain inside html (else
    // we use <br>
at end of lines, which lets textedit wrap lines). enum PlainPre {PP_BR, PP_PRE, PP_PREWRAP}; int previewPlainPre; bool collapseDuplicates; bool showResultsAsTable; // Extra query indexes. This are stored in the history file, not qt prefs vector allExtraDbs; vector activeExtraDbs; // Advanced search subdir restriction: we don't activate the last value // but just remember previously entered values QStringList asearchSubdirHist; // Textual history of simple searches (this is just the combobox list) QStringList ssearchHistory; // Make phrase out of search terms and add to search in simple search bool ssearchAutoPhrase; double ssearchAutoPhraseThreshPC; // Ignored file types in adv search (startup default) QStringList asearchIgnFilTyps; bool fileTypesByCats; // Words that are automatically turned to ext:xx specs in the query // language entry. QString autoSuffs; bool autoSuffsEnable; // Synonyms file QString synFile; bool synFileEnable; QStringList restableFields; vector restableColWidths; // Remembered term match mode int termMatchType{0}; // Program version that wrote this. Not used for now, in prevision // of the case where we might need an incompatible change int rclVersion{1505}; // Suppress all noises bool noBeeps; bool showTrayIcon{false}; bool closeToTray{false}; bool trayMessages{false}; // See qxtconfirmationmessage. Needs to be -1 for the dialog to show int showTempFileWarning; // Advanced search window clause list state vector advSearchClauses; // Default paragraph format for result list static const char *dfltResListFormat; std::string stemlang(); }; /** Global preferences record */ extern PrefsPack prefs; /** Read write settings from disk file */ extern void rwSettings(bool dowrite); extern QString g_stringAllStem, g_stringNoStem; #endif /* _GUIUTILS_H_INCLUDED_ */ recoll-1.26.3/qtgui/i18n/0000755000175000017500000000000013570165410011776 500000000000000recoll-1.26.3/qtgui/i18n/recoll_nl.qm0000644000175000017500000033571013545064515014245 00000000000000ƒ14:dWAVvGcGΥH6rH6J+i!J+GJ6 J6'Lb/MzPhSS T5?WTliWXRZ0[ % \geigwj^Jssƍv8vovj!%0`́#NfJ #*'fS$ 7$UnU/nTxgt:$zEnE .&]3( +1{0,`E;s>.cw]Do2DNXMrX4m`^Jh6lt]nnOwOyww&#_H.v{vRyc/9xCnyͺL3 LSI]g؅ wK p#vvw 5w 5w 5w 5wJU].s7 ;U3ֳ6 f3rWuͼusggHli׸~Cg;U5Uk/e !D&.Nzh/Ō7o=dy?dG$JUY JUYY~[um_n|16uʷTʗ<ʗƸ ^}cLh=x_Jg=c҅Ax#%AXB c V<.۽dF1M;[3<_L(}NB&X4e 2lP/vh9"wTT(Psb߅"3 ZI7IhnAnߤfWf:=f~ ͷx,7W62Wer3¾`y%3dy%5 ŠtlS΄: #sX|NW2?̔ !ii -Zzt-"5)T>jTB9]BS,kՌor?)xr|ypk@,5'<̶԰e"'?,tfASâ  sRvǢaI^Ч~C>d p#z`xM:0$O" q-օuUKiMp hzsu9P3v2kvƒ6{^-T#mBiNKgU %?n7vÒP^:[XnDw)t C|K!v!#++^ 7I^<~WCjFW#FN-H:"_guMapI(f 8 piTR|Njw ϗpn{~=.݈(T l| tn0 ÓtÓtNJ>Զȍ7Ɇt']86 c#قD. 3e En %<*snT,y1:uG6J¢RFJn@QuuY/}}u;Eo0h,lc'h@ʶgu\c 䴥:?a{ T T Js N HȾ -(7< 7  9Zy ;3 DG KN ]# cC kn lMe k  qD I Vf " `p 9QY ÛCK ü> } 3 o e@ } iO JB *Nl ͹ :^S c *R4 +<b 6 >V, G.~M4 `P ` aE cEc d8% yj Iv N sr VTI C  ϙ  F ԅ W ^ ye3- TH h ,xu =!R Kj XRv X hhI t9k  o Q mv Σ rU ٷ ۷1 ~= ?  
VdJ 4 ( Y  'ИC +bC .ʢދ / 9ɝ L* P֙P RVP T#z Va \iC ]5 `F h v {l] !Y !Y$ W |  Ҭ} }% - iH   z~ ~w  N'A  m #D# 'RM -n 7Q 8w+ F6 OEL X^[ ]8 ]) ^" ^ s mC u0D y!1 y~ 3i ȩ u) u; P PQ 5d6 2 7a i ~ Ւ6e H  Q56x £ q3Ѿ36{.%n.ʢ-/.X3U-M68b9g<:Q~SOW" Y~s[s\3e3g3hx56p~b q!z cmc |y|B' lyIL}iAlle termen All clauses AdvSearchElke term Any clause AdvSearchTGeen juist achtervoegsel in grootte filter$Bad multiplier suffix in size filter AdvSearch mediamedia AdvSearchberichtmessage AdvSearch andereother AdvSearchpresentatie presentation AdvSearchspreadsheet spreadsheet AdvSearch teksttext AdvSearchalle <----- All AdvSearchBase<----- Sel <----- Sel AdvSearchBasevoeg term toe Add clause AdvSearchBase$geavanceerd zoekenAdvanced search AdvSearchBaseAlle ----> All ----> AdvSearchBasexElk niet lege veld aan de rechterzijde zal worden gecombineerd met En ("Alle clausules" keuze) en Of ("Bepalingen" keuze) voegwoorden. <br> "Elk", "en" Of "Geen" veldtypen kan een mix van eenvoudige woorden en uitdrukkingen tussen dubbele aanhalingstekens te accepteren. <br> Velden zonder gegevens worden genegeerd.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBasedoorbladerenBrowse AdvSearchBasePer categorie By categories AdvSearchBaseVVink dit aan om op datum te kunnen filteren'Check this to enable filtering on dates AdvSearchBase^vink dit aan om filetype filtering te activeren,Check this to enable filtering on file types AdvSearchBaseLVink dit aan om te filteren op grootte'Check this to enable filtering on sizes AdvSearchBaseVink dit aan om bestands catergorie te gebruiken in plaats van raw mime;Check this to use file categories instead of raw mime types AdvSearchBase sluitClose AdvSearchBaseverwijder term Delete clause AdvSearchBaseVvoer de top bestandsmap in om te doorzoekenEnter top directory for search AdvSearchBase FilterFilter AdvSearchBaseFilter datums Filter dates AdvSearchBaseFilter grootte Filter sizes AdvSearchBaseVindFind AdvSearchBaseVanFrom AdvSearchBase&negeer bestandstypeIgnored file types AdvSearchBaseomkerenInvert AdvSearchBaseMax grootte Max. Size AdvSearchBaseMaximale grootte. U kunt k / K, m / M gebruiken, g / G als multipliers4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseMin Grootte Min. Size AdvSearchBaseMinimummaat. U kunt k / K, m / M gebruiken, g / G als multipliers4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase.beperk tot bestandstypeRestrict file types AdvSearchBasedBeperk de resultaten tot de bestanden in de subtak%Restrict results to files in subtree: AdvSearchBase(Sla op als standaardSave as default AdvSearchBaseRZoek naar<br>documenten<br> die bevatten:'Search for
documents
satisfying: AdvSearchBase,Gezochte bestands typeSearched file types AdvSearchBaseSel ----> Sel -----> AdvSearchBase Begin met zoeken Start Search AdvSearchBaseTotTo AdvSearchBase<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexeer schema (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"> Een enkele numerieke waarde, door komma's gescheiden lijsten (1,3,5) en reeksen (1-7). Meer in het algemeen zullen de velden worden gebruikt <span style=" font-style:italic;">als </span> in het crontab bestand, en het volledige crontab syntax kan worden gebruikt, zie crontab (5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Bijvoorbeeld invoeren <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Dagen, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Uren</span> en <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minuten</span> zal recollindex starten op elke dag om 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Een schema met zeer frequent activering is waarschijnlijk minder efficint dan real time indexeren.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 AM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolW`<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Klik <span style=" font-style:italic;">Uitzetten</span> om automatisch batch indexeren uit te zetten, <span style=" font-style:italic;">Aanzetten</span> om het te activeren, <span style=" font-style:italic;">Annuleren</span> om niets te doen</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolW&Cron dialoogvenster Cron Dialog CronToolWXDagen van de week (* of 0-7, of 7 is Zondag))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWUitzettenDisable CronToolWAanzettenEnable CronToolWFout bij het instellen van cron job. Slechte syntax in de ingave?3Error installing cron entry. Bad syntax in fields ? CronToolWUren (*of 0-23Hours (* or 0-23) CronToolWHet lijkt erop dat met de hand bewerkt ingaves bestaan voor recollindex, kan niet crontab bewerkenPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinuten (0-59)Minutes (0-59) CronToolWDialoogDialog EditDialog"Configuratie fout Config error EditTranslokaal pad Local path EditTrans$Oorspronkelijk pad Original path EditTransbronpad Source path EditTranstoevoegenAdd EditTransBaseAnnuleerCancel EditTransBaseVerwijderenDelete EditTransBasePad vertalingenPath Translations EditTransBase BewaarSave EditTransBaseSelecteer n of meerdere bestandstypen, gebruik dan de bediening in het kader hieronder om te veranderen hoe ze worden verwerktkSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBase,zet vertalingspad voorSetting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Het blijkt dat de index voor deze configuratie niet bestaat.</span><br /><br />Als u gewoon uw home directory wilt indexeren met een set van redelijke standaardinstellingen, drukt u op de<span style=" font-style:italic;">Start indexeer nu</span>knop. Je zult in staat zijn om de details later aan te passen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Als u meer controle wil, gebruik dan de volgende links om de indexering configuratie en het schema aan te passen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Deze tools kunnen later worden geopend vanuit het<span style=" font-style:italic;">Voorkeuren</span> menu.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialog6Setup van eerste indexeringFirst indexing setupFirstIdxDialog0Configuratie inedexeringIndexing configurationFirstIdxDialog$Indexerings schemaIndexing scheduleFirstIdxDialog.Begin nu met indexeringStart indexing nowFirstIdxDialog(Dit laat u de mappen die u wilt indexeren, en andere parameters aan passen, zoals uitgesloten bestandspaden of namen, standaard character sets, enz.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialog$Dit zal u laten kiezen tussen batch en real-time indexering, en het opzetten van een automatisch schema voor batch indexeren (met behulp van cron)This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog"%1 niet gevonden. %1 not found.FragButs %1: %2%1: %2FragButs&Zoekterm fragmentenQuery FragmentsFragButs <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>indexering kan permanent draaien, het indexeren van bestanden als ze veranderen, of lopen op vaste intervallen.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Het lezen van de handleiding kan helpen om te beslissen tussen deze benaderingen (druk op F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Deze tool kan u helpen bij het opzetten van een schema om batch indexeren runs te automatiseren, of het starten van real time indexeren wanneer u zich aanmeldt (of beide, dat is echter zelden zinvol). </p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedWCron schemaCron scheduling IdxSchedWBeslis of real time indexeren wordt gestart wanneer u inlogt (alleen voor de standaard-index).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedW*indexing schema setupIndex scheduling setup IdxSchedW8Real time indexering opstartReal time indexing start up IdxSchedWDeze tool zal u laten beslissen op welk tijdstip het indexeren moet worden uitgevoerd en zal een crontab installeren._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedWDialoog vensterDialog ListDialogGroepVensterGroupBox ListDialogHet "Geschiedenis" bestand is beschadigd of on(lees)schrijfbaar geworden, graag controleren of verwijderen:K"history" file is damaged or un(read)writeable, please check or remove it: Main>Geen db bestand in configuratie No db directory in configurationMain&Volgende&NextPreview&Vorige &PreviousPreview&Zoek naar: &Search for:PreviewZKan doc omzetten in een interne representatie0Can't turn doc into internal representation for PreviewAnnuleerCancelPreview WissenClearPreviewSluit tab Close TabPreview6preview tekst aan het makenCreating preview textPreview<Fout bij het laden van bestandError while loading filePreviewJPreview tekst in editor aan het laden Loading preview text into editorPreview&Hoofd/kleine letter Match &CasePreview0Help programma ontbreektMissing helper program: PreviewKopieerCopyPreviewTextEditVouw lijnen Fold linesPreviewTextEdit$Behoud inspringingPreserve indentationPreviewTextEditDruk afPrintPreviewTextEdit.Druk huidige Preview afPrint Current PreviewPreviewTextEdit6Bewaar document als bestandSave document to filePreviewTextEditSelecteer alles Select AllPreviewTextEditToon veld Show fieldsPreviewTextEditToon afbeelding Show imagePreviewTextEdit Toon hoofd tekstShow main textPreviewTextEdit4<b>Aangepaste substructuurCustomised subtreesQObject<i>De parameters die volgen zijn ingesteld, hetzij op het hoogste niveau, als er niets <br>of een lege regel is geselecteerd in de keuzelijst boven, of voor de geselecteerde submap.<br> U kunt mappen toevoegen of verwijderen door op de +/- knoppen te klikken.The parameters that follow are set either at the top level, if nothing
or an empty line is selected in the listbox above, or for the selected subdirectory.
You can add or remove directories by clicking the +/- buttons.QObjectTekenset die wordt gebruikt voor het lezen van bestanden die het intern tekenset niet worden herkend, bijvoorbeeld pure tekstbestanden. Ondernemingen De standaard waarde is leeg en de waarde van de NLS-omgeving wordt gebruikt.Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObject2Standaard<br>karakter setDefault
character setQObject,Volg symbolische linksFollow symbolic linksQObjectVolg symbolische links tijdens het indexeren. De standaard is niet volgen, om dubbele indexering te voorkomenTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject$Globale parametersGlobal parametersQObject"Genegeerde eindesIgnored endingsQObject6Indexeer alle bestandsnamenIndex all file namesQObject&Indexeer de namen van bestanden waarvan de inhoud niet kan worden gedentificeerd of verwerkt (geen of niet-ondersteunde MIME-type). standaard true}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject"Lokale parametersLocal parametersQObjectZoek parametersSearch parametersQObject$Overgeslagen namen Skipped namesQObjectDe lijst van de submappen in de gendexeerde hirarchie <br> waar sommige parameters moeten worden geherdefinieerd. Standaard: leeg.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObject^Dit zijn bestandsnaam eindes voor bestanden die zullen worden gendexeerd door alleen de naam (geen MIME-type identificatie poging, geen decompressie, geen inhoud indexering).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObjectDit zijn patronen voor bestand of de mappen namen die niet mogen worden gendexeerd.LThese are patterns for file or directory names which should not be indexed.QObject Web geschiedenis Web historyQObject&Openen&OpenQWidget>&Open Bovenliggend document/map&Open Parent document/folderQWidget&Preview&PreviewQWidget(&Schijf naar Bestand&Write to FileQWidget>Kan het document niet uitpakkenCannot extract document: QWidget$Kies exact een mapChoose exactly one directoryQWidget,Kopieer &Bestands NaamCopy &File NameQWidgetKopieer &URL Copy &URLQWidget$kon map niet lezenCould not read directory: QWidgetXMaak of kies een bestandsnaam om op te slaanCreate or choose save directoryQWidget@Vindt &gelijksoortige documentenFind &similar documentsQWidget*Open &Knipsel vensterOpen &Snippets windowQWidgetOpen met Open WithQWidgetFPreview B&ovenliggende document/mapPreview P&arent document/folderQWidgetVoer script uit Run ScriptQWidget<Bewaar selektie naar bestandenSave selection to filesQWidget@Toon subdocumenten / attachmentsShow subdocuments / attachmentsQWidgetXOnverwachte bestandsnaam botsing, annuleren.+Unexpected file name collision, cancelling.QWidget&Niet nogmaals tonenDo not show again.QxtConfirmationMessage<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>indexering kan worden ingesteld om te draaien als een daemon, het bijwerken van de index als bestanden veranderen, in real time. Je krijgt dan een altijd up-to-date index, maar systeembronnen worden permanent gebruikt.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolW@start nu ook de indexatie daemon%Also start indexing daemon right now.RTIToolW^Autostart ongedaan gemaakt proces ook stoppen ?2Autostart file deleted. Kill current process too ?RTIToolW"Kan niet aanmakenCan't create: RTIToolW8Kon recollindex niet startenCould not execute recollindexRTIToolW"Verwijder bestand Deleting fileRTIToolWVerwijderen Deleting: RTIToolWLAutomatisch Starten realtime-indexeren"Real time indexing automatic startRTIToolW&Verwijder autostartRemoving autostartRTIToolWVervang bestandReplacing fileRTIToolWVervanging Replacing: RTIToolWVStart met indexeren bij mijn desktop-sessie.Start indexing daemon with my desktop session.RTIToolWWaarschuwingWarningRTIToolWalle talen(all languages)RclMainGeen taal (no stemming)RclMainOver Recoll About RecollRclMainAlleAllRclMainVerkeerde desktop snelkoppeling for %1:[%2] Graag de desktop snelkoppeling controleren?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMainPad verkeerd Bad pathsRclMainHVerkeerd pad in configuratie bestand!Bad paths in configuration file: RclMainVerkeerde command line voor viewer %1:[%2'] controleer mimeview van bestandCBad viewer command line for %1: [%2] Please check the mimeview fileRclMain8Geen toegang tot het bestandCan't access file: RclMain<kan preview venster niet makenCan't create preview windowRclMainjkan synomiemen bestand niet instellen ( parse error?)&Can't set synonyms file (parse error?)RclMain<Kan het bestand niet uitpakkenCan't uncompress file: RclMainfkan het index niet bijwerken:indexeren is al aktief#Can't update index: indexer runningRclMain|kan het document niet uitpakken of een tijdelijk bestand maken0Cannot extract document or create temporary fileRclMainFkan parent van document niet vindenCannot find parent documentRclMainbkan info van het document uit database niet lezen+Cannot retrieve document info from databaseRclMain6Kies bestand om op te slaanChoose file to saveRclMain Klik op annuleren om terug te keren naar de lijst. <br>Klik negeren om het voorbeeld toch te tonen( en te onthouden voor deze sessie)pClick Cancel to return to the list.
Click Ignore to show the preview anyway (and remember for this session).RclMainKlik Ok om de index voor dit bestand bij te werken, daarna moet u de opdracht opnieuw uitvoeren na het indexerenjClick Ok to update the index for this file, then you will need to re-run the query when indexing is done. RclMainSluitenClosingRclMainXDe inhoud is bijgewerkt voor deze mime types.Content has been indexed for these mime types:RclMainHKon bewaarde zoekopdracht niet ladenCould not load saved queryRclMainkon externe index niet openen. Db niet geopend. Controleer externe indexlijstHCould not open external index. Db not open. Check external indexes list.RclMain.Kan bestand niet openenCould not open file: RclMain>Kan niet schrijven naar bestandCould not write to fileRclMainzUitgeschakeld omdat real-time indexering niet ingeschakeld is;Disabled because the real time indexer was not compiled in.RclMainLaat deze waarschuwing niet meer zien (gebruik GUI voorkeuren om te herstellen)DDo not show this warning next time (use GUI preferences to restore).RclMainDocument filterDocument filterRclMain*Document geschiedenisDocument historyRclMainafgerondDoneRclMain2Vermenigvuldig documentenDuplicate documentsRclMainWis index Erasing indexRclMainFoutErrorRclMainUitvoeren: [ Executing: [RclMainExterne toepassingen / commandos die nodig zijn voor dit bestandstype en niet gevonden, zoals opgeslagen in de laatste indexerings pogingpExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMain"Geschiedenis data History dataRclMain"index geblokkeerd Index lockedRclMain$Index is niet openIndex not openRclMainIndex voor dit bestand is niet op tu date. geweigerd om verkeerde inforamtie te tonen te riskerenNIndex not up to date for this file. Refusing to risk showing the wrong entry. RclMain Index vraag foutIndex query errorRclMainIndex schemaIndex schedulingRclMain,Geindexeerd MIME TypesIndexed MIME TypesRclMaindDe indexeerder is bezig. Geen toegang tot webcache/Indexer is running. Can't access webcache file.RclMain8Indexering is nog niet bezigIndexing did not run yetRclMain$Indexering misluktIndexing failedRclMain&Indexering is bezigIndexing in progress: RclMain,Indexering onderbrokenIndexing interruptedRclMainLaad fout Load errorRclMain0Missende hulp programmasMissing helper programsRclMainMonitorenMonitorRclMainnGeen externe viewer voor dit mime type geconfigureerd [-No external viewer configured for mime type [RclMainBAlle hulpprogrammas zijn aanwezigNo helpers found missingRclMainJGeen opgeslagen vorige zoekresultatenNo preserved previous searchRclMain0Geen resultaten gevondenNo results foundRclMainNiets gezocht No searchRclMainGeenNoneRclMainOpenen van tijdelijke kopie.Alle bewerkingen zullen verloren gaan als u ze niet opslaat naar een permanente lokatie`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMain WissenPurgeRclMainBezig met opdracht <br>Vanwege beperkingen van de indexeerder zal bij,<br>stop het programma in zijn geheel sluiten!eQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMainZoekresultaat Query resultsRclMainLees fout Read failedRclMainZDe index resetten en geheel opnieuw beginnen?(Reset the index and start from scratch ?RclMain$Telresultaat(est.)Result count (est.)RclMainBestand opslaan Save fileRclMain@Bewaarde Zoekopdrachten (*.rclq)Saved Queries (*.rclq)RclMainPatronen selecteren kan alleen gebruikt worden met een start folder:Selection patterns can only be used with a start directoryRclMainVPatronen selecteren vraagt een begin folderSelection patterns need topdirRclMainHet spijt ons, dit is nog niet beschikbaar voor het windows platform, gebruik het bestands ingave menu om de index te updatenYSorry, not available under Windows for now, use the File menu entries to update the indexRclMain StemdbStemdbRclMainStop &IndexingStop &IndexingRclMain:Sub-documenten en attachmentsSub-documents and attachmentsRclMainHet huidige indexerings proces werdt niet gestart vanaf deze interface, kan het niet stoppenOThe current indexing process was not started from this interface, can't kill itRclMain Het huidige indexering proces werdt niet gestart vanaf deze interface. Klik Ok om het toch te stoppen, of annuleren om het zo te latenyThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainHet document hoort bij een externe index die niet up te daten is@The document belongs to an external index which I can't update. RclMainDe indexeerder is bezig dus er zou een verbetering moeten optreden als hij klaar is.@The indexer is running so things should improve when it's done. RclMainDe viewer gespecificeerd in mimeview voor %1: %2 is niet gevonden Wilt u het dialoogvenster voorkeuren openen?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainRDeze Urls (ipath) hebben dezelfde inhoud:-These Urls ( | ipath) share the same content:RclMainlDeze configuratie tool werkt alleen voor de hoofdindex6This configuration tool only works for the main index.RclMainJDeze zoekopdracht is niet meer aktief"This search is not active any moreRclMainOnbekendUnknownRclMainDe staat van de indexer is onbekend. Kan geen toegang krijgen tot het webcache bestand.2Unknown indexer state. 
Can't access webcache file.RclMain(Indexeren &bijwerken Update &IndexRclMainBijwerkenUpdatingRclMainViewer command line voor %1 specificeerd zowel het bestandtype als het parentfile type waarde: niet ondersteundQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainWaarschuwingWarningRclMainSchrijf fout Write failedRclMainLFout bij het ophalen van de stam talen#error retrieving stemming languagesRclMaingefilterdfilteredRclMain mediamediaRclMainberichtmessageRclMain andersotherRclMainpresentatie presentationRclMaingesorteerdsortedRclMainspreadsheet spreadsheetRclMain teksttextRclMainJOpnieuw proberen met mislukte bestand With failed files retrying RclMainBase&Over Recoll &About Recoll RclMainBase&&Geavanceerd Zoeken&Advanced Search RclMainBase4&Wis bestands geschiedenis&Erase document history RclMainBase*&Wis zoekgeschiedenis&Erase search history RclMainBase&Bestand&File RclMainBase &Volledig Scherm &Full Screen RclMainBase"&GUI configuratie&GUI configuration RclMainBase &Help&Help RclMainBase&&Index configuratie&Index configuration RclMainBase&Voorkeuren &Preferences RclMainBase2&Vernieuw de gehele index&Rebuild index RclMainBase&Resultaten&Results RclMainBase&&Sorteer parameters&Sort parameters RclMainBase&Gereedschappen&Tools RclMainBase*&Gebruiks handleiding &User manual RclMainBase&Bekijken&View RclMainBase<Uitgebreid/ Geavanceerd ZoekenAdvanced/complex Search RclMainBaseCrtl+ QCtrl+Q RclMainBase*Document geschiedenisDocument History RclMainBase.Document & GeschiedenisDocument &History RclMainBaseV&erlatenE&xit RclMainBase.E&xternal index dialoogE&xternal index dialog RclMainBase*Schakel synoniemen inEnable synonyms RclMainBase(Extern index dialoogExternal index dialog RclMainBaseF11F11 RclMainBaseEerste Pagina First Page RclMainBaseEerste pagina First page RclMainBaseVolledig Scherm Full Screen RclMainBaseNGa naar de eerste pagina van resultatenGo to first page of results RclMainBase(Index & statistiekenIndex &statistics RclMainBase.Geindexeerd &MIME typesIndexed &MIME types RclMainBase Indexing &schemaIndexing &schedule RclMainBase:Indexeren met speciale optiesIndexing with special options RclMainBase4Laad bewaarde zoekopdrachtLoad saved query RclMainBase0Missend & HulpprogrammasMissing &helpers RclMainBaseVolgende Pagina Next Page RclMainBaseVolgende pagina Next page RclMainBase4Volgende resultaten paginaNext page of results RclMainBaseDe volgende update zal de eerder mislukte bestanden opnieuw proberen.Next update will retry previously failed files RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBaseVorige Pagina Previous Page RclMainBaseVorige pagina Previous page RclMainBase8Vorige pagina met resultatenPrevious page of results RclMainBaseZoek fragmentenQuery Fragments RclMainBase RecollRecoll RclMainBaseJBewaar als CVS ( spreadsheet) bestandSave as CSV (spreadsheet) file RclMainBase6Bewaar laatste zoekopdrachtSave last query RclMainBaseBewaar het resultaat naar een bestand die te laden is in een spreadsheet@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift+PgUp Shift+PgUp RclMainBase"Toon zoek detialsShow Query Details RclMainBaseToon als tabel Show as table RclMainBasedToon het resultaat in een spreadsheet achtig tabel(Show results in a spreadsheet-like table RclMainBase@Sorteer op datum, nieuwste eerstSort by date, newest first RclMainBase>Sorteer op datume, oudste eerstSort by date, oldest first RclMainBaseFSorteer op datum van oud naar nieuw#Sort by dates from newest to oldest RclMainBaseFSorteer op datum 
van oud naar nieuw#Sort by dates from oldest to newest RclMainBase$Sorteer parametersSort parameters RclMainBase&Speciale IndexeringSpecial Indexing RclMainBase"Term &onderzoekerTerm &explorer RclMainBase>Termen onderzoekers gereedschapTerm explorer tool RclMainBase&Update &indexeerder Update &index RclMainBaseWebcache EditorWebcache Editor RclMainBaseAfsluitenQuit RclTrayIconHerstellenRestore RclTrayIconUittrekselAbstract RecollModel AuteurAuthor RecollModel DatumDate RecollModelDatum en tijd Date and time RecollModelBestands datum Document date RecollModel Bestands grootte Document size RecollModelBestands datum File date RecollModelBestands naam File name RecollModel Bestands grootte File size RecollModelIpadIpath RecollModelSleutelwoordenKeywords RecollModelMIME type MIME type RecollModel MtijdMtime RecollModel,Origineel karakter setOriginal character set RecollModel relevantiewaardeRelevancy rating RecollModel TitelTitle RecollModelURLURL RecollModel&(toon zoekopdracht) (show query)ResListJ<p><b>Geen resultaat gevonden</b><br>

No results found
ResListv<p><i>Alternatieve spellingen (accenten onderdrukken): </i>4

Alternate spellings (accents suppressed): ResListB<p><i>Alternatieve spelling: </i>

Alternate spellings: ResList"Document historieDocument historyResListDocumenten DocumentsResListVolgendeNextResList OpenenOpenResListBekijkenPreviewResList VorigePreviousResList(Zoekopdracht details Query detailsResList2Resultaten telling (est.)Result count (est.)ResListResultaatslijst Result listResListKnipselSnippetsResList2Document niet beschikbaarUnavailable documentResListvoorforResListvan tenminsteout of at leastResList &Verwijder kolom&Delete columnResTable"&Opnieuw sorteren &Reset sortResTable&Bewaar als CVS &Save as CSVResTable&Voeg "%1" kolom toeAdd "%1" columnResTableBKan bestand niet openen/ bewaren:Can't open/create file: ResTable8Bewaar lijst als cvs bestandSave table to CSV fileResTableVAfwijken van de uidig (bewaarde) voorkeuren' differ from current preferences (kept)SSearchAlle termen All termsSSearchElke termAny termSSearch`Automatische aanvullingen voor opgeslagen zoeken Auto suffixes for stored query: SSearchAuto aanvullen is ingesteld, maar het was uitgeschakeld voor de opgeslagen zoekopdracht3Autophrase is set but it was unset for stored querySSearchAutomatisch aanvullen is uitgeschakeld maar was ingesteld voor opegeslagen zoekopdracht3Autophrase is unset but it was set for stored querySSearchFoute zoektermBad query stringSSearchTVoer bestandsnaam wildcard uitdrukking in.$Enter file name wildcard expression.SSearchZoekterm'taal expressie. Cheat sheet: <br> <i> term1 term2 </i>. 'Term1' en 'term2' op elk gebied <br> <i> veld: term1 </i>. 'Term1' in 'het veld' veld <br> Standaard veldnamen / synoniemen: <br> titel / onderwerp / titel, auteur / uit, ontvanger / to, filename, ext. <br> Pseudo-velden: dir, mime / format, het type / rclcat, datum, grootte <br>. Twee datuminterval Voorbeelden: 2009-03-01 / 2009-05-20 2009-03-01 / P2M <br>. <i> term1 term2 OR term3 </i>: term1 AND (term2 OR term3) <br>. U kunt haakjes gebruiken om dingen duidelijker te maken. <br> <i> "term1 term2" </i>: zin (moet precies gebeuren). Mogelijke modifiers: <br> <i> "term1 term2" p </i>. Ongeordende nabijheid zoeken met de standaard afstand <br> Gebruik <b> Toon Zoekterm </b> in geval van twijfel over de uitslag en zie handleiding (& lt; F1>) voor meer informatie. Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval examples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchVoer zoekterm hier in. Type ESC SPC als aanvulling voor huidige termFEnter search terms here. Type ESC SPC for completions of current term.SSearch`External indexen voor opgeslagen zoekopdrachten:#External indexes for stored query: SSearchBestandsnaam File nameSSearch$Geen geheugen meer Out of memorySSearchZoek taalQuery languageSSearchRStam taal voor opgeslagen zoekopdrachten:%Stemming languages for stored query: SSearchKies zoektype.Choose search type. SSearchBase WissenClear SSearchBase Crtl+SCtrl+S SSearchBaseVoer de zoekopdracht term hier in. Type ESC SPC om huidige termen aan te vullenFEnter search terms here. Type ESC SPC for completions of current term. SSearchBase Wis zoekopdrachtErase search entry SSearchBaseSZoekBasis SSearchBase SSearchBase ZoekenSearch SSearchBase$Start zoekopdracht Start query SSearchBaseAlleAll SearchClauseWElkeAny SearchClauseWBestandsnaam File name SearchClauseWGeen veldNo field SearchClauseWGeenNone SearchClauseWAantal extra woorden die kunnen worden ingevoegd met de gekozen woordenHNumber of additional words that may be interspersed with the chosen ones SearchClauseW FrasePhrase SearchClauseWOngeveer Proximity SearchClauseWSelecteer het type zoekopdracht dat zal worden uitgevoerd met de woorden:>Select the type of query that will be performed with the words SearchClauseW Vindt:Find:SnippetsVolgendeNextSnippets VorigePrevSnippetsKnipselsSnippetsSnippets(<P> Sorry, niet iets precies kunnen vinden. Waarschijnlijk is het document zeer groot en is de knipsels generator verdwaald in een doolhof ... </ p>
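
The cheat sheet above is terse, so a short worked query may help. The line below uses only constructs listed there (field prefixes, the dir and date pseudo-fields, parentheses with OR, and a quoted phrase with the proximity modifier); the values themselves are made up for illustration:

    author:dockes dir:/home/me/docs date:2009-03-01/P2M (spreadsheet OR presentation) "index flush"p

Read as: documents whose author field matches 'dockes', located under /home/me/docs, dated within the two months following 2009-03-01, containing 'spreadsheet' or 'presentation', with 'index' and 'flush' occurring near each other in any order.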

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsWZoekSearch SnippetsWBladerenBrowseSpecIdxW"Map om recursief te indexeren. Dit moet binnen het reguliere geindexeerde gebied zijn<br>zoals ingesteld in het configuratiebestand (hoofdmappen)Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxWhProbeerniet nog eens de vorig niet gelukte bestanden%Do not retry previously failed files.SpecIdxWAnders zullen alleen de veranderende of gefaalde bestanden verwerkt worden5Else only modified or failed files will be processed.SpecIdxWnWis de geselecteerde bestandens data voor de indexering*Erase selected files data before indexing.SpecIdxW"Laat dit leeg om alle bestanden te kunnen selecteren. U kunt meerdere spaties gescheiden shell-type patronen gebruiken. <br> Patronen met ingesloten ruimtes moeten aangeduid worden met dubbele aanhalingstekens. <br> Kan alleen worden gebruikt als het hoofddoel is ingesteldLeave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxW$Selecteer patronenSelection patterns:SpecIdxW&Speciale indexeringSpecial IndexingSpecIdxWfBegin Map (anders de normale hoofdmappen gebruiken)+Start directory (else use regular topdirs):SpecIdxW,Hoofd index identiteitTop indexed entitySpecIdxW&Sluiten&Close SpellBase&Uitvouwen&Expand  SpellBaseAccentenAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBaseHoofdletterCase SpellBase GelijkMatch SpellBaseGeen db info. No db info. SpellBase Term onderzoeker Term Explorer SpellBase:Documenten gemaakt/bijgewerkt Documents created/updatedSpellW Bestanden getest Files testedSpellW0Ongeindexeerde bestanden Unindexed filesSpellW%1 resultaten %1 resultsSpellW,Aspell expansie fout. Aspell expansion error. SpellW`Aspell init faalt. Is Aspell niet geinstalleerd?)Aspell init failed. Aspell not installed?SpellW<Gemiddelde termen per documentAverage terms per documentSpellW(Database map grootteDatabase directory sizeSpellWDoc./Tot. Doc. / Tot.SpellW~Index: %1 documenten, wisselende lengte %2 termen.%3 resultaten7Index: %1 documents, average length %2 terms.%3 resultsSpellWItemItemSpellWlDe lijst is alfabetisch afgebroken, sommige frequenter1List was truncated alphabetically, some frequent SpellW@Langste document lengte (termen)Longest document length (terms)SpellWMIME types MIME types:SpellW,Geen expansie gevondenNo expansion foundSpellW"Aantal documentenNumber of documentsSpellW RegexpRegexpSpellW@resultaten van vorige indexeringResults from last indexing:SpellW4Toon indexeer statistiekenShow index statisticsSpellWBKleinste document lengte (termen) Smallest document length (terms)SpellW&Spelling/PhonetischSpelling/PhoneticSpellWStam expansieStem expansionSpellWTermTermSpellW WaardeValueSpellWwildcards WildcardsSpellWPFout bij het ophalen van woordstam talen#error retrieving stemming languagesSpellWEr kunnen termen ontbreken. Probeer gebruik te maken van een langere root.terms may be missing. 
Try using a longer root.SpellWAlle termen All terms UIPrefsDialogElke termAny term UIPrefsDialog^Tenminste moet er een index worden geselecteerd$At most one index should be selected UIPrefsDialogKan index met verschillende hoofdletters/ diakritisch tekens opties niet toevoegen>Cant add index with different case/diacritics stripping option UIPrefsDialogKiesChoose UIPrefsDialog:Standaard QtWebkit lettertypeDefault QtWebkit font UIPrefsDialogBestandsnaam File name UIPrefsDialogZoek taalQuery language UIPrefsDialogXResultaten koppen lijst ( is standaard leeg)%Result list header (default is empty) UIPrefsDialogResultaten lijst paragrafen formaat (wist alles en reset naar standaard)Nieuwe Waardes:</b>New Values:ViewActionBase@Aktie (leeg -> recoll standaard) Action (empty -> recoll default)ViewActionBase:Toepassen op huidige selectieApply to current selectionViewActionBaseAfsluitenCloseViewActionBaseHUitzonderingen op Desktop voorkeuren Exception to Desktop preferencesViewActionBase"Standaard ViewersNative ViewersViewActionBaseRecoll actiesRecoll action:ViewActionBaseSelecteer een of meerdere bestandstypes, gebruik vervolgens de instellingen onderin het venster hoe ze verwerkt wordenkSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseSlecteer een of meerdere mime types gebruik vervolgens de instellingen onderin het venster om de verwerkingen aan te passenlSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBase$Selecteer dezelfde Select sameViewActionBasePGebruik Desktop voorkeuren als standaard"Use Desktop preferences by defaultViewActionBasehuidige waarde current valueViewActionBaseZoek regexp Search regexpWebcache"Webcache bewerkerWebcache editorWebcacheKopieer URLCopy URL WebcacheEdit$Verwijder selectieDelete selection WebcacheEdit|Indexer is aan het werken. Kan webcache bestand niet bewerken.-Indexer is running. Can't edit webcache file. WebcacheEditStatus van indexer onbekend. Kan webcache bestand niet bewerken.0Unknown indexer state. Can't edit webcache file. WebcacheEditWebcache is gewijzigd, u zult de indexer opnieuw moeten uitvoeren na het sluiten van dit vensterRWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelUrlUrl WebcacheModelZet het indexeren van firefox bezochte paginas aan. <br> (hiervoor zal ook de Firefox Recoll plugin moeten worden geinstalleerd door uzelf)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin)confgui::ConfBeaglePanelWInvoeringen zullen worden gerecycled zodra de groote is bereikt. <br> Het verhogen van de groote heeft zin omdat het beperken van de waarde de bestaande waardes niet zal afkappen ( er is alleen afval ruimte aan het einde).Entries will be recycled once the size is reached.
Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).confgui::ConfBeaglePanelWLMax. grootte voor het web opslaan (MB) Max. size for the web store (MB)confgui::ConfBeaglePanelWHVerwerk de WEB geschiedenis wachtrijProcess the WEB history queueconfgui::ConfBeaglePanelWXDe naam voor een map waarin de kopieen van de bezochte webpaginas opgeslagen zullen worden.<br>Een niet absoluut pad zal worden gekozen ten opzichte van de configuratie mapThe name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory.confgui::ConfBeaglePanelWDWeb pagina map naam om op te slaanWeb page store directory nameconfgui::ConfBeaglePanelWFKan configuratie bestand niet lezenCan't write configuration fileconfgui::ConfIndexWKiesChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW<P> Automatisch activeren hoofdletters gevoeligheid als de vermelding hoofdletters heeft in elke, behalve de eerste positie. Anders moet u zoek taal gebruiken en de <i>C</i> modifier karakter-hoofdlettergevoeligheid opgeven.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity.confgui::ConfSearchPanelW<P> Automatisch activeren diakritische tekens gevoeligheid als de zoekterm tekens zijn geaccentueerd (niet in unac_except_trans). Wat je nodig hebt om de zoek taal te gebruiken en de <i> D</i> modifier om diakritische tekens gevoeligheid te specificeren.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity.confgui::ConfSearchPanelW<p> Maximale uitbreidingstelling voor een enkele term (bijv.: bij het gebruik van wildcards) Een standaard van 10.000 is redelijk en zal zoekpodrachten die lijken te bevriezen terwijl de zoekmachine loopt door de termlijst vermijden.

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.confgui::ConfSearchPanelW<p> Maximale aantal elementaire clausules die we kunnen toevoegen aan een enkele Xapian zoeken. In sommige gevallen kan het resultaatvan de term uitbreiding multiplicatief zijn, en we willen voorkomen dat er overmatig gebruik word gemaakt van het werkgeheugen. De standaard van 100.000 zou hoog genoeg moeten zijn in beidde gevallen en compatible zijn met moderne hardware configuraties.5

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.confgui::ConfSearchPanelW\Automatische karakter hoofdletter gevoeligheid$Automatic character case sensitivityconfgui::ConfSearchPanelWZAutomatische diakritische tekens gevoeligheid Automatic diacritics sensitivityconfgui::ConfSearchPanelWBMaximaal Xapian clausules tellingMaximum Xapian clauses countconfgui::ConfSearchPanelWDMaximale term uitbreidings tellingMaximum term expansion countconfgui::ConfSearchPanelWEen exclusieve lijst van gendexeerde typen mime. <br> Niets anders zal worden gendexeerd. Normaal gesproken leeg en inactiefeAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactiveconfgui::ConfSubPanelW(Sluit mime types uitExclude mime typesconfgui::ConfSubPanelWExterne filters die langer dan dit werken worden afgebroken. Dit is voor het zeldzame geval (bijv: postscript) wanneer een document een filterlus zou kunnen veroorzaken. Stel in op -1 voor geen limiet.External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. confgui::ConfSubPanelWGlobaalGlobalconfgui::ConfSubPanelWAls deze waarde is ingesteld (niet gelijk aan -1), zal tekstbestanden worden opgedeeld in blokken van deze grootte voor indexering. Dit zal helpen bij het zoeken naar zeer grote tekstbestanden (bijv: log-bestanden).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files).confgui::ConfSubPanelWXMaximaal gecomprimeerd bestands formaat (KB)Max. compressed file size (KB)confgui::ConfSubPanelW:Max. filter executie tijd (S)Max. filter exec. time (S)confgui::ConfSubPanelW:Max. tekstbestand groote (MB)Max. text file size (MB)confgui::ConfSubPanelWZMime types die niet geindexeerd zullen wordenMime types not to be indexedconfgui::ConfSubPanelW"Alleen mime typesOnly mime typesconfgui::ConfSubPanelWBTekst bestand pagina grootte (KB)Text file page size (KB)confgui::ConfSubPanelW<Deze waarde stelt een drempel waarboven gecomprimeerde bestanden niet zal worden verwerkt. Ingesteld op -1 voor geen limiet, op 0 voor geen decompressie ooit.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.confgui::ConfSubPanelWhDeze waarde stelt een drempel waarboven tekstbestanden niet zal worden verwerkt. Ingesteld op -1 voor geen limiet. Dit is voor het uitsluiten van monster logbestanden uit de index.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index.confgui::ConfSubPanelW Dit zijn uitzonderingen op het unac mechanisme dat, standaard, alle diakritische tekens verwijderd, en voert canonische ontbinding door. U kunt unaccenting voor sommige karakters veranderen, afhankelijk van uw taal, en extra decomposities specificeren, bijv. voor ligaturen. In iedere ruimte gescheiden ingave , waar het eerste teken is de bron is, en de rest de vertaling.l

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.confgui::ConfTopPanelWAspell taalAspell languageconfgui::ConfTopPanelW"Database map naamDatabase directory nameconfgui::ConfTopPanelW:Schakelt het gebruik van aspell uit om spellings gissingen in het term onderzoeker gereedschap te genereren. <br> Handig als aspell afwezig is of niet werkt.Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work. confgui::ConfTopPanelWFIndex verversings megabyte intervalIndex flush megabytes intervalconfgui::ConfTopPanelW Log bestandsnaam Log file nameconfgui::ConfTopPanelW0Log uitgebreidheids nivoLog verbosity levelconfgui::ConfTopPanelW.maximale schijf gebruikMax disk occupation (%)confgui::ConfTopPanelW&Gebruik aspell nietNo aspell usageconfgui::ConfTopPanelW$Paden overgeslagen Skipped pathsconfgui::ConfTopPanelWStam talenStemming languagesconfgui::ConfTopPanelWHet bestand waar de boodschappen geschreven zullen worden.<br>Gebruik 'stderr' voor terminal weergavePThe file where the messages will be written.
Use 'stderr' for terminal outputconfgui::ConfTopPanelWTaal instelling voor het aspell woordenboek. Dit zou er uit moeten zien als 'en'of 'nl'...<br> als deze waarde niet is ingesteld, zal de NLS omgeving gebruikt worden om het te berekenen, wat meestal werkt. Om een idee te krijgen wat er op uw systeem staat, type 'aspell config' en zoek naar .dat bestanden binnen de 'data-dir'map.3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. confgui::ConfTopPanelWDe talen waarvoor de stam uitbreidings<br>wooordenboeken voor zullen worden gebouwd.IThe languages for which stemming expansion
dictionaries will be built.confgui::ConfTopPanelWEen lijst van mappen waar de recursive indexering gaat starten. Standaard is de thuismap.LThe list of directories where recursive indexing starts. Default: your home.confgui::ConfTopPanelW:De naam voor een map om de index in op te slaan<br> Een niet absoluut pad ten opzichte van het configuratie bestand is gekozen. Standaard is het 'xapian db'.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.confgui::ConfTopPanelWDit zijn de namen van de mappen die indexering niet zal doorzoeken. <br> Kan wildcards bevatten. Moet overeenkomen met de paden gezien door de indexer (bijv: als topmappen zoals '/ home/me en '/ home' is eigenlijk een link naar '/usr/home', een correcte overgeslagen pad vermelding zou zijn '/home/me/tmp * ', niet' /usr/home/me/tmp * ')#These are names of directories which indexing will not enter.
May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')confgui::ConfTopPanelWFDit is het precentage van schijfgebruike waar indexering zal falen en stoppen (om te vermijden dat uw schijf volraakt.<br>0 betekend geen limit (dit is standaard).This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).
0 means no limit (this is the default).confgui::ConfTopPanelW`Deze waarde past de hoeveelheid data die zal worden geindexeerd tussen de flushes naar de schijf.<br> Dit helpt bij het controleren van het gebruik van geheugen. Standaad 10MB This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB confgui::ConfTopPanelWDeze waarde bepaald het aantal boodschappen,<br>van alleen foutmeldingen tot een hoop debugging data.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data.confgui::ConfTopPanelWTop mappenTop directoriesconfgui::ConfTopPanelW&Unac uitzonderingenUnac exceptionsconfgui::ConfTopPanelW&Annuleren&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
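
The confgui::ConfTopPanelW, ConfBeaglePanelW and ConfSearchPanelW tooltips above all describe entries of the recoll.conf configuration file. The fragment below is only a sketch built from those descriptions: the text itself names topdirs, skippedPath entries, unac_except_trans and the 'xapiandb' default, while the remaining parameter names and all values are assumptions to be checked against the Recoll manual for the installed version:

    # recoll.conf sketch; illustrative values only
    # Where recursive indexing starts
    topdirs = ~/docs ~/mail
    # Wildcard patterns matched against the paths seen by the indexer
    skippedPaths = /home/me/tmp*
    # Non-absolute paths are taken relative to the configuration directory
    dbdir = xapiandb
    # Space-separated entries: source character first, then its translation
    unac_except_trans = ßss œoe
    # From errors only up to verbose debugging
    loglevel = 3
    # Megabytes indexed between flushes to disk (10 is the stated default)
    idxflushmb = 10
    # Stop indexing at this disk occupation percentage; 0 means no limit
    maxfsoccuppc = 90

The web history queue, the automatic case and diacritics sensitivity switches and the term-expansion and Xapian clause limits described above have their own entries, omitted here for brevity.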
uiPrefsDialogBase <PRE>

uiPrefsDialogBase<PRE> + wrap
 + wrapuiPrefsDialogBaseEen zoekopdracht naar '[rollende stenen] (2 termen) wordt gewijzigd in [rollen of stenen of (rollende frase 2 stenen)]. Dit zou een hogere prioriteit moeten geven aan de resultaten, waar de zoektermen precies zoals ingevoerd moeten verschijnen.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBase4Abstract knipsel scheidingAbstract snippet separatoruiPrefsDialogBaseAlles ActiverenActivate AlluiPrefsDialogBaseIndex toevoegen	Add indexuiPrefsDialogBase0Veranderingen doorvoeren
Apply changesuiPrefsDialogBasezAutostart eenvoudige zoekopdracht bij ingave in de witruimte.-Auto-start simple search on whitespace entry.uiPrefsDialogBasefAutomatisch aanvullen van eenvoudige zoekopdrachten+Automatically add phrase to simple searchesuiPrefsDialogBasedAutofrase term frequentie drempelwaarde percentage.Autophrase term frequency threshold percentageuiPrefsDialogBaseKnoppen Paneel
Buttons PaneluiPrefsDialogBaseKiesChooseuiPrefsDialogBase0Kies editor toepassingenChoose editor applicationsuiPrefsDialogBaseKlik hier om een andere index map toe te voegen aan de lijst. U kunt een Recoll configuratie map of een Xapian index te selecteren.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBaseXSluit naar systeemvak in plaats van sluiten.!Close to tray instead of exiting.uiPrefsDialogBase6Datum notatie (strftime(3))Date format (strftime(3))uiPrefsDialogBase"Alles DeactiverenDeactivate AlluiPrefsDialogBaseBepaal of document mappen moeten worden weergegeven als keuzerondjes, gereedschap combinatiebox of menu.QDecide if document filters are shown as radio buttons, toolbar combobox, or menu.uiPrefsDialogBase`Schakel Qt auto-aanvullen uit in zoek invoegveld*Disable Qt autocompletion in search entry.uiPrefsDialogBase8Veranderingen ongedaan makenDiscard changesuiPrefsDialogBaseMoeten we een abstract maken, zelfs als het document er al een blijkt te hebben?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBase<Moeten we proberen om abstracten voor resultatenlijst invoering op te bouwen met behulp van de context van de zoektermen? Kan traag zijn met grote documenten.zDo we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBase8Document filter keuze stijl:Document filter choice style:uiPrefsDialogBase6Dynamisch abstracten bouwenDynamically build abstractsuiPrefsDialogBaseTBewerk resultaat pagina html header invoeg#Edit result page html header insertuiPrefsDialogBaseTBewerk resultaten paragraaf formaat string#Edit result paragraph format stringuiPrefsDialogBaseAanzettenEnableuiPrefsDialogBaseExterne indexenExternal IndexesuiPrefsDialogBaseHFrequentie percentage drempel waarover wij geen termen gebruiken binnen autofrase. Frequente termen zijn een belangrijk prestatie probleem met zinnen en frases. Overgeslagen termen vergroten de zins verslapping, en verminderen de autofrase doeltreffendheid. De standaardwaarde is 2 (procent).Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBase:Verberg duplicaat resultaten.Hide duplicate results.uiPrefsDialogBase>Highlight kleur voor zoektermenHighlight color for query termsuiPrefsDialogBaseIndien aangevinkt, zullen de resultaten met dezelfde inhoud onder verschillende namen slecht eenmaal worden getoond.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseLijnen in PRE tekst worden niet gevouwen. Met behulp van BR kan inspringen verwijderen. PRE + Wrap stijl zou wenselijk kunnen zijn.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBasetMaximale tekst groote highlighted voor preview (megabytes)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseMenuMenuuiPrefsDialogBasehOpgegeven aantal van weergaves per resultaten pagina"Number of entries in a result pageuiPrefsDialogBaseOpent een dailoog venster om het knipsel venster CSS stijl sheet bestand te selecterenAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBaseOpent een dialoog om de resultaten lijst lettertype te selecteren-Opens a dialog to select the result list fontuiPrefsDialogBaselOpend een dialoog venster om style sheet te selecteren-Opens a dialog to select the style sheet fileuiPrefsDialogBase"Paden vertalingenPaths translationsuiPrefsDialogBaseBPlatte tekst naar HTML lijn stijlPlain text to HTML line styleuiPrefsDialogBasehHtml voorkeur in plaats van gewoon tekst als preview&Prefer Html to plain text for preview.uiPrefsDialogBaseTZoek taal magic bestandsnaam achtervoegsel(Query language magic file name suffixes.uiPrefsDialogBase@Onthoud sorteer activatie statusRemember sort activation state.uiPrefsDialogBaseVerwijder van de lijst. Dit heeft geen effect op de schijf index.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase2Geselecteerde verwijderenRemove selecteduiPrefsDialogBaseBVervang abstracten van documenten Replace abstracts from documentsuiPrefsDialogBaseHerstelResetuiPrefsDialogBase@Herstel de Knipsel venster stijl Resets the Snippets window styleuiPrefsDialogBaseReset het resultaten lijst lettertype naar systeem standaardwaarde1Resets the result list font to the system defaultuiPrefsDialogBaseFReset de style sheet naar standaard!Resets the style sheet to defaultuiPrefsDialogBase Resultaten lijstResult ListuiPrefsDialogBase6Resultaten lijst lettertypeResult list fontuiPrefsDialogBase*Zoek terwijl u typed.Search as you type.uiPrefsDialogBaseZoek parametersSearch parametersuiPrefsDialogBaseBToon pictogram in het systeemvak.Show system tray icon.uiPrefsDialogBaselToon waarschuwing bij het openen van een temp bestand.)Show warning when opening temporary file.uiPrefsDialogBase6Knipsel venster CSS bestandSnippets window CSS fileuiPrefsDialogBaseNStart met geavanceerd zoek dialog open.'Start with advanced search dialog open.uiPrefsDialogBaseFStart met een eenvoudige zoek modusStart with simple search modeuiPrefsDialogBaseStam taalStemming languageuiPrefsDialogBaseStyle sheetStyle sheetuiPrefsDialogBase$Synoniemen bestand
Synonyms fileuiPrefsDialogBaseJSynthetische abstract context woorden Synthetic abstract context wordsuiPrefsDialogBasePSynthetische abstractie grootte (tekens)$Synthetic abstract size (characters)uiPrefsDialogBaseTeksten groter dan dit zullen niet worden highlighted in previews (te langzaam).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseDe woorden in de lijst zal automatisch omgezet worden naar ext:xxx clausules in de zoektaal ingave.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase(Toggle geselecteerdeToggle selecteduiPrefsDialogBase>Gereedschaps-menu combinatieboxToolbar ComboboxuiPrefsDialogBase(Gebruikers interfaceUser interfaceuiPrefsDialogBase*Gebruikers voorkeurenUser preferencesuiPrefsDialogBase*Gebruiker stijl toe te passen op het knipsel-venster <br>. Let op: het resultaat pagina header invoegen is ook opgenomen in het'kop knipsel-venster .User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_lt.qm0000644000175000017500000007370113566424763014262 00000000000000Ek$—K'HJ*%?K*0?z+L#1 :d<Gi)GB H6QH6RJ+%;J+M:J6J6XWT'NW8Xje%lss<v88vv=5zϳ__0́W<T X!nE:0,e>.c*XMe `^Gn(Ov]W+c xC( L2SIL>  +Np#vIw 5Fw 5Lw 5Nw 5UU ֳS6Qwf3@Mͼuegg=^;U3yU&e  o&b;.N,?dJJUY57JUYH}YRmqʗʗ<^cZL_ xAEwLLXX46[2I"I% nnQFfUUfYMf^OA/Mo#bX| -Z -E)TjyB9\BSP)r?)Gr2Vk/,DR<@NBQAS :â9 KI TRR?Ǣ07~C Xl`Dz" NvUKk0p h] T#(K %C7Ja[7$X DC]FW#O1FN`H:"/w/ϗIbnG]= 0Ót^Ót=Ɇto&:ugQ)}})3l0N#I8c  H> 9ZyM KC ]#8 16 _ 9z ü>g c +< 65 `P6 ` cEB y&a IY  C0 H B@ Aa A TH $) h$c ΣB 4= ۷D ^ 'И@ /6 97 P֙2 RV;_ T#, V \iCi `F  9 Sj \ ~c Q 8ZY F5 yV y~ 3S uL uU P1 P; 5dfj H ^ £Sc/._<n=?G7%J5R'b>zcJzcTVP#.FS;.56Y˓U; cQ~hXY~s?[sEg3Ch |c|k'9C l>CBLioVisos slygos All clauses AdvSearchBet kuri slyga Any clause AdvSearch mediamedia AdvSearchpraneaimasmessage AdvSearchkitaother AdvSearchprezentacijos presentation AdvSearchskai iuokls spreadsheet AdvSearchskai iuokls spreadsheets AdvSearchtekstastext AdvSearchtekstaitexts AdvSearch<----- Visi <----- All AdvSearchBase<----- Pas <----- Sel AdvSearchBasePridti slyg Add clause AdvSearchBase"Iasamesn paieakaAdvanced search AdvSearchBaseVisi ----> All ----> AdvSearchBaseVisi kairje esantys netuati laukai bus sujungiami AND (visi) arba OR (bet kuris) pagalba. <br> "Bet kuris" "Visi" ir "Nei vienas" lauks tipai gali priimti paprasts ~od~is miain/ ir frazes pa~ymtas dvigubomis kabutmis. <br> Tuati laukeliai ignoruojami.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBaseNaraytiBrowse AdvSearchBase"Pagal kategorijas By categories AdvSearchBase^Pa~ymti, jei norite filtruoti pagal byls tipus,Check this to enable filtering on file types AdvSearchBase~Pa~ymti, jei norite naudoti byls kategorijas vietoje mime tips;Check this to use file categories instead of raw mime types AdvSearchBaseU~darytiClose AdvSearchBaseIatrinti slyg Delete clause AdvSearchBase^.raaykite virautinio lygio direktorij paieakaiEnter top directory for search AdvSearchBase(Ignoruoti byls tipaiIgnored file types AdvSearchBase&Apriboti byls tipusRestrict file types AdvSearchBaseJPateikti rezultatus byloms submedyje:%Restrict results to files in subtree: AdvSearchBase2Iasaugoti kaip numatytj/Save as default AdvSearchBaseJIeakoti <br>dokuments<br>tenkinan is:'Search for
documents
satisfying: AdvSearchBase"Ieakota byls tipsSearched file types AdvSearchBasePas -----> Sel -----> AdvSearchBasePradti paieak Start Search AdvSearchBaseAspell kalbaAspell language ConfIndexWDNepavyksta /raayti nustatyms bylosCan't write configuration file ConfIndexWBDuomens bazs direktorijos vardasDatabase directory name ConfIndexW*Nurodo nenaudoti aspell programos kuriant tarimo aproksimacijas raktinis ~od~is tyrinjimo /rankyje.<br>Naudinga, jei aspell neveikia arba ne/diegta.Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexW&Globalks parametraiGlobal parameters ConfIndexWTIndekso dalis, /raaoms / disk, dydis (MB)Index flush megabytes interval ConfIndexW$Lokalks parametraiLocal parameters ConfIndexW Log bylos vardas Log file name ConfIndexW&Log iasamumo lygmuoLog verbosity level ConfIndexW,Aspell nebus naudojamaNo aspell usage ConfIndexW&Paieakos parametraiSearch parameters ConfIndexWpDirektorijs, kuris turinys nein-<br>deksuojamas, sraaas Skipped paths ConfIndexWNKalbos naudojamos stemming<br> procesuiStemming languages ConfIndexWByla, kurioje bus /raaomos ~inuts.<br>Naudokite 'stderr' nordami iavesti / terminalo langPThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWvKalbos, kurioms bus sukurti stemming <br>expansion ~odynai.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWDirektorijs, kuriose pradedamas rekursinis indeksavimas, sraaas. Numatytoji: nams direktorija.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexW4Vert nustato duomens, kurie indeksuojami tarp /raaymo / disk, apimt/.<br>Padeda valdyti indeksavimo dalies atminties naudojim. Numatyta vert yra 10 MBThis value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWVert nustato ~iniu is apimt/, nuo vien tik <br>klaids fiksavimo iki didels apimties duomens skirts debugging.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexWAuka iausio lygmens direktorijos<br>kuriose vykdomas indeksavimasTop directories ConfIndexWGlobalusGlobal ConfSubPanelWJei vert nurodyta (nelgyi -1) tekstins bylos bus suskaidytos / nurodyto dyd~io bylas, kurios bus atskirai indeksuojamos. Naudinga atliekant paieak labai dideliose tekstinse bylose (pav. log bylose).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWJDid~iausias suspausts byls dydis (KB)Max. compressed file size (KB) ConfSubPanelWLDid~iausias tekstins bylos dydis (MB)Max. text file size (MB) ConfSubPanelW4Tekstins bylos dydis (KB)Text file page size (KB) ConfSubPanelWLViraijus pasirinkt suspausts byls dyd/, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos bkts indeksuojamos.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelWLViraijus pasirinkt tekstinis byls dyd/, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos bkts indeksuojamos.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelWAtaauktiCancel EditTransBaseTNustatymuose nerandama duomens bazs bylos No db directory in configurationMain&Sekantis&NextPreview&Ankstesnis &PreviousPreview&Ieakoti: &Search for:PreviewZNepavyksta pervesti dokumento / vidin busen0Can't turn doc into internal representation for PreviewAtaauktiCancelPreviewIavalytiClearPreview6Kuriamas per~valgos tekstasCreating preview textPreviewR.keliamas / redaktoris per~valgos tekstas Loading preview text into editorPreview&Atitaikyti &Atvej/  Match &CasePreview:Trkksta pagalbins programos:Missing helper program: PreviewAtidarytiOpenPreviewSpausdintiPrintPreviewTextEditBSpausdinti kaip matoma per~ikrojePrint Current PreviewPreviewTextEditRodyti laukus Show fieldsPreviewTextEdit0Rodyti pagrindin/ tekstShow main textPreviewTextEditP<b>Pritaikyti direktorijs<br> submed~iaiCustomised subtreesQObject2Sekti simbolines nuorodasFollow symbolic linksQObjectIndeksavimo metu sekti simbolines nuorodas. Numatytasis elgesys yra nesekti, bandant iavengti dvigubo indeksavimoTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject6Indeksuoti viss byls vardusIndex all file namesQObjectIndeksuoti byls, kuris turinio nepavyksta perskaityti, vardus. Numatytoji reikam: teisyb}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject"Ne/traukti vardai Skipped namesQObjectSubdirektorijs, kuriose dal/ parametrs reikia pakeisti, sraaas.<br> Numatytoji reikam: tua ia.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObject~Byls arba direktorijs, kuris nedera indeksuoti, vards aablonai.LThese are patterns for file or directory names which should not be indexed.QObjectfAtidaryti &Aukatesnio lygio dokumentus/direktorijas&Open Parent document/folderQWidget&Per~ikra&PreviewQWidget&.raayti / byl&Write to FileQWidget,Kopijuoti &Bylos vardCopy &File NameQWidgetKopijuoti &URL Copy &URLQWidget4Rasti &panaaius dokumentusFind &similar documentsQWidgethPer~ikrti &Aukatesnio lygio dokumentus/direktorijasPreview P&arent document/folderQWidget.spjimasWarningRTIToolW(visos kalbos)(all languages)RclMain(no stemming) (no stemming)RclMainApie Recoll About RecollRclMainVisiAllRclMainDNepavyksta sukurti per~ikros langoCan't create preview windowRclMainxNepavyksta perskaityti dokumento arba sukurti laikinos bylos0Cannot extract document or create temporary fileRclMainNepavyksta iagauti ia duomens bazs informacijos apie dokument +Cannot retrieve document info from databaseRclMainU~daromaClosingRclMain$Dokuments istorijaDocument historyRclMainVykdoma: [ Executing: [RclMain$Istorijos duomenys History dataRclMainIndeksuojama:Indexing in progress: RclMain0indeksavimas pertrauktasIndexing interruptedRclMain6Trkksta pagalbinis programsMissing helper programsRclMainNustatymuose nenumatyta jokia iaorin per~ikros programa aiam mime tipui [-No external viewer configured for mime type [RclMain^Randamos visos reikalingos pagalbins programosNo helpers found missingRclMainIavalytiPurgeRclMain(U~klausos rezultatai Query resultsRclMain StemdbStemdbRclMain.Sustabdyti &IndeksavimStop &IndexingRclMainNurodyta per~ikros programa aiam mime tipui %1: %2 nerandama. Ar nortumete iaaaukti nustatyms lang?hThe viewer specified in mimeview for %1: %2 is not found. 
Do you want to start the preferences dialog ?RclMain8`i paieaka daugiau nevykdoma"This search is not active any moreRclMainNe~inomaUnknownRclMain&Atnaujinti &Indeks Update &IndexRclMain.spjimasWarningRclMainFerror retrieving stemming languages#error retrieving stemming languagesRclMainfiltruotasfilteredRclMain mediamediaRclMainpraneaimasmessageRclMainkitaotherRclMainprezentacijos presentationRclMainsurkaiuotasortedRclMainskai iuokls spreadsheetRclMaintekstastextRclMain&Apie Recoll &About Recoll RclMainBase$&Iasamesn Paieaka&Advanced Search RclMainBase8&Iatrinti dokuments istorij&Erase document history RclMainBase &Byla&File RclMainBase&Pagalba&Help RclMainBase&Nustatymai &Preferences RclMainBase,&Surkaiuoti parametrai&Sort parameters RclMainBase&.rankiai&Tools RclMainBase$&Vartotojo vadovas &User manual RclMainBase"Iasamesn PaieakaAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase$Dokuments IstorijaDocument History RclMainBase&Dokuments &IstorijaDocument &History RclMainBaseI&aeitiE&xit RclMainBase.Iaorinis indekss langasExternal index dialog RclMainBasePirmas puslapis First page RclMainBaseBPereiti / pirm rezultats puslap/Go to first page of results RclMainBase"Sekantis puslapis Next page RclMainBase6Sekantis rezultats puslapisNext page of results RclMainBase0Priea tai buvs puslapis Previous page RclMainBase:Ankstesnis rezultats puslapisPrevious page of results RclMainBase RecollRecoll RclMainBase*Surkaiuoti parametrusSort parameters RclMainBase8Raktinis ~od~is &tyrintojasTerm &explorer RclMainBaseDRaktinis ~od~is tyrinjimo /rankisTerm explorer tool RclMainBase&Atnaujinti &Indeks Update &index RclMainBaseDataDate RecollModelBylos vardas File name RecollModel"(rodyti u~klaus) (show query)ResList><p><b>Nerasta rezultats</b><br>

No results found
ResListX<p><i>Kiti galimi tarimai (be akcents): </i>4

Alternate spellings (accents suppressed): ResList$Dokuments istorijaDocument historyResListDokumentai DocumentsResList KitasNextResListAtidarytiOpenResListPer~ikraPreviewResListAnkstesnisPreviousResList"U~klausos detals Query detailsResList"Rezultats sraaas Result listResList.Neprieinamas dokumentasUnavailable documentResListia bentout of at leastResList,Visi raktiniai ~od~iai All termsSSearch0Bet kuris raktinis ~odisAny termSSearch8Netinkamai pateikta u~klausaBad query stringSSearchBylos vardas File nameSSearch&Nepakanka atminties Out of memorySSearchU~klauss kalbaQuery languageSSearch4Pasirinkite paieakos tip.Choose search type. SSearchBaseIavalytiClear SSearchBase Ctrl+SCtrl+S SSearchBase.Iatrinti paieakos /raaErase search entry SSearchBaseSSearchBase SSearchBase SSearchBaseIeakotiSearch SSearchBase Pradti u~klaus Start query SSearchBaseVisiAll SearchClauseWBylos vardas File name SearchClauseWPapildoms ~od~is skai ius kurie gali interspersed with the chosen onesHNumber of additional words that may be interspersed with the chosen ones SearchClauseW`Pasirinkite u~klausos tip atliekam su ~od~iais>Select the type of query that will be performed with the words SearchClauseW KitasNextSnippetsIeakotiSearch SnippetsWNaraytiBrowseSpecIdxW&U~daryti&Close SpellBase&Iaplsti&Expand  SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBase6Raktinis ~od~is tyrinjimas Term Explorer SpellBase$Nerasta prapltimsNo expansion foundSpellW RegexpRegexpSpellW Tarimas/FonetikaSpelling/PhoneticSpellWStem expansionStem expansionSpellWRaktinis ~odisTermSpellWWildcards WildcardsSpellWFerror retrieving stemming languages#error retrieving stemming languagesSpellW,Visi raktiniai ~od~iai All terms UIPrefsDialog0Bet kuris raktinis ~odisAny term UIPrefsDialogNaraytiChoose UIPrefsDialogBylos vardas File name UIPrefsDialogU~klauss kalbaQuery language UIPrefsDialogAtrodo, jog pasirinkta direktorija nra Xapian indekso direktorija;The selected directory does not appear to be a Xapian index UIPrefsDialogZPasirinkta direktorija jau yra indekso sraae3The selected directory is already in the index list UIPrefsDialog8Pagrindinis/localus indekas!This is the main/local index! UIPrefsDialogFerror retrieving stemming languages#error retrieving stemming languages UIPrefsDialogU~darytiCloseViewActionBase8Sistemos per~ikros programosNative ViewersViewActionBaseNaraytiChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW&Ataaukti&CanceluiPrefsDialogBase &Gerai&OKuiPrefsDialogBasejPaieaka bus pakeista (pav. rolling stones -> rolling or stones or (rolling phrase 2 stones)). Teikiama aiaki pirmenyb rezultatams kuriuose rasti raktiniai ~od~iai atitinka /vestus.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBaseVisus aktyvuoti Activate AlluiPrefsDialogBasePridti indeks Add indexuiPrefsDialogBase*Pritaikyti pakeitimus Apply changesuiPrefsDialogBaseJPridti prie paprastos paieakos fraz+Automatically add phrase to simple searchesuiPrefsDialogBaseNaraytiChooseuiPrefsDialogBase@Pasirinkite redaktoris programasChoose editor applicationsuiPrefsDialogBase"Visus deaktyvuotiDeactivate AlluiPrefsDialogBase*Panaikinti pakeitimusDiscard changesuiPrefsDialogBasevAr sukuriame dirbtin santrauk, jei dokumente jau ji yra? 
EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBaseAr pabandome sukurti santraukas remdamiesi u~klauss raktinis ~od~is kontekstu? Didels apimties dokumentams gali ltai veikti.zDo we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents.uiPrefsDialogBase:Dinamiakai sukurti santraukasDynamically build abstractsuiPrefsDialogBase$Iaoriniai indeksaiExternal IndexesuiPrefsDialogBaseHelvetica-10 Helvetica-10uiPrefsDialogBaseFSlpti pasikartojan ius rezultatus.Hide duplicate results.uiPrefsDialogBasePa~ymjus, bus rodoma tik viena ia byls su tuo pa iu turiniu, ta iau skirtingais vardais.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBasevDid~iausia teksto, pa~ymto per~ikrai, apimtis (megabaitai)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseD.raas skai ius rezultats puslapyje"Number of entries in a result pageuiPrefsDialogBaseFPasirinkite rezultats sraao arift-Opens a dialog to select the result list fontuiPrefsDialogBase\Pirmenyb teikti Html formatui per~ikros metu.&Prefer Html to plain text for preview.uiPrefsDialogBaseh.siminti rkaiavimo pasirinkimus (nedings perkrovus).Remember sort activation state.uiPrefsDialogBasetPaaalinti ia sraao. Neturi jokio poveikio indeksui diske.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase&Pa~ymtus paaalintiRemove selecteduiPrefsDialogBaseRPakeisti dokumentuose randamas santraukas Replace abstracts from documentsuiPrefsDialogBase2Gra~inti numatytj formResetuiPrefsDialogBase`Gra~ina numatytj rezultats sraao srifto vert1Resets the result list font to the system defaultuiPrefsDialogBase0Rezultats sraao ariftasResult list fontuiPrefsDialogBase&Paieakos parametraiSearch parametersuiPrefsDialogBaseLPradti nuo iasamesns paieakos lango.'Start with advanced search dialog open.uiPrefsDialogBaseStemming kalbaStemming languageuiPrefsDialogBaseLDirbtins santraukos konteksto ~od~iai Synthetic abstract context wordsuiPrefsDialogBase\Dirbtins santraukos dydis (simbolis skai ius)$Synthetic abstract size (characters)uiPrefsDialogBaseTekstai viraijantys a/ dyd/ nebus nuspalvinami per~ikros metu (per didel apkrova).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBase6.jungti/Iajungti pasirinktToggle selecteduiPrefsDialogBase"Vartotoja aplinkaUser interfaceuiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_it.ts0000644000175000017500000041216213566424763014266 00000000000000 AdvSearch All clauses Tutti i termini Any clause Qualsiasi termine texts testi spreadsheets fogli di calcolo presentations presentazioni media multimediali messages messaggi other altri Bad multiplier suffix in size filter text spreadsheet presentation message AdvSearchBase Advanced search Ricerca avanzata Restrict file types Limita i tipi di file Save as default Salva come default Searched file types Ricerca tipo file All ----> Tutti ----> Sel -----> Sel -----> <----- Sel <----- Sel <----- All <----- Tutti Ignored file types Ignora i file di questo tipo Enter top directory for search Scrivi la directory base per la ricerca Browse Esplora Restrict results to files in subtree: Limita i risultati alla sotto-directory: Start Search Cerca Search for <br>documents<br>satisfying: Cerca i documenti<br>che contengono: Delete clause Elimina condizione Add clause Aggiungi condizione Check this to enable filtering on file types Contrassegna per abilitare la ricerca sul tipo di file By categories 
Per categorie Check this to use file categories instead of raw mime types Contrassegna per usare le categorie al posto dei tipi mime Close Chiudi All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Invert Minimum size. You can use k/K,m/M,g/G as multipliers Min. Size Maximum size. You can use k/K,m/M,g/G as multipliers Max. Size Filter From To Check this to enable filtering on dates Filter dates Find Check this to enable filtering on sizes Filter sizes ConfIndexW Can't write configuration file Impossibile scrivere il file di configurazione Global parameters Parametri globali Local parameters Parametri locali Search parameters Parametri per la ricerca Top directories Cartella superiore The list of directories where recursive indexing starts. Default: your home. Lista delle cartelle in cui inizia lìindicizzazione recorsiva. Di default è la tua home. Skipped paths Indirizzi saltati These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Lingue per la radice The languages for which stemming expansion<br>dictionaries will be built. Lingue per le quali verrà costruito<br>il dizionario delle espansioni radicali. Log file name Nome del file di log The file where the messages will be written.<br>Use 'stderr' for terminal output Il file dove verranno scritti i messaggi.<br>Usa 'stderr' per il terminale Log verbosity level Livello di verbosità del log This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Questo valore regola il numero dei messaggi,>br>dai soli errori a mole indicazioni per il debug. Index flush megabytes interval Intervallo in megabite per il salvataggio intermedio dell'indice This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Questo valore regola il volume di dati da indicizzare tra un salvataggio e l'altro.<br>Aiuta a controllare l'uso della memoria. Di default è post uguale a 10Mb Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Non usare aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Aspell language Lingua di aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Database directory name Nome della cartella del database The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. 
You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. Process the WEB history queue Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Max. size for the web store (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. Automatic character case sensitivity <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. Maximum term expansion count <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. Maximum Xapian clauses count <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Max. text file size (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Text file page size (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. 
Global Globale CronToolW Cron Dialog <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Hours (* or 0-23) Minutes (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> Enable Disable It seems that manually edited entries exist for recollindex, cannot edit crontab Error installing cron entry. Bad syntax in fields ? 
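The CronToolW help text above explains that the dialog's Days of week, Hours and Minutes fields are used verbatim inside a crontab(5) entry. As a rough, hypothetical illustration only (this is not code shipped with recoll, and the exact command line recoll installs in the crontab may differ), the sketch below composes the crontab line implied by the quoted example (Days "*", Hours "12,19", Minutes "15"):

#include <iostream>
#include <string>

// Hypothetical sketch: build the crontab(5) line for the example values from the
// dialog text above. crontab field order is:
//   minute  hour  day-of-month  month  day-of-week  command
int main()
{
    const std::string days = "*";      // Days of week field
    const std::string hours = "12,19"; // Hours field
    const std::string minutes = "15";  // Minutes field
    // "recollindex" stands in for whatever command the dialog actually installs.
    const std::string line = minutes + " " + hours + " * * " + days + " recollindex";
    std::cout << line << "\n";         // prints: 15 12,19 * * * recollindex
    return 0;
}

With these values cron runs the command every day at 12:15 and 19:15, which is the behaviour the dialog text describes.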
EditDialog Dialog EditTrans Source path Local path Config error Original path EditTransBase Path Translations Setting path translations for Select one or several file types, then use the controls in the frame below to change how they are processed Add Delete Cancel Annulla Save FirstIdxDialog First indexing setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> Indexing configuration This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Indexing schedule This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Start indexing now FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> Cron scheduling The tool will let you decide at what time indexing should run and will install a crontab entry. Real time indexing start up Decide if real time indexing will be started when you log in (only for the default index). ListDialog Dialog GroupBox Main No db directory in configuration Nessuna directory per il DB di base nella configurazione Could not open database in Impossibile aprire il database in . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. Clicca 'Annulla' se vuoi editare il file di configurazione prima di iniziare l'indicizzazione, oppure 'OK' se vuoi procedere. Configuration problem (dynconf Problema di configurazione (dynconf "history" file is damaged, please check or remove it: Preview &Search for: &Cerca: &Next &Successivo &Previous &Precedente Match &Case Rispetta &Maiuscole/minuscole Clear Cancella Creating preview text Creazione del testo per l'anteprima Loading preview text into editor Caricamento anteprima del testo nell'editor Cannot create temporary directory Impossibile creare directory temporanea Cancel Annulla Close Tab Chiudi Tab Missing helper program: Manca il programma di filtro esterno: Can't turn doc into internal representation for Impossibile tradurre il documento per la rappresentazione interna Form Tab 1 Open Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Show main text Print Print Current Preview Show image Select All Copy Save document to file Fold lines Preserve indentation Open document QObject Global parameters Parametri globali Local parameters Parametri locali <b>Customised subtrees <b>Ramificazioni personalizzate The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Lista delle sottocartelle nella gerarchia indicizzata<br>ove alcuni parametri devono essere ridefiniti. Predefinita: vuota. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>I parametri che seguono sono postii al livello superiore, se niente <br> o una linea vuota è selezionata nella casella sovrastante, oppure al livello della cartella selezionata.<br> Puoi aggiungere/rimuovere cartelle cliccando i bottoni +/-. Skipped names Nomi saltati These are patterns for file or directory names which should not be indexed. Questi sono modelli per i nomi delle cartelle e/o dei files che non devono vebire indicizzati. Default character set Set di caratteri di default This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Questa è la codifica caratteri usata per leggere i file che non contengono indicazioni interne sulla codifica usata, ad esempio file di testo semplice.<br>Il valore predefinito è vuoto, in modo che venga usata l'impostazione locale del sistema. Follow symbolic links Segue il link simbolico Follow symbolic links while indexing. 
The default is no, to avoid duplicate indexing Segue il link simbolico durante l'indicizzazione. Di default è no, per evitare la duplicazione dell'indice Index all file names Indicizza tutti i nomi dei files Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indicizza il nome di quei files il cui contenuto non può essere identificato o processato (tipo mime non supportato). Di default è impostato a vero Search parameters Parametri per la ricerca Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Anteprima &Open Open With Run Script Copy &File Name Copia il nome del &File Copy &URL Copia l'&Url &Write to File Save selection to files Preview P&arent document/folder &Open Parent document/folder Find &similar documents Trova documenti &simili Open &Snippets window Show subdocuments / attachments QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Also start indexing daemon right now. Replacing: Replacing file Can't create: Warning Attenzione Could not execute recollindex Deleting: Deleting file Removing autostart Autostart file deleted. Kill current process too ? 
RclMain About Recoll Informazioni su Recoll Executing: [ Esecuzione di: [ Cannot retrieve document info from database Impossibile caricare informazioni del documento dal database Warning Attenzione Can't create preview window Non posso creare la finestra di anteprima Query results Risultati della ricerca Document history Cronologia dei documenti History data Cronologia dei dati Indexing in progress: Indicizzazione in corso: Files Files Purge Pulisco Stemdb Database espansioni Closing Chiusura Unknown Sconosciuto This search is not active any more Questa ricerca non e' piu' attiva Can't start query: Non posso iniziare la ricerca: Bad viewer command line for %1: [%2] Please check the mimeconf file Errata linea di comando per %1: [%2] Verifica il file mimeconf Cannot extract document or create temporary file Non posso estrarre il documento o creare il file temporaneo (no stemming) (nessuna espansione) (all languages) (tutte le lingue) error retrieving stemming languages errore nel recupero delle lingue per l'espansione Update &Index Stop &Indexing All media multimediali message other altri presentation spreadsheet text sorted filtered No helpers found missing Missing helper programs No external viewer configured for mime type [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Can't access file: Can't uncompress file: Save file Result count (est.) Query details Dettagli ricerca Could not open external index. Db not open. Check external indexes list. No results found None Updating Done Monitor Indexing failed The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Erasing index Reset the index and start from scratch ? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Error Index query error Can't update index: indexer running Indexed MIME Types Bad viewer command line for %1: [%2] Please check the mimeview file Viewer command line for %1 specifies both file and parent file value: unsupported Cannot find parent document External applications/commands needed for your file types and not found, as stored by the last indexing pass in Sub-documents and attachments Document filter The indexer is running so things should improve when it's done. Duplicate documents These Urls ( | ipath) share the same content: Bad desktop app spec for %1: [%2] Please check the desktop file Indexing interrupted Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? 
Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. RclMainBase Previous page Pagina precedente Next page Pagina seguente &File &File E&xit &Esci &Tools &Strumenti &Help &Aiuto &Preferences &Preferenze Search tools Strumenti di ricerca Result list Lista risultati &About Recoll &Informazioni su Recoll Document &History C&ronologia documenti Document History Visualizza la cronologia dei documenti &Advanced Search Ricerca &Avanzata Advanced/complex Search Mostra la finestra di Ricerca avanzata &Sort parameters &Parametri ordinamento Sort parameters Configurazione dei parametri di ordinamento Next page of results Pagina seguente Previous page of results Pagina precedente &Query configuration &Configurazione ricerca &User manual &Manuale utente Recoll Recoll Ctrl+Q Ctrl+Q Update &index Aggiorna &indice Term &explorer &Esplora l'indice Term explorer tool Strumento di esplorazione indice External index dialog Configurazione indici esterni &Erase document history &Cancella la cronologia dei documenti First page Prima pagina Go to first page of results Vai alla prima pagina dei risultati &Indexing configuration Conf&igurazione indicizzazione PgDown PgUp &Full Screen F11 Full Screen &Erase search history Sort by dates from oldest to newest Sort by dates from newest to oldest Show Query Details &Rebuild index Shift+PgUp E&xternal index dialog &Index configuration &GUI configuration &Results Sort by date, oldest first Sort by date, newest first Show as table Show results in a spreadsheet-like table Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel File name Nome file Mime type Tipo MIME Date Data Abstract Author Document size Document date File size File date Keywords Original character set Relevancy rating Title URL Mtime Date and time Ipath MIME type Can't sort by inverse relevance ResList Result list Lista dei risultati Unavailable document Documento inaccessible Previous Precedente Next Successivo <p><b>No results found</b><br> <p><b>Nessun risultato</b><br> &Preview &Anteprima Copy &URL Copia l'&Url Find &similar documents Trova documenti &simili Query details Dettagli ricerca (show query) (mostra dettagli di ricerca) Copy &File Name Copia il nome del &File Document history Cronologia dei documenti Preview Open <p><i>Alternate spellings (accents suppressed): </i> Documents Risultati out of at least totale di almeno for 
per <p><i>Alternate spellings: </i> Result count (est.) Snippets ResTable &Reset sort &Delete column Save table to CSV file Can't open/create file: &Preview &Anteprima Copy &File Name Copia il nome del &File Copy &URL Copia l'&Url Find &similar documents Trova documenti &simili &Save as CSV Add "%1" column ResTableDetailArea &Preview &Anteprima Copy &File Name Copia il nome del &File Copy &URL Copia l'&Url Find &similar documents Trova documenti &simili ResultPopup &Preview &Anteprima Copy &File Name Copia il nome del &File Copy &URL Copia l'&Url Find &similar documents Trova documenti &simili SSearch Any term Qualsiasi All terms Tutti File name Nome file Completions Espansione Select an item: Seleziona una voce: Too many completions Troppe possibilita' di espansione Query language Linguaggio di interrogazione Bad query string Stringa di ricerca malformata Out of memory Memoria esaurita Enter file name wildcard expression. Enter search terms here. Type ESC SPC for completions of current term. Inserisci qui i termini di ricerca. Premi ESC Spazio per il completamento automatico dei termini. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear Cancella Ctrl+S Ctrl+S Erase search entry Cancella voce di ricerca Search Cerca Start query Inizia ricerca Enter search terms here. Type ESC SPC for completions of current term. Inserisci qui i termini di ricerca. Premi ESC Spazio per il completamento automatico dei termini. Choose search type. Show query history SearchClauseW SearchClauseW SearchClauseW Any of these Qualsiasi parola All of these Tutte le parole None of these Nessuna di queste This phrase Questa frase Terms in proximity Parole in prossimita' File name matching Nome del file Select the type of query that will be performed with the words Seleziona il tipo di ricerca da effettuare con i termini indicati Number of additional words that may be interspersed with the chosen ones Numero di parole che possono frapporsi tra i termini di ricerca indicati No field Any All None Phrase Proximity File name Nome file Snippets Snippets Find: Next Successivo Prev SnippetsW Search Cerca <p>Sorry, no exact match was found within limits. 
Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SortForm Date Data Mime type Tipo MIME SortFormBase Sort Criteria Criterio di ordinamento Sort the Ordina i most relevant results by: risultati piu' rilevanti per: Descending Discendente Close Chiudi Apply Applica SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Esplora Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Esplorazione dei termini &Expand &Espandi Alt+E Alt+E &Close &Chiudi Alt+C Alt+C Term Termine No db info. Match Case Accents SpellW Wildcards Caratteri jolly Regexp Espressione regolare Spelling/Phonetic Ortografia/Fonetica Aspell init failed. Aspell not installed? Errore di inizializzazione aspell. Aspell e' installato? Aspell expansion error. Errore di espansione di Aspell. Stem expansion Espansione grammaticale error retrieving stemming languages Impossibile formare la lista di espansione per la lingua No expansion found Nessun epansione trovata Term Termine Doc. / Tot. Index: %1 documents, average length %2 terms.%3 results %1 results List was truncated alphabetically, some frequent terms may be missing. Try using a longer root. Show index statistics Number of documents Average terms per document Database directory size MIME types: Item Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog The selected directory does not appear to be a Xapian index La directory selezionata non sembra essera un indice Xapian This is the main/local index! Questo e' l'indice principale! 
The selected directory is already in the index list La directory selezionata e' gia' nella lista Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Seleziona la directory indice Xapian (es.: /home/ciccio/.recoll/xapiandb) error retrieving stemming languages Impossibile formare la lista delle lingue per l'espansione grammaticale Choose Result list paragraph format (erase all to reset to default) Result list header (default is empty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read At most one index should be selected Cant add index with different case/diacritics stripping option Default QtWebkit font Any term Qualsiasi All terms Tutti File name Nome file Query language Linguaggio di interrogazione Value from previous program exit UIPrefsDialogBase User interface Interfaccia utente Number of entries in a result page Numero di risultati per pagina Result list font Fonts per la lista dei risultati Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Apre una finestra di dialogo per selezionare i fonts della lista dei risultati Reset Ripristina Resets the result list font to the system default Ripristina i font della lista dei risultati Auto-start simple search on whitespace entry. Inizia automaticamente una ricerca semplice digitando uno spazio. Start with advanced search dialog open. Inizia aprendo la finestra di ricerca avanzata. Start with sort dialog open. Inizia con la finestra di ordinamento aperta. Search parameters Parametri per la ricerca Stemming language Linguaggio per l'espansione Dynamically build abstracts Costruisci dinamicamente i riassunti Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Devo cercare di costruire i riassunti per le voci nell'elenco dei risultati usando il contesto dei termini di ricerca? Puo' essere lento per grossi documenti.. Replace abstracts from documents Sostituisci i riassunti dei documenti Do we synthetize an abstract even if the document seemed to have one? Devo sintetizzare un riassunto anche se il documento sembra ne abbia uno? Synthetic abstract size (characters) Numero caratteri per il riassunto Synthetic abstract context words Numero di parole di contesto per il riassunto External Indexes Indici esterni Add index Aggiungi indice Select the xapiandb directory for the index you want to add, then click Add Index Seleziona nella directory Xapiandb l'indice che vuoi aggiungere e clicca su 'Aggiungi indice' Browse Esplora &OK &OK Apply changes Applica modifiche &Cancel &Annulla Discard changes Annulla modifiche Result paragraph<br>format string Stringa di formattazione<br>dei risultati Automatically add phrase to simple searches Aggiungi automaticamente frase alle ricerche semplici A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Una ricerca per [vino rosso] (2 parole) sara' completata come [vino O rosso O (vino FRASE 2 rosso)]. Questo dovrebbe dare la precedenza ai risultati che contengono i termini esattamente come sono stati scritti. User preferences Preferenze utente Use desktop preferences to choose document editor. Usa le preferenze del desktop per scegliere l'editor dei documenti. 
External indexes Indici esterni Toggle selected Commuta selezionati Activate All Seleziona tutti Deactivate All Deseleziona tutti Remove selected Rimuovi selezionati Remove from list. This has no effect on the disk index. Rimuovi dalla lista. Non ha effetto sull'indice del disco. Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Definisce il formato per ogni paragrafo dell'elenco dei risultati. Usare il formato qt html e le sostituzioni tipo printf:<br>%A Riassunto<br> %D Data<br> %I Icona<br> %K Parole chiave (se esistono)<br> %L Link per anteprima e modifica<br> %M Tipo MIME<br> %N Numero del risultato<br> %R Percentuale di rilevanza<br> %S Informazioni sulla dimensione<br> %T Titolo<br> %U Url<br> Remember sort activation state. Ricorda lo stato dell'impostazione di ordinamento. Maximum text size highlighted for preview (megabytes) Dimensione massima del testo da evidenziare nell'anteprima (megabytes) Texts over this size will not be highlighted in preview (too slow). Testi di lunghezza superiore a questa non vengono evidenziati nella preview (troppo lento). ViewAction Changing actions with different current values Modifica di azioni con valori differenti da quelli attuali Mime type Tipo MIME Command MIME type Desktop Default Changing entries with different current values ViewActionBase File type Tipo di file Action Azione Select one or several file types, then click Change Action to modify the program used to open them Seleziona uno o piu' tipi di file e poi clicca su 'Cambia Azione' per modificare il programma usato per aprirli Change Action Cambia Azione Close Chiudi Native Viewers Applicazione di visualizzazione Select one or several mime types then use the controls in the bottom frame to change how they are processed. Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Recoll action: current value Select same <b>New Values:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfIndexW Can't write configuration file Impossibile scrivere il file di configurazione confgui::ConfParamFNW Browse Esplora Choose confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSubPanelW Global Globale confgui::ConfTopPanelW Top directories Cartella superiore The list of directories where recursive indexing starts. Default: your home. Lista delle cartelle in cui inizia lìindicizzazione recorsiva. Di default è la tua home. Skipped paths Indirizzi saltati These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Questi sono i nomi delle cartelle in cui l'indicizzazione non entra<br>Possono contenere caratteri speciali. 
Devono corrispondere agli indirizzi visti dal motore di indicizzazione (ad esempio, se la cartella superiore include '/home/io' e '/home' è in realtà un link a '/usr/home', l'indirizzo corretto che si vuole sltare dovrebbe essere '/home/me/tmp*' e non ì/home/usr/tmp*') Stemming languages Lingue per la radice The languages for which stemming expansion<br>dictionaries will be built. Lingue per le quali verrà costruito<br>il dizionario delle espansioni radicali. Log file name Nome del file di log The file where the messages will be written.<br>Use 'stderr' for terminal output Il file dove verranno scritti i messaggi.<br>Usa 'stderr' per il terminale Log verbosity level Livello di verbosità del log This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Questo valore regola il numero dei messaggi,>br>dai soli errori a mole indicazioni per il debug. Index flush megabytes interval Intervallo in megabite per il salvataggio intermedio dell'indice This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Questo valore regola il volume di dati da indicizzare tra un salvataggio e l'altro.<br>Aiuta a controllare l'uso della memoria. Di default è post uguale a 10Mb Max disk occupation (%) Massima occupazione del disco fisso (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Questa è la percentuale fi occupazione del disco fisso oltre la quale l'indicizzazione si ferma con un errore (per evitare di riempire il disco).<br>0 significa nessun limite (questo è il valore di default). No aspell usage Non usare aspell Aspell language Lingua di aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Lingua per il dizionario aspell. Dovrebbe essere simile a 'en' o 'it' ...<br>Se questo valore non è impostato verrà usato l'ambiente NLS per calcolarlo, cosa che generalmente funziona. Per avere un'idea di cosa sia installato sul tuo sistema, dai il comando 'aspell config' e guarda il nome dei files .dat nella cartella 'data-dir'. Database directory name Nome della cartella del database The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Nome della cartella in cui salvare l'indice<br>Un indirizzo non assoluto viene interpretato come relativo alla cartella di congigurazione. Di default è 'xapiandb'. Use system's 'file' command Usa il comando di sistema 'file' Use the system's 'file' command if internal<br>mime type identification fails. Usa il comando di sistema 'file' se fallisce<br>l'identificazione interna del tipo mime. uiPrefsDialogBase User preferences Preferenze utente User interface Interfaccia utente Number of entries in a result page Numero di risultati per pagina If checked, results with the same content under different names will only be shown once. Hide duplicate results. 
Result list font Fonts per la lista dei risultati Opens a dialog to select the result list font Apre una finestra di dialogo per selezionare i fonts della lista dei risultati Helvetica-10 Helvetica-10 Resets the result list font to the system default Ripristina i font della lista dei risultati Reset Ripristina Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Definisce il formato per ogni paragrafo dell'elenco dei risultati. Usare il formato qt html e le sostituzioni tipo printf:<br>%A Riassunto<br> %D Data<br> %I Icona<br> %K Parole chiave (se esistono)<br> %L Link per anteprima e modifica<br> %M Tipo MIME<br> %N Numero del risultato<br> %R Percentuale di rilevanza<br> %S Informazioni sulla dimensione<br> %T Titolo<br> %U Url<br> Result paragraph<br>format string Stringa di formattazione<br>dei risultati Texts over this size will not be highlighted in preview (too slow). Testi di lunghezza superiore a questa non vengono evidenziati nella preview (troppo lento). Maximum text size highlighted for preview (megabytes) Dimensione massima del testo da evidenziare nell'anteprima (megabytes) Use desktop preferences to choose document editor. Usa le preferenze del desktop per scegliere l'editor dei documenti. Choose editor applications Auto-start simple search on whitespace entry. Inizia automaticamente una ricerca semplice digitando uno spazio. Start with advanced search dialog open. Inizia aprendo la finestra di ricerca avanzata. Start with sort dialog open. Inizia con la finestra di ordinamento aperta. Remember sort activation state. Ricorda lo stato dell'impostazione di ordinamento. Prefer Html to plain text for preview. Search parameters Parametri per la ricerca Stemming language Linguaggio per l'espansione A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Una ricerca per [vino rosso] (2 parole) sara' completata come [vino O rosso O (vino FRASE 2 rosso)]. Questo dovrebbe dare la precedenza ai risultati che contengono i termini esattamente come sono stati scritti. Automatically add phrase to simple searches Aggiungi automaticamente frase alle ricerche semplici Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Devo cercare di costruire i riassunti per le voci nell'elenco dei risultati usando il contesto dei termini di ricerca? Puo' essere lento per grossi documenti.. Dynamically build abstracts Costruisci dinamicamente i riassunti Do we synthetize an abstract even if the document seemed to have one? Devo sintetizzare un riassunto anche se il documento sembra ne abbia uno? Replace abstracts from documents Sostituisci i riassunti dei documenti Synthetic abstract size (characters) Numero caratteri per il riassunto Synthetic abstract context words Numero di parole di contesto per il riassunto The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Query language magic file name suffixes. Enable External Indexes Indici esterni Toggle selected Commuta selezionati Activate All Seleziona tutti Deactivate All Deseleziona tutti Remove from list. 
This has no effect on the disk index. Rimuovi dalla lista. Non ha effetto sull'indice del disco. Remove selected Rimuovi selezionati Add index Aggiungi indice Apply changes Applica modifiche &OK &OK Discard changes Annulla modifiche &Cancel &Annulla Abstract snippet separator Style sheet Opens a dialog to select the style sheet file Choose Resets the style sheet to default Result List Edit result paragraph format string Edit result page html header insert Date format (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Autophrase term frequency threshold percentage Plain text to HTML line style Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. <BR> <PRE> <PRE> + wrap Disable Qt autocompletion in search entry. Paths translations Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Snippets window CSS file Opens a dialog to select the Snippets window CSS style sheet file Resets the Snippets window style Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_pl.qm0000644000175000017500000026541713545064515014255 00000000000000)j!00ŃJ M(fS0 *a0wnxgt :N  \EnE.0,;s>.cq}Do2N'Vsn4XMJX2h5[ltإnjwwax%~H5v8.q,bsͺ )~H LSIԵ؅ qA"p#vvw 5w 5zw 5dw U.Mֳ6-wf3T/wyͼuKmggfH;U:4BUgyex Z!DS( N>x&A=dS?d?dG$JUYJUYYY[uiQ_nbzn*W[uDʷvP3ʗHʗM^GkL=3ނx;JgBAK%B `0C.3<.':d@;[ݭ<LNBe 2vh(P0bY"3 Z I6"IeQnEߤ@ff;f:,6VW4Way%1y%C΄: #CX|(W0̔ ;i -Zz-j)TR-.tn64BBSng2j.("k l9or?):rs%w|kG,o<"U"׉AS%â ȗRguǢŕI^W~C=k>`V#`M8`$" ׻:jUKSp h7u9NMv2hT#jBe %Cn37wN[XnD!Kw)/C96!v#+Vv7I^j<~FW#FN@ZH:"ďM>_guKap f 6w ϗ8ܙ1 lÓtÓtiȍɆtZ*]85#t3bR:uNG;.jGT`Qo4kzu}}n;EAV0'R%BcH@Iqid6kKD2c 䴥:? 0 Z H -(5 9Zy ;3 D K ]# cC kjK lMb! X qDߋ  B ?u 9P ü>O !y 3 9 *Nha :^% c R *R3. 6j >V G.~KT `P ` aE cE d8 yg I3e k VT z C)  G  c ԅ y ye1 TH? do ,x3 =!P Kj T|c hd   i Σ rƒ y n< ٷg ۷ : ?IS Vd 'ИQ +bCt / L*— P֙> RVY T#t/ V \iCQj ] `F  hR vf {lO9 !Y !YM WU Y    i Z   7 ~H] Ny a m~ 'RL# -< .!N- 84 F3 OEJ ]7 ]? mCG u0d y y< 8 3G ȩ< u P P( 5dL ~ L 1  7^} ;? 
Ւ4 H _ Q5 £ qG!%ny/.5b@8b:9<W8Q~PZW"ykY~s{[s\2e3g3`p~^=!7ndcmzc|s|T'  l_Li[ Ka|dy warunek All clauses AdvSearchKtry[ warunek Any clause AdvSearchHBBdna jednostka we filtrze rozmiaru$Bad multiplier suffix in size filter AdvSearchmultimediamedia AdvSearchwiadomo[cimessage AdvSearchpozostaBeother AdvSearchprezentacje presentation AdvSearcharkusze spreadsheet AdvSearchtekstowetext AdvSearch <----- Wszystkie <----- All AdvSearchBase"<----- Zaznaczone <----- Sel AdvSearchBaseDodaj warunek Add clause AdvSearchBase"DokBadne szukanieAdvanced search AdvSearchBaseWszystkie ----> All ----> AdvSearchBase.Podane warunki (pola z prawej strony) bd u|yte razem (dla zaznaczonego "Ka|dy warunek") lub oddzielnie (dla zaznaczonego "Ktry[ warunek"). <br>Pola "Ktre[", "Wszystkie" i "{adne" przyjmuj pojedyDcze wyrazy lub frazy w cudzysBowiu.<br>Pola bez danych s ignorowane.</br></br>All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored.

 AdvSearchBasePrzegldajBrowse AdvSearchBaseJako kategoria By categories AdvSearchBase2Zaznacz, by okre[li dat'Check this to enable filtering on dates AdvSearchBase<Zaznacz, by okre[li typ pliku,Check this to enable filtering on file types AdvSearchBase8Zaznacz, by okre[li rozmiar'Check this to enable filtering on sizes AdvSearchBase4Zaznacz, by u|y kategorii;Check this to use file categories instead of raw mime types AdvSearchBaseZamknijClose AdvSearchBaseUsuD warunek Delete clause AdvSearchBase<Podaj szczyt katalogu szukaniaEnter top directory for search AdvSearchBase FiltryFilter AdvSearchBasePo dacie Filter dates AdvSearchBasePo rozmiarze Filter sizes AdvSearchBase ZnajdzFind AdvSearchBasePo:From AdvSearchBasePomiD plikiIgnored file types AdvSearchBaseLE{CE POZAInvert AdvSearchBaseMniejszy od: Max. Size AdvSearchBaseJDopuszczalne jednostki: k/K, m/M, g/G4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseWikszy od: Min. Size AdvSearchBaseJDopuszczalne jednostki: k/K, m/M, g/G4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase Okre[l typ plikuRestrict file types AdvSearchBase<Tylko pliki LE{CE W katalogu:%Restrict results to files in subtree: AdvSearchBase.Zapamitaj wybrane typySave as default AdvSearchBase^Znajdz <br>dokumenty<br>speBniacjce:</br></br>1Search for
documents
satisfying:

 AdvSearchBasePrzeszukaj plikSearched file types AdvSearchBase"Zaznaczone -----> Sel -----> AdvSearchBase Szukaj Start Search AdvSearchBase Przed:To AdvSearchBase<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksuj cyklicznie (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ka|de pole mo|e zawiera wieloznacznik (*), pojdyDcz warto[, list po przecinku (1,3,5) oraz zakres (1-7). Tak samo<span style=" font-style:italic;">jak</span>gdyby to byB plik Crontab. Dlatego mo|liwe jest u|ycie skBadni Crontab. (zobacz crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />PrzykBadowo wpisujc <span style=" font-family:'Courier New,courier';">*</span> w <span style=" font-style:italic;">"Dni tygodnia", </span><span style=" font-family:'Courier New,courier';">12,19</span> w <span style=" font-style:italic;">"Godziny"</span> oraz <span style=" font-family:'Courier New,courier';">15</span> w <span style=" font-style:italic;">"Minuty"</span> uruchomili by[my indeksowanie (recollindex) ka|dego dnia o 00:15 oraz 19:15</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Indeksowanie cykliczne (nawet te bardzo czste) jest mniej efektowne ni| indeksowanie w czasie rzeczywistym.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 PM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolWZ<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Kliknij <span style=" font-style:italic;">WyBcz</span>, aby zatrzyma automatyczne indeksowanie. <span style=" font-style:italic;">WBcz</span>, aby je rozpocz. <span style=" font-style:italic;">Anuluj</span>, aby utrzyma obecny stan.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolW"Ustaw cykl (CRON) Cron Dialog CronToolWZDni tygodnia (* or 0-7, 0 lub 7 to Niedziela))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolW WyBczDisable CronToolW WBczEnable CronToolWpBBd przy rejestrowaniu cyklu. BBdna skBadnia w polach?3Error installing cron entry. Bad syntax in fields ? CronToolW(Godziny (* lub 0-23)Hours (* or 0-23) CronToolWNie mo|na zmieni crontab. Wyglda na to, |e istniej rczne wpisy dla recollindex.PIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinuty (0-59)Minutes (0-59) CronToolWOkno dialogoweDialog EditDialog"BBd konfiguracji Config error EditTransZcie|ka lokalna Local path EditTrans$Zcie|ka oryginalna Original path EditTrans Zcie|ka zrdBowa Source path EditTrans DodajAdd EditTransBase AnulujCancel EditTransBaseUsuDDelete EditTransBase&Zcie|ka tBumaczeniaPath Translations EditTransBase ZapiszSave EditTransBaseWybierz jeden lub kilka typw pliku, nastpnie wska| w ramce poni|ej jak maj zosta przetworzonekSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBaseDUstawienie [cie|ki translacji dla Setting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Indeks dla tej konfiguracji nie istnieje.</span><br /><br />Je[li tylko chcesz indeksowa swj katalog domowy u|wyajc fabrcznych ustawieD, wci[nij przycisk <span style=" font-style:italic;">Rozpocznij indeksowanie </span>. SzczegBy mo|esz ustawi rwnie| pzniej. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Je[li chesz mie wiksz kontrol, u|yj nastpujcych odno[nikw w celu konfiguracji indeksowania oraz jego harmonogramu.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">To samo mo|esz rwnie| otrzyma pozniej wybierajc <span style=" font-style:italic;">Ustawienia</span> z menu.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialogHPocztkowa konfiguracja indeksowaniaFirst indexing setupFirstIdxDialog2Konfiguracja indeksowaniaIndexing configurationFirstIdxDialog0Harmonogram indeksowaniaIndexing scheduleFirstIdxDialog.Rozpocznij indeksowanieStart indexing nowFirstIdxDialogTutaj mo|esz wybra katalogi do indeksowania, oraz inne parametry tj. wyBczenie [cie|ek plikw czy ich nazw, domy[lny zestaw znakw, etc.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialog"Tutaj mo|esz wybra midzy indeksowaniem w kolejce, a indeksowaniem nabierzco, jak i ustaleniem automatycznej kolejki indeksowania (dziki Cron)This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Indeksowanie <span style=" font-weight:600;">Recoll</span> mo|e by uruchomione na staBe (indeksujc ka|d zmian) lub w okre[lonych cyklach.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Instrukcja obsBugi (EN) mo|e pomc wybra rozwizanie dla Ciebie (wci[nij F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Narzdzie to pomo|e Ci zaplanowa indeksowanie cykliczne lub wybierzesz indeksowanie "na bie|co" po zalogowaniu (lub jedno i drugie, co rzadko jest sendowne).</p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedW2Planowanie z u|yciem CronCron scheduling IdxSchedW\Pozwala uruchomi indeksowanie po zalogowaniu.ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedWLKonfiguracja harmonogramu indeksowaniaIndex scheduling setup IdxSchedWBUruchom indeksowanie "na bie|co"Real time indexing start up IdxSchedWTutaj zdecydujesz o jakim czasie indeksowanie ma by uruchamiane (po przez wpis do crontab)._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedWOkno dialogoweDialog ListDialog GrupaGroupBox ListDialogPlik "history" jest uszkodzony lub brak mo|liwo[ci jego odczytu/zapisu, zmieD to lub go usuD: K"history" file is damaged or un(read)writeable, please check or remove it: MainXBrak katalogu dla bazy danych w konfiguracji No db directory in configurationMain&Nastpny&NextPreview&Poprzedni &PreviousPreview&Szukaj: &Search for:Preview^Nie mog przemieni dokumentu na wBadny format 0Can't turn doc into internal representation for Preview AnulujCancelPreviewWyczy[ClearPreviewZamknij kart Close TabPreview*Tworz podgld tekstuCreating preview textPreview(BBd Badowania plikuError while loading filePreview@Aaduj podgld tekstu do edytora Loading preview text into editorPreview2Sprawdzaj &wielko[ liter Match &CasePreview>Brak programu usprawniajcego: Missing helper program: Preview KopiujCopyPreviewTextEditZwiD linie Fold linesPreviewTextEdit Zachowaj wciciaPreserve indentationPreviewTextEdit DrukujPrintPreviewTextEdit*Drukuj obecny podgldPrint Current PreviewPreviewTextEdit0Zapisz dokument do plikuSave document to filePreviewTextEdit Zaznacz wszystko Select AllPreviewTextEditPoka| pola Show fieldsPreviewTextEditPoka| obraz Show imagePreviewTextEdit$Poka| tekst gBwnyShow main textPreviewTextEdit0<b>Dostosowana gaBz</b>Customised subtreesQObject <i>Poni|sze parametry tycz s pod[wietlonego katalogu z powy|szej listy.<br> Je[li powy|sza lista jest pusta lub pod[wietla pust lini poni|sze ustawienia tycz si najpBytszego katalogu.<br>Mo|esz doda lub usun katalog klikajc przyciski +/-</br></br></i> The parameters that follow are set either at the top level, if nothing
or an empty line is selected in the listbox above, or for the selected subdirectory.
You can add or remove directories by clicking the +/- buttons.

QObject,Domy[lny zestaw znakwDefault character setQObjectBIdz za dowizaniami symbolicznymiFollow symbolic linksQObjectFollow symbolic links while indexing. The default is no, to avoid duplicate indexing Indeksujc, idz za dowizaniami symbolicznymi. Domy[lnia warto[ to NIE, chroni przed zduplikowanymi indeksami.TFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject$Parametry globalneGlobal parametersQObject>Indeksuj wszystkie nazwy plikwIndex all file namesQObject Indeksuj nazwy plikw dla ktrych zawarto[ nie mo|e by rozpoznana lub przetworzona (Nie lub nieobsBugiwany typ MIME). Domy[lnie Tak.}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject"Parametry lokalneLocal parametersQObject$Parametry szukaniaSearch parametersQObjectWykluczenia Skipped namesQObjectLista podkatalogw w zaindeksowanej hierarchii <br> dla ktrych cz[ parametrw musi by ustalona ponownie. Domy[lnie: pusty.</br>xThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.
QObjectTutaj ustawiasz reguBy wykluczajce indeksowanie plikw i katalogw.LThese are patterns for file or directory names which should not be indexed.QObjectTo jest zestaw znakw sBu|cy do odczytu plikw i nie jest to|samy z ustawieniami wewntrznymi (np: czyste pliki tesktowe)<br>Domy[lnie jest pusty, a u|yta jest warto[ ze [rodowiska NLS.</br>This is the character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environment is used.
QObjectHistoria sieci Web historyQObject.<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksacja mo|e by uruchomiona w tle (daemon), aktualizujc indeks nabierzco. Zyskujesz zawsze aktualny indeks, tracc cz[ zasobw systemowych.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> .

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up-to-date index, but system resources are used permanently.

RTIToolWbDodatkowo natychmiast uruchom indeksowanie w tle.%Also start indexing daemon right now.RTIToolWrUsunito plik autostartu. Zamkn rwnie| bie|cy proces?2Autostart file deleted. Kill current process too ?RTIToolW$Nie mog utworzy:Can't create: RTIToolW:Nie mo|na wykona recollindexCould not execute recollindexRTIToolWUsuwanie pliku Deleting fileRTIToolWUsuwanie: Deleting: RTIToolWjAutomatyczny start indeksowania w czasie rzeczywistym"Real time indexing automatic startRTIToolW&Usuwanie autostartuRemoving autostartRTIToolWPodmiana plikuReplacing fileRTIToolWPodmiana: Replacing: RTIToolWlUruchom indeksowanie w tle razem ze startem komputera..Start indexing daemon with my desktop session.RTIToolWOstrze|enieWarningRTIToolW(ka|dy jzyk)(all languages)RclMain>wyBcz ciosanie (ang. stemming) (no stemming)RclMainKarta Recoll About RecollRclMainWszystkoAllRclMainBBdna komenda przegldarki dla typu %1: [%2] Sprawdz plik widoku MIMECBad viewer command line for %1: [%2] Please check the mimeview fileRclMainFNie mog uzyska dostpu do pliku: Can't access file: RclMain@Nie mo|na utworzy okna podglduCan't create preview windowRclMain4Nie mog wypakowa pliku: Can't uncompress file: RclMaindNie mog zaktualizowa indeksu: pracujcy indekser#Can't update index: indexer runningRclMaintNie mo|na wypakowa dokumentu lub stworzy plik tymczasowy0Cannot extract document or create temporary fileRclMainHNie mo|na odszuka rodzica dokumentuCannot find parent documentRclMain|Brak mo|liwo[ci pobrania informacji o dokumencie z bazy danych+Cannot retrieve document info from databaseRclMainKliknij Ok by uaktualni indeks tego pliku, po zakoDczeniu ponw zapytanie lub Anuluj.fClick Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel.RclMainZamykanieClosingRclMainhZawarto[ zostaBa zaindeksowana dla tych typw MIME:.Content has been indexed for these mime types:RclMainNie mog otworzyc zewntrznego indeksu. Nie otwarta baza danych. Sprawdz list zewntrznych indeksw.HCould not open external index. Db not open. Check external indexes list.RclMain2Filtr kategorii dokumentuDocument category filterRclMain&Historia dokumentwDocument historyRclMainZakoDczoneDoneRclMain Usuwanie indeksu Erasing indexRclMainBBdErrorRclMainWykonuj: [ Executing: [RclMainBrak zewntrznych aplikacji|komend wymaganych przez twoje typy plikw.pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMainHistoria danych History dataRclMain*Indeks jest zamknityIndex not openRclMainIndeks tego pliku jest nieaktualny. Odmawiam podania bBdnych wynikw.MIndex not up to date for this file. Refusing to risk showing the wrong entry.RclMain,BBd odpytania indeksuIndex query errorRclMain.Zaindeksowane typy MIMEIndexed MIME TypesRclMainvIndeksowanie w trakcie, spodziewana poprawa po zakoDczeniu.7Indexer running so things should improve when it's doneRclMainXIndeksowanie nie zostaBo jeszcze uruchomioneIndexing did not run yetRclMain(Pora|ka indeksowaniaIndexing failedRclMain.Indeksowanie w tracie: Indexing in progress: RclMain,Brakujce rozszerzeniaMissing helper programsRclMainSprawdzanieMonitorRclMainvBrak skonfigurowanej zewntrzenej przegldarki typw MIME [-No external viewer configured for mime type [RclMainBWszystkie rozszerzenia znalezionoNo helpers found missingRclMainBrak wynikwNo results foundRclMainNicNoneRclMainWyczy[PurgeRclMainTrwa odpytywanie.<br>z powodu ograniczeD biblioteki indeksowania,<br>anulowanie zamknie program</br></br>oQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the program

RclMainWynik zapytania Query resultsRclMain>Ponownie spisa indeks od zera?(Reset the index and start from scratch ?RclMain,Liczba wynikw (szac.)Result count (est.)RclMainZapisz plik Save fileRclMain StemdbStemdbRclMain.Zatrzymaj &IndeksowanieStop &IndexingRclMain2Poddokumenty i zaBcznikiSub-documents and attachmentsRclMainObecny proces indeksowania uruchomiono z innego okna. Kliknij Ok, by zamkn proces.yThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainBrak przegldarki dla typu MIME %1: %2 . Chcesz to ustawi teraz?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainFTo wyszukanie przestaBo by aktywne"This search is not active any moreRclMainNieznaneUnknownRclMainOd[wie| &Indeks Update &IndexRclMainOd[wie|anieUpdatingRclMainPolecenie czytnika dla %1 podaje zarwno plik jak i warto[ pliku rodzica: niewspieraneQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainOstrze|enieWarningRclMaintBBd pobierania "reguB ciosania" (ang. stemming languages)#error retrieving stemming languagesRclMainprzefiltrowanefilteredRclMainmultimediamediaRclMainwiadomo[cimessageRclMainpozostaBeotherRclMainprezentacje presentationRclMainposortowanesortedRclMainarkusze spreadsheetRclMaintekstowetextRclMain&Karta Recoll &About Recoll RclMainBase,&Zaawansowane szukanie&Advanced Search RclMainBase0&UsuD histori dokumentu&Erase document history RclMainBase.&UsuD histori szukania&Erase search history RclMainBase &Plik&File RclMainBasePeBen &Ekran &Full Screen RclMainBase"Konfiguracja &GUI&GUI configuration RclMainBase &Pomoc&Help RclMainBase*&Konfiguracja indeksu&Index configuration RclMainBase$&Plan indeksowania&Indexing schedule RclMainBase&Ustawienia &Preferences RclMainBase&Odnw indeks&Rebuild index RclMainBase&Wyniki&Results RclMainBase2Poka| zaindeksowane &typy&Show indexed types RclMainBase:Poka| &brakujcych pomocnikw&Show missing helpers RclMainBase*Parametry &sortowania&Sort parameters RclMainBase&Narzdzia&Tools RclMainBase&Instrukcja &User manual RclMainBase ZBo|one szukanieAdvanced/complex Search RclMainBaseWszystkoAll RclMainBase Ctrl+QCtrl+Q RclMainBase$Historia DokumentuDocument History RclMainBase&&Historia dokumentuDocument &History RclMainBase&ZakoDczE&xit RclMainBase"Zewntrzny indeksE&xternal index dialog RclMainBase"Zewntrzny indeksExternal index dialog RclMainBaseF11F11 RclMainBasePierwsza strona First Page RclMainBasePierwsza strona First page RclMainBasePeBen ekran Full Screen RclMainBaseFPrzejdz do pierwszej strony wynikwGo to first page of results RclMainBaseNastpna strona Next Page RclMainBaseNastpna strona Next page RclMainBase.Nastpna strona wynikwNext page of results RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBase"Poprzednia strona Previous Page RclMainBase"Poprzednia strona Previous page RclMainBase2Poprzednia strona wynikwPrevious page of results RclMainBase RecollRecoll RclMainBase Wyniki Result list RclMainBase:Zapisz jako plik CSV (arkusz)Save as CSV (spreadsheet) file RclMainBase\Zapisz wyniki do pliku czytelnego przez arkusz@Saves the result into a file which you can load in a spreadsheet RclMainBase,Narzdzia wyszukiwania Search tools RclMainBaseShift+PgUp Shift+PgUp RclMainBase2Poka| szczegBy zapytaniaShow Query Details RclMainBase"Poka| jako tabela Show as table RclMainBase0Poka| wyniki jako arkusz(Show results in a spreadsheet-like table RclMainBase>Sortuj po dacie: od najnowszegoSort by date, newest first 
RclMainBase@Sortuj po dacie: od najstarszegoSort by date, oldest first RclMainBase>Sortuj po dacie: od najnowszego#Sort by dates from newest to oldest RclMainBase@Sortuj po dacie: od najstarszego#Sort by dates from oldest to newest RclMainBase(Parametry sortowaniaSort parameters RclMainBase"Przej|yj &terminyTerm &explorer RclMainBase*Przegldanie terminwTerm explorer tool RclMainBase*&Aktualizacja indeksu Update &index RclMainBaseAbstrakcjaAbstract RecollModel AutorAuthor RecollModelDataDate RecollModelData i czas Date and time RecollModelData dokumentu Document date RecollModel"Rozmiar dokumentu Document size RecollModelData pliku File date RecollModelNazwa pliku File name RecollModelRozmiar pliku File size RecollModel IpathIpath RecollModelSBowa kluczeKeywords RecollModelTyp MIME MIME type RecollModel Czas modyfikacjiMtime RecollModel0Oryginalny zestaw znakwOriginal character set RecollModelTrafno[Relevancy rating RecollModel TytuBTitle RecollModelURLURL RecollModel"(Poka| zapytanie) (show query)ResList@<p><b>Brak wynikw</b><br /></p>$

No results found

ResListx<p><i>Aleternatywna pisowania (ignorowane akcenty): </i></p>8

Alternate spellings (accents suppressed):

ResListJ<p><i>Alternatywna pisownia: </i></p>#

Alternate spellings:

ResList$Historia dokumentuDocument historyResListDokumenty DocumentsResList(Duplikaty dokumentwDuplicate documentsResListNastpnyNextResList OtwrzOpenResListPoprzedniPreviewResListPoprzedniPreviousResList&SzczegBy zapytania Query detailsResList6Liczba wynikw (oszacowana)Result count (est.)ResListLista wynikw Result listResListTTe URLe ( | ipath) maj t sam zawarto[:-These Urls ( | ipath) share the same content:ResList(Dokument niedostpnyUnavailable documentResListdlaforResListz co najmniejout of at leastResList&UsuD kolumn&Delete columnResTable"&Reset sortowania &Reset sortResTable &Zapisz jako CSV &Save as CSVResTable$Dodaj "%1" kolumnAdd "%1" columnResTableDNie mo|na otworzy|utworzy pliku:Can't open/create file: ResTable6Zapisz tabel jako plik CSVSave table to CSV fileResTable&Otwrz&Open ResultPopup@&Otwrz dokument|katalog rodzica&Open Parent document/folder ResultPopup&Poprzedni&Preview ResultPopup Zapisz &do pliku&Write to File ResultPopup&&Kopiuj nazw plikuCopy &File Name ResultPopupKopiuj &URL Copy &URL ResultPopup2Znajdz &podobne dokumentyFind &similar documents ResultPopup*Otwrz okno &snipetwOpen &Snippets window ResultPopupDPodgld rodzica dokumentu|kataloguPreview P&arent document/folder ResultPopup6Zapisz zaznaczenie do plikuSave selection to files ResultPopup:Poka| poddokumenty|zaBcznikiShow subdocuments / attachments ResultPopupKa|dy termin All termsSSearchKtry[ terminAny termSSearch BBdne zapytanieBad query stringSSearchlWprowadz wieloznakowe (wildcard) wyra|enie nazwy pliku$Enter file name wildcard expression.SSearchWprowadz wyra|enie pytajce. Zciga:<br> <i>termin1 termin2</i> : 'termin1' i 'termin2' w ktrym[ polu.<br> <i>pole:termin1</i> : 'termin1' w polu 'pole'.<br> Standardowe pola nazwy|synonimy:<br> tytuB/przedmiot/napis, autor/od, odbiorca/do, nazwa pliku, ext.<br> Pseudo-pola: katalog, mime/format, typ/rclcat, data.<br> PrzykBad przedziaBu dat: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>termin1 termin2 LUB termin3</i> : termin1 LUB (termin2 LUB termin3).<br> Nawiasy aktualnie niedozwolone.<br> <i>"termin1 termin2"</i> : fraza (wystpienie dokBadne). Dostpne modyfikatory:<br> <i>"termin1 termin2"p</i> : nieuporzdkowane ssiednie szukanie z domy[ln odlegBo[ci.<br> U|yj <b>Poka| zapytanie</b> je[li jeste[ niepewny wynikw i sprawdz je z instrukcj (<F1>) . </br></br></br></br></br></br></br></br></br></br></br>Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date.
Two date interval examples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
No actual parentheses allowed.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use the Show Query link when in doubt about results, and see the manual (F1) for more detail.
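
A couple of made-up entries, built only from the elements listed in this cheat sheet, may make the syntax clearer (the field values are purely illustrative):

    author:smith ext:pdf date:2009-03-01/2009-05-20
    dir:/home/me/projects report OR draft
    "rolling stones"p

The first line combines a field clause, a file name extension clause and a date interval; the second restricts the search to a directory and accepts either of two terms; the third is an unordered proximity search with the default distance.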

SSearchWprowadz tutaj szkane terminy. Wpisz ESC SPC by uzupeBni bie|cy termin.FEnter search terms here. Type ESC SPC for completions of current term.SSearchNazwa pliku File nameSSearchBrak pamici Out of memorySSearchJzyk zapytaDQuery languageSSearch*Wybierz typ szukania.Choose search type. SSearchBaseWyczy[Clear SSearchBase Ctrl+SCtrl+S SSearchBaseWprowadz tutaj szkane terminy. Wpisz ESC SPC by uzupeBni bie|cy termin.FEnter search terms here. Type ESC SPC for completions of current term. SSearchBase"UsuD szukany wpisErase search entry SSearchBaseSSearchBase SSearchBase SSearchBase SzukajSearch SSearchBaseStart zapytania Start query SSearchBase Ka|dyAll SearchClauseW Ktry[Any SearchClauseWNazwa pliku File name SearchClauseWBez polaNo field SearchClauseW {adenNone SearchClauseWLiczba dodatkowych wyrazw, ktre mog by przeplatane z wybranymiHNumber of additional words that may be interspersed with the chosen ones SearchClauseW FrazaPhrase SearchClauseWSsiedztwo Proximity SearchClauseWhWybierz typ zapytania, ktry bdzie u|yty z wyrazami>Select the type of query that will be performed with the words SearchClauseWZnajdz:Find:SnippetsNastpnyNextSnippetsPoprzedniPrevSnippetsSnipetySnippetsSnippets SzukajSearch SnippetsW&Zamknij&Close SpellBase&Rozszerz&Expand  SpellBaseAkcentyAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBase,Wielko[ znakw (Case)Case SpellBaseDopasowanieMatch SpellBase&Brak informacji bd. No db info. SpellBase"Przegld terminw Term Explorer SpellBase%1 wynikw %1 resultsSpellW2BBd rozszerzenia Aspell.Aspell expansion error. SpellW`Nieudany start Aspell. Nie zainstalowano Aspell?)Aspell init failed. Aspell not installed?SpellW8Zrednia terminw na dokumentAverage terms per documentSpellW8Rozmiar katalogu bazy danychDatabase directory sizeSpellWDok. / Razem Doc. / Tot.SpellWxIndeks: %1 dokumenty, [rednia dBugo[ %2 terminw.%3 wynikw7Index: %1 documents, average length %2 terms.%3 resultsSpellWElementItemSpellWRLista obcita alfabetycznie, cz[ czsta1List was truncated alphabetically, some frequent SpellW8Najwiksza dBugo[ dokumentuLongest document lengthSpellWTypy MIME: MIME types:SpellW4Nieznalezione rozszerzenieNo expansion foundSpellW"Liczba dokumentwNumber of documentsSpellW6Wyra|enie reguBowe (regexp)RegexpSpellW:Poka| statystyki indeksowaniaShow index statisticsSpellW:Najmniejsza dBugo[ dokumentuSmallest document lengthSpellW*Pisownia/Fonetyczno[Spelling/PhoneticSpellWHRoszerzenie rdzenia (Stem expansion)Stem expansionSpellW TerminTermSpellWWarto[ValueSpellW4Wieloznaczniki (wildcards) WildcardsSpellWtBBd pobierania "reguB ciosania" (ang. stemming languages)#error retrieving stemming languagesSpellWVTerminy mog zgin. U|yj dBu|szego rdzenia.terms may be missing. 
Try using a longer root.SpellW^Co najwy|ej jeden indeks powinnien by wyberany$At most one index should be selected UIPrefsDialogNie mo|na doda indeksu z opcj r|nej wielko[ci-liter/znakach-diakrytycznych>Cant add index with different case/diacritics stripping option UIPrefsDialogWybierzChoose UIPrefsDialogPNagBwek listy wynikw (domy[lnie pusty)%Result list header (default is empty) UIPrefsDialogFormat paragrafu listy wynikw (usuD wszystko by wri do domy[lnych)Nowa warto[:</b>New Values:ViewActionBase@CzyD (pusty -> recoll domy[lnie) Action (empty -> recoll default)ViewActionBase0U|yj dla obecnego wyboruApply to current selectionViewActionBaseZamknijCloseViewActionBase8Wyjtki dla ustawieD Pulpitu Exception to Desktop preferencesViewActionBase"Systemowy czytnikNative ViewersViewActionBase$Recoll zachowanie:Recoll action:ViewActionBaseWybierz jeden lub kilka typw pliku, nastpnie wska| w ramce poni|ej jak maj zosta przetworzonekSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseWybierz jedno lub kilka typw MIME po czym okre[l jak maj by przetwarzane u|ywajc kontrolek na dole ramkilSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBaseWybierz to samo Select sameViewActionBase@U|yj domy[lnie ustawieD Pulpitu "Use Desktop preferences by defaultViewActionBaseobecna warto[ current valueViewActionBaseWBcz ineksowanie odwiedzonych stron w Firefox.<br>(Wymagana instalacja dodatku Firefox Recoll)</br>aEnables indexing Firefox visited pages.
(you also need to install the Firefox Recoll plugin)
confgui::ConfBeaglePanelWRWpisy bd odnowione gdy osignie rozmiar1Entries will be recycled once the size is reachedconfgui::ConfBeaglePanelWNMaks. rozmiar dla schowka webowego (MB) Max. size for the web store (MB)confgui::ConfBeaglePanelW>Przejdz do kolejki historii webProcess the WEB history queueconfgui::ConfBeaglePanelWNazwa katalogu w ktrym trzymane s kopie odwiedzonych stron.<br>Nieabsolutna [cie|ka jest brana wzgldnie do katalogu konfiguracji.</br>The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory.
confgui::ConfBeaglePanelWLNazwa katalogu dla trzymania stron webWeb page store directory nameconfgui::ConfBeaglePanelWHNie mo|na pisa w pliku konfiguracjiCan't write configuration fileconfgui::ConfIndexWWybierzChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW<p>Automatycznie uruchom rozr|nianie wielko[ci znakw je[li wpis ma wielkie litery (poza pierwszym znakiem). Inaczej musisz u|y jzyka zapytaD oraz modyfikatora <i>C</i> by wskaza rozr|nianie wielko[ci liter.</p>

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity.

confgui::ConfSearchPanelW<p>Automatycznie uruchom czuBo[ diakrytykw je[li szukana fraza zawiera "ogonki" (nie w unac_except_trans). Inaczej musisz u|y jzyka zapytaD oraz modyfikator<i>D</i> by wskaza czuBo[ diakrytykw.</p>

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity.
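
As an illustration of the two modifiers mentioned above, and assuming that, like the phrase modifiers shown in the query language cheat sheet, they are appended after a double-quoted term, a query entry could look like:

    "US"C "resume"D

which would ask for a case-sensitive match of "US" and a diacritics-sensitive match of "resume". This is only a sketch of the syntax described here, not a statement of the exact grammar.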

confgui::ConfSearchPanelW<p>Maksymalna liczba rozszerzeD dla pojedyDczego terminu (np.: u|ywajc wieloznacznikw). Domy[lne 10 000 jest warto[ci rozsdn oraz strze|e przed zawieszeniem zapytania podczas gdy przeszukiwana jest lista terminw</p>

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.

confgui::ConfSearchPanelWl<p>Maksymalna liczba pocztkowych klauzuli dodawanych do zapytania Xapian. W niektrych przypadkach, wynik rozszerzeD terminu mo|e by pomno|ony, zwikszajc zu|ycie pamici. Domy[lne 100 000 powinno by dostatecznie wysokie dla wikszo[ci przypadkw oraz dziaBajce na obecnych konfiguracjach sprztowych.</p>9

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.

confgui::ConfSearchPanelWJAutomatyczna czuBo[ wielko[ci znakw$Automatic character case sensitivityconfgui::ConfSearchPanelWDAutomatyczna czuBo[ na diakrytyki Automatic diacritics sensitivityconfgui::ConfSearchPanelWDMaksymalna liczba klauzuli Xapian Maximum Xapian clauses countconfgui::ConfSearchPanelWHMaksymalna liczba rozszerzeD terminuMaximum term expansion countconfgui::ConfSearchPanelWDPrzerywa po tym czasie zewntrzne filtrowanie. Dla rzadkich przypadkw (np.: postscript) kiedy dokument mo|e spowodowa zaptlenie filtrowania. Brak limitu to -1.External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. confgui::ConfSubPanelWGlobalnieGlobalconfgui::ConfSubPanelWIndeksujc dzieli plik tekstowy na podane kawaBki (je[li r|ne od -1). Pomocne przy szukaniu w wielkich plikach (np.: dzienniki systemowe).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files).confgui::ConfSubPanelWPMaks. rozmiar skompresowanego pliku (KB)Max. compressed file size (KB)confgui::ConfSubPanelW4Maks. czas filtrowania (s)Max. filter exec. time (S)confgui::ConfSubPanelWHMaks. rozmiar plikw tekstowych (MB)Max. text file size (MB)confgui::ConfSubPanelWHRozmiar strony pliku tekstowego (KB)Text file page size (KB)confgui::ConfSubPanelWWarto[ progowa od ktrej skompresowane pliki przestaj by przetwarzane. Brak limitu to -1, 0 wyBcza przetwarzanie plikw skompresowanych.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.confgui::ConfSubPanelW@Warto[ progowa po ktrej pliki tekstowe przestaj by przetwarzane. Brak limitu to -1. U|ywaj do wykluczenia gigantycznych plikw dziennika systemowego (logs).This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index.confgui::ConfSubPanelW<p>To s wyjtki mechaniki unac., ktra domy[lnie usuwa wszystkie diakrytyki oraz wykonuj dekompozycj kanoniczn. Mo|liwe nadpisanie nieakcentowania dla pewnych znakw, w zale|no[ci od twojego jzyka oraz wyszczeglnienie dodatkowych dekompozycji, np.: ligatur. Dla ka|dego wpisu (oddzielony spacjami) pierwszy znak to zrdBo, pozostaBe to tBumaczenie</p>p

These are exceptions to the unac mechanism which, by default, removes all diacritics and performs canonical decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
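
As a sketch of the entry format just described (first character is the source, the rest is its translation), and reusing the unac_except_trans name mentioned in the search panel text above, a configuration line might look like:

    unac_except_trans = ßss æae œoe

Here ß would be decomposed to "ss" and the two ligatures to their two-letter equivalents; the specific entries are only illustrative, not necessarily the shipped defaults.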

confgui::ConfTopPanelWJzyk AspellAspell languageconfgui::ConfTopPanelW4Nazwa katalogu bazy danychDatabase directory nameconfgui::ConfTopPanelWWyBcz u|ywanie Aspell do tworzenia przybli|eD w narzdziu przegldania terminw.<br> U|yteczne, gdy brak Aspell lub jest zepsuty. </br>Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.
confgui::ConfTopPanelWZInterwaB (megabajty) opr|niania indeksowaniaIndex flush megabytes intervalconfgui::ConfTopPanelW8Nazwa pliku dziennika (logs) Log file nameconfgui::ConfTopPanelW4Poziom st|enia komunikatuLog verbosity levelconfgui::ConfTopPanelW.Maks. zu|ycie dysku (%)Max disk occupation (%)confgui::ConfTopPanelW$Brak u|ycia AspellNo aspell usageconfgui::ConfTopPanelW$Wykluczone [cie|ki Skipped pathsconfgui::ConfTopPanelWRReguBy ciosania (ang. stemming languages)Stemming languagesconfgui::ConfTopPanelWPlik w ktrym zapisywane s komunikaty.<br>U|yj "stderr" by skorzysta z konsoli</br>UThe file where the messages will be written.
Use 'stderr' for terminal output
confgui::ConfTopPanelWJzyk dla katalogu Aspell, wygldajcy jak "en" lub "pl" ..<br>Gdy brak, warto[ [rodowiska NLS zostanie u|yta (zwykle dziaBa). By sprawdzi, co posiadasz zainstalowane, wpisz "aspell config" po czym znajdz pliki .dat w katalogu "data-dir". </br>8The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.
confgui::ConfTopPanelWJzyki dla ktrych sBownik rozszerzenia ciosania<br> (stemming) bdzie zbudowany.</br>NThe languages for which stemming expansion
dictionaries will be built.
confgui::ConfTopPanelWLista katalogw rekursywnego indeksowania. Domy[lnie: Twj katalog domowy.LThe list of directories where recursive indexing starts. Default: your home.confgui::ConfTopPanelWNazwa katalogu przechowania indeksu<br>Nieabsolutna [cie|ka jest brana wzgldnie do katalogu konfiguracji. Domy[lnie jest to "xapiandb".</br>The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
confgui::ConfTopPanelWDTe katalogi s wykluczone z indeksowania.<br> Dozwolone wieloznaczniki. Musz odpowiada [cie|kom znanym indekserowi (np.: je[li szczytowy katalog zawiera "/home/ja" i "/home" jest linkiem do "/usr/home", to poprawna [cie|ka to "/home/ja/tmp*", natomiast bBdna to "/usr/home/ja/tmp*")</br>(These are names of directories which indexing will not enter.
May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
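
A minimal configuration sketch matching the example above (the parameter names topdirs and skippedPaths are assumed from the wording of this text, and the values are illustrative only):

    topdirs = /home/me
    skippedPaths = /home/me/tmp*
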
confgui::ConfTopPanelWProcent zu|ycia dysku po ktrym indeksowanie zostanie przerwane (chroni przed zapeBnieniem dysku).<br>0 oznacz brak limitu (domy[lnie).</br>This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).
0 means no limit (this is the default).
confgui::ConfTopPanelW$Ustawia prg indeksowania danych zanim zostan wysBane na dysk.<br>Odpowiada za kontrolowanie zu|ycia pamici przez indekser. Domy[lnie: 10MB</br>This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB
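
Assuming the corresponding configuration variable is named idxflushmb (the name is not given in this text, so this is an assumption), keeping the 10 MB default would read:

    idxflushmb = 10
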
confgui::ConfTopPanelWWarto[ ta ustawia ilo[ komunikatw,<br>od prostych bBdw a| po mnogie informacje diagnostyczne.</br>_This value adjusts the amount of messages,
from only errors to a lot of debugging data.
confgui::ConfTopPanelW$Szczytowe katalogiTop directoriesconfgui::ConfTopPanelWUnac exceptionsUnac exceptionsconfgui::ConfTopPanelW&Anuluj&CanceluiPrefsDialogBase&Ok&OKuiPrefsDialogBase <BR />
uiPrefsDialogBase<PRE />
uiPrefsDialogBase*<PRE> + zawijaj</PRE>
 + wrap
uiPrefsDialogBase|Wyszukanie dla [rolling stones] (2 terminy) zostanie zamienione na [rolling or stones or (rolling phrase 2 stones)]. To powinno da pierwszeDstwo wynikom, dokBadnie tak jak zostaBy wpisane.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBase>Oddzielacz snipetu streszczeniaAbstract snippet separatoruiPrefsDialogBase Aktywuj wszystko Activate AlluiPrefsDialogBaseDodaj indeks Add indexuiPrefsDialogBaseZastosuj zmiany Apply changesuiPrefsDialogBasedProste szukanie gdy u|yto biBych znakw we wpisie.-Auto-start simple search on whitespace entry.uiPrefsDialogBase\Automatycznie dodaj fraz do szukania prostego+Automatically add phrase to simple searchesuiPrefsDialogBase^Procentowy prg czsto[ci dla terminu Autofrazy.Autophrase term frequency threshold percentageuiPrefsDialogBaseWybierzChooseuiPrefsDialogBase0Wybierz edytor aplikacjiChoose editor applicationsuiPrefsDialogBaseKliknij by doda kolejny katalog do listy. Mo|esz wybra zarwno katalog konfiguracji Recoll jak i indeks Xapian.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBase2Format daty (strftime(3))Date format (strftime(3))uiPrefsDialogBase$Deaktywuj wszystkoDeactivate AlluiPrefsDialogBaseNWyBcz podpowiedz Qt dla wpisu szukania*Disable Qt autocompletion in search entry.uiPrefsDialogBasePorzu zmianyDiscard changesuiPrefsDialogBaseWy[wietl filtr kategorii jako pasek zamiast panelu (wymagany restart).KDisplay category filter as toolbar instead of button panel (needs restart).uiPrefsDialogBasezTworzy sztuczne streszczenie nawet je[li dokument ma wBasne?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBaseMam budowa streszczenie dla wynikw po przez u|ycie kontekstu teminw zapytania? Mo|e zwalnia dla du|ych dokumentw.zDo we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents.uiPrefsDialogBase<Buduj streszczenia dynamicznieDynamically build abstractsuiPrefsDialogBaseLZmieD nagBwek HTML dla strony wynikw#Edit result page html header insertuiPrefsDialogBaseBZmieD format paragrafu dla wyniku#Edit result paragraph format stringuiPrefsDialogBase WBczEnableuiPrefsDialogBase$Zewntrzne indeksyExternal IndexesuiPrefsDialogBasePrg czstotliow[ci procentowej dla ktrej terminy wew. autofrazy nie s u|ywane. Czste terminy s powodem sBabej wydajno[ci fraz. Pominite terminy zwikszaj rozlu|nienie frazy oraz zmniejszanj wydajno[ autofrazy. Domy[lna warto[ to 2 (%).Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10 Helvetica-10uiPrefsDialogBase6Ukryj duplikaty w wynikach.Hide duplicate results.uiPrefsDialogBase:Pod[wietl terminy z zapytaniaHighlight color for query termsuiPrefsDialogBasexWy[wietl tylko raz gdy tak sama zawarto[ (cho r|ne nazwy)XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseLinie w PRE nie s zwijane. U|ycie BR zaciera wcicia. PRE + Zawijaj styl mo|e by tym co szukasz.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBaselMaks. 
rozmiar tekstu dla wyr|nienia w podgldzie (MB)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBase2Liczba wynikw na stronie"Number of entries in a result pageuiPrefsDialogBase@Otwrz okno by wybra snipet CSSAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBase8Otwiera okno wyboru czcionek-Opens a dialog to select the result list fontuiPrefsDialogBaseDOtwiera okno wyboru arkusza stylw-Opens a dialog to select the style sheet fileuiPrefsDialogBase"Zcie|ki tBumaczeDPaths translationsuiPrefsDialogBaseDStyl linii czystego tekstu do HTMLPlain text to HTML line styleuiPrefsDialogBase\U|yj HTML (zamiast czysty tekst) dla podgldu.&Prefer Html to plain text for preview.uiPrefsDialogBase\Magiczne przyrostki nazw plikw jzyka zapytaD(Query language magic file name suffixes.uiPrefsDialogBase2Pamitaj stan sortowania.Remember sort activation state.uiPrefsDialogBase^UsuD z listy. Brak skutku dla indeksu na dysku.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase UsuD zaznaczenieRemove selecteduiPrefsDialogBase@ZamieD streszczenia z dokumentw Replace abstracts from documentsuiPrefsDialogBase ResetResetuiPrefsDialogBase Reset stylu okna Resets the Snippets window styleuiPrefsDialogBaseFReset czcionki wynikw do domy[lnej1Resets the result list font to the system defaultuiPrefsDialogBaseDReset arkusza stylw do domy[lnych!Resets the style sheet to defaultuiPrefsDialogBaseLista wynikw Result ListuiPrefsDialogBase,Czcionka listy wynikwResult list fontuiPrefsDialogBase.Szukaj podczas pisania.Search as you type.uiPrefsDialogBase$Parametry szukaniaSearch parametersuiPrefsDialogBase$Okno snippetw CSSSnippets window CSS fileuiPrefsDialogBaseRRozpocznij oknem zaawansowanego szukania.'Start with advanced search dialog open.uiPrefsDialogBaseJzyk ciosaniaStemming languageuiPrefsDialogBaseArkusz stylw Style sheetuiPrefsDialogBaseTKontekstowe wyrazy sztucznego streszczenia Synthetic abstract context wordsuiPrefsDialogBaseVRozmiar sztucznego streszczenia (w znakach)$Synthetic abstract size (characters)uiPrefsDialogBaseTeksty powy|ej tego rozmiaru bd ukryte w podgldzie (zbyt wolne).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseWyrazy z listy zostan automatycznie zmienione w klauzule ext:xxx we wpisach jzyka zapytaD.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase$Odwrc zaznaczenieToggle selecteduiPrefsDialogBase WygldUser interfaceuiPrefsDialogBaseUstawieniaUser preferencesuiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_zh.qm0000644000175000017500000020173013566424763014257 00000000000000.cDo2XMbXTZ`^^ltnwРwvc xC LoSI׷  5p#^vvϴw 5w 5׌w 5w 5߫wU +.Xֳޠ6Rf3 ͼug>g;UU[U$e  r!D&&.NY=d?dFG$JUYxJUYYݰ[u_n~lu9ʷʗʗĤ^_LxKJgjAB J<.dJ;[<ՔL.e 2vh(PNb"3 Z rIUIn2n+ffif؄WUWTy%Sy%΄B:$#X|WR̔ R -Z 9-)TB9 BS\knor?)r4|ak,=<>5AS Pâ  MRIǢI^ʅ~C >`" _UKp h|u9sv2T#KN %1ë7u[X nD_w)-C#+7I^FW#oFNH:"_gurawOϗӁn  lÓtpÓtHȍ&Ɇt3#̭:u?QM}};ET0,Өc 䴥 H 9Zyض ;3 K0 ]#O kV lM k t Ε | ! 9s ü> [ *N c *RT +< 6 G.~r `P: ` cEU d8 y I 1 C+  ɝ  + ԅ yeS TH _ =!t Kj< h Σʴ V ۷  Vd 'Иz /v 97 9ɝ L*X P֙ RVK T#" V \iC$ ]K `F h v {l !Yl !Y . # L B ~ ܂ mr 'Rr - 8e F` OEq u0o y y~4 3 ȩC u u P P 5dQ S' 7! 
H £ qo%n/.<UQ~\Y~s[sͥ\Sg3p~V !cm cp||' lş$LZiQhgaN All clauses AdvSearchNagaN Any clause AdvSearcheN\:[nVhvTSUOMN kcxn$Bad multiplier suffix in size filter AdvSearch YZOSeNmedia AdvSearchNmessage AdvSearchQv[other AdvSearchoy:ehc presentation AdvSearchu5[Phh< spreadsheet AdvSearchu5[Phh< spreadsheets AdvSearcheg,eNtext AdvSearcheg,texts AdvSearch !yRQh <----- All AdvSearchBase !yR N-y <----- Sel AdvSearchBasemRgaN Add clause AdvSearchBasezd}"Advanced search AdvSearchBase yRQh! All ----> AdvSearchBaseSvb@g ^zz[WkOc qg;N QhgaN  y b;b NagaN  y ge~T0<br> Na  Qh T e N y[Wk|{WcS׏Qe{SU͋TS_S_u(v~v~T0<br>zzvQehFO_ue0All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBasemOBrowse AdvSearchBase c Y'|{gen By categories AdvSearchBase N-N* NO[egۈLn'Check this to enable filtering on dates AdvSearchBase" N-N* NO[eN|{WۈLn,Check this to enable filtering on file types AdvSearchBase" N-N* NO[eN\:[ۈLn'Check this to enable filtering on sizes AdvSearchBase4 N-N* NOOu(Y'vR|{ N Ou(QwOSveN|{W;Check this to use file categories instead of raw mime types AdvSearchBaseQsClose AdvSearchBaseR dgaN Delete clause AdvSearchBaseQed}"vgN \Bv_UEnter top directory for search AdvSearchBasenFilter AdvSearchBaseneg Filter dates AdvSearchBasen\:[ Filter sizes AdvSearchBasegb~Find AdvSearchBaseNFrom AdvSearchBase_ueveN|{WIgnored file types AdvSearchBase S͏lngaNInvert AdvSearchBasegY'\:[ Max. Size AdvSearchBase0gY'\:[0O`SOu(k/K0m/M0g/GO\N:SUOM4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseg\\:[ Min. Size AdvSearchBase0g\\:[0O`SOu(k/K0m/M0g/GO\N:SUOM4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase P[eN|{WRestrict file types AdvSearchBase"\~gN-veNP[W(kd[Pv_UhN-%Restrict results to files in subtree: AdvSearchBase O[XN:؋P<Save as default AdvSearchBase(d}"<br>n፳NN gaN<br>vehc'Search for
documents
satisfying: AdvSearchBase\d}"veN|{WSearched file types AdvSearchBase yR N-y! Sel -----> AdvSearchBase_Yd}" Start Search AdvSearchBaseR0To AdvSearchBaseAspellAspell language ConfIndexWelQQeMneNCan't write configuration file ConfIndexW epcn^v_UT Database directory name ConfIndexWvykbW(͋c}"VhN-Ou(aspellgeubbQvv͋0<br>W(lg [aspellb[]O\N kc^8eOu(N* y0Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexWQh\@SepGlobal parameters ConfIndexWR7e}"_v QF[WIndex flush megabytes interval ConfIndexW\@SepLocal parameters ConfIndexW _UeNT  Log file name ConfIndexW_Uvu~R+Log verbosity level ConfIndexWQu[XPvgY'\:[MB  Max. size for the web store (MB) ConfIndexWN Ou(aspellNo aspell usage ConfIndexWd}"SepSearch parameters ConfIndexW uev_ Skipped paths ConfIndexWh9Stemming languages ConfIndexWTz ^Qvm`oOO[XR0N*eN0<br>Ou('stderr'Nhy:\m`oQR0~zPThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexW*\O[N<br>g h9bi\UQx0IThe languages for which stemming expansion
dictionaries will be built. ConfIndexW:}"_NΏN*RhN-vv_U_Y _RW0ۈL0؋O`v[v_U0LThe list of directories where recursive indexing starts. Default: your home. ConfIndexWbu(geP[XY R6gev]򋿕Quvv_UT 0<br>YgOu(v[_ ROv[NMnv_Uv_ۈLYt0The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexWvN*P<etvf/ _Sy}/TY\}"_epcne bM\epcnR7eR0xlvN S0<br>u(gecR6}"_z vQ[XS`u(`Q0؋N:10MBThis value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWTN*P<etvf/Qvm`ovep <br>Qv~R+NNŏQbO`oR0QNY'XO`o0ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexWv~v_UTop directories ConfIndexWQuP[Xv_UT Web page store directory name ConfIndexWQh\@Global ConfSubPanelW|YgnTN*P<N {IN-1 Reg,eNORRrbNHY'vWW ^vNۈL}"_0 f/u(ged}"Y'Weg,eNvOY_UeN 0If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWS)eNgY'\:[KB Max. compressed file size (KB) ConfSubPanelWeg,eNgY'\:[MB Max. text file size (MB) ConfSubPanelWeg,eNSUu\:[KB Text file page size (KB) ConfSubPanelWd\:[Y'NN*P <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T19:47:37" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T19:56:53" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } .T3 { font-style:italic; } .T4 { font-family:Courier New,courier; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> by}"_RNR(cron) </p><p class="P1">kN*[WkSNSbNN*M{&(*)0SUN*ep[WP<0SRvRh(1,3,5)TV(1-7)0fQxnW0 N[WkO<span class="T3">c Sh7</span>QR0crontab eNN- VkdّSNOu(crontab vb@g l S€crontab(5)0</p><p class="P1"><br/>OY W(<span class="T3">eg</span>N-Qe<span class="T4">*</span> <span class="T3">\e</span>N-Qe<span class="T4">12,19</span> <span class="T3">R</span>N-Qe<span class="T4">15 </span>v OW(kY)v12:15 AM T7:15 PMT/Rrecollindex0</p><p class="P1">NN*~AbgLvRNR Qv`'SkN N [e}"_0</p></body></html> 

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used; see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 PM and 7:15 PM.

A schedule with very frequent activations is probably less efficient than real time indexing.
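
Following the field description above, the Days=*, Hours=12,19, Minutes=15 example corresponds to a crontab entry of roughly this shape (the exact command line written by the dialog is not shown in this text, so the command part is only indicative):

    # minute  hour   day-of-month  month  day-of-week  command
    15        12,19  *             *      *            recollindex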

 CronToolW<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:08:00" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:11:47" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1">pQ<span class="T2">yu(</span>NP\kbۈLRSvby}"_ pQ<span class="T2">T/u(</span>NT/u(kdR pQ<span class="T2">Sm</span>RN e9SNOUN0</p></body></html> 

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWRNR[hF Cron Dialog CronToolW&fge(*b0-7 0b7f/cfgY)))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWyu(Disable CronToolWT/u(Enable CronToolW$cQecrongaveQ0hgl03Error installing cron entry. Bad syntax in fields ? CronToolW\e(*b0-23)Hours (* or 0-23) CronToolWPw wge]~g bKRvrecollindexgavN VkdelcrontabPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWR(0-59)Minutes (0-59) CronToolW[hFDialog EditDialogSmCancel EditTransBase<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:14:44" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:23:13" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-weight:bold; } .T4 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T2">g*b~R0[^NkdMn[Ov}"_epcn0</span><br/><br/>YgO`S`NN~Ttv؋Sepge}"_O`v[v_Uv \1vcc <span class="T4">zSs_Y}"_</span>c 0NTSNetMnSepv0</p><p class="P1">YgO``etgNNv \1Ou(N bvcgeetQvN-v}"_MnT[eRT'0</p><p class="P1">N]QwSW(NT<span class="T4"> y</span>SU0</p></body></html> 

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialog{,Nk!}"_nFirst indexing setupFirstIdxDialog}"_MnIndexing configurationFirstIdxDialog [e}"_NRIndexing scheduleFirstIdxDialog zSs_Y}"_Start indexing nowFirstIdxDialog^W(ّSNetO``[QvۈL}"_vv_U NSQv[Sep OYcdT_bT [W0؋[W{& & &This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialogdW(ّSN bf/ۈLby}"_f/[e}"_ SN勾nNN*RSv[eOu(cron by}"_NR0This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialogJ<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:27:11" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:30:49" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> }"_z ^Sc~ЈL^vNW(eNSuSSe[QvۈL}"_ N_SNN[eЈLNk!0</p><p class="P1">O`SNNN bKQ NOfY}W0PZQbbc F1 0</p><p class="P1">N*]QwS^.RO`nNN*RۈLby}"_v[eNR bnb_SO`v{_UeOT/R[e}"_bN$T eۈL _Sq6QNNlg aNI 0</p></body></html> 

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

[Translation messages for the IdxSchedW, PreviewTextEdit, QObject and QWidget contexts; the English source strings duplicate those listed in recoll_es.ts below.]

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.
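The daemon described above can also be started by hand. A hedged sketch, assuming the -m (monitor) option is what puts recollindex into real-time mode:

    # Run the indexer as a monitoring daemon: it watches the configured
    # directories and updates the index as files change.
    recollindex -m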

[Translation messages for the RTIToolW, RclMain, RclMainBase, RecollModel, ResList, ResTable, SearchClauseW, SnippetsW, SpellBase, SpellW, UIPrefsDialog, ViewAction, confgui and uiPrefsDialogBase contexts; the English source strings largely duplicate those listed in recoll_es.ts below.]
recoll-1.26.3/qtgui/i18n/recoll_es.ts0000644000175000017500000050671413566424763014260 00000000000000 AdvSearch All clauses Todas las cláusulas Any clause Cualquier cláusula texts textos spreadsheets hojas de cálculo presentations presentaciones media medios messages mensajes other otros Bad multiplier suffix in size filter Sufijo multiplicador incorrecto en filtro de tamaño text texto spreadsheet hoja de cálculo presentation presentación message mensaje AdvSearchBase Advanced search Búsqueda avanzada Search for <br>documents<br>satisfying: Buscar documentos<br>que satisfagan: Delete clause Borrar cláusula Add clause Añadir cláusula Restrict file types Restringir tipos de archivo Check this to enable filtering on file types Marque esto para habilitar filtros en tipos de archivos By categories Por categorías Check this to use file categories instead of raw mime types Marque esto para usar categorías en lugar de tipos MIME Save as default Guardar como predeterminado Searched file types Tipos de archivos buscados All ----> Todos ----> Sel -----> Sel -----> <----- Sel <----- Sel <----- All <----- Todos Ignored file types Tipos de archivos ignorados Enter top directory for search Ingrese directorio inicial para la búsqueda Browse Buscar Restrict results to files in subtree: Restringir resultados a archivos en subdirectorio: Start Search Iniciar búsqueda Close Cerrar All non empty fields on the right will be
combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Todos los campos no vacíos a la derecha serán combinados con conjunciones AND (opción "Todas las cláusulas") o OR (opción "Cualquier cláusula").<br>Los campos "Cualquiera", "Todas" y "Ninguna" pueden aceptar una mezcla de palabras simples y frases dentro de comillas dobles.<br>Campos sin datos son ignorados. Invert Invertir Minimum size. You can use k/K,m/M,g/G as multipliers Tamaño mínimo. Puede utilizar k/K, m/M o g/G como multiplicadores Min. Size Tamaño Mínimo Maximum size. You can use k/K,m/M,g/G as multipliers Tamaño máximo. Puede utilizar k/K, m/M o g/G como multiplicadores Max. Size Tamaño máximo Filter Filtro Check this to enable filtering on dates Marque esto para habilitar filtros en fechas Filter dates Filtrar fechas From Desde To Hasta Find Buscar Check this to enable filtering on sizes Marque esto para habilitar filtros en tamaños Filter sizes Filtro de tamaños ConfIndexW Can't write configuration file No se puede escribir archivo de configuración Global parameters Parámetros globales Local parameters Parámetros locales Search parameters Parámetros de búsqueda Top directories Directorios primarios The list of directories where recursive indexing starts. Default: your home. La lista de directorios donde la indexación recursiva comienza. Valor por defecto: su directorio personal. Skipped paths Directorios omitidos These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Lenguajes para raíces The languages for which stemming expansion<br>dictionaries will be built. Los lenguajes para los cuales los diccionarios de expansión de raíces serán creados. Log file name Nombre de archivo de registro The file where the messages will be written.<br>Use 'stderr' for terminal output El archivo donde los mensajes serán escritos.<br>Use 'stderr' para salida a la terminal Log verbosity level Nivel de verbosidad del registro This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Este valor ajusta la cantidad de mensajes,<br>desde solamente errores hasta montones de información de depuración. Index flush megabytes interval Intervalo en megabytes de escritura del índice This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Este valor ajusta la cantidad de datos indexados entre escrituras al disco.<br> Esto ayuda a controlar el uso de memoria del indexador. Valor estándar 10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage No utilizar aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deshabilita el uso de aspell para generar aproximaciones ortográficas en la herramienta explorador de términos.<br>Útil si aspell no se encuentra o no funciona. 
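The global entries described above (top directories, skipped paths, stemming languages, log file and verbosity, index flush interval) are stored in the recoll.conf file of the configuration directory. A minimal sketch, assuming the usual parameter names; the values are only illustrations:

    # ~/.recoll/recoll.conf (assumed location and parameter names)
    topdirs = ~/Documents ~/Mail        # where recursive indexing starts
    skippedPaths = ~/Documents/tmp*     # directories never entered; wildcards allowed
    indexstemminglanguages = english spanish
    logfilename = stderr                # 'stderr' sends messages to the terminal
    loglevel = 3
    idxflushmb = 10                     # megabytes indexed between flushes to disk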
Aspell language Lenguaje Aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. El lenguaje para el diccionario aspell. Esto debería ser algo como 'en' o 'fr' ...<br>Si no se establece este valor, el ambiente NLS será utilizado para calcularlo, lo cual usualmente funciona. Para tener una idea de lo que está instalado en sus sistema, escriba 'aspell-config' y busque archivos .dat dentro del directorio 'data-dir'. Database directory name Nombre del directorio de base de datos The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. El nombre de un directorio donde almacenar el índice.<br>Una ruta no absoluta se interpreta como relativa al directorio de configuración. El valor por defecto es 'xapiandb'. Unac exceptions Excepciones Unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Estas son excepciones al mecanismo unac, el cual, de forma predeterminada, elimina todos los diacríticos, y realiza una descomposición canónica. Es posible prevenir la eliminación de acentos para algunos caracteres, dependiendo de su lenguaje, y especificar descomposiciones adicionales, por ejemplo, para ligaturas. En cada entrada separada por espacios, el primer caracter es el origen, y el resto es la traducción. Process the WEB history queue Procesar la cola del historial WEB Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Habilita la indexación de páginas visitadas en Firefox.<br>(necesita también el plugin Recoll para Firefox) Web page store directory name Nombre del directorio del almacén para páginas web The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. El nombre del directorio dónde almacenar las copias de páginas web visitadas.<br>Una ruta de directorio no absoluta es utilizada, relativa al directorio de configuración. Max. size for the web store (MB) Tamaño máximo para el almacén web (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity Sensibilidad automática de diacríticos <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Habilitar automáticamente la sensibilidad de diacríticos si el término de búsqueda tiene caracteres acentuados (no presentes en unac_except_trans). De otra forma necesita usar el lenguage de búsqueda y el modificador <i>D</i> para especificar la sensibilidad de los diacríticos. 
Automatic character case sensitivity Sensibilidad automática a la distinción de mayúsculas/minúsculas de los caracteres <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Habilitar automáticamente la sensibilidad a las mayúsculas/minúsculas si la entrada tiene caracteres en mayúscula en una posición distinta al primer caracter. De otra forma necesita usar el lenguaje de búsqueda y el modificador <i>C</i> para especificar la sensibilidad a las mayúsculas y minúsculas. Maximum term expansion count Máximo conteo de expansión de términos <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Máxima expansión de conteo para un solo término (ej: cuando se usan comodines). El valor por defecto de 10000 es razonable y evitará consultas que parecen congelarse mientras el motor de búsqueda recorre la lista de términos. Maximum Xapian clauses count Máximo conteo de cláusulas de Xapian <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Número máximo de cláusulas elementales agregadas a una consulta de Xapian. En algunos casos, el resultado de la expansión de términos puede ser multiplicativo, y deseamos evitar el uso excesivo de memoria. El valor por defecto de 100000 debería ser lo suficientemente alto en la mayoría de los casos, y compatible con las configuraciones de hardware típicas en la actualidad. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) Tamaño máximo de archivo comprimido (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Este valor establece un umbral mas allá del cual los archivos<br>comprimidos no serán procesados. Escriba 1 para no tener límite,<br>o el número 0 para nunca hacer descompresión. Max. text file size (MB) Tamaño máximo para archivo de texto (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Este valor establece un umbral más allá del cual los archivos de texto no serán procesados.<br>Escriba 1 para no tener límites. Este valor es utilizado para excluir archivos de registro gigantescos del índice. Text file page size (KB) Tamaño de página para archivo de texto (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Si se utiliza este valor (diferente de -1), los archivos de texto serán separados en partes de este tamaño para ser indexados. Esto ayuda con las búsquedas de archivos de texto muy grandes (ej: archivos de registro). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. 
Filtros externos que se ejecuten por más tiempo del establecido serán detenidos. Esto es por el caso inusual (ej: postscript) dónde un documento puede causar que un filtro entre en un ciclo infinito. Establezca el número -1 para indicar que no hay límite. Global Global CronToolW Cron Dialog Ventana de Cron <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> horario de indexado por lotes (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cada campo puede contener un comodín (*), un valor numérico único, listas separadas por comas (1,3,5) y rangos (1-7). 
Más generalmente, los campos serán usados <span style=" font-style:italic;">tal como son</span> dentro del archivo crontab, y toda la sintaxis crontab puede ser usada, ver crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Por ejemplo, ingresar <span style=" font-family:'Courier New,courier';">*</span> en <span style=" font-style:italic;">Días, </span><span style=" font-family:'Courier New,courier';">12,19</span> en <span style=" font-style:italic;">Horas</span> y <span style=" font-family:'Courier New,courier';">15</span> en <span style=" font-style:italic;">Minutos</span> iniciaría recollindex cada día a las 12:15 AM y 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Un horario con activaciones frecuentes es probablemente menos eficiente que la indexación en tiempo real.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Días de la semana (* o 0-7, 0 o 7 es Domingo) Hours (* or 0-23) Horas (* o 0-23) Minutes (0-59) Minutos (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Presione <span style=" font-style:italic;">Deshabilitar</span> para detener la indexación automática por lotes, <span style=" font-style:italic;">Habilitar</span> para activarla, <span style=" font-style:italic;">Cancelar</span> para no cambiar nada.</p></body></html> Enable Habilitar Disable Deshabilitar It seems that manually edited entries exist for recollindex, cannot edit crontab Parece ser que existen entradas para recollindex editadas manualmente, no se puede editar crontab Error installing cron entry. Bad syntax in fields ? Error al instalar entrada de cron. Sintaxis incorrecta en los campos? 
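Translating the dialog's own example (* in Days, 12,19 in Hours, 15 in Minutes) into the crontab entry it would install gives roughly the line below; the exact command and options written by the tool may differ:

    # minute  hour  day-of-month  month  day-of-week  command
    15 12,19 * * * recollindex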
EditDialog Dialog Ventana de diálogo EditTrans Source path Ruta de origen Local path Ruta local Config error Error de configuración Original path Ruta original EditTransBase Path Translations Ruta de traducciones Setting path translations for Establecer ruta de traducciones para Select one or several file types, then use the controls in the frame below to change how they are processed Seleccione uno o más tipos de archivos, y use los controles en la caja abajo para cambiar cómo se procesan Add Añadir Delete Borrar Cancel Cancelar Save Guardar FirstIdxDialog First indexing setup Primera configuración de indexación <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Parece ser que el índice para esta configuración no existe.</span><br /><br />Si solamente desea indexar su directorio personal con un conjunto de valores iniciales razonables, presione el botón <span style=" font-style:italic;">Iniciar indexación ahora</span>. 
Es posible ajustar los detalles más tarde.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Si necesita más control, use los enlaces siguientes para ajustar la configuración de indexación y el horario.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Estas herramientas pueden ser accedidas luego desde el menú <span style=" font-style:italic;">Preferencias</span>.</p></body></html> Indexing configuration Configuración de indexación This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Esto le permite ajustar los directorios que quiere indexar y otros parámetros, como rutas de archivos o nombres excluidos, conjuntos de caracteres estándar, etc. Indexing schedule Horario de indexación This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Esto le permite escoger entre indexación en tiempo real y por lotes, y configurar un horario automático para indexar por lotes (utilizando cron). Start indexing now Iniciar indexación ahora FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup Configuración de horario de indexación <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">La indexación de <span style=" font-weight:600;">Recoll</span> puede ejecutarse permanentemente, indexando archivos cuando cambian, o puede ejecutarse en intervalos discretos. 
</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Leer el manual puede ayudarle a decidir entre estos dos métodos (presione F1).</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Esta herramienta puede ayudarle a configurar un horario para automatizar la ejecución de indexación por lotes, o iniciar la indexación en tiempo real cuando inicia la sesión (o ambos, lo cual rara vez tiene sentido).</p></body></html> Cron scheduling Horario de Cron The tool will let you decide at what time indexing should run and will install a crontab entry. Esta herramienta le permite decidir a qué hora la indexación se ejecutará e instalará una entrada en el crontab. Real time indexing start up Inicio de la indexación en tiempo real Decide if real time indexing will be started when you log in (only for the default index). Decida si la indexación en tiempo real será ejecutada cuando inicie la sesión (solo para el índice estándar). ListDialog Dialog Ventana de diálogo GroupBox Cuadro de grupo Main No db directory in configuration Directorio de base de datos no está configurado Could not open database in No se puede abrir base de datos en . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. Presione Cancelar si desea editar la configuración antes de indexar, o Ok para proceder. "history" file is damaged or un(read)writeable, please check or remove it: El archivo de historial esta dañado o no se puede leer, por favor revíselo o bórrelo: "history" file is damaged, please check or remove it: Preview Close Tab Cerrar Pestaña Cannot create temporary directory No se puede crear directorio temporal Cancel Cancelar Missing helper program: Programa ayudante faltante: Can't turn doc into internal representation for No se puede convertir documento a representación interna para Creating preview text Creando texto de vista previa Loading preview text into editor Cargando texto de vista previa en el editor &Search for: &Buscar por: &Next &Siguiente &Previous &Previo Clear Limpiar Match &Case &Coincidir mayúsculas y minúsculas Cannot create temporary directory: No se puede crear directorio temporal: Error while loading file Error al cargar archivo Form Tab 1 Open Abrir Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Mostrar campos Show main text Mostrar texto principal Print Imprimir Print Current Preview Imprimir vista previa actual Show image Mostrar imagen Select All Seleccionar todo Copy Copiar Save document to file Guardar documento en un archivo Fold lines Doblar líneas Preserve indentation Preservar indentación Open document QObject Global parameters Parámetros globales Local parameters Parámetros locales <b>Customised subtrees <b>Subdirectorios personalizados The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. 
La lista de subdirectorios en la jerarquía indexada<br>dónde algunos parámetros necesitan ser definidos. Valor por defecto: vacío. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Los parámetros siguientes se aplican a nivel superior, si una línea vacía<br>o ninguna es seleccionada en el listado arriba, o para cada directorio seleccionado.<br>Puede añadir o remover directorios presionando los botones +/-. Skipped names Nombres omitidos These are patterns for file or directory names which should not be indexed. Estos son patrones de nombres de archivos o directorios que no deben ser indexados. Default character set Conjunto de caracteres por defecto This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Este es el conjunto de caracteres usado para leer archivos que no son identificados internamente, por ejemplo, archivos de texto puro.<br>El valor por defecto está vacío, y el valor del ambiente NLS es usado. Follow symbolic links Seguir enlaces simbólicos Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Seguir enlaces simbólicos al indexar. El valor por defecto es no, para evitar indexar duplicados Index all file names Indexar todos los nombres de archivos Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indexar los nombres de los archivos para los cuales los contenidos no pueden ser<br>identificados o procesados (tipo MIME inválido o inexistente). El valor por defecto es verdadero Beagle web history Historial web Beagle Search parameters Parámetros de búsqueda Web history Historial Web Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Open &Abrir Open With Run Script Copy &File Name Copy &URL Copiar &URL &Write to File Save selection to files Guardar selección a archivos Preview P&arent document/folder &Open Parent document/folder &Abrir documento/directorio ascendente Find &similar documents Buscar documentos &similares Open &Snippets window Abrir ventana de &fragmentos Show subdocuments / attachments Mostrar subdocumentos / adjuntos QxtConfirmationMessage Do not show again. 
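The per-subtree entries above (skipped names, follow symbolic links, index all file names, ignored endings) can also be edited by hand in recoll.conf, where a bracketed directory section overrides the top-level values. A sketch under the assumption that these GUI labels map to the parameter names below:

    skippedNames = *~ #* .git           # top level: file/directory names never indexed
    [~/Documents/archive]               # overrides applying to this subtree only
    followLinks = 1
    indexallfilenames = 0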
RTIToolW Real time indexing automatic start Inicio automático de la indexación en tiempo real <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">La indexación de <span style=" font-weight:600;">Recoll</span> puede configurarse para ejecutar como un demonio, actualizando el índice cuando los archivos cambian, en tiempo real. Obtiene un índice actualizado siempre, pero los recursos del sistema son utilizados permanentemente.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Iniciar el demonio de indexación con mi sesión de escritorio. Also start indexing daemon right now. También iniciar demonio de indexación ahora mismo. Replacing: Reemplazando: Replacing file Reemplazando archivo Can't create: No se puede crear: Warning Advertencia Could not execute recollindex No se puede ejecutar recollindex Deleting: Borrando: Deleting file Borrando archivo Removing autostart Eliminando autoinicio Autostart file deleted. Kill current process too ? Archivo de autoinicio borrado. Detener el proceso actual también? 
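The "Start indexing daemon with my desktop session" option appears to manage an autostart file (cf. the "Removing autostart" and "Autostart file deleted" messages above). A hypothetical sketch of the XDG autostart entry it could correspond to; the path and Exec line actually written by the dialog may differ:

    # ~/.config/autostart/recollindex.desktop (hypothetical)
    [Desktop Entry]
    Type=Application
    Name=Recoll real-time indexer
    Exec=recollindex -m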
RclMain (no stemming) (sin raíces) (all languages) (todos los lenguajes) error retrieving stemming languages error al recuperar lenguajes para raíces Indexing in progress: Indexación en progreso: Files Ficheros Purge Stemdb Raízdb Closing Cerrando Unknown Desconocido Can't start query: No se puede iniciar la consulta: Query results Resultados de búsqueda Cannot retrieve document info from database No se puede recuperar información del documento de la base de datos Warning Advertencia Can't create preview window No se puede crear ventana de vista previa This search is not active any more Esta búsqueda no está activa Bad viewer command line for %1: [%2] Please check the mimeconf file Línea de comando incorrecta de visualizador para %1: [%2] Por favor revise el fichero mimeconf Cannot extract document or create temporary file No se puede extraer el documento o crear archivo temporal Executing: [ Ejecutando: [ About Recoll Acerca de Recoll History data Datos de historial Document history Historial de documentos Update &Index Actualizar &Índice Indexing interrupted Indexación interrumpida Stop &Indexing Detener &Indexación All Todo media medios message mensaje other otro presentation presentación spreadsheet hoja de cálculo text texto sorted ordenado filtered filtrado External applications/commands needed and not found for indexing your file types: Aplicaciones/comandos externos necesarios y no encontrados para indexar sus tipos de fichero: No helpers found missing Missing helper programs Programas ayudantes faltantes Document category filter Filtro de categorías de documentos No external viewer configured for mime type [ No hay visualizador configurado para tipo MIME [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Can't access file: No se puede accesar el archivo: Can't uncompress file: No se puede descomprimir el archivo: Save file Guardar archivo Result count (est.) Conteo de resultados (est.) Query details Detalles de búsqueda Could not open external index. Db not open. Check external indexes list. No se puede abrir índice externo. Base de datos no abierta. Revise listado de índices externos. No results found No hay resultados None Ninguno Updating Actualizando Done Hecho Monitor Monitor Indexing failed Indexación falló The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone El proceso de indexación actual no se inicio desde esta interfaz. Presione Ok para detenerlo, o Cancelar para dejarlo ejecutar Erasing index Borrando índice Reset the index and start from scratch ? Restaurar el índice e iniciar desde cero? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Consulta en progreso.<br>Debido a limitaciones en la librería de indexación,<br>cancelar terminará el programa Error Error Index not open Índice no está abierto Index query error Error de consulta del índice Indexed Mime Types Tipos MIME indexados Content has been indexed for these mime types: Los contenidos han sido indexados para estos tipos MIME: Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Índice no actualizado para este fichero. No mostrado para evitar utilizar la entrada errónea. Presione Ok para actualizar el índice para este fichero, luego ejecute la consulta de nuevo cuando la indexación termine. 
En caso contrario, presione Cancelar. Can't update index: indexer running No se puede actualizar el índice: indexador en ejecución Indexed MIME Types Tipos MIME indexados Bad viewer command line for %1: [%2] Please check the mimeview file Línea de comando incorrecta de visualizador para %1: [%2] Por favor revise el archivo mimeconf Viewer command line for %1 specifies both file and parent file value: unsupported Línea de comandos del visualizador para %1 especifica valores para el archivo y el archivo padre: no soportado Cannot find parent document No se encuentra documento padre Indexing did not run yet La indexación no se ha ejecutado aún External applications/commands needed for your file types and not found, as stored by the last indexing pass in Aplicaciones/comandos externos requeridos por sus tipos de archivos y no encontrados, como se almacenaron en el último pase de indexación en Index not up to date for this file. Refusing to risk showing the wrong entry. El índice no está actualizado para este archivo. Rehusando mostrar la entrada equivocada. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Presione Ok para actualizar el índice para este archivo, y ejecute de nuevo la consulta cuando la indexación termine. En caso contrario, cancele. Indexer running so things should improve when it's done El indexador está en ejecución, así que las cosas deberían mejorar cuando termine Sub-documents and attachments Sub-documentos y adjuntos Document filter The indexer is running so things should improve when it's done. Duplicate documents Documentos duplicados These Urls ( | ipath) share the same content: Estos URLs ( | ipath) comparten el mismo contenido: Bad desktop app spec for %1: [%2] Please check the desktop file Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). 
There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. RclMainBase Recoll Recoll Search tools Herramientas de búsqueda Result list Lista de resultados &File &Archivo &Tools &Herramientas &Preferences &Preferencias &Help &Ayuda E&xit &Salir Ctrl+Q Ctrl+Q Update &index Actualizar &índice &Erase document history Borrar historial de &documentos &About Recoll &Acerca de Recoll &User manual Manual de &Usuario Document &History Historial de &Documentos Document History Historial de Documentos &Advanced Search Búsqueda &Avanzada Advanced/complex Search Búsqueda avanzada/compleja &Sort parameters Parámetros de &ordenamiento Sort parameters Parámetros de ordenamiento Term &explorer &Explorador de términos Term explorer tool Herramienta de exploración de términos Next page Siguiente página Next page of results Página de resultados siguiente First page Primera página Go to first page of results Ir a la primera página de resultados Previous page Página anterior Previous page of results Página de resultados anterior &Query configuration Configuración de &consulta External index dialog Configuración de índices externos &Indexing configuration Configuración de &indexación All Todo &Show missing helpers &Mostrar ayudantes faltantes PgDown AvPág PgUp RePág &Full Screen Pantalla &Completa F11 F11 Shift+Home Mayúsculas+Inicio Full Screen Pantalla Completa &Erase search history Borrar historial de &búsqueda sortByDateAsc ordenarPorFechaAsc Sort by dates from oldest to newest Ordenar por fechas de la más antigua a la más reciente sortByDateDesc ordenarPorFechaDesc Sort by dates from newest to oldest Ordenar por fechas de la más reciente a la más antigua Show Query Details Mostrar resultados de la consulta Show results as table Mostrar resultados tabulados &Rebuild index &Reconstruir índice &Show indexed types &Mostrar tipos indexados Shift+PgUp Mayúsculas+RePág &Indexing schedule &Horario de indexación E&xternal index dialog &Configuración de índices externos &Index configuration &Configuración del Índice &GUI configuration Configuración de &GUI &Results &Resultados Sort by date, oldest first Ordenar por fecha, antiguos primero Sort by date, newest first Ordenar por fecha, recientes primero Show as table Mostrar como tabla Show results in a spreadsheet-like table Mostrar resultados en una tabla similar a una hoja de cálculo Save as CSV (spreadsheet) file Guardar como un archivo CSV (hoja de cálculo) Saves the result into a file which you can load in a spreadsheet Guardar el resultado en un archivo que se puede cargar en una hoja de cálculo Next Page Página Siguiente Previous Page Página Anterior First Page Primera Página Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel Abstract Resumen Author Autor Document size Tamaño del documento Document date Fecha del documento File size Tamaño del archivo File name Nombre del archivo File date Fecha del archivo Ipath Ipath Keywords Palabras clave Mime type Tipo MIME Original character set Conjunto de caracteres original Relevancy rating Calificación de relevancia Title Título URL URL Mtime Fecha Mod Date Fecha Date and time Fecha y hora Ipath Ipath MIME type Tipo MIME Can't sort by 
inverse relevance ResList Result list Lista de resultados (show query) (mostrar consulta) &Preview &Vista Previa Copy &File Name Copiar nombre de &fichero Copy &URL Copiar &URL Find &similar documents Buscar documentos &similares Document history Historial de documentos <p><b>No results found</b><br> <p><b>No hay resultados</b></br> Previous Anterior Next Siguiente Unavailable document Documento no disponible Preview Vista previa Open Abrir <p><i>Alternate spellings (accents suppressed): </i> <p><i>Ortografía alterna (acentos suprimidos): </i> &Write to File &Escribir a fichero Preview P&arent document/folder &Vista previa de documento/directorio ascendente &Open Parent document/folder &Abrir documento/directorio ascendente &Open &Abrir Documents Documentos out of at least de por lo menos for para <p><i>Alternate spellings: </i> <p><i>Escrituras Alternas: </i> Duplicate documents Documentos duplicados These Urls ( | ipath) share the same content: Estos URLs ( | ipath) comparten el mismo contenido: Result count (est.) Conteo de resultados (est.) Query details Detalles de búsqueda Snippets Fragmentos ResTable &Reset sort &Restaurar ordenamiento &Delete column &Borrar columna Add " Añadir " " column " columna Save table to CSV file Guardar tabla a archivo CSV Can't open/create file: No se puede abrir/crear archivo: &Preview &Vista previa &Open &Abrir Copy &File Name Copiar nombre de &fichero Copy &URL Copiar &URL &Write to File &Escribir a fichero Find &similar documents Buscar documentos &similares Preview P&arent document/folder &Vista previa de documento/directorio ascendente &Open Parent document/folder &Abrir documento/directorio ascendente &Save as CSV &Guardar como CSV Add "%1" column Agregar columna "%1" ResTableDetailArea &Preview &Vista previa &Open &Abrir Copy &File Name Copiar nombre de &fichero Copy &URL Copiar &URL &Write to File &Escribir a fichero Find &similar documents Buscar documentos &similares Preview P&arent document/folder &Vista previa de documento/directorio ascendente &Open Parent document/folder &Abrir documento/directorio ascendente ResultPopup &Preview &Previsualización &Open &Abrir Copy &File Name Copiar nombre de &archivo Copy &URL Copiar &URL &Write to File &Escribir a archivo Save selection to files Guardar selección a archivos Preview P&arent document/folder &Vista previa de documento o directorio ascendente &Open Parent document/folder &Abrir documento/directorio ascendente Find &similar documents Buscar documentos &similares Open &Snippets window Abrir ventana de &fragmentos Show subdocuments / attachments Mostrar subdocumentos / adjuntos SSearch Any term Cualquier término All terms Todos los términos File name Nombre de archivo Query language Lenguaje de consulta Bad query string Consulta inválida Out of memory No hay memoria Too many completions Demasiadas finalizaciones Completions Finalizaciones Select an item: Seleccione un ítem: Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). 
Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Ingrese expresión de lenguaje de consulta. Hoja de trucos:<br> <i>term1 term2</i> : 'term1' y 'term2' en cualquier campo.<br> <i>campo:term1</i> : 'term1' en campo 'campo'. <br> Nombres de campos estándar/sinónimos:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-campos: dir, mime/format, type/rclcat, date.<br> Dos ejemplos de intervalo de fechas: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> Los paréntesis no están permitidos en realidad.<br> <i>"term1 term2"</i> : frase (debe aparecer exactamente). Modificadores posibles:<br> <i>"term1 term2"p</i> : busca de proximidad sin orden con distancia estándar.<br> Use el enlace <b>Mostrar Consulta</b> en caso de duda sobre el resultado y vea el manual (&lt;F1>) para más detalles. Enter file name wildcard expression. Ingrese expresión de comodín para nombre de archivo. Enter search terms here. Type ESC SPC for completions of current term. Ingrese términos de búsqueda aquí. Presione ESC ESPACIO para completar el término actual. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear Limpiar Ctrl+S Ctrl+S Erase search entry Borrar entrada de búsqueda Search Búsqueda Start query Iniciar consulta Enter search terms here. Type ESC SPC for completions of current term. Ingrese términos de búsqueda aquí. Presione ESC ESP para completar el término actual. Choose search type. Elija tipo de búsqueda. Show query history SearchClauseW Any of these Cualquiera All of these Todas None of these Ninguna This phrase Frase Terms in proximity Términos en proximidad File name matching Nombre de fichero Select the type of query that will be performed with the words Elija el tipo de consulta que será realizada con las palabras Number of additional words that may be interspersed with the chosen ones Número de palabras adicionales que pueden ser intercaladas con las escogidas No field Ningún campo Any Cualquiera All Todo None Ninguno Phrase Frase Proximity Proximidad File name Nombre de archivo Snippets Snippets Fragmentos about:blank about:blank Find: Buscar: Next Siguiente Prev Anterior SnippetsW Search Buscar <p>Sorry, no exact match was found within limits. 
Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Buscar Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Explorador de términos &Expand &Expandir Alt+E Alt+E &Close &Cerrar Alt+C Alt+C No db info. No hay información de bd. Match Lenguaje Case Distinción de mayúsculas Accents Acentos SpellW Wildcards Comodines Regexp Expresión regular Stem expansion Expansión de raíces Spelling/Phonetic Ortografía/fonética error retrieving stemming languages error al recuperar lenguajes para raíces Aspell init failed. Aspell not installed? Inicialización de Aspell falló. Está instalado Aspell? Aspell expansion error. Error de expansión de Aspell. No expansion found Expansión no encontrada Term Término Doc. / Tot. Doc./Tot. Index: %1 documents, average length %2 terms Índice: %1 documentos, largo promedio %2 términos Index: %1 documents, average length %2 terms.%3 results Índice: %1 documentos, largo promedio %2 términos. %3 resultados %1 results %1 resultados List was truncated alphabetically, some frequent La lista fue separada alfabéticamente, algunos términos terms may be missing. Try using a longer root. frecuentes pueden no aparecer. Intente usar una raíz más larga. Show index statistics Mostrar estadísticas del índice Number of documents Número de documentos Average terms per document Términos promedio por documento Smallest document length Tamaño del documento más pequeño Longest document length Tamaño del documento más grande Database directory size Tamaño del directorio de la base de datos MIME types: Tipos MIME: Item Elemento Value Valor Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages error al recuperar lenguajes para raíces The selected directory does not appear to be a Xapian index El directorio seleccionado no parece ser un índice Xapian This is the main/local index! Este es el índice local o principal! 
The selected directory is already in the index list El directorio seleccionado ya está en la lista de índices Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Seleccione el directorio para el índice Xapian (ej: /home/buddy/.recoll/xapiandb) Choose Elegir Result list paragraph format (erase all to reset to default) Formato de párrafo para la lista de resultados (borre todo para volver al valor por defecto) Result list header (default is empty) Encabezado de la lista de resultados (valor por defecto es vacío) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Seleccionar el directorio de configuración de recoll o el directorio para el índice xapian (ej: /home/me/.recoll o /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read El directorio seleccionado parecer ser un directorio de configuración de Recoll pero la configuración no puede ser leída At most one index should be selected Al menos un índice debe ser seleccionado Cant add index with different case/diacritics stripping option No se puede agregar un índice con diferente opción para remover mayúsculas/minúsculas/diacríticos Default QtWebkit font Any term Cualquier término All terms Todos los términos File name Query language Lenguaje de consulta Value from previous program exit ViewAction Changing actions with different current values Cambiando acciones con valores actuales diferentes Mime type Tipo MIME Command Comando MIME type Tipo MIME Desktop Default Valor predeterminado del ambiente de escritorio Changing entries with different current values Cambiando entradas con diferentes valores actuales ViewActionBase Native Viewers Visualizadores Nativos Select one or several file types, then click Change Action to modify the program used to open them Seleccione uno o varios tipos de fichero, luego presione Cambiar Acción para modificar el programa usado para abrirlos Change Action Cambiar Acción Close Cerrar Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. Seleccione uno o varios tipos MIME y presione "Cambiar Acción"<br>Puede también cerrar esta ventana y marcar "Usar preferencias del escritorio"<br>en el panel principal para ignorar esta lista y usar los valores estándar de su escritorio. Select one or several mime types then use the controls in the bottom frame to change how they are processed. Seleccione uno o más tipos mime, y use los controles en la caja abajo para cambiar cómo se procesan. Use Desktop preferences by default Usar preferencias del escritorio como estándar Select one or several file types, then use the controls in the frame below to change how they are processed Seleccione uno o más tipos de archivos, y use los controles en la caja abajo para cambiar cómo se procesan Exception to Desktop preferences Excepción de las preferencias del escritorio Action (empty -> recoll default) Acción (vacío -> valor por defecto de recoll) Apply to current selection Aplicar a la selección actual Recoll action: Acción current value valor Select same Seleccionar misma <b>New Values:</b> <b>Nuevos valores</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. 
Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfBeaglePanelW Steal Beagle indexing queue Robar cola de indexado de Beagle Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) Beagle NO DEBE estar ejecutándose. Habilita procesar la cola para indexar el historial web de Firefox de Beagle.<br>(debe también instalar el plugin Beagle para Firefox) Entries will be recycled once the size is reached Las entradas serán recicladas una vez que el tamaño es alcanzado Web page store directory name Nombre del directorio del almacén para páginas web The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. El nombre del directorio dónde almacenar las copias de páginas web visitadas.<br>Una ruta de directorio no absoluta es utilizada, relativa al directorio de configuración. Max. size for the web store (MB) Tamaño máximo para el almacén web (MB) Process the WEB history queue Procesar la cola del historial WEB Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Habilita la indexación de páginas visitadas en Firefox.<br>(necesita también el plugin Recoll para Firefox) confgui::ConfIndexW Can't write configuration file No se puede escribir archivo de configuración confgui::ConfParamFNW Choose Elegir confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSearchPanelW Automatic diacritics sensitivity Sensibilidad automática de diacríticos <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Habilitar automáticamente la sensibilidad de diacríticos si el término de búsqueda tiene caracteres acentuados (no presentes en unac_except_trans). De otra forma necesita usar el lenguage de búsqueda y el modificador <i>D</i> para especificar la sensibilidad de los diacríticos. Automatic character case sensitivity Sensibilidad automática a la distinción de mayúsculas/minúsculas de los caracteres <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Habilitar automáticamente la sensibilidad a las mayúsculas/minúsculas si la entrada tiene caracteres en mayúscula en una posición distinta al primer caracter. De otra forma necesita usar el lenguaje de búsqueda y el modificador <i>C</i> para especificar la sensibilidad a las mayúsculas y minúsculas. Maximum term expansion count Máximo conteo de expansión de términos <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Máxima expansión de conteo para un solo término (ej: cuando se usan comodines). El valor por defecto de 10000 es razonable y evitará consultas que parecen congelarse mientras el motor de búsqueda recorre la lista de términos. Maximum Xapian clauses count Máximo conteo de cláusulas de Xapian <p>Maximum number of elementary clauses we add to a single Xapian query. 
In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Número máximo de cláusulas elementales agregadas a una consulta de Xapian. En algunos casos, el resultado de la expansión de términos puede ser multiplicativo, y deseamos evitar el uso excesivo de memoria. El valor por defecto de 100000 debería ser lo suficientemente alto en la mayoría de los casos, y compatible con las configuraciones de hardware típicas en la actualidad. confgui::ConfSubPanelW Global Global Max. compressed file size (KB) Tamaño máximo de archivo comprimido (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Este valor establece un umbral mas allá del cual los archivos<br>comprimidos no serán procesados. Escriba 1 para no tener límite,<br>o el número 0 para nunca hacer descompresión. Max. text file size (MB) Tamaño máximo para archivo de texto (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Este valor establece un umbral más allá del cual los archivos de texto no serán procesados.<br>Escriba 1 para no tener límites. Este valor es utilizado para excluir archivos de registro gigantescos del índice. Text file page size (KB) Tamaño de página para archivo de texto (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Si se utiliza este valor (diferente de -1), los archivos de texto serán separados en partes de este tamaño para ser indexados. Esto ayuda con las búsquedas de archivos de texto muy grandes (ej: archivos de registro). Max. filter exec. time (S) Tiempo máximo de ejecución de filtros (S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. Filtros externos que se ejecuten por más tiempo del establecido serán abortados.<br>Esto ocurre en los raros casos (ej: postscript) cuando un documento hace que un filtro entre en un ciclo.<br>Establezca un valor de -1 para no tener límite. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Filtros externos que se ejecuten por más tiempo del establecido serán detenidos. Esto es por el caso inusual (ej: postscript) dónde un documento puede causar que un filtro entre en un ciclo infinito. Establezca el número -1 para indicar que no hay límite. confgui::ConfTopPanelW Top directories Directorios primarios The list of directories where recursive indexing starts. Default: your home. La lista de directorios donde la indexación recursiva comienza. Valor por defecto: su directorio personal. Skipped paths Directorios omitidos These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Estos son los nombres de directorios los cuales no se indexan.<br>Puede contener comodines. 
Debe corresponder a las rutas vistas por el indexador (ej: si los directorios primarios incluyen '/home/me' y '/home' es en realidad un enlace a '/usr/home', la entrada correcta para directorios omitidos sería '/home/me/tmp*', no '/usr/home/me/tmp*') Stemming languages Lenguajes para raíces The languages for which stemming expansion<br>dictionaries will be built. Los lenguajes para los cuales los diccionarios de expansión de raíces serán creados. Log file name Nombre de archivo de registro The file where the messages will be written.<br>Use 'stderr' for terminal output El archivo donde los mensajes serán escritos.<br>Use 'stderr' para salida a la terminal Log verbosity level Nivel de verbosidad del registro This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Este valor ajusta la cantidad de mensajes,<br>desde solamente errores hasta montones de información de depuración. Index flush megabytes interval Intervalo en megabytes de escritura del índice This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Este valor ajusta la cantidad de datos indexados entre escrituras al disco.<br> Esto ayuda a controlar el uso de memoria del indexador. Valor estándar 10MB Max disk occupation (%) Utilización máxima de disco (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Este es el porcentaje de utilización de disco donde la indexación fallará y se detendrá (para evitar llenarle el disco).<br>0 significa sin límites (valor por defecto). No aspell usage No utilizar aspell Aspell language Lenguaje Aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. El lenguaje para el diccionario aspell. Esto debe ser algo como 'en' o 'fr'...<br>Si este valor no se especifica, el ambiente NLS será usado para averiguarlo, lo cual usualmente funciona. Para tener una idea de qué esta instalado en su sistema escriba 'aspell-config' y busque por ficheros .dat dentro del directorio 'data-dir'. Database directory name Nombre del directorio de base de datos The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Nombre del directorio donde almacenar el índice.<br>Un valor no absoluto para la ruta de directorio es usado, relativo al directorio de configuración. El valor estándar es 'xapiandb'. Use system's 'file' command Utilizar el comando 'file' del sistema Use the system's 'file' command if internal<br>mime type identification fails. Utilizar el comando 'file' del sistema si la identificación interna de tipos MIME falla. Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deshabilita el uso de aspell para generar aproximaciones ortográficas en la herramienta explorador de términos.<br>Útil si aspell no se encuentra o no funciona. The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. 
To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. El lenguaje para el diccionario aspell. Esto debería ser algo como 'en' o 'fr' ...<br>Si no se establece este valor, el ambiente NLS será utilizado para calcularlo, lo cual usualmente funciona. Para tener una idea de lo que está instalado en sus sistema, escriba 'aspell-config' y busque archivos .dat dentro del directorio 'data-dir'. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. El nombre de un directorio donde almacenar el índice.<br>Una ruta no absoluta se interpreta como relativa al directorio de configuración. El valor por defecto es 'xapiandb'. Unac exceptions Excepciones Unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Estas son excepciones al mecanismo unac, el cual, de forma predeterminada, elimina todos los diacríticos, y realiza una descomposición canónica. Es posible prevenir la eliminación de acentos para algunos caracteres, dependiendo de su lenguaje, y especificar descomposiciones adicionales, por ejemplo, para ligaturas. En cada entrada separada por espacios, el primer caracter es el origen, y el resto es la traducción. uiPrefsDialogBase User preferences Preferencias de usuario User interface Interfaz de usuario Number of entries in a result page Número de elementos en la página de resultados If checked, results with the same content under different names will only be shown once. Si está marcado, los resultados con el mismo contenido bajo nombres diferentes serán mostrados solo una vez. Hide duplicate results. Esconder resultados duplicados. Highlight color for query terms Color de resaltado para términos de búsqueda Result list font Tipo de letra para lista de resultados Opens a dialog to select the result list font Abre una ventana para seleccionar el tipo de letra para la lista de resultados Helvetica-10 Helvetica-10 Resets the result list font to the system default Restaurar el tipo de letra de la lista de resultados al valor por defecto del sistema Reset Restaurar Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Define el formato para cada párrafo de resultados. Utilice formato qt-html y reemplazos estilo printf:<br>%A Resumen<br> %D Fecha<br> %I Nombre del ícono<br> %K Palabras clave (si existen)<br> %L Enlaces de vista previa y edición<br> %M Tipo MIME<br> %Número de resultado<br> %R Porcentaje de relevancia<br> %S Información de tamaño<br> %T Título<br> %U Url<br> Result paragraph<br>format string Texto de formato para<br>párrafo de resultados Texts over this size will not be highlighted in preview (too slow). Textos más allá de este tamaño no serán resaltados (muy lento). Maximum text size highlighted for preview (megabytes) Tamaño máximo de texto resaltado para vista previa (megabytes) Use desktop preferences to choose document editor. 
Usar preferencias del escritorio para seleccionar editor de documentos. Choose editor applications Escoger aplicaciones para edición Display category filter as toolbar instead of button panel (needs restart). Mostrar filtros de categorías como barra de herramientas en lugar de panel de botones (necesita reinicio). Auto-start simple search on whitespace entry. Auto iniciar búsqueda simple al entrar espacios en blanco. Start with advanced search dialog open. Iniciar con la ventana de búsqueda avanzada abierta. Remember sort activation state. Recordar estado de activación de ordenamiento. Prefer Html to plain text for preview. Preferir HTML a texto simple para vista previa. Search parameters Parámetros de búsqueda Stemming language Lenguaje de raíces A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Una búsqueda por [rolling stones] (2 términos) será cambiada por [rolling or stones or (rolling phrase 2 stones)]. Esto dará mayor precedencia a los resultados en los cuales los términos de búsqueda aparecen exactamente como fueron escritos. Automatically add phrase to simple searches Automáticamente añadir frases a búsquedas simples Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. ¿Intentar construir resúmenes para elementos en la lista de resultados utilizando el contexto de los términos de búsqueda? Puede ser lento para documentos grandes. Dynamically build abstracts Construir resúmenes dinámicamente Do we synthetize an abstract even if the document seemed to have one? ¿Sintetizar un resumen aunque el documento parece tener uno? Replace abstracts from documents Reemplazar resúmenes de los documentos Synthetic abstract size (characters) Tamaño del resumen sintetizado (caracteres) Synthetic abstract context words Palabras de contexto del resumen sintetizado The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Las palabras en la lista serán convertidas automáticamente a cláusulas ext:xxx en el ingreso de lenguaje de consulta. Query language magic file name suffixes. Sufijos para nombres mágicos de archivos en el lenguaje de consulta. Enable Habilitar External Indexes Índices Externos Toggle selected Cambiar selección Activate All Activar Todos Deactivate All Desactivar Todos Remove from list. This has no effect on the disk index. Eliminar de la lista. Esto no tiene efecto en el índice en disco. Remove selected Eliminar selección Click to add another index directory to the list Presione para añadir otro directorio de índice a la lista Add index Añadir índice Apply changes Aplicar cambios &OK &OK Discard changes Descartar cambios &Cancel &Cancelar Abstract snippet separator Separador de fragmentos de resumen Use <PRE> tags instead of <BR>to display plain text as html. Utilizar etiquetas <PRE> en lugar de <BR> para mostrar texto simple como html. Lines in PRE text are not folded. Using BR loses indentation. Líneas en texto PRE no se parten. Al usar BR se pierde indentación. Style sheet Hoja de estilo Opens a dialog to select the style sheet file Abre una ventana de diálogo para seleccionar la hoja de estilos Choose Elegir Resets the style sheet to default Restablecer la hoja de estilo al valor por defecto Lines in PRE text are not folded. Using BR loses some indentation. Líneas en texto PRE no se parten. 
Al usar BR se pierde indentación. Use <PRE> tags instead of <BR>to display plain text as html in preview. Use etiquetas <PRE> en lugar de <BR> para desplegar texto corriente como html en la vista previa. Result List Lista de resultados Edit result paragraph format string Editar texto de formato para el párrafo de resultados Edit result page html header insert Editar encabezado html insertado en página de resultados Date format (strftime(3)) Formato de fecha (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Umbral de porcentaje de frecuencia sobre el cuál no utilizamos términos dentro de la autofrase. Los términos frequentes son un problema importante de desempeño con las frases. Términos omitidos aumenta la holgura de la frase, y reducen la eficiencia de la autofrase. El valor por defecto es 2 (por ciento). Autophrase term frequency threshold percentage Porcentaje del umbral de frecuencia de términos de autofrase Plain text to HTML line style Texto común a estilo de línea HTML Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Las líneas en texto PRE no son dobladas. Al usar BR se pierde indentación. El estilo PRE + Wrap probablemente es lo que está buscando. <BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE> + wrap Disable Qt autocompletion in search entry. Deshabilitar autocompletar de Qt en la entrada de búsqueda. Search as you type. Buscar al escribir. Paths translations Rutas de traducciones Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Haga clic para agregar otro directorio de índice a la lista. Puede seleccionar un directorio de configuración de Recoll o un índice Xapian. Snippets window CSS file Archivo CSS para la ventana de fragmentos Opens a dialog to select the Snippets window CSS style sheet file Abre una ventana de diálogo para el archivo de estilos CSS de la ventana de fragmentos Resets the Snippets window style Establece el valor por defecto para el estilo de la ventana de Fragmentos Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. 
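The entries above are flattened records from a Qt Linguist translation catalog: each one pairs a context (the C++ class name, for example ResList, SSearch or uiPrefsDialogBase), an English source string, and its Spanish translation. The sketch below is not taken from the Recoll sources and uses an assumed installation path; it only illustrates how such a record is looked up at run time once the catalog has been compiled to a .qm file. If no matching context/source pair is found, Qt simply returns the untranslated English source text.

#include <QCoreApplication>
#include <QDebug>
#include <QTranslator>

int main(int argc, char **argv)
{
    QCoreApplication app(argc, argv);

    // Hypothetical location of the compiled Spanish catalog (recoll_es.qm).
    QTranslator translator;
    if (translator.load("recoll_es", "/usr/share/recoll/translations"))
        QCoreApplication::installTranslator(&translator);

    // Look up one record by context plus English source text. With the
    // catalog installed this yields "Historial de documentos"; without it,
    // the English source string itself is returned.
    qDebug() << QCoreApplication::translate("ResList", "Document history");

    return 0;
}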
recoll-1.26.3/qtgui/i18n/recoll_uk.qm0000644000175000017500000006345413566424763014256 00000000000000
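The recoll_uk.qm entry above is the compiled, binary form of such a catalog, produced from a .ts source by Qt's lrelease tool. A minimal sketch, again assuming a hypothetical installation directory rather than Recoll's actual startup code, of how a Qt application would pick and install the catalog matching the user's locale before building its widgets:

#include <QApplication>
#include <QLocale>
#include <QTranslator>

int main(int argc, char **argv)
{
    QApplication app(argc, argv);

    // QTranslator::load(QLocale, ...) tries progressively less specific
    // names, e.g. recoll_uk_UA.qm, then recoll_uk.qm, until one is found
    // in the (assumed) directory below.
    QTranslator translator;
    if (translator.load(QLocale::system(), "recoll", "_",
                        "/usr/share/recoll/translations")) {
        QApplication::installTranslator(&translator);
    }

    // ... construct and show the main window here, then run the event loop.
    return app.exec();
}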
May be slow for big documents.uiPrefsDialogBase88=0<VG=> 1C4C20B8 :>=A?5:B8Dynamically build abstractsuiPrefsDialogBase >2=VH=V V=45:A8External IndexesuiPrefsDialogBaseHelvetica-10 Helvetica-10uiPrefsDialogBase %>20B8 4C1;V:0B8Hide duplicate results.uiPrefsDialogBase/:I> C2V<:=5=5, @57C;LB0B8 A B0:8< A0<8< 7<VAB>< B0 @V7=8<8 =0720<8 1C45 ?>:070=> =5 1V;LH5 >4=>3> @07C.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseb0:A8<0;L=89 @>7<V@ B5:ABC V7 ?V4A2VGC20==O< (1)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseBV;L:VABL @57C;LB0BV2 =0 AB>@V=:C"Number of entries in a result pageuiPrefsDialogBasebV4:@820T 4V0;>3 281>@C H@8DBC A?8A:C @57C;LB0BV2-Opens a dialog to select the result list fontuiPrefsDialogBasedV44020B8 ?5@5203C HTML =04 B5:AB>< 4;O ?5@53;O4C.&Prefer Html to plain text for preview.uiPrefsDialogBase80?0<'OB0B8 AB0= A>@BC20==O.Remember sort activation state.uiPrefsDialogBasef840;8B8 7V A?8A:C. 5 2?;820T =0 48A:>289 V=45:A.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase 840;8B8 281@0=5Remove selecteduiPrefsDialogBaseL0<V=OB8 =0O2=V C 4>:C<5=B0E :>=A?5:B8 Replace abstracts from documentsuiPrefsDialogBase!:8=CB8ResetuiPrefsDialogBaseD>25@B0T H@8DB C B8?>289 A8AB5<=891Resets the result list font to the system defaultuiPrefsDialogBase0(@8DB A?8A:C @57C;LB0BV2Result list fontuiPrefsDialogBase 0@0<5B@8 ?>HC:CSearch parametersuiPrefsDialogBase\V4:@820B8 4V0;>3 A:;04=>3> ?>HC:C ?@8 AB0@BV.'Start with advanced search dialog open.uiPrefsDialogBase>20 A;>2>D>@<Stemming languageuiPrefsDialogBase8>=B5:AB=8E A;V2 C :>=A?5:BV Synthetic abstract context wordsuiPrefsDialogBaseT >7<V@ A8=B5B8G=>3> :>=A?5:BC (C A8<2>;0E)$Synthetic abstract size (characters)uiPrefsDialogBase"5:AB8 V7 @>7<V@><, 1V;LH8< 70 2:070=89, =5 1C45 ?V4A2VG5=> C ?>?5@54=L><C ?5@53;O4V (?>2V;L=>).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBase&5@5:;NG8B8 281@0=5Toggle selecteduiPrefsDialogBase=B5@D59AUser interfaceuiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_lt.ts0000644000175000017500000044015413566424763014273 00000000000000 AdvSearch All clauses Visos sąlygos Any clause Bet kuri sąlyga texts tekstai spreadsheets skaičiuoklės presentations prezentacijos media media messages žinutės other kita Bad multiplier suffix in size filter text tekstas spreadsheet skaičiuoklės presentation prezentacijos message pranešimas AdvSearchBase Advanced search Išsamesnė paieška Search for <br>documents<br>satisfying: Ieškoti <br>dokumentų<br>tenkinančių: Delete clause Ištrinti sąlygą Add clause Pridėti sąlygą Restrict file types Apriboti bylų tipus Check this to enable filtering on file types Pažymėti, jei norite filtruoti pagal bylų tipus By categories Pagal kategorijas Check this to use file categories instead of raw mime types Pažymėti, jei norite naudoti bylų kategorijas vietoje mime tipų Save as default Išsaugoti kaip numatytąjį Searched file types Ieškota bylų tipų All ----> Visi ----> Sel -----> Pas -----> <----- Sel <----- Pas <----- All <----- Visi Ignored file types Ignoruoti bylų tipai Enter top directory for search Įrašykite viršutinio lygio direktoriją paieškai Browse Naršyti Restrict results to files in subtree: Pateikti rezultatus byloms submedyje: Start Search Pradėti paiešką Close Uždaryti All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. 
<br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Visi kairėje esantys netušti laukai bus sujungiami AND (visi) arba OR (bet kuris) pagalba. <br> "Bet kuris" "Visi" ir "Nei vienas" laukų tipai gali priimti paprastų žodžių mišinį ir frazes pažymėtas dvigubomis kabutėmis. <br> Tušti laukeliai ignoruojami. Invert Minimum size. You can use k/K,m/M,g/G as multipliers Min. Size Maximum size. You can use k/K,m/M,g/G as multipliers Max. Size Filter From To Check this to enable filtering on dates Filter dates Find Check this to enable filtering on sizes Filter sizes ConfIndexW Can't write configuration file Nepavyksta įrašyti nustatymų bylos Global parameters Globalūs parametrai Local parameters Lokalūs parametrai Search parameters Paieškos parametrai Top directories Aukščiausio lygmens direktorijos<br>kuriose vykdomas indeksavimas The list of directories where recursive indexing starts. Default: your home. Direktorijų, kuriose pradedamas rekursinis indeksavimas, sąrašas. Numatytoji: namų direktorija. Skipped paths Direktorijų, kurių turinys nein-<br>deksuojamas, sąrašas These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Kalbos naudojamos stemming<br> procesui The languages for which stemming expansion<br>dictionaries will be built. Kalbos, kurioms bus sukurti stemming <br>expansion žodynai. Log file name Log bylos vardas The file where the messages will be written.<br>Use 'stderr' for terminal output Byla, kurioje bus įrašomos žinutės.<br>Naudokite 'stderr' norėdami išvesti į terminalo langą Log verbosity level Log išsamumo lygmuo This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Vertė nustato žiniučių apimtį, nuo vien tik <br>klaidų fiksavimo iki didelės apimties duomenų skirtų debugging. Index flush megabytes interval Indekso dalių, įrašomų į diską, dydis (MB) This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Vertė nustato duomenų, kurie indeksuojami tarp įrašymo į diską, apimtį.<br>Padeda valdyti indeksavimo dalies atminties naudojimą. Numatyta vertė yra 10 MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Aspell nebus naudojama Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Nurodo nenaudoti aspell programos kuriant tarimo aproksimacijas raktinių žodžių tyrinėjimo įrankyje.<br>Naudinga, jei aspell neveikia arba neįdiegta. Aspell language Aspell kalba The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
Database directory name Duomenų bazės direktorijos vardas The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. Process the WEB history queue Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Max. size for the web store (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. Automatic character case sensitivity <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. Maximum term expansion count <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. Maximum Xapian clauses count <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) Didžiausias suspaustų bylų dydis (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Viršijus pasirinktą suspaustų bylų dydį, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos būtų indeksuojamos. Max. text file size (MB) Didžiausias tekstinės bylos dydis (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Viršijus pasirinktą tekstinių bylų dydį, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos būtų indeksuojamos. Text file page size (KB) Tekstinės bylos dydis (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Jei vertė nurodyta (nelgyi -1) tekstinės bylos bus suskaidytos į nurodyto dydžio bylas, kurios bus atskirai indeksuojamos. Naudinga atliekant paiešką labai dideliose tekstinėse bylose (pav. log bylose). Max. 
filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Global Globalus CronToolW Cron Dialog <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Hours (* or 0-23) Minutes (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> Enable Disable It seems that manually edited entries exist for recollindex, cannot edit crontab Error installing cron entry. Bad syntax in fields ? 
EditDialog Dialog EditTrans Source path Local path Config error Original path EditTransBase Path Translations Setting path translations for Select one or several file types, then use the controls in the frame below to change how they are processed Add Delete Cancel Atšaukti Save FirstIdxDialog First indexing setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> Indexing configuration This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Indexing schedule This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Start indexing now FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> Cron scheduling The tool will let you decide at what time indexing should run and will install a crontab entry. Real time indexing start up Decide if real time indexing will be started when you log in (only for the default index). ListDialog Dialog GroupBox Main Configuration problem (dynconf Nustatymų bėda (dynconf No db directory in configuration Nustatymuose nerandama duomenų bazės bylos Could not open database in Nepavyko atidaryti duomenų bazės "history" file is damaged, please check or remove it: Preview Close Tab Uždarykite auselę Cannot create temporary directory Nepavyksta sukurti laikinos direktorijos Cancel Atšaukti Missing helper program: Trūksta pagalbinės programos: Can't turn doc into internal representation for Nepavyksta pervesti dokumento į vidinę buseną Creating preview text Kuriamas peržvalgos tekstas Loading preview text into editor Įkeliamas į redaktorių peržvalgos tekstas &Search for: &Ieškoti: &Next &Sekantis &Previous &Ankstesnis Clear Išvalyti Match &Case Atitaikyti &Atvejį Form Tab 1 Open Atidaryti Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Rodyti laukus Show main text Rodyti pagrindinį tekstą Print Spausdinti Print Current Preview Spausdinti kaip matoma peržiūroje Show image Select All Copy Save document to file Fold lines Preserve indentation Open document QObject Global parameters Globalūs parametrai Local parameters Lokalūs parametrai <b>Customised subtrees <b>Pritaikyti direktorijų<br> submedžiai The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Subdirektorijų, kuriose dalį parametrų reikia pakeisti, sąrašas.<br> Numatytoji reikšmė: tuščia. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Nurodyti parametrai taikomi arba visoms direktorijoms, arba subdirektorijoms,<br> jei kuri jų prieš tai pažymimos. Pridėti ir ištrinti direktorijų vardus galite<br> spausdami +/- mygtukus. Skipped names Neįtraukti vardai These are patterns for file or directory names which should not be indexed. Bylų arba direktorijų, kurių nedera indeksuoti, vardų šablonai. Default character set Numatytoji simbolių aibė This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Pasirinkta simbolių aibė bus naudojama skaityti bylų, kurių simbolių aibės nepavyksta nustatyti, turiniui.<br>Numatytoji vertė yra nepasirinkti konkrečios simbolių aibės - tokiu atveju naudojama NLS aplinkos vertė. Follow symbolic links Sekti simbolines nuorodas Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Indeksavimo metu sekti simbolines nuorodas. Numatytasis elgesys yra nesekti, bandant išvengti dvigubo indeksavimo Index all file names Indeksuoti visų bylų vardus Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). 
Default true Indeksuoti bylų, kurių turinio nepavyksta perskaityti, vardus. Numatytoji reikšmė: teisybė Beagle web history Beagle tinklo istorija Search parameters Paieškos parametrai Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Peržiūra &Open Open With Run Script Copy &File Name Kopijuoti &Bylos vardą Copy &URL Kopijuoti &URL &Write to File &Įrašyti į bylą Save selection to files Preview P&arent document/folder Peržiūrėti &Aukštesnio lygio dokumentus/direktorijas &Open Parent document/folder Atidaryti &Aukštesnio lygio dokumentus/direktorijas Find &similar documents Rasti &panašius dokumentus Open &Snippets window Show subdocuments / attachments QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Also start indexing daemon right now. Replacing: Replacing file Can't create: Warning Įspėjimas Could not execute recollindex Deleting: Deleting file Removing autostart Autostart file deleted. Kill current process too ? 
RclMain (no stemming) (no stemming) (all languages) (visos kalbos) error retrieving stemming languages error retrieving stemming languages Indexing in progress: Indeksuojama: Files Failai Purge Išvalyti Stemdb Stemdb Closing Uždaroma Unknown Nežinoma Can't start query: Nepavyksta pradėti vykdyti užklausą: Query results Užklausos rezultatai Cannot retrieve document info from database Nepavyksta išgauti iš duomenų bazės informacijos apie dokumentą Warning Įspėjimas Can't create preview window Nepavyksta sukurti peržiūros lango This search is not active any more Ši paieška daugiau nevykdoma Bad viewer command line for %1: [%2] Please check the mimeconf file Netinkamos peržiūros komandinė eilutė %1: [%2] Prašome patikrinti mimeconf bylą Cannot extract document or create temporary file Nepavyksta perskaityti dokumento arba sukurti laikinos bylos Executing: [ Vykdoma: [ About Recoll Apie Recoll History data Istorijos duomenys Document history Dokumentų istorija Update &Index Atnaujinti &Indeksą Indexing interrupted indeksavimas pertrauktas Stop &Indexing Sustabdyti &Indeksavimą All Visi media media message pranešimas other kita presentation prezentacijos spreadsheet skaičiuoklės text tekstas sorted surūšiuota filtered filtruotas External applications/commands needed and not found for indexing your file types: Reikalingos pilnam indeksavimui, tačiau nerandamos išorinės programos/komandos: No helpers found missing Randamos visos reikalingos pagalbinės programos Missing helper programs Trūksta pagalbinių programų Save file dialog Išsaugoti failą forma Choose a file name to save under Pasirinkite bylos vardą, kuriuo išsaugosite bylą Document category filter Dokumentų kategorijų filtras No external viewer configured for mime type [ Nustatymuose nenumatyta jokia išorinė peržiūros programa šiam mime tipui [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Nurodyta peržiūros programa šiam mime tipui %1: %2 nerandama. Ar norėtumete iššaukti nustatymų langą? Can't access file: Can't uncompress file: Save file Result count (est.) Query details Užklausos detalės Could not open external index. Db not open. Check external indexes list. No results found None Updating Done Monitor Indexing failed The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Erasing index Reset the index and start from scratch ? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Error Index query error Can't update index: indexer running Indexed MIME Types Bad viewer command line for %1: [%2] Please check the mimeview file Viewer command line for %1 specifies both file and parent file value: unsupported Cannot find parent document External applications/commands needed for your file types and not found, as stored by the last indexing pass in Sub-documents and attachments Document filter The indexer is running so things should improve when it's done. 
Duplicate documents These Urls ( | ipath) share the same content: Bad desktop app spec for %1: [%2] Please check the desktop file Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
RclMainBase Recoll Recoll Search tools Paieškos įrankiai Result list Rezultatų sąrašas &File &Byla &Tools &Įrankiai &Preferences &Nustatymai &Help &Pagalba E&xit I&šeiti Ctrl+Q Ctrl+Q Update &index Atnaujinti &Indeksą &Erase document history &Ištrinti dokumentų istoriją &About Recoll &Apie Recoll &User manual &Vartotojo vadovas Document &History Dokumentų &Istorija Document History Dokumentų Istorija &Advanced Search &Išsamesnė Paieška Advanced/complex Search Išsamesnė Paieška &Sort parameters &Surūšiuoti parametrai Sort parameters Surūšiuoti parametrus Term &explorer Raktinių žodžių &tyrinėtojas Term explorer tool Raktinių žodžių tyrinėjimo įrankis Next page Sekantis puslapis Next page of results Sekantis rezultatų puslapis First page Pirmas puslapis Go to first page of results Pereiti į pirmą rezultatų puslapį Previous page Prieš tai buvęs puslapis Previous page of results Ankstesnis rezultatų puslapis &Query configuration &Užklausų nustatymai External index dialog Išorinių indeksų langas &Indexing configuration &Indeksavimo nustatymai All Visi &Show missing helpers &Trūkstamos pagalbinės programos PgDown PgUp &Full Screen F11 Full Screen &Erase search history Sort by dates from oldest to newest Sort by dates from newest to oldest Show Query Details &Rebuild index Shift+PgUp E&xternal index dialog &Index configuration &GUI configuration &Results Sort by date, oldest first Sort by date, newest first Show as table Show results in a spreadsheet-like table Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel File name Bylos vardas Mime type Mime tipas Date Data Abstract Author Document size Document date File size File date Keywords Original character set Relevancy rating Title URL Mtime Date and time Ipath MIME type Can't sort by inverse relevance ResList Result list Rezultatų sąrašas (show query) (rodyti užklausą) &Preview &Peržiūra Copy &File Name Kopijuoti &Bylos vardą Copy &URL Kopijuoti &URL Find &similar documents Rasti &panašius dokumentus Query details Užklausos detalės filtered išfiltruota sorted surūšiuota Document history Dokumentų istorija <p><b>No results found</b><br> <p><b>Nerasta rezultatų</b><br> Previous Ankstesnis Next Kitas Unavailable document Neprieinamas dokumentas Preview Peržiūra Open Atidaryti <p><i>Alternate spellings (accents suppressed): </i> <p><i>Kiti galimi tarimai (be akcentų): </i> &Write to File &Įrašyti į bylą Preview P&arent document/folder Peržiūrėti &Aukštesnio lygio dokumentus/direktorijas &Open Parent document/folder Atidaryti &Aukštesnio lygio dokumentus/direktorijas Documents Dokumentai out of at least iš bent for <p><i>Alternate spellings: </i> Result count (est.) 
Snippets ResTable &Reset sort &Delete column Save table to CSV file Can't open/create file: &Preview &Peržiūra Copy &File Name Kopijuoti &Bylos vardą Copy &URL Kopijuoti &URL &Write to File &Įrašyti į bylą Find &similar documents Rasti &panašius dokumentus Preview P&arent document/folder Peržiūrėti &Aukštesnio lygio dokumentus/direktorijas &Open Parent document/folder Atidaryti &Aukštesnio lygio dokumentus/direktorijas &Save as CSV Add "%1" column ResTableDetailArea &Preview &Peržiūra Copy &File Name Kopijuoti &Bylos vardą Copy &URL Kopijuoti &URL &Write to File &Įrašyti į bylą Find &similar documents Rasti &panašius dokumentus Preview P&arent document/folder Peržiūrėti &Aukštesnio lygio dokumentus/direktorijas &Open Parent document/folder Atidaryti &Aukštesnio lygio dokumentus/direktorijas ResultPopup &Preview &Peržiūra Copy &File Name Kopijuoti &Bylos vardą Copy &URL Kopijuoti &URL &Write to File &Įrašyti į bylą Preview P&arent document/folder Peržiūrėti &Aukštesnio lygio dokumentus/direktorijas &Open Parent document/folder Atidaryti &Aukštesnio lygio dokumentus/direktorijas Find &similar documents Rasti &panašius dokumentus SSearch Any term Bet kuris raktinis žodis All terms Visi raktiniai žodžiai File name Bylos vardas Query language Užklausų kalba Bad query string Netinkamai pateikta užklausa Out of memory Nepakanka atminties Too many completions Per daug galimų užbaigimų Completions Užbaigimai Select an item: Pasirinkti įrašą: Enter file name wildcard expression. Enter search terms here. Type ESC SPC for completions of current term. Čia įveskite paieškos raktinius žodžius. Įrašykite ESC SPC rašomo termino užbaigimui. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear Išvalyti Ctrl+S Ctrl+S Erase search entry Ištrinti paieškos įrašą Search Ieškoti Start query Pradėti užklausą Enter search terms here. Type ESC SPC for completions of current term. Čia įveskite paieškos raktinius žodžius. Įrašykite ESC SPC rašomo termino užbaigimui. Choose search type. Pasirinkite paieškos tipą. 
Show query history SearchClauseW SearchClauseW SearchClauseW Any of these Bet kuris šių All of these Visi šie None of these Nei vienas šių This phrase Ši frazė Terms in proximity Artimi raktiniai žodžiai File name matching Bylos vardą atitinka Select the type of query that will be performed with the words Pasirinkite užklausos tipą atliekamą su žodžiais Number of additional words that may be interspersed with the chosen ones Papildomų žodžių skaičius kurie gali interspersed with the chosen ones No field Any All Visi None Phrase Proximity File name Bylos vardas Snippets Snippets Find: Next Kitas Prev SnippetsW Search Ieškoti <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SortForm Date Data Mime type Mime tipas SortFormBase Sort Criteria Rūšiavimo kriterijus Sort the Rūšiuoti most relevant results by: tinkamiausi rezultatai pagal: Descending Mažėjimo tvarka Apply Pritaikyti Close Uždaryti SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Naršyti Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Raktinių žodžių tyrinėjimas &Expand &Išplėsti Alt+E Alt+E &Close &Uždaryti Alt+C Alt+C Term Raktinis žodis No db info. Match Case Accents SpellW Wildcards Wildcards Regexp Regexp Stem expansion Stem expansion Spelling/Phonetic Tarimas/Fonetika error retrieving stemming languages error retrieving stemming languages Aspell init failed. Aspell not installed? Aspell iššaukimas nepavyko. Aspell programa neįdiegta? Aspell expansion error. Aspell praplėtimų klaida. No expansion found Nerasta praplėtimų Term Raktinis žodis Doc. / Tot. Index: %1 documents, average length %2 terms.%3 results %1 results List was truncated alphabetically, some frequent terms may be missing. Try using a longer root. Show index statistics Number of documents Average terms per document Database directory size MIME types: Item Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages error retrieving stemming languages The selected directory does not appear to be a Xapian index Atrodo, jog pasirinkta direktorija nėra Xapian indekso direktorija This is the main/local index! Pagrindinis/localus indekas! 
The selected directory is already in the index list Pasirinkta direktorija jau yra indekso sąraše Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Pasirinkite Xapian indekso direktoriją (pav: /home/buddy/.recoll/xapiandb) Choose Naršyti Result list paragraph format (erase all to reset to default) Result list header (default is empty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read At most one index should be selected Cant add index with different case/diacritics stripping option Default QtWebkit font Any term Bet kuris raktinis žodis All terms Visi raktiniai žodžiai File name Bylos vardas Query language Užklausų kalba Value from previous program exit UIPrefsDialogBase User preferences Vartotojo nustatymai User interface Vartotoja aplinka Number of entries in a result page Įrašų skaičius rezultatų puslapyje Result list font Rezultatų sąrašo šriftas Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Pasirinkite rezultatų sąrašo šriftą Reset Gražinti numatytąją formą Resets the result list font to the system default Gražina numatytąją rezultatų sąrašo srifto vertę Result paragraph<br>format string Rezultatų paragrafo<br>formatas Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Apibūdina kiekvieno rezultatų įrašo formatą:<br>%A Santrauka<br> %D Data<br> %I Ikona<br> %K Raktiniai žodžiai (jei yra)<br> %L Peržiūros ir Redagavimo nuorodos<br> %M Mime tipai<br> %N Rezultų skaičius<br> %R Tinkamumas procentais<br> %S Informacija apie dydį<br> %T Pavadinimas<br> %U Url<br> Texts over this size will not be highlighted in preview (too slow). Tekstai viršijantys šį dydį nebus nuspalvinami peržiūros metu (per didelė apkrova). Maximum text size highlighted for preview (megabytes) Didžiausia teksto, pažymėto peržiūrai, apimtis (megabaitai) Auto-start simple search on whitespace entry. Pradėti paprastąją paiešką įvedus tuščio tarpelio simoblį. Start with advanced search dialog open. Pradėti nuo išsamesnės paieškos lango. Start with sort dialog open. Pradėti su atidarytu rūšiavimo langu. Use desktop preferences to choose document editor. Naudoti darbalaukio nustatymus parenkant dokumentų redaktorių. Remember sort activation state. Įsiminti rūšiavimo pasirinkimus (nedings perkrovus). Search parameters Paieškos parametrai Stemming language Stemming kalba Automatically add phrase to simple searches Pridėti prie paprastos paieškos frazę A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Paieška bus pakeista (pav. rolling stones -> rolling or stones or (rolling phrase 2 stones)). Teikiama aiški pirmenybė rezultatams kuriuose rasti raktiniai žodžiai atitinka įvestus. Dynamically build abstracts Dinamiškai sukurti santraukas Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Ar pabandome sukurti santraukas remdamiesi užklausų raktinių žodžių kontekstu? Didelės apimties dokumentams gali lėtai veikti. 
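To make the %-substitutions listed above concrete, here is one possible result paragraph format string (an illustration only, not the program's shipped default), using Qt HTML and the documented replacements:

    <p><b>%T</b> [%R] %S<br>%M %D<br><i>%U</i><br>%A<br>%K<br>%L</p>

This would show the title in bold with the relevance percentage and size information, then the MIME type and date, the URL, the abstract, the keywords, and the Preview/Edit links.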
Replace abstracts from documents Pakeisti dokumentuose randamas santraukas Do we synthetize an abstract even if the document seemed to have one? Ar sukuriame dirbtinę santrauką, jei dokumente jau ji yra? Synthetic abstract size (characters) Dirbtinės santraukos dydis (simbolių skaičius) Synthetic abstract context words Dirbtinės santraukos konteksto žodžiai External Indexes Išoriniai indeksai External indexes Išoriniai indeksai Toggle selected Įjungti/Išjungti pasirinktą Activate All Visus aktyvuoti Deactivate All Visus deaktyvuoti Remove selected Pažymėtus pašalinti Remove from list. This has no effect on the disk index. Pašalinti iš sąrašo. Neturi jokio poveikio indeksui diske. Add index Pridėti indeksą Select the xapiandb directory for the index you want to add, then click Add Index Pasirinkti xapiandb direktoriją kurios indeką norite pridėti, tada paspauskite Pridėti Indeksą Browse Naršyti &OK &Gerai Apply changes Pritaikyti pakeitimus &Cancel &Atšaukti Discard changes Panaikinti pakeitimus Highlight color for query terms Užklausų raktinių žodžių žymėjimo spalvos Prefer Html to plain text for preview. Pirmenybę teikti Html formatui peržiūros metu. If checked, results with the same content under different names will only be shown once. Pažymėjus, bus rodoma tik viena iš bylų su tuo pačiu turiniu, tačiau skirtingais vardais. Hide duplicate results. Slėpti pasikartojančius rezultatus. Choose editor applications Pasirinkite redaktorių programas Display category filter as toolbar instead of button panel (needs restart). Kategorijų filtrą rodyti kaip įrankų juostą (reikalauja perkrovimo). ViewAction Changing actions with different current values Pakeisti veiksmus su skirtingomis dabartinėmis vertėmis Mime type Mime tipas Command MIME type Desktop Default Changing entries with different current values ViewActionBase Native Viewers Sistemos peržiūros programos Select one or several file types, then click Change Action to modify the program used to open them Pasirinkite vieną ar kelis bylų tipus, tada paspauskite Keisti Veiksmus norėdami keisti kaip programa juos atidaro File type Bylos tipas Action Veiksmas Change Action Pakeisti veiksmą Close Uždaryti Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. Pasirinkite vieną ar kelis mime tipus tada spauskite "Keisti Veiksmus"<br>Taip pat galite uždaryti šį langą ir patikrinti "Naudoti darbalaukio nustatymus"<br>pagrindinėje panelėje? norėdami ignoruoti šį sąrašą ir naudoti numatytasias darbalaukio. Select one or several mime types then use the controls in the bottom frame to change how they are processed. Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Recoll action: current value Select same <b>New Values:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfBeaglePanelW Steal Beagle indexing queue Įtraukti Beagle paruoštus duomenis Beagle MUST NOT be running. 
Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) BEAGLE programa TURI neveikti. Įgalina peržiūrėti beagle paruoštą medžiagą bandant indeksuoti Firefox naršymo<br> istoriją (papildomai reikia įdiegti Firefox Beagle priedą) Web cache directory name Naršymo tinkle cache direktorijos vardas The name for a directory where to store the cache for visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Direktorijos, kurioje saugoma lankytų tinklo svetainių cache, vardas.<br>Santykinis kelias prasideda nuo nustatymų direktorijos. Max. size for the web cache (MB) Didžiausias tinklo naršymo cache dydis (MB) Entries will be recycled once the size is reached Įrašai bus trinami pasiekus nurodytą dydį confgui::ConfIndexW Can't write configuration file Nepavyksta įrašyti nustatymų bylos confgui::ConfParamFNW Browse Naršyti Choose Naršyti confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSubPanelW Global Globalus Max. compressed file size (KB) Didžiausias suspaustų bylų dydis (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Viršijus pasirinktą suspaustų bylų dydį, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos būtų indeksuojamos. Max. text file size (MB) Didžiausias tekstinės bylos dydis (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Viršijus pasirinktą tekstinių bylų dydį, jie nebus indeksuojami. Pasirinkite -1 jei nenorite nurodyti ribos, 0, jei nenorite, jog suspaustos bylos būtų indeksuojamos. Text file page size (KB) Tekstinės bylos dydis (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Jei vertė nurodyta (nelgyi -1) tekstinės bylos bus suskaidytos į nurodyto dydžio bylas, kurios bus atskirai indeksuojamos. Naudinga atliekant paiešką labai dideliose tekstinėse bylose (pav. log bylose). Max. filter exec. time (S) Ilgiausias filtrų veikimo laikas (S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. Išorinių filtrų, dirbančių ilgiau nei numatyta, darbas bus nutraukiamas. Taikoma retiems atvejas (pav. postscript) kada dokumentas galėtų priversti filtrą kartoti veiksmus be galo ilgai. confgui::ConfTopPanelW Top directories Aukščiausio lygmens direktorijos<br>kuriose vykdomas indeksavimas The list of directories where recursive indexing starts. Default: your home. Direktorijų, kuriose pradedamas rekursinis indeksavimas, sąrašas. Numatytoji: namų direktorija. Skipped paths Direktorijų, kurių turinys nein-<br>deksuojamas, sąrašas These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Direktorijų, kurių turinys nebus indeksuojamas, vardai.<br> Vardo dalis gali būti wildcards. Turi atitikti programos matomus kelius iki direktorijų (pav. 
jei indeksuoti pradedama nuo '/home/me', o '/home' yra nuoroda į '/usr/home', teisinga vertė bus '/home/me/tmp*', o ne '/usr/home/me/tm*') Stemming languages Kalbos naudojamos stemming<br> procesui The languages for which stemming expansion<br>dictionaries will be built. Kalbos, kurioms bus sukurti stemming <br>expansion žodynai. Log file name Log bylos vardas The file where the messages will be written.<br>Use 'stderr' for terminal output Byla, kurioje bus įrašomos žinutės.<br>Naudokite 'stderr' norėdami išvesti į terminalo langą Log verbosity level Log išsamumo lygmuo This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Vertė nustato žiniučių apimtį, nuo vien tik <br>klaidų fiksavimo iki didelės apimties duomenų skirtų debugging. Index flush megabytes interval Indekso dalių, įrašomų į diską, dydis (MB) This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Vertė nustato duomenų, kurie indeksuojami tarp įrašymo į diską, apimtį.<br>Padeda valdyti indeksavimo dalies atminties naudojimą. Numatyta vertė yra 10 MB Max disk occupation (%) Didžiausia disko atminties naudojimo dalis (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Viršijus (procentine išraiška) disko atminties panaudojimą indeksavimas bus sustabdytas (vengiant pilnai užpildyti diską).<br>0 reiškia, jog ribos nėra (numatytoji vertė). No aspell usage Aspell nebus naudojama Aspell language Aspell kalba The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Aspell žodyno kalba ('en', 'fr' ar kita).<br>Jei vertė nenurodyta NLS aplinka pabandys nustatyti tinkamą kalbą (paprastai teisingai). Norėdami sužinoti kas įrašyta Jūsų sistemoje įrašykite 'aspell-config' ir žiūrėkite į dat bylas 'data-dir' direktorijoje. Database directory name Duomenų bazės direktorijos vardas The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Direktorijos, kurioje bus saugomas indeksas, vardas<br>Laikoma, jog santykinio keliio iki direktorijos pradžia yra nustatymų direktorija. Numatytoji yra 'xapiandb'. Use system's 'file' command Naudoti sistemos 'file' komandą Use the system's 'file' command if internal<br>mime type identification fails. Jei nepavyks atpažinti mime tipo<br>naudoti sistemos 'file' komandą. Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Nurodo nenaudoti aspell programos kuriant tarimo aproksimacijas raktinių žodžių tyrinėjimo įrankyje.<br>Naudinga, jei aspell neveikia arba neįdiegta. uiPrefsDialogBase User preferences Vartotojo nustatymai User interface Vartotoja aplinka Number of entries in a result page Įrašų skaičius rezultatų puslapyje If checked, results with the same content under different names will only be shown once. Pažymėjus, bus rodoma tik viena iš bylų su tuo pačiu turiniu, tačiau skirtingais vardais. Hide duplicate results. Slėpti pasikartojančius rezultatus. 
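For reference, the indexing parameters described above are stored in the recoll.conf configuration file. A minimal sketch using the example values quoted in the descriptions (topdirs and skippedPaths appear in the text above; the other variable names are given from memory and should be checked against the recoll.conf documentation):

    # directories where recursive indexing starts
    topdirs = /home/me
    # directory name patterns which indexing will not enter
    skippedPaths = /home/me/tmp*
    # index storage directory, relative to the configuration directory
    dbdir = xapiandb
    # log destination ('stderr' for terminal output) and verbosity
    logfilename = stderr
    loglevel = 3
    # megabytes of indexed data between flushes to disk
    idxflushmb = 10

The GUI dialogs normally write these entries for you; the sketch only shows how the settings map onto the file.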
Highlight color for query terms Užklausų raktinių žodžių žymėjimo spalvos Result list font Rezultatų sąrašo šriftas Opens a dialog to select the result list font Pasirinkite rezultatų sąrašo šriftą Helvetica-10 Helvetica-10 Resets the result list font to the system default Gražina numatytąją rezultatų sąrašo srifto vertę Reset Gražinti numatytąją formą Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Apibūdina kiekvieno rezultatų įrašo formatą:<br>%A Santrauka<br> %D Data<br> %I Ikona<br> %K Raktiniai žodžiai (jei yra)<br> %L Peržiūros ir Redagavimo nuorodos<br> %M Mime tipai<br> %N Rezultų skaičius<br> %R Tinkamumas procentais<br> %S Informacija apie dydį<br> %T Pavadinimas<br> %U Url<br> Result paragraph<br>format string Rezultatų paragrafo<br>formatas Texts over this size will not be highlighted in preview (too slow). Tekstai viršijantys šį dydį nebus nuspalvinami peržiūros metu (per didelė apkrova). Maximum text size highlighted for preview (megabytes) Didžiausia teksto, pažymėto peržiūrai, apimtis (megabaitai) Use desktop preferences to choose document editor. Naudoti darbalaukio nustatymus parenkant dokumentų redaktorių. Choose editor applications Pasirinkite redaktorių programas Display category filter as toolbar instead of button panel (needs restart). Kategorijų filtrą rodyti kaip įrankų juostą (reikalauja perkrovimo). Auto-start simple search on whitespace entry. Pradėti paprastąją paiešką įvedus tuščio tarpelio simoblį. Start with advanced search dialog open. Pradėti nuo išsamesnės paieškos lango. Start with sort dialog open. Pradėti su atidarytu rūšiavimo langu. Remember sort activation state. Įsiminti rūšiavimo pasirinkimus (nedings perkrovus). Prefer Html to plain text for preview. Pirmenybę teikti Html formatui peržiūros metu. Search parameters Paieškos parametrai Stemming language Stemming kalba A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Paieška bus pakeista (pav. rolling stones -> rolling or stones or (rolling phrase 2 stones)). Teikiama aiški pirmenybė rezultatams kuriuose rasti raktiniai žodžiai atitinka įvestus. Automatically add phrase to simple searches Pridėti prie paprastos paieškos frazę Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Ar pabandome sukurti santraukas remdamiesi užklausų raktinių žodžių kontekstu? Didelės apimties dokumentams gali lėtai veikti. Dynamically build abstracts Dinamiškai sukurti santraukas Do we synthetize an abstract even if the document seemed to have one? Ar sukuriame dirbtinę santrauką, jei dokumente jau ji yra? Replace abstracts from documents Pakeisti dokumentuose randamas santraukas Synthetic abstract size (characters) Dirbtinės santraukos dydis (simbolių skaičius) Synthetic abstract context words Dirbtinės santraukos konteksto žodžiai The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Query language magic file name suffixes. 
Enable External Indexes Išoriniai indeksai Toggle selected Įjungti/Išjungti pasirinktą Activate All Visus aktyvuoti Deactivate All Visus deaktyvuoti Remove from list. This has no effect on the disk index. Pašalinti iš sąrašo. Neturi jokio poveikio indeksui diske. Remove selected Pažymėtus pašalinti Add index Pridėti indeksą Apply changes Pritaikyti pakeitimus &OK &Gerai Discard changes Panaikinti pakeitimus &Cancel &Atšaukti Abstract snippet separator Style sheet Opens a dialog to select the style sheet file Choose Naršyti Resets the style sheet to default Result List Edit result paragraph format string Edit result page html header insert Date format (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Autophrase term frequency threshold percentage Plain text to HTML line style Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. <BR> <PRE> <PRE> + wrap Disable Qt autocompletion in search entry. Paths translations Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Snippets window CSS file Opens a dialog to select the Snippets window CSS style sheet file Resets the Snippets window style Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. 
recoll-1.26.3/qtgui/i18n/recoll_da.ts0000644000175000017500000045064113566424763014242 00000000000000 AdvSearch All clauses Alle sætninger Any clause Vilkårlig sætning media medier other andet Bad multiplier suffix in size filter Forkert multiplikator suffiks i størrelsefilter text tekst spreadsheet regneark presentation præsentation message besked texts tekster spreadsheets regneark AdvSearchBase Advanced search Avanceret søgning Search for <br>documents<br>satisfying: Søg efter <br>dokumenter<br>der opfylder: Delete clause Slet sætning Add clause Tilføj sætning Restrict file types Begræns filtyper Check this to enable filtering on file types Afkryds dette for at aktivere filtrering på filtyper By categories Efter kategorier Check this to use file categories instead of raw mime types Afkryds dette for at bruge filkategorier i stedet for rå mime-typer Save as default Gem som standard Searched file types Søgte filtyper All ----> Alle ----> Sel -----> Valg -----> <----- Sel <----- Valg <----- All <----- Alle Ignored file types Ignorerede filtyper Enter top directory for search Indtast øverste mappe for søgning Browse Gennemse Restrict results to files in subtree: Begræns resultater til filer i undermapper: Start Search Start søgning Close Luk All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Alle felter med indhold til højre vil blive kombineret med AND ("Alle sætninger" valgt) eller OR ("Vilkårlig sætning" valgt) bindeord. <br>"Enhver" "Alle" og "Ingen" felttyper kan acceptere en blanding af simple ord, og fraser i dobbelte anførselstegn.<br>Felter uden data ignoreres. Invert Inverter Minimum size. You can use k/K,m/M,g/G as multipliers Mindste størrelse. Du kan bruge k/K,m/M,g/G som multiplikatorer Min. Size Min. størrelse Maximum size. You can use k/K,m/M,g/G as multipliers Maksimal størrelse. Du kan bruge k/K,m/M g/G som multiplikatorer Max. Size Maks. størrelse Filter Filter From Fra To Til Check this to enable filtering on dates Afkryds dette for at aktivere filtrering på datoer Filter dates Filtrer datoer Find Find Check this to enable filtering on sizes Afkryds dette for at aktivere filtrering på størrelser Filter sizes Filtrer størrelser ConfIndexW Can't write configuration file Kan ikke skrive konfigurationsfil Global parameters Globale parametre Local parameters Lokale parametre Search parameters Søgeparametre Top directories Øverste mapper The list of directories where recursive indexing starts. Default: your home. Listen over mapper hvor rekursiv indeksering starter. Standard: din hjemme-mappe (home). Skipped paths Udeladte stier These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Ordstammer for sprogene The languages for which stemming expansion<br>dictionaries will be built. De sprog, hvor ordstamme-udvidelses<br>ordbøger vil blive bygget. 
Log file name Navn på logfil The file where the messages will be written.<br>Use 'stderr' for terminal output Filen hvor meddelelser vil blive skrevet.<br>Brug 'stderr' for terminal output Log verbosity level Log informationsniveau This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Denne værdi justerer mængden af meddelelser,<br>fra kun fejl til en masse fejlretningsdata. Index flush megabytes interval Megabyte interval for skrivning af Index This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Denne værdi justere mængden af data, der er indekseret mellem skrivning til disken.<br>Dette hjælper med at kontrollere indekseringsprogrammets brug af hukommelse. Standard 10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Brug ikke aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deaktiver brug af aspell til at generere stavnings-tilnærmelse i værktøj for søgning efter ord. <br> Nyttigt hvis aspell er fraværende eller ikke virker. Aspell language Aspell sprog The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Sproget for aspell ordbog. Det skal se ud som "en" eller "fr" ...<br>Hvis denne værdi ikke er angivet, så vil NLS omgivelser blive brugt til at finde det, det fungerer normalt. For at få en idé om, hvad der er installeret på dit system, kan du skrive 'aspell konfig "og se efter .dat filer inde i 'data-dir' mappen. Database directory name Databasens mappenavn The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Navnet på en mappe hvor du vil gemme indekset<br>En relativ sti er taget i forhold til konfigurationsmappen. Standard er "xapiandb. Unac exceptions Unac-undtagelser <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Disse er undtagelser fra unac mekanismen, der, som standard, fjerner alle diakritiske tegn, og udfører kanonisk nedbrydning. Du kan tilsidesætte fjernelse af accent for nogle tegn, afhængigt af dit sprog, og angive yderligere nedbrydninger, f.eks. for ligaturer. I hver indgang adskilt af mellemrum, er det første tegn kildedelen, og resten er oversættelsen. 
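To make the unac exceptions entry format described above concrete: the value is a space-separated list where, in each entry, the first character is the source character and the remainder is what it is turned into. For example (illustrative values, not necessarily the shipped defaults):

    ßss œoe æae

turns ß into "ss", œ into "oe" and æ into "ae" instead of letting the default decomposition handle them; by the same rule, an entry whose remainder is the character itself (e.g. åå) leaves that character untouched by unaccenting.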
Process the WEB history queue Behandl køen for WEB-historik Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Aktiverer indeksering af sider besøgt af Firefox.<br>(Du skal også installere Firefox Recoll plugin) Web page store directory name Mappenavn for lageret til Websider The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Navnet på en mappe hvor du vil gemme kopier af besøgte websider.<br>En relativ sti er taget i forhold til konfigurationsmappen. Max. size for the web store (MB) Max. størrelse til web-lager (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Indgangene vil blive genbrugt, når størrelsen er nået.<br>Kun en øgning af størrelsen giver god mening, da en reducering af værdien ikke vil afkorte en eksisterende fil (kun spildplads i slutningen). Automatic diacritics sensitivity Automatisk følsomhed over for diakritiske tegn <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Udløser automatisk følsomhed over for diakritiske tegn, hvis søgeordet har accent tegn (ikke i unac_except_trans). Ellers er du nød til bruge forespørgselssproget og <i>D</i> modifikatoren, for at angive følsomhed over for diakritiske tegn. Automatic character case sensitivity Automatisk følsomhed over for store/små bogstaver <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Udløser automatisk følsomhed over for store/små bogstaver, hvis indgangen har store bogstaver i andet end den første position. Ellers er du nød til bruge forespørgselssproget og <i>C</i> modifikatoren, for at angive følsomhed over for store/små bogstaver. Maximum term expansion count Maksimale antal ordudvidelser <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Maksimal antal udvidelser-for et enkelt ord (fx: når der bruges jokertegn). Standarden på 10 000 er rimeligt og vil undgå forespørgsler, der synes at fryse mens motoren arbejder sig igennem ordlisten. Maximum Xapian clauses count Maksimale antal Xapiansætninger <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Maksimalt antal grundlæggende sætninger vi føjer til en enkel Xapian forespørgsel. I nogle tilfælde kan resultatet af ordudvidelse være multiplikativ, og vi ønsker at undgå at bruge overdreven hukommelse. Standarden på 100 000 bør være både høj nok i de fleste tilfælde og kompatibel med de nuværende typiske hardware konfigurationer. ConfSubPanelW Only mime types Kun mime-typer An exclusive list of indexed mime types.<br>Nothing else will be indexed. 
Normally empty and inactive En eksklusiv liste over indekserede MIME-typer.<br>Intet andet vil blive indekseret. Normalt tom og inaktiv Exclude mime types Udeluk mime-typer Mime types not to be indexed Mime-typer der ikke skal indekseres Max. compressed file size (KB) Maks. komprimeret filstørrelse (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Denne værdi angiver en grænse for, hvornår komprimerede filer ikke vil blive behandlet. Indstil til -1 for ingen grænse, til 0 for ingen dekomprimering nogensinde. Max. text file size (MB) Maks. størrelse på tekstfil (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Denne værdi angiver en grænse for, hvornår tekstfiler ikke vil blive behandlet. Indstil til -1 for ingen grænse. Dette er for at udelukke monster logfiler fra indekset. Text file page size (KB) Sidestørrelse på tekstfil (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Hvis denne værdi er angivet (ikke lig med -1), vil tekstfiler opdeles i bidder af denne størrelse for indeksering. Dette vil hjælpe søgning i meget store tekstfiler (dvs.: log-filer). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Eksterne filtre der arbejder længere end dette vil blive afbrudt. Dette er for det sjældne tilfælde (dvs.: postscript) hvor et dokument kan forårsage, at et filter laver et loop. Indstil til -1 for ingen grænse. Global Global CronToolW Cron Dialog Cron vindue <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indekseringstidsplan (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Hvert felt kan indeholde et jokertegn (*), en enkelt numerisk værdi, kommaseparerede lister (1,3,5) og intervaller (1-7). Mere generelt vil felterne blive brugt <span style=" font-style:italic;"> som de er</span> inde i crontabfilen, og den fulde crontab syntaks kan bruges, se crontab (5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For eksempel, indtastning af <span style=" font-family:'Courier New,courier';">*</span> i <span style=" font-style:italic;">Dage, </span><span style=" font-family:'Courier New,courier';">12,19</span> i <span style=" font-style:italic;">Timer</span> og <span style=" font-family:'Courier New,courier';">15</span> i <span style=" font-style:italic;">Minutter</span> ville starte recollindex hver dag kl. 
00:15 og 19:15 </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">En tidsplan med meget hyppige aktiveringer er formentlig mindre effektiv end realtid indeksering.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Ugens dage (* eller 0-7, 0 eller 7 er Søndag) Hours (* or 0-23) Timer (* eller 0-23) Minutes (0-59) Minutter (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Deaktiver</span> for at stoppe automatisk batch indeksering, <span style=" font-style:italic;">Aktiver</span> for at aktivere den, <span style=" font-style:italic;">Annuller</span> for ikke at ændre noget.</p></body></html> Enable Aktiver Disable Deaktiver It seems that manually edited entries exist for recollindex, cannot edit crontab Det ser ud til, at manuelt redigerede indgange findes for recollindeks, kan ikke redigere crontab Error installing cron entry. Bad syntax in fields ? Fejl ved installation af cron-indgange. Forkert syntaks i felter? EditDialog Dialog Vindue EditTrans Source path Kildesti Local path Lokal sti Config error Konfigureringsfejl Original path Original sti EditTransBase Path Translations Oversættelse af stier Setting path translations for Indstilling af oversættelser af stier for Select one or several file types, then use the controls in the frame below to change how they are processed Vælg en eller flere filtyper, brug derefter knapperne i rammen nedenfor for at ændre, hvordan de skal behandles Add Tilføj Delete Slet Cancel Annuller Save Gem FirstIdxDialog First indexing setup Opsætning af første indeksering <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. 
You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Det fremgår, at indekset for denne konfiguration ikke eksisterer.</span><br /><br />Hvis du blot ønsker at indeksere din hjemmemappe med et sæt fornuftige standardindstillinger, skal du trykke på <span style=" font-style:italic;">Start indeksering nu</span> knappen. Du vil være i stand til at justere detaljerne senere. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Hvis du ønsker mere kontrol, kan du bruge følgende link til at justere indekseringskonfiguration og tidsplan.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Disse værktøjer kan tilgås senere fra <span style=" font-style:italic;">Præference</span> menuen.</p></body></html> Indexing configuration Konfiguration af indeksering This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Dette vil lade dig justere de mapper, du vil indeksere, og andre parametre som udelukkede filstier eller navne, standard tegnsæt etc. Indexing schedule Tidsplan for indeksering This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Dette vil lade dig vælge mellem batch og realtime indeksering, og oprette en automatisk tidsplan for batch indeksering (ved hjælp af cron). Start indexing now Start indeksering nu FragButs %1 not found. %1 ikke fundet. 
%1: %2 %1: %2 Fragment Buttons Fragment Knapper Query Fragments Forespørgsel efter fragmenter IdxSchedW Index scheduling setup Opsætning af indeks skedulering <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksering kan køre permanent, indeksere filer når de ændrer sig, eller køre med adskilte intervaller. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Læsning af manualen kan hjælpe dig med at vælge mellem disse tilgange (tryk F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Dette værktøj kan hjælpe dig med at oprette en tidsplan for at automatisere kørsler af batch indeksering, eller starte realtid indeksering når du logger ind (eller begge dele, hvilket sjældent giver mening). </p></body></html> Cron scheduling Cron skedulering The tool will let you decide at what time indexing should run and will install a crontab entry. Værktøjet vil lade dig afgøre, på hvilket tidspunkt indeksering skal køre og det vil installere en crontab indgang. Real time indexing start up Opstart af realtid indeksering Decide if real time indexing will be started when you log in (only for the default index). Beslut, om realtid indeksering skal startes når du logger ind (kun for standard-indekset). 
ListDialog Dialog Vindue GroupBox Gruppeboks Main No db directory in configuration Ingen dbmappe i konfigurationen "history" file is damaged or un(read)writeable, please check or remove it: Filen med "historik" er beskadiget eller den kan ikke læses eller skrives til, undersøg det venligst, eller fjern den: "history" file is damaged, please check or remove it: Preview Close Tab Luk faneblad Cancel Annuller Missing helper program: Manglende hjælpeprogram: Can't turn doc into internal representation for Kan ikke lave dok til intern repræsentation for Creating preview text Laver forhåndsvisningstekst Loading preview text into editor Henter forhåndsvisningstekst for redigering &Search for: &Søger efter: &Next &Næste &Previous &Forrige Clear Ryd Match &Case Store/små &Bogstaver Error while loading file Fejl ved indlæsning af filen Form Tab 1 Open Åbn Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Vis felter Show main text Vis hovedtekst Print Udskriv Print Current Preview Udskriv denne Visning Show image Vis billede Select All Vælg alle Copy Kopier Save document to file Gem dokument til fil Fold lines Ombryd linjer Preserve indentation Bevar indrykning Open document QObject Global parameters Globale parametre Local parameters Lokale parametre <b>Customised subtrees <b>Tilpassede undermapper The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Listen over undermapper i det indekserede hierarki <br>hvor nogle parametre behøver at blive omdefineret. Standard: tom. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>De parametre, der følger er angivet enten på øverste niveau, hvis intet<br>eller en tom linje er valgt i listefeltet ovenfor, eller for den valgte undermappe. <br> Du kan tilføje eller fjerne mapper ved at klikke på +/- knapperne. Skipped names Udeladte navne These are patterns for file or directory names which should not be indexed. Dette er mønstre for fil- eller mappenavne, der ikke skal indekseres. Follow symbolic links Følg symbolske links Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Følg symbolske link under indeksering. Standarden er nej, for at undgå dobbelt indeksering Index all file names Indekser alle filnavne Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indekser navnene på filer, hvor indholdet ikke kan identificeres eller behandles (ingen eller ikke-understøttet mime-type). Standard er true Search parameters Søgeparametre Web history Webhistorik Default<br>character set Standard<br>tegnsæt Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Tegnsæt, der bruges til at læse filer, hvor tegnsættet ikke kan identificeres ud fra indholdet, f.eks. rene tekstfiler.<br>Standardværdien er tom, og værdien fra NLS-omgivelserne anvendes. 
Ignored endings ignorerede endelser These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). Dette er endelser på filnavne for filer, hvor kun navnet vil blive indekseret (ingen forsøg på identifikation af MIME-type, ingen dekomprimering, ingen indeksering af indhold). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Opret eller vælg mappe til at gemme i Choose exactly one directory Vælg præcis en mappe Could not read directory: Kunne ikke læse mappe: Unexpected file name collision, cancelling. Uventet kollision af filnavn, annullerer. Cannot extract document: Kan ikke udtrække dokument: &Preview &Forhåndsvisning &Open &Åbn Open With Åbn med Run Script Kør skript Copy &File Name Kopier &Filnavn Copy &URL Kopier &URL &Write to File &Skriv til fil Save selection to files Gem det valgte til filer Preview P&arent document/folder Forhåndsvis &Forælderdokument/mappe &Open Parent document/folder &Åbn Forælderdokument/mappe Find &similar documents Find &lignende dokumenter Open &Snippets window Åbn vindue til &tekststumper Show subdocuments / attachments Vis underdokumenter / vedhæftede filer QxtConfirmationMessage Do not show again. Vis ikke igen. RTIToolW Real time indexing automatic start Automatisk start af realtid indeksering <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> Indeksering kan sættes til at køre som en dæmon, der opdatere indekset når filer ændres, i realtid. Du får et indeks, som altid er opdateret, men systemressourcer anvendes permanent..</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Start indekseringsdæmonen med min skrivebordssession. Also start indexing daemon right now. Start også indekseringsdæmon lige nu. 
Replacing: Erstatter: Replacing file Erstatter fil Can't create: Kan ikke oprette: Warning Advarsel Could not execute recollindex Kunne ikke køre recollindex Deleting: Sletter: Deleting file Sletter fil Removing autostart Fjerner autostart Autostart file deleted. Kill current process too ? Autostartfil er slettet. Stop også nuværende proces? RclMain (no stemming) (Ingen ordstammer) (all languages) (alle sprog) error retrieving stemming languages fejl under hentning af ordstammer for sprogene Indexing in progress: Indeksering i gang: Purge Rydder op Stemdb stammedb Closing Afslutter Unknown Ukendt Query results Resultater af forespørgsel Cannot retrieve document info from database Kan ikke hente dokumentinfo fra databasen Warning Advarsel Can't create preview window Kan ikke oprette forhåndsvisningsvindue This search is not active any more Denne søgning er ikke længere aktiv Cannot extract document or create temporary file Kan ikke udtrække dokument eller oprette midlertidig fil Executing: [ Udfører: [ About Recoll Om Recoll History data Historik-data Document history Dokumenthistorik Update &Index Opdater &Indeks Stop &Indexing Stop &Indeksering All Alle media medier message besked other andet presentation præsentation spreadsheet regneark text tekst sorted sorteret filtered filtreret No helpers found missing Ingen hjælpere mangler Missing helper programs Manglende hjælpeprogrammer No external viewer configured for mime type [ Ingen ekstern fremviser konfigureret for mime-type [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Fremviseren angivet i mimeview for %1: %2 er ikke fundet. Ønsker du at åbne indstillingsvinduet? Can't access file: Kan ikke tilgå fil: Can't uncompress file: Kan ikke dekomprimere fil: Save file Gem fil Result count (est.) Optælling af resultat (est.) Could not open external index. Db not open. Check external indexes list. Kunne ikke åbne ekstern indeks. DB er ikke åben. Tjek liste over eksterne indekser. No results found Ingen resultater fundet None Ingen Updating Opdaterer Done Færdig Monitor Monitor Indexing failed Indeksering mislykkedes The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Den nuværende indekseringsproces blev ikke startet fra denne grænseflade. Klik på OK for at stoppe den alligevel, eller Annuller for at lade den køre Erasing index Sletter indeks Reset the index and start from scratch ? Nulstil indekset og start forfra? 
Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Forespørgsel er i gang<br>På grund af begrænsninger i indekseringsbiblioteket,<br>vil en annullering afslutte programmet Error Fejl Index not open Indeks ikke åben Index query error Indeks forespørgselsfejl Content has been indexed for these mime types: Indholdet er blevet indekseret for disse mime-typer: Can't update index: indexer running Kan ikke opdatere indeks: indeksering kører Indexed MIME Types Indekserede MIME-typer Bad viewer command line for %1: [%2] Please check the mimeview file Forkert kommandolinje for fremviser for %1: [%2] Kontroller venligst mimeview-filen Viewer command line for %1 specifies both file and parent file value: unsupported Fremviser kommandolinje for %1 angiver både fil og forælderfil værdier: er ikke understøttet Cannot find parent document Kan ikke finde forælderdokument Indexing did not run yet Indeksering har ikke kørt endnu External applications/commands needed for your file types and not found, as stored by the last indexing pass in Eksterne programmer/kommandoer nødvendige for dine filtyper blev ikke fundet, som gemt af den sidste indeksering Sub-documents and attachments Underdokumenter og vedhæftede filer Document filter Dokumentfilter Index not up to date for this file. Refusing to risk showing the wrong entry. Indeks er ikke opdateret for denne fil. Nægter at risikere at vise den forkerte indgang. Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. Klik OK for at opdatere indekset for denne fil, du bliver så nødt til at gentage forespørgslen når indeksering er færdig. The indexer is running so things should improve when it's done. Indeksering kører, så ting burde være bedre, når den er færdig. The document belongs to an external indexwhich I can't update. Dokumentet tilhører et ekstern indeks, som jeg ikke kan opdatere. Click Cancel to return to the list. Click Ignore to show the preview anyway. Klik på Annuller for at vende tilbage til listen. Klik på Ignorer for at vise forhåndsvisningen alligevel. Duplicate documents Identiske dokumenter These Urls ( | ipath) share the same content: Disse webadresser ( | ipath) deler samme indhold: Bad desktop app spec for %1: [%2] Please check the desktop file Forkert desktop app spec for %1: [%2] Tjek venligst desktopfilen Indexing interrupted indeksering afbrudt Disabled because the real time indexer was not compiled in. Deaktiveret fordi realtid indeksering ikke blev kompileret ind. This configuration tool only works for the main index. Dette konfigurationsværktøj virker kun for hovedindekset. 
The current indexing process was not started from this interface, can't kill it Den nuværende indekseringsproces blev ikke startet fra denne grænseflade, kan ikke stoppe den Bad paths Ugyldige stier Bad paths in configuration file: Ugyldige stier i konfigurationsfil: Selection patterns need topdir Mønstre for udvælgelse skal have en øverste mappe Selection patterns can only be used with a start directory Mønstre for udvælgelse kan kun bruges med en startmappe No search Ingen søgning No preserved previous search Ingen tidligere søgning er bevaret Choose file to save Vælg fil, der skal gemmes Saved Queries (*.rclq) Gemte forespørgsler (*.rclq) Write failed Skrivning mislykkedes Could not write to file Kunne ikke skrive til fil Read failed Læsning mislykkedes Could not open file: Kunne ikke åbne fil: Load error Indlæsningsfejl Could not load saved query Kunne ikke indlæse gemte forespørgsel Index scheduling Indeks skedulering Sorry, not available under Windows for now, use the File menu entries to update the index Beklager, er endnu ikke tilgængelig for Windows, bruge Fil menuindgange for at opdatere indekset Can't set synonyms file (parse error?) Kan ikke aktivere synonymer-fil (analysefejl?) The document belongs to an external index which I can't update. Dokumentet tilhører et eksternt indeks, som jeg ikke kan opdatere. Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session). Klik på Annuller for at vende tilbage til listen. <br>Klik på Ignorer for at vise forhåndsvisningen alligevel. (og husk for denne session). Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Åbner en midlertidig kopi. Ændringer vil gå tabt, hvis du ikke gemmer<br/>dem til et permanent sted. Do not show this warning next time (use GUI preferences to restore). Vis ikke denne advarsel næste gang (brug GUI præferencer for at gendanne). Index locked Indeks låst Unknown indexer state. Can't access webcache file. Indeksering i ukendt tilstand. Kan ikke tilgå webcachefil. Indexer is running. Can't access webcache file. Indeksering kører. Kan ikke tilgå webcachefil. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
RclMainBase Recoll Recoll &File &Fil &Tools &Værktøjer &Preferences &Præferencer &Help &Hjælp E&xit A&fslut Ctrl+Q Ctrl+Q Update &index Opdater &Indeks &Erase document history &Slet dokumenthistorik &About Recoll &Om Recoll &User manual &Brugermanual Document &History Dokument&historik Document History Dokumenthistorik &Advanced Search &Avanceret søgning Advanced/complex Search Avanceret/kompleks søgning &Sort parameters &Sorterings-parametre Sort parameters Sorterings-parametre Term &explorer &Søg efter ord Term explorer tool Værktøj for søgning efter ord Next page Næste side Next page of results Næste side med resultater First page Første side Go to first page of results Gå til første side med resultater Previous page Forrige side Previous page of results Forrige side med resultater External index dialog Eksterne indekser &Show missing helpers &Vis manglende hjælpere PgDown PgDown PgUp PgUp &Full Screen &Fuld skærm F11 F11 Full Screen Fuld skærm &Erase search history &Slet søgehistorik Sort by dates from oldest to newest Sorter efter dato fra ældste til nyeste Sort by dates from newest to oldest Sorter efter dato fra nyeste til ældste Show Query Details Vis Detaljer i forespørgsel &Rebuild index &Genopbyg indeks &Show indexed types &Vis indekserede typer Shift+PgUp Shift+PgUp &Indexing schedule &Tidsplan for Indeksering E&xternal index dialog E&ksterne indekser &Index configuration &Konfiguration for Indeks &GUI configuration &Konfiguration for GUI &Results &Resultater Sort by date, oldest first Sorter efter dato, ældste først Sort by date, newest first Sorter efter dato, nyeste først Show as table Vis som tabel Show results in a spreadsheet-like table Vis resultater i en regneark-lignende tabel Save as CSV (spreadsheet) file Gem som CSV (regneark) fil Saves the result into a file which you can load in a spreadsheet Gemmer resultatet i en fil, som du kan indlæse i et regneark Next Page Næste side Previous Page Forrige side First Page Første side Query Fragments Forespørgsel efter fragmenter With failed files retrying Forsøg igen med filer der mislykkedes Next update will retry previously failed files Næste opdatering vil igen forsøge med filer, der tidligere mislykkedes Indexing &schedule Tid&splan for Indeksering Enable synonyms Aktiver synonymer Save last query Gem sidste forespørgsel Load saved query Indlæs gemte forespørgsel Special Indexing Særlig indeksering Indexing with special options Indeksering med særlige indstillinger &View &Vis Missing &helpers Manglende &hjælpere Indexed &MIME types Indekserede &MIME-typer Index &statistics Indeks&statistik Webcache Editor Rediger webcache Trigger incremental pass RclTrayIcon Restore Gendan Quit Afslut RecollModel Abstract Sammendrag Author Forfatter Document size Dokumentets størrelse Document date Dokumentets dato File size Filstørrelse File name Filnavn File date Fildato Keywords Nøgleord Original character set Originale tegnsæt Relevancy rating Relevans bedømmelse Title Titel URL URL Mtime Mtid Date Dato Date and time Dato og tid Ipath Ipath MIME type MIME-type Can't sort by inverse relevance ResList Result list Resultatliste (show query) (vis forespørgsel) Document history Dokumenthistorik <p><b>No results found</b><br> <p><b>Ingen resultater fundet</b><br> Previous Forrige Next Næste Unavailable document Dokument ikke tilgængelig Preview Forhåndsvisning Open Åbn <p><i>Alternate spellings (accents suppressed): </i> <p><i>Alternative stavemåder (accenter undertrykt): </i> Documents Dokumenter out of at least ud af mindst for for 
<p><i>Alternate spellings: </i> <p><i>Alternative stavemåder: </i> Result count (est.) Optælling af resultat (est.) Query details Detaljer i Forespørgsel Snippets Tekststumper ResTable &Reset sort &Nulstil sortering &Delete column &Slet kolonne Save table to CSV file Gem tabel til CSV-fil Can't open/create file: Kan ikke åbne/oprette fil: &Save as CSV &Gem som CSV Add "%1" column Tilføj "%1" kolonne ResultPopup &Preview &Forhåndsvisning &Open &Åbn Copy &File Name Kopier &Filnavn Copy &URL Kopier &URL &Write to File &Skriv til fil Save selection to files Gem det valgte til filer Preview P&arent document/folder Forhåndsvis &Forældre-dokument/mappe &Open Parent document/folder &Åbn Forældre-dokument/mappe Find &similar documents Find &lignende dokumenter Open &Snippets window Åbn vindue til &tekststumper Show subdocuments / attachments Vis underdokumenter / vedhæftede filer Open With Åbn med Run Script Kør skript SSearch Any term Vilkårlig ord All terms Alle ord File name Filnavn Query language Forespørgselssprog Bad query string Forkert forespørgselsstreng Out of memory Ikke mere hukommelse Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Indtast forespørgselssprogudtryk. Snydeark:<br> <i>ord1 ord2</i> : 'ord1' og 'ord2' i et hvilken som helst felt.<br> <i>felt:ord1</i> : 'ord1' i feltet 'felt'.<br> Standard feltnavne/synonymer:<br> titel/emne/billedtekst, forfatter/fra, modtager/til, filnavn, ekst.<br> Pseudofelter: dir, mime/format, type/rclcat, dato.<br> To datointerval-eksempler: 2009-03-01/2009-05-20 2009-03-01/P2M:<br>. <i>ord1 ord2 ELLER ord3</i>: ord1 OG (ord2 ELLER ord3).<br> Ingen egentlige parenteser er tilladt.<br> <i>"ord1 ord2"</i> : frase (skal forekomme nøjagtigt). Mulige modifikatorer:<br> <i>"ord1 ord2"p </i> : uordnet nærheds-søgning med standard afstand.<br> Brug <b>Vis Forespørgsel</b> link når i tvivl om resultatet og se manual (&lt;F1>) for flere detaljer. Enter file name wildcard expression. Indtast filnavn jokertegn udtryk. Enter search terms here. Type ESC SPC for completions of current term. Indtast søgeord her. Tast ESC SPC for færdiggørelse af nuværende ord. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). 
Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Indtast forespørgselssprogets udtryk. Snydeark:<br> <i>ord1 ord2</i> : 'ord1' og 'ord2' i et hvilken som helst felt.<br> <i>felt:ord1</i> : 'ord1' i feltet 'felt'.<br> Standard feltnavne/synonymer:<br> titel/emne/billedtekst, forfatter/fra, modtager/til, filnavn, ekst.<br> Pseudofelter: dir, mime/format, type/rclcat, dato, størrelse.<br> To datointerval-eksempler: 2009-03-01/2009-05-20 2009-03-01/P2M:<br>. <i>ord1 ord2 OR ord3</i>: ord1 AND (ord2 OR ord3).<br> Du kan bruge parenteser for at gøre tingene klarere.<br> <i>"ord1 ord2"</i> : frase (skal forekomme nøjagtigt). Mulige modifikatorer:<br> <i>"ord1 ord2"p </i> : uordnet nærheds-søgning med standard afstand.<br> Brug <b>Vis Forespørgsel</b> link når i tvivl om resultatet og se manual (&lt;F1>) for flere detaljer. Stemming languages for stored query: Ordstammer til sprogene for gemte forespørgsel: differ from current preferences (kept) adskiller sig fra de nuværende præferencer (beholdt) Auto suffixes for stored query: Automatiske suffikser for gemte forespørgsel: External indexes for stored query: Eksterne Indekser for gemte forespørgsel: Autophrase is set but it was unset for stored query Autofrase er aktiveret, men var deaktiveret for gemte forespørgsel Autophrase is unset but it was set for stored query Autofrase er deaktiveret, men var aktiveret for gemte forespørgsel Enter search terms here. SSearchBase SSearchBase SSøgeBase Clear Ryd Ctrl+S Ctrl+S Erase search entry Slet søgeindgang Search Søg Start query Start forespørgsel Enter search terms here. Type ESC SPC for completions of current term. Indtast søgeord her. Type ESC SPC for færdiggørelse af nuværende ord. Choose search type. Vælg søgetype. Show query history SearchClauseW Select the type of query that will be performed with the words Vælg den type forespørgsel, der vil blive udført med ordene Number of additional words that may be interspersed with the chosen ones Antal yderligere ord, der kan være blandet med de udvalgte No field Intet felt Any Vilkårlig All Alle None Ingen Phrase Frase Proximity Nærhed File name Filnavn Snippets Snippets Tekststumper Find: Find: Next Næste Prev Forrige SnippetsW Search Søg <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> <p>Desværre blev der ikke, inden for rimelige grænser, fundet en nøjagtig match. Sandsynligvis fordi dokumentet er meget stort, så tekststump-generatoren for vild i mængden...</ p> Sort By Relevance Sort By Page SpecIdxW Special Indexing Særlig indeksering Do not retry previously failed files. Forsøg ikke igen med filer, der tidligere mislykkedes. Else only modified or failed files will be processed. Ellers vil kun ændrede eller mislykkede filer blive behandlet. Erase selected files data before indexing. Slet udvalgte filers data, før indeksering. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Mappe for rekursiv indeksering. Dette skal være indenfor det regulære indekserede område<br> som defineret i konfigurationsfilen (øverste mapper). Browse Gennemse Start directory (else use regular topdirs): Startmappe (ellers brug de regulære øverste mapper): Leave empty to select all files. 
You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Lad stå tomt for at vælge alle filer. Du kan bruge adskillige mellemrums-adskilte shell-type mønstre.<br>Mønstre med indlejrede mellemrum skal citeres med dobbelte anførselstegn.<br>Kan kun bruges, hvis startmålet er angivet. Selection patterns: Mønstre for udvælgelse: Top indexed entity Top indekserede enhed Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Søg efter ord &Expand &Udvid Alt+E Alt+E &Close &Luk Alt+C Alt+C No db info. Ingen dbinfo. Match Sammenlign Case Stor/Små bogstaver Accents Accenter SpellW Wildcards Jokertegn Regexp Regex Stem expansion Udvidelse af stamme Spelling/Phonetic Stavning/Fonetisk error retrieving stemming languages fejl under hentning af ordstammer for sprogene Aspell init failed. Aspell not installed? Aspell init mislykkedes. Aspell ikke installeret? Aspell expansion error. Aspell udvidelsesfejl. No expansion found Ingen udvidelse fundet Term Ord Doc. / Tot. Dok. / Tot. Index: %1 documents, average length %2 terms.%3 results Index: %1 dokumenter, gennemsnitslængde %2 ord %3 resultater %1 results %1 resultater List was truncated alphabetically, some frequent Liste blev afkortet alfabetisk, nogle ofte terms may be missing. Try using a longer root. Der kan mangle ord. Prøv at bruge en længere rod. Show index statistics Vis statistik for indeks Number of documents Antal dokumenter Average terms per document Gennemsnitlige ord pr dokument Smallest document length Mindste dokumentlængde Longest document length Længste dokumentlængde Database directory size Mappestørrelse for database MIME types: MIME-typer: Item Element Value Værdi Smallest document length (terms) Mindste dokumentlængde (ord) Longest document length (terms) Længste dokumentlængde (ord) Results from last indexing: Resultater fra sidste indeksering: Documents created/updated Dokumenter oprettet/opdateret Files tested Filer testet Unindexed files ikke-indekserede filer List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages fejl under hentning af ordstammer for sprogene The selected directory does not appear to be a Xapian index Den valgte mappe synes ikke at være et Xapianindeks This is the main/local index! Dette er hoved/lokal indekset! 
The selected directory is already in the index list Den valgte mappe er allerede i indekslisten Choose Vælg Result list paragraph format (erase all to reset to default) Afsnitformat for resultatliste (slet alt for at nulstille til standard) Result list header (default is empty) Overskrift for resultatliste (standard er tom) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Vælg recoll konfigmappe eller xapian indeksmappe (f.eks: /home/me/.recoll eller /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read Den valgte mappe ligner en Recoll konfigurationmappe, men konfigurationen kunne ikke læses At most one index should be selected Der burde vælges højst et indeks Cant add index with different case/diacritics stripping option Kan ikke tilføje indeks med en anden indstilling for fjernelse af store-bogstaver/diakritiske tegn Default QtWebkit font Standard skrifttype for QtWebkit Any term Vilkårlig ord All terms Alle ord File name Filnavn Query language Forespørgselssprog Value from previous program exit Værdi fra tidligere programafslutning ViewAction Command Kommando MIME type MIME-type Desktop Default Desktop standard Changing entries with different current values Ændrer indgange med forskellige aktuelle værdier ViewActionBase Native Viewers Oprindelige fremvisere Close Luk Select one or several mime types then use the controls in the bottom frame to change how they are processed. Vælg en eller flere Mime-typer og brug derefter knapperne i bundrammen til at ændre, hvordan de behandles. Use Desktop preferences by default Brug indstillinger for Desktop som standard Select one or several file types, then use the controls in the frame below to change how they are processed Vælg en eller flere filtyper, og brug derefter knapperne i rammen nedenfor for at ændre, hvordan de behandles Exception to Desktop preferences Undtagelse til indstillinger for Desktop Action (empty -> recoll default) Handling (tom -> recoll standard) Apply to current selection Anvend på aktuelle udvalg Recoll action: Recoll handling: current value aktuelle værdi Select same Vælg det samme <b>New Values:</b> <b>Nye værdier:</b> Webcache Webcache editor Rediger webcache Search regexp Regex søgning WebcacheEdit Copy URL Kopier URL Unknown indexer state. Can't edit webcache file. Indeksering i ukendt tilstand. Kan ikke redigere webcachefil. Indexer is running. Can't edit webcache file. Indeksering kører. Kan ikke redigere webcachefil. Delete selection Slet det valgte Webcache was modified, you will need to run the indexer after closing this window. WebCache blev ændret, du er nød til at køre indeksering efter lukning af dette vindue. WebcacheModel MIME MIME Url Url confgui::ConfBeaglePanelW Entries will be recycled once the size is reached Indgangene vil blive genbrugt, når størrelsen er nået Web page store directory name Mappenavn for lageret til Websider The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Navnet på en mappe hvor du vil gemme kopier af besøgte websider.<br>En relativ sti er taget i forhold til konfigurationsmappen. Max. size for the web store (MB) Max. 
størrelse til web-lager (MB) Process the WEB history queue Behandl køen for WEB-historik Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Aktiverer indeksering af sider besøgt af Firefox.<br>(Du skal også installere Firefox Recoll plugin) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Indgangene vil blive genbrugt, når størrelsen er nået.<br>Kun en øgning af størrelsen giver god mening, da en reducering af værdien ikke vil afkorte en eksisterende fil (kun spildplads i slutningen). confgui::ConfIndexW Can't write configuration file Kan ikke skrive konfigurationsfil confgui::ConfParamFNW Choose Vælg confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSearchPanelW Automatic diacritics sensitivity Automatisk følsomhed over for diakritiske tegn <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Udløser automatisk følsomhed over for diakritiske tegn, hvis søgeordet har accent tegn (ikke i unac_except_trans). Ellers er du nød til bruge forespørgselssproget og <i>D</i> modifikatoren, for at angive følsomhed over for diakritiske tegn. Automatic character case sensitivity Automatisk følsomhed over for store/små bogstaver <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Udløser automatisk følsomhed over for store/små bogstaver, hvis indgangen har store bogstaver i andet end den første position. Ellers er du nød til bruge forespørgselssproget og <i>C</i> modifikatoren, for at angive følsomhed over for store/små bogstaver. Maximum term expansion count Maksimale antal ordudvidelser <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Maksimal antal udvidelser-for et enkelt ord (fx: når der bruges jokertegn). Standarden på 10 000 er rimeligt og vil undgå forespørgsler, der synes at fryse mens motoren arbejder sig igennem ordlisten. Maximum Xapian clauses count Maksimale antal Xapiansætninger <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Maksimalt antal grundlæggende sætninger vi føjer til en enkel Xapian forespørgsel. I nogle tilfælde kan resultatet af ordudvidelse være multiplikativ, og vi ønsker at undgå at bruge overdreven hukommelse. Standarden på 100 000 bør være både høj nok i de fleste tilfælde og kompatibel med de nuværende typiske hardware konfigurationer. confgui::ConfSubPanelW Global Global Max. compressed file size (KB) Maks. komprimeret filstørrelse (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Denne værdi angiver en grænse for, hvornår komprimerede filer ikke vil blive behandlet. 
Indstil til -1 for ingen grænse, til 0 for ingen dekomprimering nogensinde. Max. text file size (MB) Maks. størrelse på tekstfil (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Denne værdi angiver en grænse for, hvornår tekstfiler ikke vil blive behandlet. Indstil til -1 for ingen grænse. Dette er for at udelukke monster logfiler fra indekset. Text file page size (KB) Sidestørrelse på tekstfil (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Hvis denne værdi er angivet (ikke lig med -1), vil tekstfiler opdeles i bidder af denne størrelse for indeksering. Dette vil hjælpe søgning i meget store tekstfiler (dvs.: log-filer). Max. filter exec. time (S) Maks. udførelsestid for filtre (S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Eksterne filtre der arbejder længere end dette vil blive afbrudt. Dette er for det sjældne tilfælde (dvs.: postscript) hvor et dokument kan forårsage, at et filter laver et loop. Indstil til -1 for ingen grænse. Only mime types Kun mime-typer An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive En eksklusiv liste over indekserede MIME-typer.<br>Intet andet vil blive indekseret. Normalt tom og inaktiv Exclude mime types Udeluk mime-typer Mime types not to be indexed Mime-typer der ikke skal indekseres confgui::ConfTopPanelW Top directories Øverste mapper The list of directories where recursive indexing starts. Default: your home. Listen over mapper hvor rekursiv indeksering starter. Standard: din hjemme-mappe (home). Skipped paths Udeladte stier These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Dette er navne på mapper, som indeksering ikke går ind i.<br>Kan indeholde jokertegn. Skal stemme overens med stierne, som de ses af indekseringsprogrammet (dvs. hvis de øverste mapper omfatter '/home/mig' og '/home' er et link til '/usr/home', en korrekt udeladtSti indgang ville være '/home/mig/tmp * ', ikke '/usr/home/mig/tmp * ') Stemming languages Ordstammer for sprogene The languages for which stemming expansion<br>dictionaries will be built. De sprog, hvor ordstamme-udvidelses<br>ordbøger vil blive bygget. Log file name Navn på logfil The file where the messages will be written.<br>Use 'stderr' for terminal output Filen hvor meddelelser vil blive skrevet.<br>Brug 'stderr' for terminal output Log verbosity level Log informationsniveau This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Denne værdi justerer mængden af meddelelser,<br>fra kun fejl til en masse fejlretningsdata. Index flush megabytes interval Megabyte interval for skrivning af Index This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Denne værdi justere mængden af data, der er indekseret mellem skrivning til disken.<br>Dette hjælper med at kontrollere indekseringsprogrammets brug af hukommelse. 
Standard 10MB Max disk occupation (%) Maks brug af disk (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Dette er den procentdel af diskforbrug hvor indeksering vil mislykkes, og stoppe (for at undgå at fylde dit disk).<br>0 betyder ingen grænse (dette er standard). No aspell usage Brug ikke aspell Aspell language Aspell sprog Database directory name Databasens mappenavn Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deaktiver brug af aspell til at generere stavnings-tilnærmelse i værktøj for søgning efter ord. <br> Nyttigt hvis aspell er fraværende eller ikke virker. The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Sproget for aspell ordbog. Det skal se ud som "en" eller "fr" ...<br>Hvis denne værdi ikke er angivet, så vil NLS omgivelser blive brugt til at finde det, det fungerer normalt. For at få en idé om, hvad der er installeret på dit system, kan du skrive 'aspell konfig "og se efter .dat filer inde i 'data-dir' mappen. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Navnet på en mappe hvor du vil gemme indekset<br>En relativ sti er taget i forhold til konfigurationsmappen. Standard er "xapiandb. Unac exceptions Unac-undtagelser <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Disse er undtagelser fra unac mekanismen, der, som standard, fjerner alle diakritiske tegn, og udfører kanonisk nedbrydning. Du kan tilsidesætte fjernelse af accent for nogle tegn, afhængigt af dit sprog, og angive yderligere nedbrydninger, f.eks. for ligaturer. I hver indgang adskilt af mellemrum, er det første tegn kildedelen, og resten er oversættelsen. uiPrefsDialogBase User preferences Brugerindstillinger User interface brugergrænseflade Number of entries in a result page Antal indgange i en resultatside If checked, results with the same content under different names will only be shown once. Afkryds forårsager, at resultater med samme indhold under forskellige navne kun bliver rapporteret en gang. Hide duplicate results. Skjul identiske resultater. Highlight color for query terms Farve for fremhævning af søgeord Result list font Skrifttype for resultatliste Opens a dialog to select the result list font Åbner et vindue til at vælge resultatlistens skrifttype Helvetica-10 Helvetica-10 Resets the result list font to the system default Nulstiller resultatlistens skrifttype til systemets standard Reset Nulstil Texts over this size will not be highlighted in preview (too slow). Tekster over denne størrelse vil ikke blive fremhævet i forhåndsvisning (for langsom). 
Maximum text size highlighted for preview (megabytes) Maksimal tekststørrelse der fremhæves for forhåndsvisning (megabyte) Choose editor applications Vælg redigeringsprogrammer Auto-start simple search on whitespace entry. Autostart simpel søgning ved blanktegn. Start with advanced search dialog open. Start med åbent avanceret søgevindue. Remember sort activation state. Husk sorteringens aktiveringstilstand. Prefer Html to plain text for preview. Foretræk Html til almindelig tekst for forhåndsvisning. Search parameters Søgeparametre Stemming language Ordstammer for sprog A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. En søgning efter [Rullende Sten] (2 ord) vil blive ændret til [rullende eller sten eller (rullende frase 2 sten)]. Dette skulle give højere forrang til resultaterne, hvor søgeordene vises nøjagtigt som angivet. Automatically add phrase to simple searches Tilføj automatisk frase til simple søgninger Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Skal vi forsøge at lave sammendrag af indgange til resultatliste ved at bruge sammenhænget med forespørgselsordene? Kan være langsomt for store dokumenter. Dynamically build abstracts Lav dynamisk sammendrag Do we synthetize an abstract even if the document seemed to have one? Skal vi sammenfatte et sammendrag, selvom dokumentet synes at have et? Replace abstracts from documents Erstat sammendrag fra dokumenter Synthetic abstract size (characters) Størrelse på det genererede sammendrag (tegn) Synthetic abstract context words Sammenhængende ord for det genererede sammendrag The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Ordene på listen bliver automatisk vendt til ext: xxx sætninger i forespørgselssprogets indgang. Query language magic file name suffixes. Forespørgselssprogets magiske filnavnendelser. Enable Aktiver External Indexes Eksterne Indekser Toggle selected Skift det valgte Activate All Aktiver alle Deactivate All Deaktiver alle Remove from list. This has no effect on the disk index. Fjern fra listen. Dette har ingen virkning på indeks på disken. Remove selected Fjern valgte Add index Tilføj index Apply changes Anvend ændringer &OK &OK Discard changes Kassere ændringer &Cancel &Annuller Abstract snippet separator Separator mellem sammendragets tekststumper Style sheet Stilark Opens a dialog to select the style sheet file Åbn et vindue for at vælge stilark-filen Choose Vælg Resets the style sheet to default Nulstil stilark til standard Result List Resultatliste Edit result paragraph format string Rediger formatstreng for resultatafsnit Edit result page html header insert Rediger kode for indsætnig i html-hoved for resultatside Date format (strftime(3)) Datoformat (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Hyppighedens procentvise tærskel, hvorover vi ikke bruger ord inde i autofrase. Hyppige ord er et stort problem for ydeevnen med fraser. Udeladte ord forøger frase stilstand, og reducere effektiviteten af autofrase. Standardværdien er 2 (procent). 
Autophrase term frequency threshold percentage Tærskelprocentsats for ordhyppighed ved autofrase Plain text to HTML line style Almindelig tekst til HTML linjetype Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Linjer i PRE tekst ombrydes ikke. Brug af BR mister en del indrykning. PRE + Wrap stil kunne være, hvad du ønsker. <BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE> + wrap Disable Qt autocompletion in search entry. Deaktiver Qt autofuldførelse i søgeindgange. Search as you type. Søg mens du skriver. Paths translations Oversættelser af stier Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Klik for at tilføje endnu en indeksmappe til listen. Du kan vælge enten en Recoll konfigurationsmappe eller et Xapianindeks. Snippets window CSS file CSS-fil for vindue til tekststumper Opens a dialog to select the Snippets window CSS style sheet file Åbner et vindue til at vælge CSS stilark-fil for vinduet til tekststumper Resets the Snippets window style Nulstil stilen for vinduet til tekststumper Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Bestemmer om dokumentfiltre er vist som radioknapper, værktøjslinje kombinationsfelt eller menu. Document filter choice style: Valgmetode for dokumentfilter: Buttons Panel Panel med knapper Toolbar Combobox værktøjslinje kombinationsfelt Menu Menu Show system tray icon. Vis statusikon. Close to tray instead of exiting. Luk til systembakke i stedet for at afslutte. Start with simple search mode Start med enkel søgetilstand User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Brugerstil der skal anvendes på vinduet til tekststumper.<br>Bemærk: Det færdige sidehoved-indstik er også inkluderet i tekststumper-vinduets hoved. Synonyms file Synonymer-fil Show warning when opening temporary file. Vis advarsel, når der åbnes en midlertidig fil. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_fr.qm0000644000175000017500000036201113566716312014237 00000000000000
Kj> W/ X- X0u h n  R ( Σ*c rr cC ٷ/ ۷/  ?K  Vd^ 4  Yg `+ 'И& +bC .ʢ9P / 4 97 9ɝ { J%T L*DY P֙% RVh T#ȍ V4c \iC ]1 `F ' hD v {l !Y !YB^ W4 |  3 Ҭ\ })  i' z  u@  ~K  N j8 m= + #D 'RV ,f -= 7Q 8 F OE$ X^c\ ] ] ^7o ^h u03 yD y~ 3 ȩ uVw uz Pw Po 5d  z- 7 i  Ւ~N H z Q53 £t q#ZѾ-'{%n.ʢ`/.O3U6!8brL91<Q~bS$Y~s%[s7\{O`ke3-Ug3-hx5zp~ F5!cmEhc ^|3|JB' l"6*Lliϐ$Toutes les clauses All clauses AdvSearchUne des clauses Any clause AdvSearchSuffixe multiplicateur incorrect dans un filtre de taille (k/m/g/t)$Bad multiplier suffix in size filter AdvSearchmultimdiamedia AdvSearchmessagemessage AdvSearch autresother AdvSearchprsentation presentation AdvSearch"feuille de calcul spreadsheet AdvSearch$feuilles de calcul spreadsheets AdvSearch textetext AdvSearch textestexts AdvSearch<----- Tout <----- All AdvSearchBase<----- Sel <----- Sel AdvSearchBase$Ajouter une clause Add clause AdvSearchBase"Recherche avanceAdvanced search AdvSearchBaseTout ----> All ----> AdvSearchBaseTous les champs de droite non vides seront combins par une conjonction ET (choix "Toutes les clauses") ou OU (choix "Une des clauses"). <br> Les champs de type "Un de ces mots", "Tous ces mots" et "Aucun de ces mots" acceptent un mlange de mots et de phrases contenues dans des apostrophes "une phrase".<br>Les champs non renseigns sont ignors.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBaseParcourirBrowse AdvSearchBasePar catgories By categories AdvSearchBaseZCocher pour activer le filtrage sur les dates'Check this to enable filtering on dates AdvSearchBasenCocher pour permettre le filtrage des types de fichiers,Check this to enable filtering on file types AdvSearchBasebCocher pour activer le fitrage sur taille fichier'Check this to enable filtering on sizes AdvSearchBaseCocher pour utiliser les catgories de fichiers au lieu des types mimes;Check this to use file categories instead of raw mime types AdvSearchBase FermerClose AdvSearchBase$Enlever une clause Delete clause AdvSearchBaseXEntrer le rpertoire o dmarre la rechercheEnter top directory for search AdvSearchBaseFiltrerFilter AdvSearchBase*Filtrer sur les dates Filter dates AdvSearchBase&Filtrer les tailles Filter sizes AdvSearchBaseTrouverFind AdvSearchBase partir deFrom AdvSearchBase2Types de fichiers ignorsIgnored file types AdvSearchBaseInverserInvert AdvSearchBaseTaille Max Max. Size AdvSearchBaseTaille Maximum. Vous pouvez utiliser un suffixe multiplicateur : k/K, m/M, g/G4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseTaille Min Min. Size AdvSearchBaseTaille minimum. Vous pouvez utiliser un suffixe multiplicateur : k/K, m/M, g/G4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase@Restreindre les types de fichierRestrict file types AdvSearchBasetRestreindre les rsultats aux fichiers de l'arborescence :%Restrict results to files in subtree: AdvSearchBase8Sauver comme valeur initialeSave as default AdvSearchBaseVRechercher les <br>documents<br>vrifiant :'Search for
documents
satisfying: AdvSearchBase6Types de fichier recherchsSearched file types AdvSearchBaseSel -----> Sel -----> AdvSearchBase&Lancer la recherche Start Search AdvSearchBaseJusqu'To AdvSearchBase <p>Activer automatiquement la sensibilit aux majuscules si le terme de recherche contient des majuscules (sauf en premire lettre). Sans cette option, vous devez utiliser le langage de recherche et le drapeau <i>C</i> pour activer la sensibilit aux majuscules.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity. ConfIndexW<p>Activer automatiquement la sensibilit aux accents si le terme recherch contient des accents (saufs pour ceux de unac_except_trans). Sans cette option, il vous faut utiliser le langage de recherche et le drapeau <i>D</i> pour activer la sensibilit aux accents.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity. ConfIndexW$<p>Nombre maximum de termes de recherche rsultant d'un terme entr (par exemple expansion par caractres jokers). La valeur par dfaut de 10000 est raisonnable et vitera les requtes qui paraissent bloques pendant que le moteur parcourt l'ensemble de la liste des termes.

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. ConfIndexWr<p>Nombre maximum de clauses Xapian lmentaires gnres pour une requte. Dans certains cas, le rsultat de l'expansion des termes peut ere multiplicatif, et utiliserait trop de mmoire. La valeur par dfaut de 100000 devrait tre la fois suffisante et compatible avec les configurations matrielles typiques.5

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfIndexW<p>Ce sont les exceptions au mcanisme de suppression des accents, qui, par dfaut et en fonction de la configuration de l'index, supprime tous les accents et effectue une dcomposition canonique Unicode. Vous pouvez inhiber la suppression des accents pour certains caractres, en fonction de votre langue, et prciser d'autres dcompositions, par exemple pour des ligatures. Dans la liste spare par des espaces, le premier caractres d'un lment est la source, le reste est la traduction.l

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. ConfIndexW$Langue pour aspellAspell language ConfIndexWLSensibilit automatique aux majuscules$Automatic character case sensitivity ConfIndexWFSensibilit automatique aux accents Automatic diacritics sensitivity ConfIndexW^Impossible d'crire le fichier de configurationCan't write configuration file ConfIndexWBRpertoire de stockage de l'indexDatabase directory name ConfIndexW Dsactiver l'utilisation d'aspell pour gnrer les approximations orthographiques.<br> Utile si aspell n'est pas install ou ne fonctionne pas. Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexWDDisk full threshold to stop indexing
(e.g. 90%, 0 means no limit) ConfIndexWPermet d'indexer les pages Web visites avec Firefox <br>(il vous faut galement installer l'extension Recoll pour Firefox)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin) ConfIndexWLes entres seront recycles quand la taille sera atteinte.<br>Seule l'augmentation de la taille a un sens parce que rduire la valeur ne tronquera pas un fichier existant (mais gachera de l'espace la fin).Entries will be recycled once the size is reached.
Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). ConfIndexW$Paramtres globauxGlobal parameters ConfIndexW\Intervalle d'criture de l'index en mgaoctetsIndex flush megabytes interval ConfIndexW"Paramtres locauxLocal parameters ConfIndexW,Nom du fichier journal Log file name ConfIndexW&Niveau de verbositLog verbosity level ConfIndexWLTaille maximale pour le cache Web (Mo) Max. size for the web store (MB) ConfIndexW@Compte maximum de clauses XapianMaximum Xapian clauses count ConfIndexWPTaille maximum de l'expansion d'un termeMaximum term expansion count ConfIndexW4Pas d'utilisation d'aspellNo aspell usage ConfIndexW:Traiter la file des pages WEBProcess the WEB history queue ConfIndexW8Paramtres pour la rechercheSearch parameters ConfIndexWChemins ignors Skipped paths ConfIndexWDLangue pour l'expansion des termesStemming languages ConfIndexWLe nom du fichier ou les messages seront ecrits.<br>Utiliser 'stderr' pour le terminalPThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWLangue pour le dictionnaire aspell. La valeur devrait ressembler 'en' ou 'fr'... <br>Si cette valeur n'est pas positionne, l'environnement national sera utilis pour la calculer, ce qui marche bien habituellement. Pour avoir une liste des valeurs possibles sur votre systme, entrer 'aspell config' sur une ligne de commande et regarder les fichiers '.dat' dans le rpertoire 'data-dir'. 3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.  ConfIndexWLes langages pour lesquels les dictionnaires d'expansion<br>des termes seront construits.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWLa liste des rpertoires o l'indexation rcursive dmarre. Dfault: votre rpertoire par dfaut.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexWLe nom d'un rpertoire o stocker les copies des pages visites.<br>Un chemin relatif se rfre au rpertoire de configuration.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexWBLe nom d'un rpertoire pour stocker l'index<br>Un chemin relatif sera interprt par rapport au rpertoire de configuration. La valeur par dfaut est 'xapiandb'.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. ConfIndexWCe sont les chemins des rpertoires o l'indexation n'ira pas.<br>Les lments peuvent contenir des caractres joker. Les entrs doivent correspondre aux chemins vus par l'indexeur (ex.: si topdirs comprend '/home/me' et que '/home' est en fait un lien vers '/usr/home', un lment correct pour skippedPaths serait '/home/me/tmp*', et non '/usr/home/me/tmp*')BThese are pathnames of directories which indexing will not enter.
Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') ConfIndexW`C'est le pourcentage d'utilisation disque - utilisation totale, et non taille de l'index - o l'indexation s'arrtera en erreur.<br>La valeur par dfaut de 0 dsactive ce test.This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.
The default value of 0 removes any limit. ConfIndexWAjuste la quantit de donnes lues entre les critures sur disque.<br>Contrle l'utilisation de la mmoire. Dfaut 10 Mo This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWCette valeur ajuste la quantite de messages emis,<br>depuis uniquement les erreurs jusqu'a beaucoup de donnees de debug.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexW*Rpertoires de dpartTop directories ConfIndexWExceptions UnacUnac exceptions ConfIndexWHRpertoire de stockage des pages WEBWeb page store directory name ConfIndexWUne liste exclusive des types MIME indexer.<br>Rien d'autre ne sera index. Normalement vide et inactifeAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactive ConfSubPanelWTypes exclusExclude mime types ConfSubPanelWUn filtre externe qui prend plus de temps sera arrt. Traite le cas rare (possible avec postscript par exemple) o un document pourrait amener un filtre boucler sans fin. Mettre -1 pour compltement supprimer la limite (dconseill).External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.  ConfSubPanelW GlobalGlobal ConfSubPanelWSi cette valeur est spcifie et positive, les fichiers de texte pur seront dcoups en tranches de cette taille pour l'indexation. Ceci diminue les ressources consommes par l'indexation et aide le chargement pour prvisualisation.If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWhTaille maximale pour les fichiers dcomprimer (ko)Max. compressed file size (KB) ConfSubPanelWXTemps d'excution maximum pour un filtre (s)Max. filter exec. time (s) ConfSubPanelWNTaille maximale d'un fichier texte (Mo)Max. text file size (MB) ConfSubPanelW6Types MIME ne pas indexerMime types not to be indexed ConfSubPanelW&Seulement ces typesOnly mime types ConfSubPanelWdTaille de page pour les fichiers de texte pur (ko)Text file page size (KB) ConfSubPanelW^Cette valeur dfinit un seuil au del duquel les fichiers comprims ne seront pas traits. Utiliser -1 pour dsactiver la limitation, 0 pour ne traiter aucun fichier comprim.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelWjCette valeur est un seuil au del duquel les fichiers de texte pur ne seront pas indexs. Spcifier -1 pour supprimer la limite. Utilis pour viter d'indexer des fichiers monstres.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelWr<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>: planification de l'indexation priodique (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Chaque champ peut contenir un joker (*), une simple valeur numrique , des listes ponctues par des virgules (1,3,5) et des intervalles (1-7). 
Plus gnralement, les champs seront utiliss <span style=" font-style:italic;">tels quels</span> dans le fichier crontab, et la syntaxe gnrale crontab peut tre utilise, voir la page de manuel crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Par exemple, en entrant <span style=" font-family:'Courier New,courier';">*</span> dans <span style=" font-style:italic;">Jours, </span><span style=" font-family:'Courier New,courier';">12,19</span> dans <span style=" font-style:italic;">Heures</span> et <span style=" font-family:'Courier New,courier';">15</span> dans <span style=" font-style:italic;">Minutes</span>, recollindex dmarrerait chaque jour 12:15 et 19:15</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Un planning avec des activations trs frquentes est probablement moins efficace que l'indexation au fil de l'eau.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 PM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolWl<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cliquer <span style=" font-style:italic;">Dsactiver</span> pour arrter l'indexation automatique priodique, <span style=" font-style:italic;">Activer</span> pour la dmarrer, <span style=" font-style:italic;">Annuler</span> pour ne rien changer.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWDialogue Cron Cron Dialog CronToolWpJours de la semaine (* ou 0-7, 0 ou 7 signifie Dimanche))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWDsactiverDisable CronToolWActiverEnable CronToolWErreur durant l'installation de l'entre cron. Mauvaise syntaxe des champs ?3Error installing cron entry. Bad syntax in fields ? CronToolW$Heures (* ou 0-23)Hours (* or 0-23) CronToolWIl semble que des entres cres manuellement existent pour recollindex. Impossible dditer le fichier CronPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinutes (0-59)Minutes (0-59) CronToolWDialogueDialog EditDialogErreur config Config error EditTransChemin local Local path EditTransChemin Originel Original path EditTransChemin source Source path EditTransAjouterAdd EditTransBaseAnnulerCancel EditTransBaseSupprimerDelete EditTransBase,Traductions de cheminsPath Translations EditTransBaseSauvegarderSave EditTransBaseSlectionner un ou plusieurs types de fichiers, puis utiliser les contrles dans le cadre ci-dessous pour changer leur traitementkSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBaseTAjustement des traductions de chemins pourSetting path translations for  EditTransBase X<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Il semble que l'index pour cette configuration n'existe pas encore.</span><br /><br />Si vous voulez simplement indexer votre rpertoire avec un jeu raisonnable de valeurs par dfaut, cliquer le bouton <span style=" font-style:italic;">Dmarrer l'indexation maintenant</span>. Vous pourrez ajuster les dtails plus tard. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Si vous voulez plus de contrle, utilisez les liens qui suivent pour ajuster la configuration et le planning d'indexation.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ces outils peuvent tre accds plus tard partir du menu <span style=" font-style:italic;">Preferences</span>.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialogJParamtrage de la premire indexationFirst indexing setupFirstIdxDialogIndexationIndexing configurationFirstIdxDialog0Planning de l'indexationIndexing scheduleFirstIdxDialog@Dmarrer l'indexation maintenantStart indexing nowFirstIdxDialogjVous pourrez ajuster les rpertoires que vous voulez indexer, et d'autres paramtres comme les schmas de noms ou chemins de fichiers exclus, les jeux de caractres par dfaut, etc.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialog*Vous pourrez choisir entre l'indexation intervalles fixes ou au fil de l'eau, et dfinir un planning pour la premire (bas sur l'utilitaire cron).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog%1 non trouv %1 not found.FragButs%1 : %2%1: %2FragButs,Fragments de rechercheQuery FragmentsFragButs J<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">L'indexation <span style=" font-weight:600;">Recoll</span> peut fonctionner en permanence, traitant les fichiers ds qu'ils sont modifis, ou tre excute des moments prdtermins. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Une lecture du manuel peut vous aider choisir entre ces approches (presser F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cet outil peut vous aider planifier l'indexation priodique, ou configurer un dmarrage automatique de l'indexation au fil de l'eau quand vous vous connectez (ou les deux, ce qui est rarement pertinent). </p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedWPlanning CronCron scheduling IdxSchedWDterminer si l'indexation au fil de l'eau dmarre quand vous vous connectez (pour l'index par dfaut).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedWHParamtrage du planning d'indexationIndex scheduling setup IdxSchedWRDmarrage de l'indexation au fil de l'eauReal time indexing start up IdxSchedWLe dialogue vous permettra de dterminer quelle heure l'indexation devra dmarrer et installera une entre crontab._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedWDialogueDialog ListDialogGroupBoxGroupBox ListDialogbLe fichier "history" est corrompu. Le detruire : 6"history" file is damaged, please check or remove it: MainRpertoire de la base de donnes non dfini dans la configuration No db directory in configurationMain&Suivant&NextPreview&Prcdent &PreviousPreview&Rechercher : &Search for:Preview^<br>Essai d'affichage partir du texte stock.+
Attempting to display from stored text.PreviewImpossible de traduire le document en reprsentation interne pour 0Can't turn doc into internal representation for PreviewAnnulerCancelPreview AnnulCanceledPreviewEffacerClearPreviewNImpossible de rcuprer le texte stockCould not fetch stored textPreviewTCration du texte pour la prvisualisationCreating preview textPreviewPErreur de chargement : fichier manquant.)Error loading the document: file missing.PreviewHErreur de chargement : accs refus.*Error loading the document: no permission.PreviewVErreur de chargement : erreur indtermine.0Error loading the document: other handler error.PreviewErreur de chargement : erreur indtermine<br>Fichier verrouill par l'application ?^Error loading the document: other handler error
Maybe the application is locking the file ?Preview|Erreur de chargement : gestionnaire de stockage non configur.&Error loading: backend not configured.Preview EcranFormPreviewTChargement du texte de la prvisualisation Loading preview text into editorPreview&Respecter la &casse Match &CasePreviewPProgrammes filtres externes manquants : Missing helper program: Preview OuvrirOpenPreview Tab 1Tab 1Preview CopierCopyPreviewTextEdit$Replier les lignes Fold linesPreviewTextEdit$Ouvrir le document Open documentPreviewTextEdit.Prserver l'indentationPreserve indentationPreviewTextEditImprimerPrintPreviewTextEditNImprimer la fentre de prvisualisationPrint Current PreviewPreviewTextEdit.Sauvegarder le documentSave document to filePreviewTextEdit"Tout slectionner Select AllPreviewTextEdit>Afficher les valeurs des champs Show fieldsPreviewTextEdit Afficher l'image Show imagePreviewTextEdit4Afficher le corps du texteShow main textPreviewTextEditT<b>Rpertoires avec paramtres spcifiquesCustomised subtreesQObject<i>Les paramtres qui suivent sont dfinis soit globalement, si la slection dans la liste ci-dessus est vide ou rduite la ligne vide, soit pour le rpertoire slectionn. Vous pouvez ajouter et enlever des rpertoires en cliquant les boutons +/-.The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons.QObjectJeu de caractres utilis pour lire les fichiers qui n'identifient pas de manire interne leur encodage, par exemple les fichiers texte purs.<br>La valeur par dfaut est vide, et la valeur obtenue partir de l'environnement est utilise dans ce cas.Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObject>Jeu de caractres<br>par dfautDefault
character setQObject8Suivre les liens symboliquesFollow symbolic linksQObjectIndexer les fichiers et rpertoires points par les liens symboliques. Pas fait par dfaut pour viter les indexations multiplesTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject Suffixes ignorsIgnored endingsQObjectBIndexer tous les noms de fichiersIndex all file namesQObject Indexer les noms des fichiers dont le contenu n'est pas identifi ou trait (pas de type mime, ou type non support). Vrai par dfaut}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObjectNoms ignors Skipped namesQObjectLa liste des sous-rpertoires de la zone indexe<br>o certains paramtres sont redfinis. Dfaut : vide.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectLSuffixes slectionnant des fichiers qui seront indexs uniquement sur leur nom (pas d'identification de type MIME, pas de dcompression, pas d'indexation du contenu).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObjectCanevas dfinissant les fichiers ou rpertoires qui ne doivent pas etre indexs.LThese are patterns for file or directory names which should not be indexed.QObject&Ouvrir&OpenQWidgetD&Ouvrir le document/dossier parent&Open Parent document/folderQWidget&Voir contenu&PreviewQWidget&Sauver sous&Write to FileQWidgetHImpossible d'extraire le document : Cannot extract document: QWidget@Choisir exactement un rpertoireChoose exactly one directoryQWidget2Copier le nom de &FichierCopy &File NameQWidgetCopier l'&Url Copy &URLQWidgetFImpossible de lire le rpertoire : Could not read directory: QWidgetRCrer ou choisir un rpertoire d'critureCreate or choose save directoryQWidgetDChercher des documents &similairesFind &similar documentsQWidget>Ouvrir la fentre des e&xtraitsOpen &Snippets windowQWidgetOuvrir Avec Open WithQWidgetBPrvisualiser le document p&arentPreview P&arent document/folderQWidget$Excuter le Script Run ScriptQWidgetfSauvegarder la slection courante dans des fichiersSave selection to filesQWidgetVAfficher les sous-documents et attachementsShow subdocuments / attachmentsQWidgetLCollision de noms inattendue, abandon.+Unexpected file name collision, cancelling.QWidget"Ne plus afficher.Do not show again.QxtConfirmationMessage<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">L'indexation <span style=" font-weight:600;">Recoll</span> peut tre configurer pour s'excuter en arrire plan, mettant jour l'index au fur et mesure que des documents sont modifis. Vous y gagnez un index toujours jour, mais des ressources systme (mmoire et processeur) sont consommes en permanence.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolWNgalement dmarrer le dmon maintenant.%Also start indexing daemon right now.RTIToolWpFichier autostart dtruit. Arrter le process en cours ?2Autostart file deleted. Kill current process too ?RTIToolW,Impossible de crer : Can't create: RTIToolWBImpossible d'excuter recollindexCould not execute recollindexRTIToolW*Effacement du fichier Deleting fileRTIToolWEffacement :  Deleting: RTIToolWjDmarrage automatique de l'indexation au fil de l'eau"Real time indexing automatic startRTIToolW2Enlvement de l'autostartRemoving autostartRTIToolW.Remplacement du fichierReplacing fileRTIToolW$Remplacement de :  Replacing: RTIToolWhDmarrer le dmon d'indexation quand je me connecte..Start indexing daemon with my desktop session.RTIToolWAttentionWarningRTIToolWB avec le message complmentaire : with additional message: RclMain&(tous les langages)(all languages)RclMain"(pas d'expansion) (no stemming)RclMain<em>Par ailleurs, il semble que la dernire mise jour pour ce fichier a chou.</em><br/>LAlso, it seems that the last index update for the file failed.
RclMain$ propos de Recoll About RecollRclMainToutAllRclMainMauvaise spcification d'application pour %1 : [%2] Merci de vrifier le fichier desktop ?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMain&Chemins inexistants Bad pathsRclMainLigne de commande incorrecte pour %1 : [%2]. Vrifier le fichier mimeview.CBad viewer command line for %1: [%2] Please check the mimeview fileRclMainDImpossible d'accder au fichier : Can't access file: RclMain^Impossible de crer la fentre de visualisationCan't create preview windowRclMainImpossible d'ouvrir le fichier des synonymes (erreur dans le fichier?)&Can't set synonyms file (parse error?)RclMainNImpossible de dcomprimer le fichier : Can't uncompress file: RclMainImpossible de mettre jour l'index : un indexeur est dj actif#Can't update index: indexer runningRclMainhImpossible de mettre jour l'index : erreur interne"Can't update index: internal errorRclMainImpossible d'extraire le document ou de crer le fichier temporaire0Cannot extract document or create temporary fileRclMainPImpossible de trouver le document parentCannot find parent documentRclMainZImpossible d'accder au document dans la base+Cannot retrieve document info from databaseRclMainFChoisir un fichier pour sauvegarderChoose file to saveRclMainCliquer Annuler pour retourner la liste.<br>Cliquer Ignorer pour afficher la prvisualisation (et enregister l'option pour cette session). Il y a un risque d'afficher le mauvais document.<br/>Click Cancel to return to the list.
Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.
RclMainCliquer Ok pour essayer de mettre jour l'index. Vous devrez lancer la recherche nouveau quand l'indexation sera termine.<br>rClick Ok to try to update the index for this file. You will need to run the query again when indexing is done.
RclMainFermetureClosingRclMainZDu contenu a t index pour ces types MIME :.Content has been indexed for these MIME types:RclMaindLe chargement de la recherche sauvegarde a chouCould not load saved queryRclMainImpossible d'ouvrir un index externe. Base non ouverte. Verifier la liste des index externes.HCould not open external index. Db not open. Check external indexes list.RclMain@Impossible d'ouvrir le fichier :Could not open file: RclMainFImpossible d'crire dans le fichierCould not write to fileRclMainDsactiv parce que l'indexeur au fil de l'eau n'est pas disponible dans cet excutable.;Disabled because the real time indexer was not compiled in.RclMainNe plus afficher ce message (utiliser le dialogue de prfrences pour rtablir).DDo not show this warning next time (use GUI preferences to restore).RclMain&Filtre de documentsDocument filterRclMainDHistorique des documents consultsDocument historyRclMainFiniDoneRclMain(Documents identiquesDuplicate documentsRclMainNChemins vides ou non existants dans le fichier de configuration. Cliquer sur Ok pour dmarrer l'indexation (les donnes absentes ne seront pas limines de l'index) : Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): RclMain*Effacement de l'index Erasing indexRclMain ErreurErrorRclMain Excution de : [ Executing: [RclMain>Applications et commandes externes ncessaires pour vos types de documents, et non trouves, telles qu'enregistres par la dernire squence d'indexation dans.pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMain(Donnes d'historique History dataRclMain,L'index est verrouill Index lockedRclMainZL'index n'est pas jour pour ce fichier.<br>'Index not up to date for this file.
RclMainDErreur de la recherche sur l'indexIndex query errorRclMain:Programmation de l'indexationIndex schedulingRclMain$Types MIME indexsIndexed MIME TypesRclMain~L'indexeur est actif. Impossible d'accder au fichier webcache./Indexer is running. Can't access webcache file.RclMain&Indexation termine Indexing doneRclMain*L'indexation a chouIndexing failedRclMain,Indexation en cours : Indexing in progress: RclMain,Indexation interrompueIndexing interruptedRclMain(Erreur de chargement Load errorRclMain.Applications manquantesMissing helper programsRclMainMoniteurMonitorRclMain`Pas de visualiseur configur pour le type MIME [-No external viewer configured for mime type [RclMain:Pas d'applications manquantesNo helpers found missingRclMainnPas de donnes : l'indexation initiale n'est pas faite.3No information: initial indexing not yet performed.RclMain8Pas de recherche sauvegardeNo preserved previous searchRclMain*Aucun rsultat trouvNo results foundRclMain Pas de recherche No searchRclMain@Erreur d'indexation non fatale :Non-fatal indexing message: RclMainRienNoneRclMainOuverture d'un fichier temporaire. Les modification seront perdues<br/>si vous ne les sauvez pas dans un emplacement permanent.`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMainNettoyagePurgeRclMainRequte en cours.<br>En raison de restrictions internes, <br>annuler terminera l'excution du programmeeQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain2Rsultats de la recherche Query resultsRclMain"Erreur de lecture Read failedRclMainNEffacer l'index et redmarrer de zro ?(Reset the index and start from scratch ?RclMainBNombre de rsultats (estimation) Result count (est.)RclMainRsultatsResultsRclMain,Sauvegarder le fichier Save fileRclMain@Recherches Sauvegardes (*.rclq)Saved Queries (*.rclq)RclMainLes schmas de slection ne peuvent tre utiliss qu'avec un rpertoire de dpart:Selection patterns can only be used with a start directoryRclMainxLes schmas de slection ncessitent un rpertoire de dpartSelection patterns need topdirRclMainDsol, pas disponible pour Windows pour le moment, utiliser les entres du menu fichier pour mettre jour l'indexYSorry, not available under Windows for now, use the File menu entries to update the indexRclMainBase radicauxStemdbRclMain*Arrter l'&IndexationStop &IndexingRclMain<Sous-documents et attachementsSub-documents and attachmentsRclMain2Le processus d'indexation en cours n'a pas t dmarr depuis cette interface. Cliquer OK pour le tuer quand mme, ou Annuler pour le laisser tranquille.yThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainLe document appartient un index externe que je ne peux pas mettre jour.@The document belongs to an external index which I can't update. RclMainL'indexeur est actif, les choses devraient aller mieux quand il aura fini.@The indexer is running so things should improve when it's done. RclMainLe visualiseur spcifi dans mimeview pour %1 : %2 est introuvable. Voulez vous dmarrer le dialogue de prfrences ?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMain\Ces URLs(| ipath) partagent le mme contenu : -These Urls ( | ipath) share the same content:RclMainCet outil de configuration ne travaille que sur l'index principal.6This configuration tool only works for the main index.RclMainBCette recherche n'est plus active"This search is not active any moreRclMain OutilsToolsRclMaindListe vide : attendre que l'indexation progresse ?6Types list empty: maybe wait for indexing to progress?RclMainInconnueUnknownRclMaintat de l'indexeur inconnu. Impossible d'accder au fichier webcache.2Unknown indexer state. 
Can't access webcache file.RclMain,Mettre jour l'&index Update &IndexRclMainMise jourUpdatingRclMainLa ligne de commande pour %1 spcifie la fois le fichier et son parent : non supportQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainLa ligne de commande pour %1 specifie l'utilisation du fichier parent, mais l'URL est http[s] : ne peut pas marcherPViewer command line for %1 specifies parent file but URL is http[s]: unsupportedRclMainAttentionWarningRclMain chec d'criture Write failedRclMaindocumentdocumentRclMaindocuments documentsRclMain erreurerrorRclMainnimpossible de trouver la liste des langages d'expansion#error retrieving stemming languagesRclMainerreurserrorsRclMainfichierfileRclMainfichiersfilesRclMain filtrfilteredRclMainmultimdiamediaRclMainmessagemessageRclMain autresotherRclMainprsentation presentationRclMaintrisortedRclMain"feuille de calcul spreadsheetRclMain textetextRclMain fichiers totaux) total files)RclMainPAvec re-traitement des fichiers en chec With failed files retrying RclMainBase&&A propos de Recoll &About Recoll RclMainBase$Recherche &Avance&Advanced Search RclMainBaseF&Effacer l'historique des documents&Erase document history RclMainBaseH&Effacer l'historique des recherches&Erase search history RclMainBase&Fichier&File RclMainBase&Plein cran &Full Screen RclMainBase*Interface utilisateur&GUI configuration RclMainBase &Aide&Help RclMainBase &Index&Index configuration RclMainBase&Prfrences &Preferences RclMainBase*&Reconstruire l'index&Rebuild index RclMainBase&Rsultats&Results RclMainBase.Paramtres pour le &tri&Sort parameters RclMainBase&Outils&Tools RclMainBase&Manuel &User manual RclMainBase &Voir&View RclMainBase"Recherche AvanceAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase0Historique des documentsDocument History RclMainBase2&Historique des documentsDocument &History RclMainBase&QuitterE&xit RclMainBaseIndex e&xternesE&xternal index dialog RclMainBase*Activer les synonymesEnable synonyms RclMainBaseIndex externesExternal index dialog RclMainBaseF11F11 RclMainBasePremire page First Page RclMainBasePremire page First page RclMainBasePlein cran Full Screen RclMainBaseJAller la premire page de rsultatsGo to first page of results RclMainBase0&Statistiques de l'indexIndex &statistics RclMainBase&Types &MIME indexsIndexed &MIME types RclMainBase,Programme d'indexationIndexing &schedule RclMainBaseJIndexation avec des options spcialesIndexing with special options RclMainBaseBCharger une recherche sauvegardeLoad saved query RclMainBase,&Traducteurs manquantsMissing &helpers RclMainBasePage suivante Next Page RclMainBasePage suivante Next page RclMainBasePage suivanteNext page of results RclMainBaseLa prochaine mise jour de l'index essaiera de traiter les fichiers actuellement en chec.Next update will retry previously failed files RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBasePage prcdente Previous Page RclMainBasePage prcdente Previous page RclMainBasePage prcdentePrevious page of results RclMainBase,Fragments de rechercheQuery Fragments RclMainBase RecollRecoll RclMainBaseLSauver en format CSV (fichier tableur)Save as CSV (spreadsheet) file RclMainBaseBSauvegarder la dernire rechercheSave last query RclMainBaseSauvegarde les rsultats dans un fichier qu'il sera possible de charger dans un tableur@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift+PgUp Shift+PgUp RclMainBase<Afficher la requte en dtailsShow Query Details RclMainBase2Afficher comme un tableau Show as table 
RclMainBaseLMontrer les rsultats dans un tableau (Show results in a spreadsheet-like table RclMainBaseRTrier par date, le plus rcent en premierSort by date, newest first RclMainBaseRTrier par date, le plus ancien en premierSort by date, oldest first RclMainBasefTrier par date des plus rcentes aux plus anciennes#Sort by dates from newest to oldest RclMainBasefTrier par date des plus anciennes aux plus rcentes#Sort by dates from oldest to newest RclMainBase,Paramtres pour le triSort parameters RclMainBase&Indexation spcialeSpecial Indexing RclMainBase.&Exploration de l'indexTerm &explorer RclMainBase<Outil d'exploration de l'indexTerm explorer tool RclMainBaseLDclencher une indexation incrmentaleTrigger incremental pass RclMainBase&Indexer Update &index RclMainBase"Editeur &WebcacheWebcache Editor RclMainBaseQuitterQuit RclTrayIconRestaurerRestore RclTrayIconExtraitAbstract RecollModel AuteurAuthor RecollModelTImpossible de trier par pertinence inverseCan't sort by inverse relevance RecollModelDateDate RecollModelDate et heure Date and time RecollModelDate document Document date RecollModelTaille document Document size RecollModelDate fichier File date RecollModelNom de fichier File name RecollModelTaille fichier File size RecollModel IpathIpath RecollModelMots clefKeywords RecollModelType MIME MIME type RecollModel MtimeMtime RecollModel6Jeu de caractres d'origineOriginal character set RecollModelPertinenceRelevancy rating RecollModel TitreTitle RecollModelURLURL RecollModel(requte) (show query)ResList8<p><b>Aucun rsultat</b><br>

No results found
ResListb<p><i>Orthographes proposs (sans accents) : </i>4

Alternate spellings (accents suppressed): ResListD<p><i>Orthographes proposs : </i>

Alternate spellings: ResListDHistorique des documents consultsDocument historyResListDocuments DocumentsResListSuivantNextResList OuvrirOpenResList PrvisualisationPreviewResListPrcdentPreviousResList,Dtail de la recherche Query detailsResList4Nombre de rsultats (est.)Result count (est.)ResList$Liste de rsultats Result listResListExtraitsSnippetsResList*Document inaccessibleUnavailable documentResListpourforResListparmi au moinsout of at leastResList&&Enlever la colonne&Delete columnResTable<&Revenir au tri par pertinence &Reset sortResTable&&Sauvegarder en CSV &Save as CSVResTable0Ajouter une colonne "%1"Add "%1" columnResTableTImpossible d'ouvrir ou crer le fichier : Can't open/create file: ResTable>Sauvegarder dans un fichier CSVSave table to CSV fileResTable^diffrent des prfrences en cours (conserves)' differ from current preferences (kept)SSearchTous les termes All termsSSearchCertains termesAny termSSearchL'option de suffixe automatique pour la recherche sauvegarde :  Auto suffixes for stored query: SSearchL'option autophrase est positionne, mais ne l'tait pas pour la recherche sauvegarde3Autophrase is set but it was unset for stored querySSearchL'option autophrase est dsactive mais tait active pour la recherche sauvegarde3Autophrase is unset but it was set for stored querySSearch(Requte non reconnueBad query stringSSearchlEntrer un nom de fichier (caractres jokers possibles)$Enter file name wildcard expression.SSearchnEntrer une expression du langage de recherche. Antische :<br> <i>term1 term2</i> : 'term1' ET 'term2' champ non spcifi.<br> <i>field:term1</i> : 'term1' recherche dans le champ 'field'.<br> Noms de champs standards (utiliser les mots anglais)/alias:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-champs: dir, mime/format, type/rclcat, date.<br> Examples d'intervalles de dates: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> NE PAS mettre les parenthses.<br> <i>"term1 term2"</i> : phrase exacte. Options::<br> <i>"term1 term2"p</i> : proximit (pas d'ordre).<br> Utiliser le lien <b>Afficher la requte en dtail</b> en cas de doute sur les rsultats et consulter le manuel (en anglais) (&lt;F1>) pour plus de dtails. Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchBEntrer les termes recherchs ici.Enter search terms here.SSearchfLes index externes pour la recherche sauvegarde : #External indexes for stored query: SSearchNom de fichier File nameSSearch4Plus de mmoire disponible Out of memorySSearch0Language d'interrogationQuery languageSSearchrLes langages d'expansion pour la recherche sauvegarde : %Stemming languages for stored query: SSearch:Choisir le type de recherche.Choose search type. SSearchBaseEffacerClear SSearchBase Ctrl+SCtrl+S SSearchBase Effacer l'entreErase search entry SSearchBaseSSearchBase SSearchBase SSearchBaseRechercherSearch SSearchBaseHAfficher l'historique des recherchesShow query history SSearchBase*Dmarrer la recherche Start query SSearchBaseToutAll SearchClauseWCertainsAny SearchClauseWNom de fichier File name SearchClauseWSans champNo field SearchClauseWRienNone SearchClauseWNombre de mots additionnels qui peuvent se trouver entre les termes recherchsHNumber of additional words that may be interspersed with the chosen ones SearchClauseW PhrasePhrase SearchClauseWProximit Proximity SearchClauseWrSlectionner le type de requte effectuer avec les mots>Select the type of query that will be performed with the words SearchClauseWTrouver :Find:SnippetsSuivantNextSnippetsPrcdentPrevSnippetsExtraitsSnippetsSnippets2<p>Dsol, aucun rsultat trouv dans les limites de recherche. Peut-tre que le document est trs gros et que le gnrateur d'extraits s'est perdu...<p>

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsWRechercherSearch SnippetsWTrier Par Page Sort By Page SnippetsW(Trier Par PertinenceSort By Relevance SnippetsWParcourirBrowseSpecIdxWRpertoire indexer rcursivement. Il doit tre l'intrieur de la zone normale<br>dfinie par la variable topdirs.Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxWSinon, seulement les fichiers modifis ou en erreur seront traits.5Else only modified or failed files will be processed.SpecIdxWEffacer les donnes pour les fichiers slectionns avant de rindexer.*Erase selected files data before indexing.SpecIdxWLaisser vide pour slectionner tous les fichiers. Vous pouvez utiliser plusieurs schmas spars par des espaces.<br>Les schmas contenant des espaces doivent ere enclos dans des apostrophes doubles.<br>Ne peut tre utilis que si le rpertoire de dpart est positionn.Leave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxWPNe pas ressayer les fichiers en erreur.Retry previously failed files.SpecIdxW,Schmas de slection :Selection patterns:SpecIdxW&Indexation spcialeSpecial IndexingSpecIdxWRpertoire de dpart. Doit faire partie de la zone indexe. Traite toute la zone si non renseign.RStart directory. Must be part of the indexed tree. Use full indexed area if empty.SpecIdxW2Objet index de dmarrageTop indexed entitySpecIdxW&Fermer&Close SpellBase&Drivs&Expand  SpellBaseAccentsAccents SpellBase Alt+FAlt+C SpellBase Alt+DAlt+E SpellBase*Majuscules/MinusculesCase SpellBase$Faire correspondreMatch SpellBase<Pas d'information sur la base. No db info. SpellBase&Explorateur d'index Term Explorer SpellBase> Documents crs ou mis jour Documents created/updatedSpellW" Fichiers tests Files testedSpellW, Fichiers non indexs Unindexed filesSpellW%1 rsultats %1 resultsSpellWFNombre moyen de termes par documentAverage terms per documentSpellW4Taille occupee par l'indexDatabase directory sizeSpellWDoc. / Tot. Doc. / Tot.SpellW|Index : %1 documents, longueur moyenne %2 termes. %3 rsultats7Index: %1 documents, average length %2 terms.%3 resultsSpellWElementItemSpellWpLister les fichiers qui n'ont pas pu tre traits (lent),List files which could not be indexed (slow)SpellWLa liste a t tronque par ordre alphabtique. Certains termes frquents1List was truncated alphabetically, some frequent SpellWBTaille maximale document (termes)Longest document length (terms)SpellWTypes MIME : MIME types:SpellW Pas de rsultatsNo expansion foundSpellW&Nombre de documentsNumber of documentsSpellW(Expression rgulireRegexpSpellWJRsultats de la dernire indexation :Results from last indexing:SpellWHAfficher les statistiques de l'indexShow index statisticsSpellWBTaille minimale document (termes) Smallest document length (terms)SpellWXErreur dans les suggestions orthographiques.Spell expansion error. SpellW,Orthographe/PhontiqueSpelling/PhoneticSpellW,Expansion grammaticaleStem expansionSpellW TermeTermSpellW ValeurValueSpellWWildcards WildcardsSpellWlImpossible de former la liste des langages d'expansion#error retrieving stemming languagesSpellWpourraient tre absents. Essayer d'utiliser une racine plus longue.terms may be missing. 
Try using a longer root.SpellWTous les termes All terms UIPrefsDialogCertains termesAny term UIPrefsDialog:Selectionner au plus un index$At most one index should be selected UIPrefsDialogImpossible d'ajouter un index avec une option differente de sensibilite a la casse et aux accents>Cant add index with different case/diacritics stripping option UIPrefsDialogChoisirChoose UIPrefsDialog8Fonte par dfaut de QtWebkitDefault QtWebkit font UIPrefsDialogNom de fichier File name UIPrefsDialog0Language d'interrogationQuery language UIPrefsDialogXEn-tte HTML (la valeur par dfaut est vide)%Result list header (default is empty) UIPrefsDialogFormat de paragraphe de la liste de rsultats (tout effacer pour revenir la valeur par dfaut)Nouveaux param&egrave;tres</b>New Values:ViewActionBaseTAction (vide -> utiliser le defaut recoll) Action (empty -> recoll default)ViewActionBaseBAppliquer la slection couranteApply to current selectionViewActionBase FermerCloseViewActionBaseFException aux prfrences du bureau Exception to Desktop preferencesViewActionBase:Applications de visualisationNative ViewersViewActionBaseAction Recoll action:ViewActionBaseSlectionner un ou plusieurs types de fichiers, puis utiliser les contrles dans le cadre du bas pour changer leur traitementkSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseSlectionner un ou plusieurs types MIME, puis utiliser les contrles dans le cadre du bas pour changer leur traitementlSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBase<S&eacute;lectionner par valeur Select sameViewActionBaseDUtiliser les prfrences du bureau"Use Desktop preferences by defaultViewActionBasevaleur actuelle current valueViewActionBase$Recherche (regexp) Search regexpWebcache Editeur WebcacheWebcache editorWebcacheCopier l'URLCopy URL WebcacheEditDDtruire les entres slectionnesDelete selection WebcacheEdit~L'indexeur est actif. Impossible d'accder au fichier webcache.-Indexer is running. Can't edit webcache file. WebcacheEdit~tat indexeur inconnu. Impossible d'diter le fichier webcache.0Unknown indexer state. Can't edit webcache file. WebcacheEditLe fichier webcache a t modifi, il faudra redmarrer l'indexation aprs avoir ferm cette fentre.RWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelUrlUrl WebcacheModelChoisirChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW$Ajouter une entre Add entryconfgui::ConfParamSLWDDtruire les entres slectionnesDelete selected entriesconfgui::ConfParamSLWDModifier les entres slectionnesEdit selected entriesconfgui::ConfParamSLW~~confgui::ConfParamSLW&Annuler&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
uiPrefsDialogBase <PRE>
uiPrefsDialogBase<PRE> + repliement
 + wrapuiPrefsDialogBaseUne recherche pour [vin rouge] (2 mots) sera complte comme [vin OU rouge OU (vin PHRASE 2 rouge)].<br>
Ceci devrait donner une meilleure pertinence aux rsultats o les termes recherchs apparaissent exactement et dans l'ordre.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBase(Sparateur d'extraitAbstract snippet separatoruiPrefsDialogBaseTout activerActivate AlluiPrefsDialogBaseTActiver les liens dans la prvisualisationActivate links in preview.uiPrefsDialogBase Ajouter un index	Add indexuiPrefsDialogBase6Appliquer les modifications
Apply changesuiPrefsDialogBaserAjouter automatiquement une phrase aux recherches simples+Automatically add phrase to simple searchesuiPrefsDialogBaseSeuil de frquence de terme (pourcentage) pour la gnration automatique de phrases.Autophrase term frequency threshold percentageuiPrefsDialogBase$Panneau de boutons
Buttons PaneluiPrefsDialogBaseChoisirChooseuiPrefsDialogBasetChoisir les diteurs pour les diffrents types de fichiersChoose editor applicationsuiPrefsDialogBaseCliquer pour ajouter un autre index a la liste. Vous pouvez slectionner soit un rpertoire de configuration Recoll soit un index Xapian{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBase^Rduire dans la barre d'tat au lieu de quitter!Close to tray instead of exiting.uiPrefsDialogBase8Format de date (strftime(3))Date format (strftime(3))uiPrefsDialogBaseTout dsactiverDeactivate AlluiPrefsDialogBaseDcide si les filtres de documents sont affichs comme des radio-boutons, un menu droulant dans la barre d'outils, ou un menu.QDecide if document filters are shown as radio buttons, toolbar combobox, or menu.uiPrefsDialogBaserDsactiver l'autocompltion Qt dans l'entre de recherche*Disable Qt autocompletion in search entry.uiPrefsDialogBase8Abandonner les modificationsDiscard changesuiPrefsDialogBaseEst-ce qu'un rsum doit etre synthtis meme dans le cas ou le document original en avait un?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBaseDcide si des rsums seront construits  partir du contexte des termes de recherche. 
Peut ralentir l'affichage si les documents sont gros.zDo we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBaseRStyle de choix des filtres de documents :Document filter choice style:uiPrefsDialogBaseHConstruire dynamiquement les résumésDynamically build abstractsuiPrefsDialogBase`Editer le fragment à insérer dans l'en-tête HTML#Edit result page html header insertuiPrefsDialogBaseTEditer le format du paragraphe de résultat#Edit result paragraph format stringuiPrefsDialogBaseActiverEnableuiPrefsDialogBaseIndex externesExternal IndexesuiPrefsDialogBaseSeuil de fréquence (pourcentage) au delà duquel les termes ne seront pas utilisés.
Les phrases contenant des termes trop fréquents posent des problèmes de performance.
Les termes ignorés augmentent la distance de phrase, et réduisent l'efficacité de la fonction de recherche de phrase automatique.
La valeur par défaut est 2%Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBase&Cacher les doublonsHide duplicate results.uiPrefsDialogBasetStyle CSS de mise en avant pour les termes de la recherche#Highlight CSS style for query termsuiPrefsDialogBaseN'afficher qu'une entre pour les rsultats de contenu identique.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseLes lignes dans une balise PRE ne sont pas replies. Utiliser BR conduit  perdre une partie des tabulations. Le style PRE + WRAP peut tre le meilleurs compromis mais son bon fonctionnement dpend des versions Qt.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBaseRendre clicquables les liens dans la fentre de prvisualisation et dmarrer un navigateur extrieur quand ils sont activs.dMake links inside the preview window clickable, and start an external browser when they are clicked.uiPrefsDialogBase~Nombre maximum d'extraits affichs dans la fentre des extraits;Maximum number of snippets displayed in the snippets windowuiPrefsDialogBase~Taille maximum des textes surligns avant prvisualisation (Mo)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseMenuMenuuiPrefsDialogBase8Nombre de rsultats par page"Number of entries in a result pageuiPrefsDialogBaseOuvre un dialogue permettant de slectionner la feuille de style CSS pour le popup des fragmentsAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBase`Ouvre une fentre permettant de changer la fonte-Opens a dialog to select the result list fontuiPrefsDialogBasetOuvre un dialogue pour choisir un fichier feuille de style-Opens a dialog to select the style sheet fileuiPrefsDialogBase,Traductions de cheminsPaths translationsuiPrefsDialogBaseZStyle de traduction texte ordinaire vers HTMLPlain text to HTML line styleuiPrefsDialogBasebUtiliser le format Html pour la previsualisation.&Prefer Html to plain text for preview.uiPrefsDialogBasejSuffixes automatiques pour le langage d'interrogation(Query language magic file name suffixes.uiPrefsDialogBase$Mise en vidence des termes de recherche. <br>Si le bleu utilis par dfaut est trop discret, essayer peut-tre : "color:red;background:yellow"...Query terms highlighting in results. 
Maybe try something like "color:red;background:yellow" for something more lively than the default blue...uiPrefsDialogBase@Recoll - Prfrences utilisateurRecoll - User PreferencesuiPrefsDialogBaseHMmoriser l'tat d'activation du triRemember sort activation state.uiPrefsDialogBaselOter de la liste. Sans effet sur les donnes stockes.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase(Effacer la slectionRemove selecteduiPrefsDialogBasebRemplacer les rsums existant dans les documents Replace abstracts from documentsuiPrefsDialogBaseRinitialiserResetuiPrefsDialogBasebRinitialise le style de la fentre des fragments Resets the Snippets window styleuiPrefsDialogBaseZRinitialiser la fonte la valeur par dfaut1Resets the result list font to the system defaultuiPrefsDialogBasejRestore la valeur par dfaut pour la feuille de style!Resets the style sheet to defaultuiPrefsDialogBase$Liste de rsultats Result ListuiPrefsDialogBase@Fonte pour la liste de rsultatsResult list fontuiPrefsDialogBase8Paramtres pour la rechercheSearch parametersuiPrefsDialogBaseCrer les traductions de chemins d'accs pour l'index selectionn, ou pour l'index principal si rien n'est slectionn.XSet path translations for the selected index or for the main one if no selection exists.uiPrefsDialogBaseZAfficher l'icone dans la barre d'tat systmeShow system tray icon.uiPrefsDialogBasezAfficher un avertissement quand on dite une copie temporaire)Show warning when opening temporary file.uiPrefsDialogBase^Feuille de style CSS pour le popup de fragmentsSnippets window CSS fileuiPrefsDialogBase~Trier les extraits par numro de page (dfaut: par pertinence).2Sort snippets by page number (default: by weigth).uiPrefsDialogBaseDmarrer la recherche quand un choix est fait dans les suggestions+Start search on completer popup activation.uiPrefsDialogBasebPanneau de recherche avance ouvert au dmarrage.'Start with advanced search dialog open.uiPrefsDialogBaseBDmarrer en mode recherche simpleStart with simple search modeuiPrefsDialogBaseDLangue pour l'expansion des termesStemming languageuiPrefsDialogBase Feuille de style Style sheetuiPrefsDialogBase Mode silencieux.Suppress all beeps.uiPrefsDialogBase(Fichier de synonymes Synonyms fileuiPrefsDialogBaseNombre de mots de contexte par occurrence de terme dans le rsum Synthetic abstract context wordsuiPrefsDialogBaseRTaille du rsum synthtique (caractres)$Synthetic abstract size (characters)uiPrefsDialogBaseLes textes plus gros ne seront pas surligns dans la prvisualisation (trop lent).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseLes mots de la liste seront automatiquement changs en clauses ext:xxx dans les requtes en langage d'interrogation.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBaseZChanger l'tat pour les entres slectionnesToggle selecteduiPrefsDialogBaseNMenu droulant dans le panneau d'outilsToolbar ComboboxuiPrefsDialogBase*Interface utilisateurUser interfaceuiPrefsDialogBaseStyle utilisateur appliquer la fentre "snippets".<br>Note : l'en tte de page de rsultat est aussi inclus dans la fentre "snippets".User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_de.ts0000644000175000017500000052467213566424763014254 00000000000000 AdvSearch All clauses alle Ausdrücke Any clause irgendeinen Ausdruck texts Texte spreadsheets Tabellen presentations Präsentationen media Medien messages Nachrichten other Andere Bad multiplier suffix in size filter Ungültiger Multiplikator-Suffix im Größen-Filter text Text spreadsheet Tabelle presentation Präsentation message Nachricht AdvSearchBase Advanced search Erweiterte Suche Restrict file types Dateitypen einschränken Save as default Als Standard speichern Searched file types Durchsuchte Dateitypen All ----> Alle ----> Sel -----> Auswahl ----> <----- Sel <---- Auswahl <----- All <---- Alle Ignored file types Nicht durchsuchte Dateitypen Enter top directory for search Geben Sie das Basisverzeichnis für die Suche ein. Browse Durchsuchen Restrict results to files in subtree: Ergebnisse auf Dateien in folgendem Verzeichnisbaum einschränken: Start Search Suche starten Search for <br>documents<br>satisfying: Suche nach Dokumenten, <br>die Folgendes erfüllen: Delete clause Ausdruck entfernen Add clause Ausdruck hinzufügen Check this to enable filtering on file types Auswählen, um Filterung nach Dateitypen einzuschalten By categories Nach Kategorien Check this to use file categories instead of raw mime types Auswählen, um Dateikategorien statt Mime-Typen zu verwenden Close Schließen All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Alle nicht-leeren Felder rechts werden mit UND ("alle Ausdrücke") oder ODER ("irgendeinen Ausdruck") verknüpft. <br>Felder des Typs "Irgendeines", "Alle" und "Keines" können eine Mischung aus Wörtern und in Anführungszeichen eingeschlossenen Phrasen enthalten. <br>Nicht gefüllte Felder werden ignoriert. Invert Invertieren Minimum size. You can use k/K,m/M,g/G as multipliers Minimale Größe. Sie können k/K, m/M, g/G als Multiplikatoren verwenden. Min. Size Min. Größe: Maximum size. You can use k/K,m/M,g/G as multipliers Maximale Größe. Sie können k/K, m/M, g/G als Multiplikatoren verwenden. Max. Size Max. Größe: Filter Filtern From von To bis Check this to enable filtering on dates Auswählen, um Filterung nach Datum einzuschalten Filter dates Nach Datum filtern Find Finden Check this to enable filtering on sizes Auswählen, um Filterung nach Dateigröße einzuschalten Filter sizes Nach Größe filtern ConfIndexW Can't write configuration file Fehler beim Schreiben der Konfigurationsdatei Global parameters Globale Parameter Local parameters Lokale Parameter Search parameters Suchparameter Top directories Start-Verzeichnisse The list of directories where recursive indexing starts. Default: your home. Die Liste der Verzeichnisse, in denen die rekursive Indizierung startet. Standard: Home-Verzeichnis. Skipped paths Auszulassende Pfade These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. 
The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Stemming-Sprachen The languages for which stemming expansion<br>dictionaries will be built. Die Sprachen, für die Worstammerweiterungsverzeichnisse erstellt werden. Log file name Log-Datei The file where the messages will be written.<br>Use 'stderr' for terminal output Die Datei, in die Ausgaben geschrieben werden.<br>Für Ausgaben auf dem Terminal 'stderr' benutzen. Log verbosity level Ausführlichkeit des Logs This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Dieser Wert steuert die Menge der Meldungen<br>(nur Fehler oder viele Debugging Ausgaben). Index flush megabytes interval Interval (MB) für Speicherleerung This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Dieser Wert steuert, wieviel Daten indiziert werden bevor die Indexinformationen auf Festplatte geschrieben werden.<br>Hierdurch kann der Speicherverbrauch des Indizierers gesteuert werden. Standardwert: 10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Aspell nicht benutzen Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deaktiviert die Verwendung von Aspell für die Erzeugung von Schreibweisen-Näherungen im Ausdruck-Explorer-Werkzeug. <br>Nützlich, wenn Aspell nicht vorhanden ist oder nicht funktioniert. Aspell language Sprache für Aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Die Sprache des Aspell-Wörterbuchs (z.B. 'en' oder 'de' ...)<br>Wenn dieser Wert nicht gesetzt ist, wird die NLS-Umgebung verwendet, um die Sprache festzustellen, was im Allgemeinen funktioniert. Um eine Vorstellung zu bekommen, was auf Ihrem System installiert ist, geben Sie 'aspell config' ein und schauen Sie nach .dat Dateien im Verzeichnis 'data-dir'. Database directory name Verzeichnis für Index-Datenbank The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Der Name eines Verzeichnisses, in dem der Index gespeichert werden soll.<br>Ein nicht-absoluter Pfad ist dabei relativ zum Konfigurationsverzeichnis. Der Standard ist 'xapiandb'. Unac exceptions Unac Ausnahmen <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Dies sind Ausnahmen für den unac Mechanismus, der standardmäßig alle diakritischen Zeichen entfernt und sie durch kanonische Entsprechungen ersetzt. 
Sie können (abhängig von Ihrer Sprache) dieses Entfernen von Akzenten für einige Zeichen übersteuern und zusätzliche Ersetzungen angeben, z.B. für Ligaturen. Bei jedem durch Leerzeichen getrennten Eintrag ist das erste Zeichen das Ausgangszeichen und der Rest die Ersetzung. Process the WEB history queue Web-Chronik Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name Verzeichnis zur Ablage von Webseiten The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Der Name eines Verzeichnisses, in dem Kopien der besuchten Webseiten gespeichert werden sollen.<br>Ein nicht-absoluter Pfad ist dabei relativ zum Konfigurationsverzeichnis. Max. size for the web store (MB) Maximale Größe für Ablage von Webseiten (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity Automatisch diakritische Zeichen beachten <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p> Automatisch die Beachtung von diakritischen Zeichen einschalten, wenn der Suchbegriff Zeichen mit Akzenten enthält (nicht in unac_except_trans). Ansonsten müssen Sie dafür die Abfrageprache und den <i>D</i> Modifikator verwenden. Automatic character case sensitivity Automatisch Groß-/Kleinschreibung beachten <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p> Automatisch die Beachtung von Groß-/Kleinschreibung einschalten, wenn der Eintrag Großbuchstaben enthält (außer an erster Stelle). Ansonsten müssen Sie dafür die Abfragesprache und den <i>C</i> Modifikator verwenden. Maximum term expansion count Maximale Anzahl von Ausdruck-Erweiterungen <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Maximale Anzahl von Erweiterungen für einen einzelnen Ausdruck (z.B. bei der Verwendung von Wildcards). Der Standardwert 10 000 ist vernünftig und verhindert, dass Suchanfragen scheinbar einfrieren, während die Liste der Begriffe durchlaufen wird. Maximum Xapian clauses count Maximale Anzahl von Xapian-Ausdrücken <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Maximale Anzahl von elementaren Ausdrücken, die wir zu einer einzelnen Xapian Abfrage hinzufügen. In manchen Fällen können die Ergebnisse von Ausdruck-Erweiterungen sich ausmultiplizieren, und wir wollen übermäßigen Speicherverbrauch vermeiden. Der Standardwert 100 000 sollte in den meisten Fällen hoch genug sein und zugleich zu typischen derzeitigen Hardware-Ausstattungen passen. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. 
Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) Max. Größe kompr. Dateien (kB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Dies ist eine Obergrenze; komprimierte Dateien jenseits dieser Größe werden nicht verarbeitet. Auf -1 setzen, um keine Obergrenze zu haben, auf 0, um nie zu dekomprimieren. Max. text file size (MB) Max. Größe Textdateien (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Dies ist eine Obergrenze; Textdateien jenseits dieser Größe werden nicht verarbeitet Auf -1 setzen, um keine Obergrenze zu haben. Dies dient dazu, riesige Log-Dateien vom Index auszuschließen. Text file page size (KB) Seitengröße Textdateien (kB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Wenn dieser Wert gesetzt ist (ungleich -1), werden Textdateien zur Indizierung in Stücke dieser Größe aufgeteilt. Das hilft bei der Suche in sehr großen Textdateien (z.B. Log-Dateien). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Externe Filter, die länger als diese Zeit laufen, werden abgebrochen. Das ist für den seltenen Fall (Postscript), in dem ein Dokument eine unendliche Schleife auslöst. Auf -1 setzen, um keine Obergrenze zu haben. Global Global CronToolW Cron Dialog Cron-Zeitplan <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> Zeitplan für periodische Indizierung (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Jedes Feld kann eine Wildcard (*), eine einzelne Zahl, eine mit Kommata getrennte Liste (1,3,5) oder einen Bereich (1-7) enthalten. 
Die Felder werden <span style=" font-style:italic;">so wie sie sind</span> in der crontab-Datei verwendet und die gesamte crontab Syntax kann verwendet werden, siehe crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Beispielsweise startet die Eingabe <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Wochentage, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Stunden</span> und <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minuten</span> recollindex jeden Tag um 12:15 Uhr und 19:15 Uhr.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ein Zeitplan mit sehr häufigen Aktivierungen ist wahrscheinlich weniger effizient als Echtzeit-Indizierung.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Wochentage (* oder 0-7, 0/7 ist Sonntag) Hours (* or 0-23) Stunden (* oder 0-23) Minutes (0-59) Minuten (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Wählen Sie <span style=" font-style:italic;">Deaktivieren</span>, um die periodische Indizierung auszuschalten, <span style=" font-style:italic;">Aktivieren</span>, um sie einzuschalten, <span style=" font-style:italic;">Abbruch</span>, um nichts zu verändern.</p></body></html> Enable Aktivieren Disable Deaktivieren It seems that manually edited entries exist for recollindex, cannot edit crontab Offenbar gibt es manuelle Einträge für recollindex, crontab kann nicht angepasst werden. Error installing cron entry. Bad syntax in fields ? Fehler beim Erstellen des cron Eintrags. Falsche Syntax in Feldern? EditDialog Dialog Dialog EditTrans Source path Quellpfad Local path Lokaler Pfad Config error Konfigurationsfehler Original path Originalpfad EditTransBase Path Translations Pfadumwandlungen Setting path translations for Setze Pfadumwandlungen für Select one or several file types, then use the controls in the frame below to change how they are processed Wählen Sie einen oder mehrere Dateitypen aus. Nutzen Sie dann die Bedienelemente unten, um einzustellen wie sie verarbeitet werden. 
Add Hinzufügen Delete Entfernen Cancel Abbrechen Save Speichern FirstIdxDialog First indexing setup Einrichten für die erste Indizierung <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Es existiert noch kein Index für diese Konfiguration.</span><br /><br />Wenn Sie nur Ihr Home-Verzeichnis mit sinnvollen Voreinstellungen indizieren wollen, wählen Sie die Schaltfläche <span style=" font-style:italic;">Indizierung jetzt starten</span>. Sie können die Details später anpassen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Wenn Sie das Verhalten genauer festlegen wollen, verwenden Sie die folgenden Verknüpfungen, um Einstellungen und Zeitplan für die Indizierung anzupassen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Diese Werkzeuge können Sie später im Menü <span style=" font-style:italic;">Einstellungen</span> erreichen.</p></body></html> Indexing configuration Einstellungen für Indizierung This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. 
Hier können Sie die zu indizierenden Verzeichnisse und andere Einstellungen (wie auszuschließende Dateipfade oder -namen, Standard-Zeichensatz usw.) anpassen. Indexing schedule Zeitplan für Indizierung This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Hier können Sie zwischen regelmäßiger Indizierung und Echtzeit-Indizierung wählen und einen automatischen Zeitplan für die regelmäßige Indizierung einrichten (mit cron). Start indexing now Indizierung jetzt starten FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup Einrichtung des Zeitplans für die Indizierung <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> Indizierung kann ständig laufen und Datein indizieren sobald sie verändert werden, oder aber nur zu bestimmten Zeitpunkten ablaufen.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Im Handbuch finden Sie Informationen, anhand derer Sie sich für einen der Ansätze entscheiden können (drücken Sie F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Dieses Werkzeug hilft Ihnen, einen Zeitplan für periodische Indizierungs-Läufe einzurichten oder die Echtzeit-Indizierung zu starten, wenn Sie sich anmelden (oder beides, was aber selten sinnvoll sein dürfte). </p></body></html> Cron scheduling Cron-Zeitplan The tool will let you decide at what time indexing should run and will install a crontab entry. Mit diesem Werkzeug können Sie festlegen, zu welchen Zeiten die Indizierung laufen soll, und einen crontab Eintrag anlegen. 
Real time indexing start up Start der Echtzeit-Indizierung Decide if real time indexing will be started when you log in (only for the default index). Entscheiden Sie, ob die Echtzeit-Indizierung beim Anmelden gestartet wird (nur für den Standard-Index). ListDialog Dialog Dialog GroupBox GruppenBox Main No db directory in configuration Kein Datenbankverzeichnis konfiguriert Could not open database in Fehler beim Öffnen der Datenbank in . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. . Drücken Sie Abbrechen, um die Konfigurationsdatei vor dem Start der Indizierung anzupassen oder OK um mit der Indizierung zu beginnen. Configuration problem (dynconf Konfigurationsproblem (dynconf) "history" file is damaged or un(read)writeable, please check or remove it: "history" Datei ist beschädigt oder nicht les-/schreibbar, bitte überprüfen oder entfernen Sie sie: "history" file is damaged, please check or remove it: Preview &Search for: &Suche nach: &Next &Nächstes &Previous &Vorheriges Match &Case Groß-/Kleinschreibung &beachten Clear Leeren Creating preview text Erzeuge Vorschautext Loading preview text into editor Lade Vorschautext in den Editor Cannot create temporary directory Fehler beim Anlegen des temporären Verzeichnisses Cancel Abbrechen Close Tab Tab schließen Missing helper program: Fehlendes Hilfsprogramm: Can't turn doc into internal representation for Überführung in interne Darstellung nicht möglich für Cannot create temporary directory: Fehler beim Anlegen des temporären Verzeichnisses: Error while loading file Fehler beim Lesen der Datei Form Tab 1 Open Öffnen Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Felder zeigen Show main text Vorschautext zeigen Print Drucken Print Current Preview Aktuelle Vorschau drucken Show image Zeige Bild Select All Alles auswählen Copy Kopieren Save document to file Dokument in Datei sichern Fold lines Zeilen umbrechen Preserve indentation Einrückung erhalten Open document QObject Global parameters Globale Parameter Local parameters Lokale Parameter <b>Customised subtrees <b>Angepasste<br> Unterverzeichnisse The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Die Liste der Unterverzeichnisse in der indizierten Hierarchie, in denen einige Parameter anders gesetzt werden müssen. Voreinstellung: leer. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Die folgenden Parameter werden entweder global gesetzt (wenn nichts oder eine leere<br> Zeile in der Liste oben ausgewählt ist) oder für das ausgewählte Unterverzeichnis.<br> Sie können Verzeichnisse durch Anklicken von +/- hinzufügen oder entfernen.<br> Skipped names Auszulassende Namen These are patterns for file or directory names which should not be indexed. Dies sind Muster für Dateien oder Verzeichnisse, die nicht indiziert werden sollen. 
Default character set Standard-Zeichensatz This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. DIes ist der Zeichensatz, der für Dateien benutzt wird, die ihren Zeichensatz nicht intern definieren, z.B. Textdateien. Der Standardwert ist leer und der Wert der NLS-Umgebung wird benutzt. Follow symbolic links Folge symbolischen Links Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Folge symbolischen Links bei der Indizierung. Der Standardwert ist "Nein", um doppelte Indizierung zu vermeiden. Index all file names Indiziere alle Dateinamen Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indiziere die Namen von Dateien, deren Inhalt nicht erkannt oder verarbeitet werden kann (kein oder nicht unterstützter Mime-Typ). Der Standardwert ist "Ja". Beagle web history Beagle Web-Chronik Search parameters Suchparameter Web history Web-Chronik Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Vorschau &Open &Öffnen Open With Run Script Copy &File Name &Dateinamen kopieren Copy &URL &URL kopieren &Write to File &Schreibe in Datei Save selection to files Auswahl in Dateien sichern Preview P&arent document/folder Vorschau des &übergeordneten Dokuments/Ordners &Open Parent document/folder Ö&ffnen des übergeordneten Dokuments/Ordners Find &similar documents &Ähnliche Dokumente finden Open &Snippets window Öffne &Schnipsel-Fenster Show subdocuments / attachments Untergeordnete Dokumente / Anhänge anzeigen QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start Automatischer Start der Echtzeit-Indizierung <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. 
You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> Indizierung kann im Hintergrund laufen und den Index in Echtzeit aktualisieren sobald sich Dateien ändern. Sie erhalten so einen Index, der stets aktuell ist, aber die System-Resourcen werden ununterbrochen beansprucht.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></htm Start indexing daemon with my desktop session. Indizierungs-Dämon mit Desktop-Sitzung starten Also start indexing daemon right now. Indizierungs-Dämon jetzt sofort starten Replacing: Ersetze: Replacing file Ersetze Datei Can't create: Fehler beim Erzeugen von: Warning Warnung Could not execute recollindex Fehler beim Ausführen von recollindex Deleting: Lösche: Deleting file Lösche Datei Removing autostart Autostart wird entfernt Autostart file deleted. Kill current process too ? Autotstart-Datei wurde entfernt. Soll auch der laufende Prozess beendet werden? RclMain About Recoll Über Recoll Executing: [ Ausführen: [ Cannot retrieve document info from database Keine Informationen zum Dokument in der Datenbank Warning Warnung Can't create preview window Fehler beim Erzeugen des Vorschaufensters Query results Suchergebnisse Document history Dokumenten-Chronik History data Chronik-Daten Indexing in progress: Indizierung läuft: Files Dateien Purge Säubern Stemdb Wortstämme Closing Schließen Unknown Unbekannt This search is not active any more Diese Suche ist nicht mehr aktiv Can't start query: Kann die Suche nicht starten: Bad viewer command line for %1: [%2] Please check the mimeconf file Fehlerhafter Anzeigebefehl für %1: [%2] Überprüfen Sie die Datei mimeconf. Cannot extract document or create temporary file Fehler beim Extrahieren des Dokuments oder beim Erzeugen der temporären Datei (no stemming) (kein Stemming) (all languages) (alle Sprachen) error retrieving stemming languages Fehler beim Holen der Stemming-Sprachen Update &Index Index &aktualisieren Stop &Indexing &Indizierung stoppen All Alle media Medien message Nachricht other Andere presentation Präsentation spreadsheet Tabelle text Text sorted sortiert filtered gefiltert External applications/commands needed and not found for indexing your file types: Externe Anwendungen/Befehle, die zur Indizierung Ihrer Dateitypen gebraucht werden und nicht gefunden wurden: No helpers found missing Keine fehlenden Hilfsprogramme Missing helper programs Fehlende Hilfsprogramme Document category filter Filter für Dokumenten-Kategorie No external viewer configured for mime type [ Kein externes Anzeigeprogramm konfiguriert für Mime-Typ [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Das in mimeview angegebene Anzeigeprogramm für %1: %2 wurde nicht gefunden. Wollen Sie den Einstellungs-Dialog starten? 
Can't access file: Fehler beim Zugriff auf Datei: Can't uncompress file: Fehler beim Dekomprimieren von Datei: Save file Datei sichern Result count (est.) Anzahl Ergebnisse (ca.) Query details Details zur Suchanfrage Could not open external index. Db not open. Check external indexes list. Externer Index konnte nicht geöffnet werden. Datenbank nicht offen. Überprüfen Sie die Liste der externen Indizes. No results found Keine Ergebnisse gefunden None Keine Updating Aktualisiere Done Fertig Monitor Überwachen Indexing failed Indizierung gescheitert The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Der laufende Indizierungs-Prozess wurde nicht aus diesem Programm gestartet. Drücken SIe OK, um ihn dennoch zu stoppen oder Abbrechen, um ihn unverändert zu lassen. Erasing index Lösche Index Reset the index and start from scratch ? Index zurücksetzen und ganz neu aufbauen? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Suche läuft.<br>Aufgrund von Einschränkungen der Indizierungs-Bibliothek<br>führt ein Abbruch zur Beendigung des Programms. Error Fehler Index not open Index nicht geöffnet Index query error Fehler beim Abfragen des Index Content has been indexed for these mime types: Inhalte mit diesen Mime-Typen wurden indiziert: Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Der Index ist für diese Datei nicht auf dem neuesten Stand. Es soll nicht das Risiko eingegangen werden, den falschen Eintrag anzuzeigen. Drücken SIe OK, um den Index für diese Datei zu aktualisieren und starten Sie die Suchanfrage erneut, wenn die Indizierung abgeschlossen ist. Drücken Sie ansonsten Abbrechen. Can't update index: indexer running Fehler beim Aktualisieren des Index: Indizierung läuft Indexed MIME Types Indizierte Mime-Typen Bad viewer command line for %1: [%2] Please check the mimeview file Fehlerhafter Anzeigebefehl für %1: [%2] Überprüfen Sie die Datei mimeview. Viewer command line for %1 specifies both file and parent file value: unsupported Anzeigebefehl für %1 legt Datei und übergeordnete Datei fest: nicht unterstützt Cannot find parent document Übergeordnetes Dokument nicht gefunden Indexing did not run yet Indizierung ist noch nicht durchgeführt worden External applications/commands needed for your file types and not found, as stored by the last indexing pass in Externe Anwendungen/Befehle, die zur Indizierung Ihrer Dateitypen gebraucht werden und nicht gefunden wurden - vom letzten Indizierungslauf hinterlegt unter Index not up to date for this file. Refusing to risk showing the wrong entry. Der Index ist für diese Datei nicht mehr aktuell. Einträge könnten fehlerhaft sein und werden nicht angezeigt. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Drücken Sie Ok, um den Index für diese Datei zu aktualisieren und die Suche daraufhin zu wiederholen. Ansonsten drücken Sie auf Abbrechen. Indexer running so things should improve when it's done Indizierung ist im Gange. Die Resultate sollten sich nach der Fertigstelltung verbessert haben Sub-documents and attachments Untergeordnete Dokumente und Anhänge Document filter The indexer is running so things should improve when it's done. 
Duplicate documents Doppelte Dokumente These Urls ( | ipath) share the same content: Diese URLs ( | ipath) sind inhaltsgleich: Bad desktop app spec for %1: [%2] Please check the desktop file Indexing interrupted Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
RclMainBase Previous page Vorherige Seite Next page Nächste Seite &File &Datei E&xit &Beenden &Tools &Werkzeuge &Help &Hilfe &Preferences &Einstellungen Search tools Suchwerkzeuge Result list Ergebnisliste &About Recoll &Über Recoll Document &History &Dokumenten-Chronik Document History Dokumenten-Chronik &Advanced Search &Erweiterte Suche Advanced/complex Search Erweiterte/komplexe Suche &Sort parameters &Sortierparameter Sort parameters Sortierparameter Next page of results Nächste Ergebnisseite Previous page of results Vorherige Ergebnisseite &Query configuration Einstellungen für &Suche &User manual &Benutzerhandbuch Recoll Recoll Ctrl+Q Ctrl+Q Update &index &Index aktualisieren Term &explorer &Ausdruck-Explorer Term explorer tool Ausdruck-Explorer-Werkzeug External index dialog Dialog für externe Indizes &Erase document history Lösche &Dokumenten-Chronik First page Erste Seite Go to first page of results Gehe zur ersten Ergebnisseite &Indexing configuration &Einstellungen für Indizierung All Alle &Show missing helpers Zeige fehlende &Hilfsprogramme PgDown PgDown PgUp PgUp &Full Screen &Vollbild F11 F11 Full Screen Vollbild &Erase search history Lösche &Such-Chronik sortByDateAsc sortByDateAsc Sort by dates from oldest to newest Nach Datum sortieren (von alt nach neu) sortByDateDesc sortByDateDesc Sort by dates from newest to oldest Nach Datum sortieren (von neu nach alt) Show Query Details Zeige Details zur Suchanfrage Show results as table Zeige Ergebnisse als Tabelle &Rebuild index Index &neu aufbauen &Show indexed types Zeige indizierte &Typen Shift+PgUp Shift+PgUp &Indexing schedule &Zeitplan für Indizierung E&xternal index dialog Dialog für externe &Indizes &Index configuration &Index-Einstellungen &GUI configuration &GUI-Einstellungen &Results &Ergebnisse Sort by date, oldest first Nach Datum sortieren (von alt nach neu) Sort by date, newest first Nach Datum sortieren (von neu nach alt) Show as table Als Tabelle anzeigen Show results in a spreadsheet-like table Zeigt Ergebnisse als Tabelle an Save as CSV (spreadsheet) file Tabelle als CSV Datei speichern Saves the result into a file which you can load in a spreadsheet Speichert Resultate als Tabellenkalkulations-kompatible CSV-Datei ab Next Page Nächste Seite Previous Page Vorherige Seite First Page Erste Seite Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel File name Dateiname Mime type Mime Type Date Datum Abstract Auszug Author Autor Document size Größe des Dokuments Document date Datum des Dokuments File size Größe der Datei File date Datum der Datei Keywords Schlagworte Original character set Ursprünglicher Zeichensatz Relevancy rating Relevanz-Bewertung Title Titel URL URL Mtime Änderungszeitpunkt Date and time Datum und Uhrzeit Ipath Interner Pfad MIME type Mime-Typ Can't sort by inverse relevance ResList Result list Ergebnisliste Unavailable document Dokument nicht verfügbar Previous Zurück Next Weiter <p><b>No results found</b><br> <p><b>Keine Ergebnisse gefunden</b><br> &Preview &Vorschau Copy &URL &URL kopieren Find &similar documents &Ähnliche Dokumente finden Query details Suchdetails (show query) (Suchanfrage zeigen) Copy &File Name &Dateinamen kopieren Document history Dokumenten-Chronik Preview Vorschau Open Öffnen 
<p><i>Alternate spellings (accents suppressed): </i> <p><i>Alternative Schreibweisen (Akzente unterdrückt): </i> &Write to File &Schreibe in Datei Preview P&arent document/folder Vorschau des &übergeordneten Dokuments/Ordners &Open Parent document/folder Ö&ffnen des übergeordneten Dokuments/Ordners &Open &Öffnen Documents Dokumente out of at least von mindestens for für <p><i>Alternate spellings: </i> <p><i>Alternative Schreibweisen: </i> Open &Snippets window Öffne &Schnipsel-Fenster Duplicate documents Doppelte Dokumente These Urls ( | ipath) share the same content: ipath? Diese URLs ( | ipath) sind inhaltsgleich: Result count (est.) Anzahl Ergebnisse (ca.) Snippets Schnipsel ResTable &Reset sort Sortierung &zurücksetzen &Delete column Spalte &löschen Save table to CSV file Tabelle als CSV Datei speichern Can't open/create file: Fehler beim Öffnen/Erzeugen von Datei: &Preview &Vorschau &Open &Öffnen Copy &File Name &Dateinamen kopieren Copy &URL &URL kopieren &Write to File &Schreibe in Datei Find &similar documents &Ähnliche Dokumente finden Preview P&arent document/folder Vorschau des &übergeordneten Dokuments/Ordners &Open Parent document/folder Ö&ffnen des übergeordneten Dokuments/Ordners &Save as CSV Als CSV &speichern Add "%1" column Spalte "%1" hinzufügen ResTableDetailArea &Preview &Vorschau &Open &Öffnen Copy &File Name &Dateinamen kopieren Copy &URL &URL kopieren &Write to File &Schreibe in Datei Find &similar documents &Ähnliche Dokumente finden Preview P&arent document/folder Vorschau des &übergeordneten Dokuments/Ordners &Open Parent document/folder Ö&ffnen des übergeordneten Dokuments/Ordners ResultPopup &Preview &Vorschau &Open &Öffnen Copy &File Name &Dateinamen kopieren Copy &URL &URL kopieren &Write to File &Schreibe in Datei Save selection to files Auswahl in Dateien sichern Preview P&arent document/folder Vorschau des &übergeordneten Dokuments/Ordners &Open Parent document/folder Ö&ffnen des übergeordneten Dokuments/Ordners Find &similar documents &Ähnliche Dokumente finden Open &Snippets window Öffne &Schnipsel-Fenster Show subdocuments / attachments Untergeordnete Dokumente / Anhänge anzeigen SSearch Any term Irgendein Ausdruck All terms Alle Ausdrücke File name Dateiname Completions Vervollständigungen Select an item: Wählen Sie ein Element: Too many completions Zu viele Vervollständigungen Query language Abfragesprache Bad query string Fehlerhafte Suchanfrage Out of memory Kein Speicher mehr verfügbar Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Geben Sie einen Abfragesprachen-Ausdruck ein. 
Spickzettel:<br> <i>Begriff1 Begriff2</i> : 'Begriff1' und 'Begriff2' in irgendeinem Feld.<br> <i>field:Begriff1</i> : 'Begriff1' im Feld 'field'.<br> Standard-Feldnamen/Synonyme:<br> title/subject/caption, author/from, recipient/to, filename, ext<br> Pseudo-Felder: dir, mime/format, type/rclcat, date<br> Zwei Beispiele für Datumsintervalle: 2009-03-01/2009-05-20 2009-03-01/P2M<br> <i>Begriff1 Begriff2 OR Begriff3</i> : Begriff1 AND (Begriff2 OR Begriff3)<br> Klammern sind nicht erlaubt.<br> <i>"Begriff1 Begriff2"</i> : Phrase (muss genaus so vorkommen). Mögliche Modifikatoren:<br> <i>"Begriff1 Begriff2"p</i> : ungeordnete Nähen-Suche mit voreingestelltem Abstand.<br> Im Zweifelsfalle verwenden Sie den Link <b>Suchanfrage zeigen</b> und finden im Handbuch (&lt;F1>) weitere Details. Enter file name wildcard expression. Geben Sie einen Wildcard-Ausdruck für Dateinamen ein. Enter search terms here. Type ESC SPC for completions of current term. Suchbegriffe hier eingeben. Drücken Sie ESC+Leerzeichen für Vervollständigungen des aktuellen Begriffs. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear Löschen Ctrl+S Ctrl+S Erase search entry Sucheintrag löschen Search Suchen Start query Suche starten Enter search terms here. Type ESC SPC for completions of current term. Suchbegriffe hier eingeben. Drücken Sie ESC+Leerzeichen für Vervollständigungen des aktuellen Begriffs. Choose search type. Wählen Sie die Art der Suche Show query history SearchClauseW SearchClauseW SearchClauseW Any of these Irgendeins dieser All of these Alle diese None of these Keins dieser This phrase diese Wörter Terms in proximity ähnliche Ausdrücke File name matching passende Dateinamen Select the type of query that will be performed with the words Wählen Sie die Art der Suche aus, die mit den Wörtern gestartet wird. Number of additional words that may be interspersed with the chosen ones Anzahl der Wörter, die sich zwischen den angegebenen befinden dürfen No field Kein Feld Any Irgendeines All Alle None Keines Phrase Phrase Proximity Nähe File name Dateiname Snippets Snippets Schnipsel about:blank about:blank Find: Finden: Next Weiter Prev Zurück X X SnippetsW Search Suchen <p>Sorry, no exact match was found within limits. 
Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SortForm Date Datum Mime type Mime Type SortFormBase Sort Criteria Sortierkriterium Sort the Zeige die most relevant results by: relevantesten Ergebnisse sortiert nach: Descending Absteigend Close Schließen Apply Übernehmen SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Ausdruck-Explorer &Expand &Vervollständigen Alt+E Alt+V &Close &Schließen Alt+C Alt+S Term Ausdruck No db info. Keine Datenbank-Information Match Beachte Case Groß-/Kleinschreibung Accents Betonungszeichen SpellW Wildcards Wildcards Regexp Regulärer Ausdruck Spelling/Phonetic Phonetisch Aspell init failed. Aspell not installed? Fehler bei der Initialisierung von Aspell. Ist Aspell nicht installiert? Aspell expansion error. Aspell Vervollständigungsfehler Stem expansion Wortstamm-Erweiterung error retrieving stemming languages Fehler beim Holen der Stemming-Sprachen No expansion found Keine Erweiterung gefunden Term Begriff Doc. / Tot. Dok. / Ges. Index: %1 documents, average length %2 terms Index: %1 Dokumente mit durchschnittlicher Länge von %2 Begriffen Index: %1 documents, average length %2 terms.%3 results Index: %1 Dokumente mit durchschnittlicher Länge von %2 Begriffen. %3 Ergebnisse %1 results %1 Ergebnisse List was truncated alphabetically, some frequent Liste wurde alphabetisch abgeschnitten, einige häufige Begriffe terms may be missing. Try using a longer root. können fehlen. Versuchen Sie es mit einer längeren Wurzel. Show index statistics Indexstatistiken anzeigen Number of documents Dokumentenzahl Average terms per document Durchschnittliche Zahl von Ausdrücken pro Dokument Smallest document length Minimale Zahl von Ausdrücken Longest document length Maximale Zahl von Ausdrücken Database directory size Größe des Datenbankordners MIME types: Mime-Typen: Item Eintrag Value Wert Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog The selected directory does not appear to be a Xapian index Das ausgewählte Verzeichnis scheint kein Xapian-Index zu sein. This is the main/local index! Das ist der Hauptindex! The selected directory is already in the index list Das ausgewählte Verzeichnis ist bereits in der Indexliste. Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Wählen Sie das Xapian-Indexverzeichnis (z.B. 
/home/benutzer/.recoll/xapiandb) error retrieving stemming languages Fehler beim Holen der Stemming-Sprachen Choose Auswählen Result list paragraph format (erase all to reset to default) Format für Ergebnis-Absatz (alles löschen, um auf Standard zurück zu setzen) Result list header (default is empty) Header der Ergebnisliste (Standard ist leer) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Wählen Sie den Recoll-Konfigurationsordner oder das Xapian-Indexverzeichnis aus (z.B. /home/ich/.recoll oder /home/ich/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read Der ausgewählten Ordner handelt scheint Recoll-Konfigurationsordner zu sein, aber die Konfiguration konnte nicht ausgelesen werden At most one index should be selected Bitte wählen Sie maximal einen Index aus Cant add index with different case/diacritics stripping option Indices mit unterschiedlichen Einstellungen zum Umgang mit Groß/-Kleinschreibung und diakritischen Zeichen können nicht hinzugefügt werden Default QtWebkit font Any term Irgendein Ausdruck All terms Alle Ausdrücke File name Dateiname Query language Abfragesprache Value from previous program exit UIPrefsDialogBase User interface Benutzeroberfläche Number of entries in a result page Anzahl der Ergebnisse pro Seite Result list font Schriftart für Ergebnisliste Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Öffnet einen Dialog zur Auswahl der Schriftart für die Ergebnisliste Reset Reset Resets the result list font to the system default Setzt die Schriftart für die Ergebnisliste zurück auf den Standardwert Auto-start simple search on whitespace entry. Automatisch eine einfache Suche starten, wenn ein Worttrenner im Sucheingabefeld eingegeben wird. Start with advanced search dialog open. Nach dem Start automatisch den Dialog für die erweiterte Suche öffnen. Start with sort dialog open. Nach dem Start automatisch den Sortierdialog öffnen. Search parameters Suchparameter Stemming language Stemming Sprache Dynamically build abstracts Zusammenfassungen dynamisch erzeugen Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Festlegung ob Zusammenfassungen für Ergebnisse im Kontext der Suchparameter erzeugt werden (kann bei großen Dokumenten langsam sein). Replace abstracts from documents Ersetzen der Zusammenfassungen in den Dokumenten Do we synthetize an abstract even if the document seemed to have one? Festlegung ob eine Zusammenfassung auch dann erzeugt wird, wenn das Dokument schon eine Zusammenfassung enthält Synthetic abstract size (characters) Länge der erzeugten Zusammenfassung (Zeichen) Synthetic abstract context words Anzahl der Kontextworte in der Zusammenfassung External Indexes externe Indizes Add index Index hinzufügen Select the xapiandb directory for the index you want to add, then click Add Index Wählen Sie das xapiandb-Verzeichnis des zuzufügenden Indizes und klicken Sie auf Index hinzufügen Browse Auswahl &OK &OK Apply changes Änderungen übernehmen &Cancel &Abbrechen Discard changes Änderungen verwerfen Result paragraph<br>format string Formatstring für Ergebnisse Automatically add phrase to simple searches Automatisches Zufügen von Sätzen zu einfachen Suchen A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered. Eine Suche nach [Jürgen Klinsmann] wird geändert nach [Jürgen OR Klinsmann OR (Jürgen PHRASE 2 Klinsmann)]. Dadurch sollten Ergebnisse, die exakte Übereinstimmungen der Suchworte enthalten, stärker gewichtet werden. User preferences Benutzereinstellungen Use desktop preferences to choose document editor. Die Einstellung des Dokumenteneditors erfolgt in den Desktopvoreinstellungen. External indexes Externe Indizes Toggle selected Auswahl umkehren Activate All Alle Auswählen Deactivate All Alle Abwählen Remove selected Ausgewählte entfernen Remove from list. This has no effect on the disk index. Aus der Liste entfernen. Dies hat keinen Einfluss auf den gespeicherten Index. Remember sort activation state. Speichern, ob Sortieren aktiviert ist ViewAction Changing actions with different current values Aktionen mit anderen Werten ändern Mime type Mime Type Command Befehl MIME type Mime-Typ Desktop Default Desktopvoreinstellung Changing entries with different current values Einträge mit anderen Werten ändern ViewActionBase File type Dateityp Action Aktion Select one or several file types, then click Change Action to modify the program used to open them Wählen Sie einen oder mehrere Dateitypen und klicken Sie auf "Ändere Aktion", um das Programm zum Öffnen anzupassen. Change Action Ändere Aktion Close Schließen Native Viewers Anzeigeprogramme Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. Wählen Sie einen oder mehrere Mime-Typen und klicken Sie auf "Ändere Aktion".<br>Sie können diesen Dialog auch schließen und stattdessen "Die Einstellung des<br> Dokumenteneditors erfolgt in den Desktopeinstellungen" auswählen.<br> Die Liste wird dann igoriert und es werden die Desktopeinstellungen verwendet. Select one or several mime types then use the controls in the bottom frame to change how they are processed. Wählen Sie einen oder mehrere MIME-Typen aus und nutzen Sie dann die Bedienelemente unten, um das Programm zum Öffnen anzupassen. Use Desktop preferences by default Standardmäßig Desktopvoreinstellungen nutzen Select one or several file types, then use the controls in the frame below to change how they are processed Wählen Sie einen oder mehrere Dateitypen aus. Nutzen Sie dann die Bedienelemente unten, um das Programm zum Öffnen anzupassen Exception to Desktop preferences Von Desktopvoreinstellungen abweichende Ausnahme Action (empty -> recoll default) Aktion (leer → Recoll-Voreinstellung) Apply to current selection Auf aktuelle Auswahl anwenden Recoll action: Recoll-Aktion: current value aktueller Wert Select same Das Selbe wählen <b>New Values:</b> <b>Neuer Wert</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfBeaglePanelW Steal Beagle indexing queue Indizierungs-Warteschlange von Beagle übernehmen Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) Beagle darf NICHT laufen. 
Ermöglicht die Abarbeitung der Beagle-Warteschlange, um die Firefox Web-Chronik zu indizieren.<br>(Sie sollten auch das Beagle-Plugin für Firefox installieren.) Entries will be recycled once the size is reached Einträge werden wiederverwendet sobald die Größe erreicht ist. Web page store directory name Verzeichnis zur Ablage von Webseiten The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Der Name eines Verzeichnisses, in dem Kopien der besuchten Webseiten gespeichert werden sollen.<br>Ein nicht-absoluter Pfad ist dabei relativ zum Konfigurationsverzeichnis. Max. size for the web store (MB) Maximale Größe für Ablage von Webseiten (MB) Process the WEB history queue Web-Chronik confgui::ConfIndexW Can't write configuration file Fehler beim Schreiben der Konfigurationsdatei confgui::ConfParamFNW Choose Auswählen confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSearchPanelW Automatic diacritics sensitivity Automatisch diakritische Zeichen beachten <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p> Automatisch die Beachtung von diakritischen Zeichen einschalten, wenn der Suchbegriff Zeichen mit Akzenten enthält (nicht in unac_except_trans). Ansonsten müssen Sie dafür die Abfrageprache und den <i>D</i> Modifikator verwenden. Automatic character case sensitivity Automatisch Groß-/Kleinschreibung beachten <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p> Automatisch die Beachtung von Groß-/Kleinschreibung einschalten, wenn der Eintrag Großbuchstaben enthält (außer an erster Stelle). Ansonsten müssen Sie dafür die Abfragesprache und den <i>C</i> Modifikator verwenden. Maximum term expansion count Maximale Anzahl von Ausdruck-Erweiterungen <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Maximale Anzahl von Erweiterungen für einen einzelnen Ausdruck (z.B. bei der Verwendung von Wildcards). Der Standardwert 10 000 ist vernünftig und verhindert, dass Suchanfragen scheinbar einfrieren, während die Liste der Begriffe durchlaufen wird. Maximum Xapian clauses count Maximale Anzahl von Xapian-Ausdrücken <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Maximale Anzahl von elementaren Ausdrücken, die wir zu einer einzelnen Xapian Abfrage hinzufügen. In manchen Fällen können die Ergebnisse von Ausdruck-Erweiterungen sich ausmultiplizieren, und wir wollen übermäßigen Speicherverbrauch vermeiden. Der Standardwert 100 000 sollte in den meisten Fällen hoch genug sein und zugleich zu typischen derzeitigen Hardware-Ausstattungen passen. confgui::ConfSubPanelW Global Global Max. compressed file size (KB) Max. Größe kompr. Dateien (kB) This value sets a threshold beyond which compressedfiles will not be processed. 
Set to -1 for no limit, to 0 for no decompression ever. Dies ist eine Obergrenze; komprimierte Dateien jenseits dieser Größe werden nicht verarbeitet. Auf -1 setzen, um keine Obergrenze zu haben, auf 0, um nie zu dekomprimieren. Max. text file size (MB) Max. Größe Textdateien (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Dies ist eine Obergrenze; Textdateien jenseits dieser Größe werden nicht verarbeitet Auf -1 setzen, um keine Obergrenze zu haben. Dies dient dazu, riesige Log-Dateien vom Index auszuschließen. Text file page size (KB) Seitengröße Textdateien (kB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Wenn dieser Wert gesetzt ist (ungleich -1), werden Textdateien zur Indizierung in Stücke dieser Größe aufgeteilt. Das hilft bei der Suche in sehr großen Textdateien (z.B. Log-Dateien). Max. filter exec. time (S) Max. Zeit für Filter (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. Externe Filter, die länger als diese Zeit laufen, werden abgebrochen. Das ist für den seltenen Fall (Postscript), in dem ein Dokument eine unendliche Schleife auslöst. Auf -1 setzen, um keine Obergrenze zu haben. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Externe Filter, die länger als diese Zeit laufen, werden abgebrochen. Das ist für den seltenen Fall (Postscript), in dem ein Dokument eine unendliche Schleife auslöst. Auf -1 setzen, um keine Obergrenze zu haben. confgui::ConfTopPanelW Top directories Start-Verzeichnisse The list of directories where recursive indexing starts. Default: your home. Die Liste der Verzeichnisse, in denen die rekursive Indizierung startet. Standard: Home-Verzeichnis. Skipped paths Auszulassende Pfade These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Die Namen der Verzeichnisse, die nicht indiziert werden.<br>Kann Wildcards enthalten. Muss den Pfaden entsprechen, die der Indizierer sieht (d.h.. wenn '/home/me' in den Start-Verzeichnissen steht und '/home' eigentlich ein Link zu '/usr/home' ist, dann wäre ein korrekter Eintrag '/home/me/tmp*' und nicht '/usr/home/me/tmp*') Stemming languages Stemming-Sprachen The languages for which stemming expansion<br>dictionaries will be built. Die Sprachen, für die Worstammerweiterungsverzeichnisse erstellt werden. Log file name Log-Datei The file where the messages will be written.<br>Use 'stderr' for terminal output Die Datei, in die Ausgaben geschrieben werden.<br>Für Ausgaben auf dem Terminal 'stderr' benutzen. Log verbosity level Ausführlichkeit des Logs This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Dieser Wert steuert die Menge der Meldungen<br>(nur Fehler oder viele Debugging Ausgaben). Index flush megabytes interval Interval (MB) für Speicherleerung This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. 
Default 10MB Dieser Wert steuert, wieviel Daten indiziert werden bevor die Indexinformationen auf Festplatte geschrieben werden.<br>Hierdurch kann der Speicherverbrauch des Indizierers gesteuert werden. Standardwert: 10MB Max disk occupation (%) Max. Festplattenbelegung (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Dies ist der Prozentsatz der Festplattenbelegung, ab dem die Indizierung gestoppt wird (um das Füllen der Festplatte zu vermeiden).<br>0 bedeutet keine Begrenzung (das ist der Standardwert). No aspell usage Aspell nicht benutzen Aspell language Sprache für Aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Die Sprache des Aspell-Wörterbuchs (z.B. 'en' oder 'de' ...)<br>Wenn dieser Wert nicht gesetzt ist, wird die NLS-Umgebung verwendet, um die Sprache festzustellen, was im Allgemeinen funktioniert. Um eine Vorstellung zu bekommen, was auf Ihrem System installiert ist, geben Sie 'aspell config' ein und schauen Sie nach .dat Dateien im Verzeichnis 'data-dir'. Database directory name Verzeichnis für Index-Datenbank The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Der Name eines Verzeichnisses, in dem der Index gespeichert werden soll.<br>Ein nicht-absoluter Pfad ist dabei relativ zum Konfigurationsverzeichnis. Der Standard ist 'xapiandb'. Use system's 'file' command 'file' Kommando benutzen Use the system's 'file' command if internal<br>mime type identification fails. Benutze das 'file' Kommando, wenn die interne Erkennung des Mime-Typs fehlschlägt. Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Deaktiviert die Verwendung von Aspell für die Erzeugung von Schreibweisen-Näherungen im Ausdruck-Explorer-Werkzeug. <br>Nützlich, wenn Aspell nicht vorhanden ist oder nicht funktioniert. The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Die Sprache des Aspell-Wörterbuchs (z.B. 'en' oder 'de' ...)<br>Wenn dieser Wert nicht gesetzt ist, wird die NLS-Umgebung verwendet, um die Sprache festzustellen, was im Allgemeinen funktioniert. Um eine Vorstellung zu bekommen, was auf Ihrem System installiert ist, geben Sie 'aspell config' ein und schauen Sie nach .dat Dateien im Verzeichnis 'data-dir'. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Der Name eines Verzeichnisses, in dem der Index gespeichert werden soll.<br>Ein nicht-absoluter Pfad ist dabei relativ zum Konfigurationsverzeichnis. Der Standard ist 'xapiandb'. Unac exceptions Unac Ausnahmen <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. 
for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Dies sind Ausnahmen für den unac Mechanismus, der standardmäßig alle diakritischen Zeichen entfernt und sie durch kanonische Entsprechungen ersetzt. Sie können (abhängig von Ihrer Sprache) dieses Entfernen von Akzenten für einige Zeichen übersteuern und zusätzliche Ersetzungen angeben, z.B. für Ligaturen. Bei jedem durch Leerzeichen getrennten Eintrag ist das erste Zeichen das Ausgangszeichen und der Rest die Ersetzung. uiPrefsDialogBase User preferences Benutzereinstellungen User interface Benutzeroberfläche Number of entries in a result page Anzahl der Ergebnisse pro Seite If checked, results with the same content under different names will only be shown once. Bei Auswahl werden Ergebnisse mit dem gleichen Inhalt unter verschiedenen Namen nur einmal gezeigt. Hide duplicate results. Verstecke doppelte Ergebnisse Highlight color for query terms Farbe zur Hervorhebung von Suchbegriffen Result list font Schriftart für Ergebnisliste Opens a dialog to select the result list font Öffnet einen Dialog zur Auswahl der Schriftart für die Ergebnisliste Helvetica-10 Helvetica-10 Resets the result list font to the system default Setzt die Schriftart für die Ergebnisliste auf den Standardwert zurück Reset Zurücksetzen Result paragraph<br>format string Formatstring für Ergebnisse Texts over this size will not be highlighted in preview (too slow). Texte über dieser Größe werden in der Vorschau nicht mit Hervorhebungen versehen (zu langsam). Maximum text size highlighted for preview (megabytes) Maximale Textgröße für Vorschau-Hervorhebung Use desktop preferences to choose document editor. Einstellung des Dokumenteneditors erfolgt in den Desktopeinstellungen Choose editor applications Standardanwendungen auswählen Display category filter as toolbar instead of button panel (needs restart). Kategorie-Filter in Werkzeugleiste statt als Radio-Buttons (Neustart erforderlich) Auto-start simple search on whitespace entry. Automatisch eine einfache Suche starten, wenn ein Worttrenner eingegeben wird Start with advanced search dialog open. Nach dem Start automatisch den Dialog für die erweiterte Suche öffnen Start with sort dialog open. Nach dem Start automatisch den Sortierdialog öffnen. Remember sort activation state. Speichern, ob Sortierung aktiviert ist Prefer Html to plain text for preview. Bei Vorschau HTML gegenüber reinem Text bevorzugen Search parameters Suchparameter Stemming language Stemming-Sprache A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Eine Suche nach [Rolling Stones] wird geändert zu [Rolling OR Stones OR (Rolling PHRASE 2 Stones)]. Dadurch sollten Ergebnisse, in denen die Suchworte genau wie eingegeben auftreten, stärker gewichtet werden. Automatically add phrase to simple searches Automatisches Hinzufügen von Phrasen zu einfachen Suchen Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Versuchen wir, Zusammenfassungen für Ergebnisse aus den Fundstellen zu erzeugen? Dies kann bei großen Dokumenten langsam sein. Dynamically build abstracts Zusammenfassungen dynamisch erzeugen Do we synthetize an abstract even if the document seemed to have one? Erzeugen wir eine Zusammenfassung auch dann, wenn das Dokument schon eine Zusammenfassung enthält? 
Replace abstracts from documents Ersetzen der Zusammenfassungen aus Dokumenten Synthetic abstract size (characters) Länge der erzeugten Zusammenfassung (in Zeichen) Synthetic abstract context words Anzahl der Kontextworte in der Zusammenfassung The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Die Worte in dieser Liste werden automatisch zu ext:xxx Ausdrücken im Abfragesprachen-Eintrag umgewandelt. Query language magic file name suffixes. Magische Dateinamen-Erweiterungen für Abfragesprache Enable Aktivieren External Indexes Externe Indizes Toggle selected Auswahl umkehren Activate All Alle auswählen Deactivate All Alle abwählen Remove from list. This has no effect on the disk index. Aus der Liste entfernen. Dies hat keinen Einfluss auf den gespeicherten Index. Remove selected Ausgewählte entfernen Click to add another index directory to the list Anklicken, um ein weiteres Index-Verzeichnis zur Liste hinzuzufügen Add index Index hinzufügen Apply changes Änderungen übernehmen &OK &OK Discard changes Änderungen verwerfen &Cancel &Abbrechen Abstract snippet separator Trenner für Zusammenfassungs-Teile Style sheet Style Sheet Opens a dialog to select the style sheet file Öffnet einen Dialog zur Auswahl der Style Sheet Datei Choose Auswählen Resets the style sheet to default Setzt das Style Sheet auf den Standardwert zurück Lines in PRE text are not folded. Using BR loses some indentation. Zeilen in PRE-Text werden nicht umgebrochen. Bei Verwendung von BR gehen manche Einrückungen verloren. Use <PRE> tags instead of <BR>to display plain text as html in preview. <PRE> Tags statt <BR> verwenden, um Texte in der Vorschau als HTML anzuzeigen Result List Ergebnisliste Edit result paragraph format string Format-String für Ergebnis-Absatz editieren Edit result page html header insert HTML-Header der Ergebnisseite ergänzen Date format (strftime(3)) Datumsformat (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Häufigkeitsschwellwert in Prozent, über dem Begriffe nicht beim automatischen Hinzufügen von Phrasen verwendet werden. Häufige Begriffe beeinträchtigen die Performance bei Phrasen stark. Weggelassene Begriffe erhöhen den Phrasen-Slack und vermindern den Nutzender automatischen Phrasen. Der Standardwert ist 2. Autophrase term frequency threshold percentage Häufigkeitsschwellwert für automatische Phrasen Plain text to HTML line style Zeilen-Stil für Umwandlung von Text in HTML Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Zeilen in PRE-Text werden nicht umgebrochen. Bei Verwendung von BR gehen manche Einrückungen verloren. Möglicherweise ist der Stil 'PRE + Umbruch' das, was Sie wollen. <BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE> + Umbruch Exceptions Ausnahmen Mime types that should not be passed to xdg-open even when "Use desktop preferences" is set.<br> Useful to pass page number and search string options to, e.g. evince. Mime-Typen, die nicht an xdg-open übergeben werden sollen, selbst wenn Desktopvoreinstellungen gewählt wurden.<br> Nützlich, um Seitenzahl und Suchstring zu übergebn, z.B. an evince. Disable Qt autocompletion in search entry. Qt-Autovervollständigung in Suchleiste deaktivieren. Search as you type. Suche beim Eintippen starten. 
Paths translations Pfadumwandlung Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Klicken Sie hier um einen weiteren Indexordner zur Liste hinzuzufügen. Sie können entweder einen Recoll-Konfigurationsordner oder einen Xapian-Index auswählen. Snippets window CSS file Schnipsel-Fenster CSS Datei Opens a dialog to select the Snippets window CSS style sheet file Öffnet einen Dialog zur Auswahl der Schnipsel-Fenster CSS Style Sheet Datei Resets the Snippets window style Setzt das Schnipsel-Fenster Style Sheet auf den Standardwert zurück Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_es.qm0000644000175000017500000025544013566424763014254 00000000000000.cLDo2N/XM8Xo`^EhqltnwwkQv& +GcxCG,ͺAz LcSI(B؅ p#2vvw 5w 5|w 5wU%.8ֳ 6f3ͼu9ggd׸~;U pUqe K!DC&/.N=dD?dG$"JUYbJUYtY [u_nNHmu1ʷ^Iʗrʗ <@^5WL*x,PJg0>A:>% A!B 1<.|Zd;[f<zLNBX4Ke 2vh@Z(Pb8"3uZIryInnff ef'~,rWpWy%n y%0΄Ԛ: ^#1/X|DGWl̔ iiR -Z-3)TBB9(BSkosr?)r"|, 0<"EASxâӱ @  RI^G~Cj?>G'` Mt$" UKDp h%u9.v29T#RK6 %On\7 !^[GXnD?w)>C&]!vb#+G7I^<~ FW#qFN-H:"_guvap.f r|N:ϗcnTۤ blsÓtÓtȍɆtKf]8q 2# C,y*:u=Jn(Q}};EG07B"cL@m@4c 䴥9?/ Hܔ -(r 9Zy ;3J D&9 K ]# cC k lM\ I qDW  J ,9 9F ü>?z 3 ' *N :^ c } *Rok +<1 6 >V: G.~ `P ` aE cE d8 y I D{ VT/" C     ԅ0 yem TH  ,x@ =! Kj h  ֯  Σ r @ ٷw ۷ ( ?7( Vd͠ D 'И +bCq /Ω 97[ 9ɝ L*$ P֙ RV T# V0 \iCAS ]. 
`F { h vn {l> !Y !Y# WR     + i' +  3 %w ~5 N  m 'R -M 8" F̙ OE ]st ] u0C y y~b 3  ȩ*M u u P P 5d;Q $ m- 7 ( Ւq, H Q5/ £  q]5 %nW/.!8b 9<I u]Q~@Y~s[st\nAe3g3`p~_ <+!$c}mc |0{|E' lELDiL&Todas las clusulas All clauses AdvSearch$Cualquier clusula Any clause AdvSearchfSufijo multiplicador incorrecto en filtro de tamao$Bad multiplier suffix in size filter AdvSearch mediosmedia AdvSearchmensajemessage AdvSearch otrosother AdvSearchpresentacin presentation AdvSearchhoja de clculo spreadsheet AdvSearch hojas de clculo spreadsheets AdvSearch textotext AdvSearch textostexts AdvSearch<----- Todos <----- All AdvSearchBase<----- Sel <----- Sel AdvSearchBaseAadir clusula Add clause AdvSearchBase"Bsqueda avanzadaAdvanced search AdvSearchBaseTodos ----> All ----> AdvSearchBasejTodos los campos no vacos a la derecha sern combinados con conjunciones AND (opcin "Todas las clusulas") o OR (opcin "Cualquier clusula").<br>Los campos "Cualquiera", "Todas" y "Ninguna" pueden aceptar una mezcla de palabras simples y frases dentro de comillas dobles.<br>Campos sin datos son ignorados.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBase BuscarBrowse AdvSearchBasePor categoras By categories AdvSearchBaseXMarque esto para habilitar filtros en fechas'Check this to enable filtering on dates AdvSearchBasenMarque esto para habilitar filtros en tipos de archivos,Check this to enable filtering on file types AdvSearchBaseZMarque esto para habilitar filtros en tamaos'Check this to enable filtering on sizes AdvSearchBasenMarque esto para usar categoras en lugar de tipos MIME;Check this to use file categories instead of raw mime types AdvSearchBase CerrarClose AdvSearchBaseBorrar clusula Delete clause AdvSearchBaseVIngrese directorio inicial para la bsquedaEnter top directory for search AdvSearchBase FiltroFilter AdvSearchBaseFiltrar fechas Filter dates AdvSearchBase"Filtro de tamaos Filter sizes AdvSearchBase BuscarFind AdvSearchBase DesdeFrom AdvSearchBase6Tipos de archivos ignoradosIgnored file types AdvSearchBaseInvertirInvert AdvSearchBaseTamao mximo Max. Size AdvSearchBaseTamao mximo. Puede utilizar k/K, m/M o g/G como multiplicadores4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseTamao Mnimo Min. Size AdvSearchBaseTamao mnimo. Puede utilizar k/K, m/M o g/G como multiplicadores4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase6Restringir tipos de archivoRestrict file types AdvSearchBasedRestringir resultados a archivos en subdirectorio:%Restrict results to files in subtree: AdvSearchBase6Guardar como predeterminadoSave as default AdvSearchBaseHBuscar documentos<br>que satisfagan:'Search for
documents
satisfying: AdvSearchBase4Tipos de archivos buscadosSearched file types AdvSearchBaseSel -----> Sel -----> AdvSearchBase Iniciar bsqueda Start Search AdvSearchBase HastaTo AdvSearchBase^<p>Habilitar automticamente la sensibilidad a las maysculas/minsculas si la entrada tiene caracteres en mayscula en una posicin distinta al primer caracter. De otra forma necesita usar el lenguaje de bsqueda y el modificador <i>C</i> para especificar la sensibilidad a las maysculas y minsculas.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity. ConfIndexW2<p>Habilitar automáticamente la sensibilidad de diacríticos si el término de búsqueda tiene caracteres acentuados (no presentes en unac_except_trans). De otra forma necesita usar el lenguaje de búsqueda y el modificador <i>D</i> para especificar la sensibilidad de los diacríticos.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity. ConfIndexW<p>Máxima expansión de conteo para un solo término (ej: cuando se usan comodines). El valor por defecto de 10000 es razonable y evitará consultas que parecen congelarse mientras el motor de búsqueda recorre la lista de términos.
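A few illustrative query-language expressions, using only the field names and forms given in the cheat sheet above; the values are invented, and the exact placement of the C and D modifiers should be checked against the manual:

    author:john date:2009-03-01/P2M
    dir:/home/me/projects mime:text/plain "index flush"p
    "Recoll"C      case-sensitive match, assumed by analogy with the "..."p form
    "café"D        diacritics-sensitive match, same assumption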

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. ConfIndexW<p>Número máximo de cláusulas elementales agregadas a una consulta de Xapian. En algunos casos, el resultado de la expansión de términos puede ser multiplicativo, y deseamos evitar el uso excesivo de memoria. El valor por defecto de 100000 debería ser lo suficientemente alto en la mayoría de los casos, y compatible con las configuraciones de hardware típicas en la actualidad.5
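The two limits described above can also be set in the index configuration; a minimal recoll.conf sketch follows, assuming the parameter names maxTermExpand and maxXapianClauses and the default values quoted in the text (both names should be verified against the recoll.conf documentation):

    # term expansion and Xapian clause limits discussed above (names assumed)
    maxTermExpand = 10000
    maxXapianClauses = 100000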

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfIndexWH<p>Estas son excepciones al mecanismo unac, el cual, de forma predeterminada, elimina todos los diacríticos, y realiza una descomposición canónica. Es posible prevenir la eliminación de acentos para algunos caracteres, dependiendo de su lenguaje, y especificar descomposiciones adicionales, por ejemplo, para ligaturas. En cada entrada separada por espacios, el primer caracter es el origen, y el resto es la traducción.l
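A minimal sketch of the entry format just described, as it could appear in recoll.conf; the variable name unac_except_trans is the one referenced above, but the individual entries here are only examples, not the shipped default list:

    # each space-separated entry: first character = source, rest = replacement
    unac_except_trans = æae œoe ßss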

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. ConfIndexWLenguaje AspellAspell language ConfIndexWSensibilidad automtica a la distincin de maysculas/minsculas de los caracteres$Automatic character case sensitivity ConfIndexWLSensibilidad automtica de diacrticos Automatic diacritics sensitivity ConfIndexWZNo se puede escribir archivo de configuracinCan't write configuration file ConfIndexWLNombre del directorio de base de datosDatabase directory name ConfIndexW@Deshabilita el uso de aspell para generar aproximaciones ortogrficas en la herramienta explorador de trminos.<br>til si aspell no se encuentra o no funciona.Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexWHabilita la indexación de páginas visitadas en Firefox.<br>(necesita también el plugin Recoll para Firefox)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin) ConfIndexW&Parmetros globalesGlobal parameters ConfIndexW\Intervalo en megabytes de escritura del ndiceIndex flush megabytes interval ConfIndexW$Parmetros localesLocal parameters ConfIndexW:Nombre de archivo de registro Log file name ConfIndexW@Nivel de verbosidad del registroLog verbosity level ConfIndexWLTamao mximo para el almacn web (MB) Max. size for the web store (MB) ConfIndexWHMximo conteo de clusulas de XapianMaximum Xapian clauses count ConfIndexWLMximo conteo de expansin de trminosMaximum term expansion count ConfIndexW$No utilizar aspellNo aspell usage ConfIndexWDProcesar la cola del historial WEBProcess the WEB history queue ConfIndexW,Parmetros de bsquedaSearch parameters ConfIndexW(Directorios omitidos Skipped paths ConfIndexW*Lenguajes para racesStemming languages ConfIndexWEl archivo donde los mensajes sern escritos.<br>Use 'stderr' para salida a la terminalPThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWEl lenguaje para el diccionario aspell. Esto debera ser algo como 'en' o 'fr' ...<br>Si no se establece este valor, el ambiente NLS ser utilizado para calcularlo, lo cual usualmente funciona. Para tener una idea de lo que est instalado en sus sistema, escriba 'aspell-config' y busque archivos .dat dentro del directorio 'data-dir'.3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.  ConfIndexWLos lenguajes para los cuales los diccionarios de expansin de races sern creados.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWLa lista de directorios donde la indexacin recursiva comienza. Valor por defecto: su directorio personal.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexWTEl nombre del directorio dnde almacenar las copias de pginas web visitadas.<br>Una ruta de directorio no absoluta es utilizada, relativa al directorio de configuracin.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexWZEl nombre de un directorio donde almacenar el ndice.<br>Una ruta no absoluta se interpreta como relativa al directorio de configuracin. El valor por defecto es 'xapiandb'.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. ConfIndexW8Este valor ajusta la cantidad de datos indexados entre escrituras al disco.<br> Esto ayuda a controlar el uso de memoria del indexador. Valor estndar 10MB This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWEste valor ajusta la cantidad de mensajes,<br>desde solamente errores hasta montones de informacin de depuracin.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexW*Directorios primariosTop directories ConfIndexW Excepciones UnacUnac exceptions ConfIndexWdNombre del directorio del almacn para pginas webWeb page store directory name ConfIndexWFiltros externos que se ejecuten por ms tiempo del establecido sern detenidos. Esto es por el caso inusual (ej: postscript) dnde un documento puede causar que un filtro entre en un ciclo infinito. Establezca el nmero -1 para indicar que no hay lmite.External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.  ConfSubPanelW GlobalGlobal ConfSubPanelWSi se utiliza este valor (diferente de -1), los archivos de texto sern separados en partes de este tamao para ser indexados. Esto ayuda con las bsquedas de archivos de texto muy grandes (ej: archivos de registro).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWPTamao mximo de archivo comprimido (KB)Max. compressed file size (KB) ConfSubPanelWPTamao mximo para archivo de texto (MB)Max. text file size (MB) ConfSubPanelWVTamao de pgina para archivo de texto (KB)Text file page size (KB) ConfSubPanelWdEste valor establece un umbral mas all del cual los archivos<br>comprimidos no sern procesados. Escriba 1 para no tener lmite,<br>o el nmero 0 para nunca hacer descompresin.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelWEste valor establece un umbral ms all del cual los archivos de texto no sern procesados.<br>Escriba 1 para no tener lmites. Este valor es utilizado para excluir archivos de registro gigantescos del ndice.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> horario de indexado por lotes (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cada campo puede contener un comodn (*), un valor numrico nico, listas separadas por comas (1,3,5) y rangos (1-7). 
Ms generalmente, los campos sern usados <span style=" font-style:italic;">tal como son</span> dentro del archivo crontab, y toda la sintaxis crontab puede ser usada, ver crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Por ejemplo, ingresar <span style=" font-family:'Courier New,courier';">*</span> en <span style=" font-style:italic;">Das, </span><span style=" font-family:'Courier New,courier';">12,19</span> en <span style=" font-style:italic;">Horas</span> y <span style=" font-family:'Courier New,courier';">15</span> en <span style=" font-style:italic;">Minutos</span> iniciara recollindex cada da a las 12:15 AM y 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Un horario con activaciones frecuentes es probablemente menos eficiente que la indexacin en tiempo real.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as-is inside the crontab file, and the full crontab syntax can be used; see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 PM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.
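As a concrete sketch of the example above, a crontab entry of roughly this shape (not necessarily the exact line the Cron dialog writes):

    # minute hour day-of-month month day-of-week command
    15 12,19 * * * recollindex

With * in the day fields this runs recollindex every day at 12:15 PM and 7:15 PM.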

 CronToolWr<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Presione <span style=" font-style:italic;">Deshabilitar</span> para detener la indexacin automtica por lotes, <span style=" font-style:italic;">Habilitar</span> para activarla, <span style=" font-style:italic;">Cancelar</span> para no cambiar nada.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWVentana de Cron Cron Dialog CronToolWZDas de la semana (* o 0-7, 0 o 7 es Domingo))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWDeshabilitarDisable CronToolWHabilitarEnable CronToolWError al instalar entrada de cron. Sintaxis incorrecta en los campos?3Error installing cron entry. Bad syntax in fields ? CronToolW Horas (* o 0-23)Hours (* or 0-23) CronToolWParece ser que existen entradas para recollindex editadas manualmente, no se puede editar crontabPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinutos (0-59)Minutes (0-59) CronToolW$Ventana de dilogoDialog EditDialog,Error de configuracin Config error EditTransRuta local Local path EditTransRuta original Original path EditTransRuta de origen Source path EditTrans AadirAdd EditTransBaseCancelarCancel EditTransBase BorrarDelete EditTransBase(Ruta de traduccionesPath Translations EditTransBaseGuardarSave EditTransBaseSeleccione uno o ms tipos de archivos, y use los controles en la caja abajo para cambiar cmo se procesankSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBaseHEstablecer ruta de traducciones paraSetting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Parece ser que el ndice para esta configuracin no existe.</span><br /><br />Si solamente desea indexar su directorio personal con un conjunto de valores iniciales razonables, presione el botn <span style=" font-style:italic;">Iniciar indexacin ahora</span>. Es posible ajustar los detalles ms tarde.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Si necesita ms control, use los enlaces siguientes para ajustar la configuracin de indexacin y el horario.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Estas herramientas pueden ser accedidas luego desde el men <span style=" font-style:italic;">Preferencias</span>.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialogFPrimera configuracin de indexacinFirst indexing setupFirstIdxDialog6Configuracin de indexacinIndexing configurationFirstIdxDialog*Horario de indexacinIndexing scheduleFirstIdxDialog0Iniciar indexacin ahoraStart indexing nowFirstIdxDialogBEsto le permite ajustar los directorios que quiere indexar y otros parmetros, como rutas de archivos o nombres excluidos, conjuntos de caracteres estndar, etc.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialog"Esto le permite escoger entre indexacin en tiempo real y por lotes, y configurar un horario automtico para indexar por lotes (utilizando cron).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog D<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">La indexacin de <span style=" font-weight:600;">Recoll</span> puede ejecutarse permanentemente, indexando archivos cuando cambian, o puede ejecutarse en intervalos discretos. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Leer el manual puede ayudarle a decidir entre estos dos mtodos (presione F1).</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Esta herramienta puede ayudarle a configurar un horario para automatizar la ejecucin de indexacin por lotes, o iniciar la indexacin en tiempo real cuando inicia la sesin (o ambos, lo cual rara vez tiene sentido).</p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedWHorario de CronCron scheduling IdxSchedWDecida si la indexacin en tiempo real ser ejecutada cuando inicie la sesin (solo para el ndice estndar).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedWLConfiguracin de horario de indexacinIndex scheduling setup IdxSchedWLInicio de la indexacin en tiempo realReal time indexing start up IdxSchedWEsta herramienta le permite decidir a qu hora la indexacin se ejecutar e instalar una entrada en el crontab._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedW$Ventana de dilogoDialog ListDialogCuadro de grupoGroupBox ListDialog^Directorio de base de datos no est configurado No db directory in configurationMain&Siguiente&NextPreview&Previo &PreviousPreview&Buscar por: &Search for:Preview|No se puede convertir documento a representacin interna para 0Can't turn doc into internal representation for PreviewCancelarCancelPreviewLimpiarClearPreview:Creando texto de vista previaCreating preview textPreviewVCargando texto de vista previa en el editor Loading preview text into editorPreviewD&Coincidir maysculas y minsculas Match &CasePreview6Programa ayudante faltante:Missing helper program: Preview AbrirOpenPreview CopiarCopyPreviewTextEditDoblar lneas Fold linesPreviewTextEdit*Preservar indentacinPreserve indentationPreviewTextEditImprimirPrintPreviewTextEdit8Imprimir vista previa actualPrint Current PreviewPreviewTextEdit>Guardar documento en un archivoSave document to filePreviewTextEdit Seleccionar todo Select AllPreviewTextEditMostrar campos Show fieldsPreviewTextEditMostrar imagen Show imagePreviewTextEdit.Mostrar texto principalShow main textPreviewTextEdit@<b>Subdirectorios personalizadosCustomised subtreesQObject2Seguir enlaces simblicosFollow symbolic linksQObjectSeguir enlaces simblicos al indexar. El valor por defecto es no, para evitar indexar duplicadosTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObjectJIndexar todos los nombres de archivosIndex all file namesQObjecthIndexar los nombres de los archivos para los cuales los contenidos no pueden ser<br>identificados o procesados (tipo MIME invlido o inexistente). El valor por defecto es verdadero}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject Nombres omitidos Skipped namesQObjectLa lista de subdirectorios en la jerarqua indexada<br>dnde algunos parmetros necesitan ser definidos. Valor por defecto: vaco.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectEstos son patrones de nombres de archivos o directorios que no deben ser indexados.LThese are patterns for file or directory names which should not be indexed.QObject &Abrir&OpenQWidgetL&Abrir documento/directorio ascendente&Open Parent document/folderQWidgetCopiar &URL Copy &URLQWidget8Buscar documentos &similaresFind &similar documentsQWidget8Abrir ventana de &fragmentosOpen &Snippets windowQWidget8Guardar seleccin a archivosSave selection to filesQWidget@Mostrar subdocumentos / adjuntosShow subdocuments / attachmentsQWidget<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">La indexacin de <span style=" font-weight:600;">Recoll</span> puede configurarse para ejecutar como un demonio, actualizando el ndice cuando los archivos cambian, en tiempo real. Obtiene un ndice actualizado siempre, pero los recursos del sistema son utilizados permanentemente.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up-to-date index, but system resources are used permanently.
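For reference, starting the daemon with the desktop session is normally done through an XDG autostart entry; the sketch below is an assumption about what such an entry can look like (file location, entry name and the -m monitor option should be checked against the Recoll documentation), not a copy of what the dialog writes:

    # ~/.config/autostart/recollindex.desktop (hypothetical location)
    [Desktop Entry]
    Type=Application
    Name=Recoll real time indexer
    Exec=recollindex -m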

RTIToolWdTambin iniciar demonio de indexacin ahora mismo.%Also start indexing daemon right now.RTIToolWArchivo de autoinicio borrado. Detener el proceso actual tambin?2Autostart file deleted. Kill current process too ?RTIToolW$No se puede crear:Can't create: RTIToolW@No se puede ejecutar recollindexCould not execute recollindexRTIToolW Borrando archivo Deleting fileRTIToolWBorrando: Deleting: RTIToolWbInicio automtico de la indexacin en tiempo real"Real time indexing automatic startRTIToolW*Eliminando autoinicioRemoving autostartRTIToolW(Reemplazando archivoReplacing fileRTIToolWReemplazando: Replacing: RTIToolWzIniciar el demonio de indexacin con mi sesin de escritorio..Start indexing daemon with my desktop session.RTIToolWAdvertenciaWarningRTIToolW*(todos los lenguajes)(all languages)RclMain(sin races) (no stemming)RclMain Acerca de Recoll About RecollRclMainTodoAllRclMainLnea de comando incorrecta de visualizador para %1: [%2] Por favor revise el archivo mimeconfCBad viewer command line for %1: [%2] Please check the mimeview fileRclMain>No se puede accesar el archivo:Can't access file: RclMainRNo se puede crear ventana de vista previaCan't create preview windowRclMainHNo se puede descomprimir el archivo:Can't uncompress file: RclMainpNo se puede actualizar el ndice: indexador en ejecucin#Can't update index: indexer runningRclMainrNo se puede extraer el documento o crear archivo temporal0Cannot extract document or create temporary fileRclMain>No se encuentra documento padreCannot find parent documentRclMainNo se puede recuperar informacin del documento de la base de datos+Cannot retrieve document info from databaseRclMainCerrandoClosingRclMainNo se puede abrir ndice externo. Base de datos no abierta. Revise listado de ndices externos.HCould not open external index. Db not open. Check external indexes list.RclMain.Historial de documentosDocument historyRclMain HechoDoneRclMain*Documentos duplicadosDuplicate documentsRclMainBorrando ndice Erasing indexRclMain ErrorErrorRclMainEjecutando: [ Executing: [RclMainAplicaciones/comandos externos requeridos por sus tipos de archivos y no encontrados, como se almacenaron en el ltimo pase de indexacin en pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMain$Datos de historial History dataRclMain8Error de consulta del ndiceIndex query errorRclMain(Tipos MIME indexadosIndexed MIME TypesRclMain Indexacin fallIndexing failedRclMain.Indexacin en progreso:Indexing in progress: RclMain.Indexacin interrumpidaIndexing interruptedRclMain:Programas ayudantes faltantesMissing helper programsRclMainMonitorMonitorRclMain`No hay visualizador configurado para tipo MIME [-No external viewer configured for mime type [RclMainNo helpers found missingRclMain"No hay resultadosNo results foundRclMainNingunoNoneRclMainPurgeRclMainConsulta en progreso.<br>Debido a limitaciones en la librera de indexacin,<br>cancelar terminar el programaeQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain,Resultados de bsqueda Query resultsRclMainRRestaurar el ndice e iniciar desde cero?(Reset the index and start from scratch ?RclMain6Conteo de resultados (est.)Result count (est.)RclMainGuardar archivo Save fileRclMain RazdbStemdbRclMain&Detener &IndexacinStop &IndexingRclMain2Sub-documentos y adjuntosSub-documents and attachmentsRclMainEl proceso de indexacin actual no se inicio desde esta interfaz. Presione Ok para detenerlo, o Cancelar para dejarlo ejecutaryThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainhThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainhEstos URLs ( | ipath) comparten el mismo contenido: -These Urls ( | ipath) share the same content:RclMain8Esta bsqueda no est activa"This search is not active any moreRclMainDesconocidoUnknownRclMain$Actualizar &ndice Update &IndexRclMainActualizandoUpdatingRclMainLnea de comandos del visualizador para %1 especifica valores para el archivo y el archivo padre: no soportadoQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainAdvertenciaWarningRclMainPerror al recuperar lenguajes para races#error retrieving stemming languagesRclMainfiltradofilteredRclMain mediosmediaRclMainmensajemessageRclMainotrootherRclMainpresentacin presentationRclMainordenadosortedRclMainhoja de clculo spreadsheetRclMain textotextRclMain"&Acerca de Recoll &About Recoll RclMainBase$Bsqueda &Avanzada&Advanced Search RclMainBase>Borrar historial de &documentos&Erase document history RclMainBase:Borrar historial de &bsqueda&Erase search history RclMainBase&Archivo&File RclMainBase$Pantalla &Completa &Full Screen RclMainBase*Configuracin de &GUI&GUI configuration RclMainBase &Ayuda&Help RclMainBase2&Configuracin del ndice&Index configuration RclMainBase&Preferencias &Preferences RclMainBase&&Reconstruir ndice&Rebuild index RclMainBase&Resultados&Results RclMainBase6Parmetros de &ordenamiento&Sort parameters RclMainBase&Herramientas&Tools RclMainBase$Manual de &Usuario &User manual RclMainBase4Bsqueda avanzada/complejaAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase.Historial de DocumentosDocument History RclMainBase0Historial de &DocumentosDocument &History RclMainBase &SalirE&xit RclMainBaseD&Configuracin de ndices externosE&xternal index dialog RclMainBaseBConfiguracin de ndices externosExternal index dialog RclMainBaseF11F11 RclMainBasePrimera Pgina First Page RclMainBasePrimera pgina First page RclMainBase"Pantalla Completa Full Screen RclMainBaseHIr a la primera pgina de resultadosGo to first page of results RclMainBase Pgina Siguiente Next Page RclMainBase Siguiente pgina Next page RclMainBase<Pgina de resultados siguienteNext page of results RclMainBase AvPgPgDown RclMainBase RePgPgUp RclMainBasePgina Anterior Previous Page RclMainBasePgina anterior Previous page RclMainBase:Pgina de resultados anteriorPrevious page of results RclMainBase RecollRecoll RclMainBaseZGuardar como un archivo CSV (hoja de clculo)Save as CSV (spreadsheet) file RclMainBaseGuardar el resultado en un archivo que se puede cargar en una hoja de clculo@Saves the result into a file which you can load in a spreadsheet RclMainBase Maysculas+RePg Shift+PgUp RclMainBaseBMostrar resultados de la consultaShow Query Details RclMainBase$Mostrar como tabla Show as table RclMainBasezMostrar resultados en una tabla similar a una hoja de clculo(Show results in a spreadsheet-like 
table RclMainBaseHOrdenar por fecha, recientes primeroSort by date, newest first RclMainBaseFOrdenar por fecha, antiguos primeroSort by date, oldest first RclMainBaselOrdenar por fechas de la ms reciente a la ms antigua#Sort by dates from newest to oldest RclMainBaselOrdenar por fechas de la ms antigua a la ms reciente#Sort by dates from oldest to newest RclMainBase4Parmetros de ordenamientoSort parameters RclMainBase.&Explorador de trminosTerm &explorer RclMainBaseLHerramienta de exploracin de trminosTerm explorer tool RclMainBase$Actualizar &ndice Update &index RclMainBaseResumenAbstract RecollModel AutorAuthor RecollModel FechaDate RecollModelFecha y hora Date and time RecollModel&Fecha del documento Document date RecollModel(Tamao del documento Document size RecollModel"Fecha del archivo File date RecollModel$Nombre del archivo File name RecollModel$Tamao del archivo File size RecollModel IpathIpath RecollModelPalabras claveKeywords RecollModelTipo MIME MIME type RecollModelFecha ModMtime RecollModel>Conjunto de caracteres originalOriginal character set RecollModel4Calificacin de relevanciaRelevancy rating RecollModel TtuloTitle RecollModelURLURL RecollModel$(mostrar consulta) (show query)ResList@<p><b>No hay resultados</b></br>

No results found
ResList<p><i>Ortografía alterna (acentos suprimidos): </i>

Alternate spellings (accents suppressed): ResList<p><i>Escrituras Alternas: </i>

Alternate spellings: ResList.Historial de documentosDocument historyResListDocumentos DocumentsResListSiguienteNextResList AbrirOpenResListVista previaPreviewResListAnteriorPreviousResList(Detalles de bsqueda Query detailsResList6Conteo de resultados (est.)Result count (est.)ResList&Lista de resultados Result listResListFragmentosSnippetsResList.Documento no disponibleUnavailable documentResListparaforResListde por lo menosout of at leastResList&Borrar columna&Delete columnResTable.&Restaurar ordenamiento &Reset sortResTable"&Guardar como CSV &Save as CSVResTable(Agregar columna "%1"Add "%1" columnResTable@No se puede abrir/crear archivo:Can't open/create file: ResTable6Guardar tabla a archivo CSVSave table to CSV fileResTable$Todos los trminos All termsSSearch"Cualquier trminoAny termSSearch"Consulta invlidaBad query stringSSearchhIngrese expresin de comodn para nombre de archivo.$Enter file name wildcard expression.SSearch"Nombre de archivo File nameSSearchNo hay memoria Out of memorySSearch(Lenguaje de consultaQuery languageSSearch.Elija tipo de bsqueda.Choose search type. SSearchBaseLimpiarClear SSearchBase Ctrl+SCtrl+S SSearchBase4Borrar entrada de bsquedaErase search entry SSearchBaseSSearchBase SSearchBase SSearchBaseBsquedaSearch SSearchBase Iniciar consulta Start query SSearchBaseTodoAll SearchClauseWCualquieraAny SearchClauseW"Nombre de archivo File name SearchClauseWNingn campoNo field SearchClauseWNingunoNone SearchClauseWNmero de palabras adicionales que pueden ser intercaladas con las escogidasHNumber of additional words that may be interspersed with the chosen ones SearchClauseW FrasePhrase SearchClauseWProximidad Proximity SearchClauseWzElija el tipo de consulta que ser realizada con las palabras>Select the type of query that will be performed with the words SearchClauseWBuscar:Find:SnippetsSiguienteNextSnippetsAnteriorPrevSnippetsFragmentosSnippetsSnippets BuscarSearch SnippetsW BuscarBrowseSpecIdxW&Cerrar&Close SpellBase&Expandir&Expand  SpellBaseAcentosAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBase0Distincin de maysculasCase SpellBaseLenguajeMatch SpellBase2No hay informacin de bd. No db info. SpellBase,Explorador de trminos Term Explorer SpellBase%1 resultados %1 resultsSpellW>Trminos promedio por documentoAverage terms per documentSpellWRTamao del directorio de la base de datosDatabase directory sizeSpellWDoc./Tot. Doc. / Tot.SpellWndice: %1 documentos, largo promedio %2 trminos. %3 resultados7Index: %1 documents, average length %2 terms.%3 resultsSpellWElementoItemSpellWpLa lista fue separada alfabticamente, algunos trminos 1List was truncated alphabetically, some frequent SpellWTipos MIME: MIME types:SpellW.Expansin no encontradaNo expansion foundSpellW(Nmero de documentosNumber of documentsSpellW"Expresin regularRegexpSpellW>Mostrar estadsticas del ndiceShow index statisticsSpellW&Ortografa/fonticaSpelling/PhoneticSpellW&Expansin de racesStem expansionSpellWTrminoTermSpellW ValorValueSpellWComodines WildcardsSpellWPerror al recuperar lenguajes para races#error retrieving stemming languagesSpellW~frecuentes pueden no aparecer. Intente usar una raz ms larga..terms may be missing. 
Try using a longer root.SpellW$Todos los trminos All terms UIPrefsDialog"Cualquier trminoAny term UIPrefsDialogPAl menos un ndice debe ser seleccionado$At most one index should be selected UIPrefsDialogNo se puede agregar un ndice con diferente opcin para remover maysculas/minsculas/diacrticos>Cant add index with different case/diacritics stripping option UIPrefsDialog ElegirChoose UIPrefsDialog(Lenguaje de consultaQuery language UIPrefsDialogEncabezado de la lista de resultados (valor por defecto es vaco)%Result list header (default is empty) UIPrefsDialogFormato de prrafo para la lista de resultados (borre todo para volver al valor por defecto)Nuevos valores</b>New Values:ViewActionBaseZAccin (vaco -> valor por defecto de recoll) Action (empty -> recoll default)ViewActionBase:Aplicar a la seleccin actualApply to current selectionViewActionBase CerrarCloseViewActionBaseXExcepcin de las preferencias del escritorio Exception to Desktop preferencesViewActionBase,Visualizadores NativosNative ViewersViewActionBase AccinRecoll action:ViewActionBaseSeleccione uno o ms tipos de archivos, y use los controles en la caja abajo para cambiar cmo se procesankSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseSeleccione uno o ms tipos mime, y use los controles en la caja abajo para cambiar cmo se procesan.lSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBase"Seleccionar misma Select sameViewActionBase\Usar preferencias del escritorio como estndar"Use Desktop preferences by defaultViewActionBase valor current valueViewActionBase ElegirChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW&Cancelar&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
uiPrefsDialogBase <PRE>

uiPrefsDialogBase<PRE> + wrap
 + wrapuiPrefsDialogBaseUna búsqueda por [rolling stones] (2 términos) será cambiada por [rolling or stones or (rolling phrase 2 stones)].
Esto dará mayor precedencia a los resultados en los cuales los términos de búsqueda aparecen exactamente como fueron escritos.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBaseDSeparador de fragmentos de resumenAbstract snippet separatoruiPrefsDialogBaseActivar TodosActivate AlluiPrefsDialogBaseAadir ndice	Add indexuiPrefsDialogBaseAplicar cambios
Apply changesuiPrefsDialogBasebAutomticamente aadir frases a bsquedas simples+Automatically add phrase to simple searchesuiPrefsDialogBasexPorcentaje del umbral de frecuencia de trminos de autofrase.Autophrase term frequency threshold percentageuiPrefsDialogBaseElegirChooseuiPrefsDialogBaseBEscoger aplicaciones para edicinChoose editor applicationsuiPrefsDialogBaseHaga clic para agregar otro directorio de ndice a la lista. Puede seleccionar un directorio de configuracin de Recoll o un ndice Xapian.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBase<Formato de fecha (strftime(3))Date format (strftime(3))uiPrefsDialogBase Desactivar TodosDeactivate AlluiPrefsDialogBasevDeshabilitar autocompletar de Qt en la entrada de bsqueda.*Disable Qt autocompletion in search entry.uiPrefsDialogBase"Descartar cambiosDiscard changesuiPrefsDialogBasexSintetizar un resumen aunque el documento parece tener uno?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBaseFIntentar construir resmenes para elementos en la lista de resultados utilizando el contexto de los trminos de bsqueda?
Puede ser lento para documentos grandes. Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBaseBConstruir resmenes dinmicamenteDynamically build abstractsuiPrefsDialogBasepEditar encabezado html insertado en pgina de resultados#Edit result page html header insertuiPrefsDialogBasejEditar texto de formato para el prrafo de resultados#Edit result paragraph format stringuiPrefsDialogBaseHabilitarEnableuiPrefsDialogBase ndices ExternosExternal IndexesuiPrefsDialogBasedUmbral de porcentaje de frecuencia sobre el cul no utilizamos trminos dentro de la autofrase.
Los términos frecuentes son un problema importante de desempeño con las frases. 
Términos omitidos aumentan la holgura de la frase, y reducen la eficiencia de la autofrase.
El valor por defecto es 2 (por ciento).Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBase>Esconder resultados duplicados.Hide duplicate results.uiPrefsDialogBaseSi est marcado, los resultados con el mismo contenido bajo nombres diferentes sern mostrados solo una vez.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseLas lneas en texto PRE no son dobladas. Al usar BR se pierde indentacin. El estilo PRE + Wrap probablemente es lo que est buscando.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBase|Tamao mximo de texto resaltado para vista previa (megabytes)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBase\Nmero de elementos en la pgina de resultados"Number of entries in a result pageuiPrefsDialogBaseAbre una ventana de dilogo para el archivo de estilos CSS de la ventana de fragmentosAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBaseAbre una ventana para seleccionar el tipo de letra para la lista de resultados-Opens a dialog to select the result list fontuiPrefsDialogBase~Abre una ventana de dilogo para seleccionar la hoja de estilos-Opens a dialog to select the style sheet fileuiPrefsDialogBase*Rutas de traduccionesPaths translationsuiPrefsDialogBaseDTexto comn a estilo de lnea HTMLPlain text to HTML line styleuiPrefsDialogBase^Preferir HTML a texto simple para vista previa.&Prefer Html to plain text for preview.uiPrefsDialogBaseSufijos para nombres mgicos de archivos en el lenguaje de consulta.(Query language magic file name suffixes.uiPrefsDialogBase\Recordar estado de activacin de ordenamiento.Remember sort activation state.uiPrefsDialogBaseEliminar de la lista. Esto no tiene efecto en el ndice en disco.7Remove from list. 
This has no effect on the disk index.uiPrefsDialogBase$Eliminar seleccinRemove selecteduiPrefsDialogBaseLReemplazar resmenes de los documentos Replace abstracts from documentsuiPrefsDialogBaseRestaurarResetuiPrefsDialogBaseEstablece el valor por defecto para el estilo de la ventana de Fragmentos Resets the Snippets window styleuiPrefsDialogBaseRestaurar el tipo de letra de la lista de resultados al valor por defecto del sistema1Resets the result list font to the system defaultuiPrefsDialogBasedRestablecer la hoja de estilo al valor por defecto!Resets the style sheet to defaultuiPrefsDialogBase&Lista de resultadosResult ListuiPrefsDialogBaseLTipo de letra para lista de resultadosResult list fontuiPrefsDialogBase,Parmetros de bsquedaSearch parametersuiPrefsDialogBaseRArchivo CSS para la ventana de fragmentosSnippets window CSS fileuiPrefsDialogBasehIniciar con la ventana de bsqueda avanzada abierta.'Start with advanced search dialog open.uiPrefsDialogBase$Lenguaje de racesStemming languageuiPrefsDialogBaseHoja de estiloStyle sheetuiPrefsDialogBaseXPalabras de contexto del resumen sintetizado Synthetic abstract context wordsuiPrefsDialogBaseVTamao del resumen sintetizado (caracteres)$Synthetic abstract size (characters)uiPrefsDialogBase~Textos ms all de este tamao no sern resaltados (muy lento).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseLas palabras en la lista sern convertidas automticamente a clusulas ext:xxx en el ingreso de lenguaje de consulta.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase"Cambiar seleccinToggle selecteduiPrefsDialogBase&Interfaz de usuarioUser interfaceuiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_it.qm0000644000175000017500000005714713566424763014265 00000000000000-AE'90,>.cdXMMOvG
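The .qm members under qtgui/i18n, such as recoll_it.qm introduced just above, are compiled Qt message catalogs generated from the corresponding .ts sources; at run time the GUI installs the catalog matching the user's locale with QTranslator. The following is a minimal, generic Qt sketch of that loading step, not Recoll's actual startup code; the translations directory used here is hypothetical.

    #include <QApplication>
    #include <QLocale>
    #include <QString>
    #include <QTranslator>

    int main(int argc, char **argv)
    {
        QApplication app(argc, argv);

        // Look for e.g. recoll_it.qm when the locale is it_IT.
        // The directory is an assumption for this sketch; the real install
        // location depends on how the package was built.
        QTranslator translator;
        if (translator.load(QStringLiteral("recoll_") + QLocale::system().name(),
                            QStringLiteral("/usr/share/recoll/translations"))) {
            app.installTranslator(&translator);
        }

        // ... create and show the main window, then enter the event loop ...
        return app.exec();
    }

QTranslator::load() falls back to progressively shorter file names when the exact one is not found, so a catalog named recoll_it.qm is picked up for both "it" and "it_IT" locales.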
l).pLiWTutti i terminiAll clauses	AdvSearch"Qualsiasi termine
Any clause	AdvSearchmultimedialimedia	AdvSearch
altriother	AdvSearch fogli di calcolospreadsheets	AdvSearch
testitexts	AdvSearch<----- Tutti
<----- All
AdvSearchBase<----- Sel
<----- Sel
AdvSearchBase&Aggiungi condizione
Add clause
AdvSearchBase Ricerca avanzataAdvanced search
AdvSearchBaseTutti ---->	All ---->
AdvSearchBaseEsploraBrowse
AdvSearchBasePer categorie
By categories
AdvSearchBaselContrassegna per abilitare la ricerca sul tipo di file,Check this to enable filtering on file types
AdvSearchBasetContrassegna per usare le categorie al posto dei tipi mime;Check this to use file categories instead of raw mime types
AdvSearchBaseChiudiClose
AdvSearchBase$Elimina condizione
Delete clause
AdvSearchBaseNScrivi la directory base per la ricercaEnter top directory for search
AdvSearchBase8Ignora i file di questo tipoIgnored file types
AdvSearchBase*Limita i tipi di fileRestrict file types
AdvSearchBaseRLimita i risultati alla sotto-directory: %Restrict results to files in subtree:
AdvSearchBase$Salva come defaultSave as default
AdvSearchBaseHCerca i documenti<br>che contengono:'Search for 
documents
satisfying: AdvSearchBase"Ricerca tipo fileSearched file types AdvSearchBaseSel -----> Sel -----> AdvSearchBase Cerca Start Search AdvSearchBase Lingua di aspellAspell language ConfIndexW\Impossibile scrivere il file di configurazioneCan't write configuration file ConfIndexW@Nome della cartella del databaseDatabase directory name ConfIndexW"Parametri globaliGlobal parameters ConfIndexWIntervallo in megabite per il salvataggio intermedio dell'indiceIndex flush megabytes interval ConfIndexW Parametri localiLocal parameters ConfIndexW(Nome del file di log Log file name ConfIndexW8Livello di verbosit del logLog verbosity level ConfIndexW Non usare aspellNo aspell usage ConfIndexW0Parametri per la ricercaSearch parameters ConfIndexW"Indirizzi saltati Skipped paths ConfIndexW*Lingue per la radice Stemming languages ConfIndexWIl file dove verranno scritti i messaggi.<br>Usa 'stderr' per il terminalePThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWLingue per le quali verr costruito<br>il dizionario delle espansioni radicali.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWLista delle cartelle in cui inizia lindicizzazione recorsiva. Di default la tua home.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexW@Questo valore regola il volume di dati da indicizzare tra un salvataggio e l'altro.<br>Aiuta a controllare l'uso della memoria. Di default post uguale a 10MbThis value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWQuesto valore regola il numero dei messaggi,>br>dai soli errori a mole indicazioni per il debug.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexW$Cartella superioreTop directories ConfIndexWGlobaleGlobal ConfSubPanelWAnnullaCancel EditTransBasepNessuna directory per il DB di base nella configurazione No db directory in configurationMain&Successivo&NextPreview&Precedente &PreviousPreview&Cerca: &Search for:PreviewImpossibile tradurre il documento per la rappresentazione interna 0Can't turn doc into internal representation for PreviewAnnullaCancelPreviewCancellaClearPreviewFCreazione del testo per l'anteprimaCreating preview textPreviewVCaricamento anteprima del testo nell'editor Loading preview text into editorPreview:Rispetta &Maiuscole/minuscole Match &CasePreviewLManca il programma di filtro esterno: Missing helper program: Preview><b>Ramificazioni personalizzateCustomised subtreesQObject.Segue il link simbolicoFollow symbolic linksQObjectSegue il link simbolico durante l'indicizzazione. Di default no, per evitare la duplicazione dell'indiceTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject@Indicizza tutti i nomi dei filesIndex all file namesQObject&Indicizza il nome di quei files il cui contenuto non pu essere identificato o processato (tipo mime non supportato). Di default impostato a vero}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObjectNomi saltati Skipped namesQObjectLista delle sottocartelle nella gerarchia indicizzata<br>ove alcuni parametri devono essere ridefiniti. Predefinita: vuota.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectQuesti sono modelli per i nomi delle cartelle e/o dei files che non devono vebire indicizzati.LThese are patterns for file or directory names which should not be indexed.QObject&Anteprima&PreviewQWidget.Copia il nome del &FileCopy &File NameQWidgetCopia l'&Url Copy &URLQWidget.Trova documenti &similiFind &similar documentsQWidgetAttenzioneWarningRTIToolW"(tutte le lingue)(all languages)RclMain((nessuna espansione) (no stemming)RclMain,Informazioni su Recoll About RecollRclMainRNon posso creare la finestra di anteprimaCan't create preview windowRclMainvNon posso estrarre il documento o creare il file temporaneo0Cannot extract document or create temporary fileRclMainxImpossibile caricare informazioni del documento dal database+Cannot retrieve document info from databaseRclMainChiusuraClosingRclMain0Cronologia dei documentiDocument historyRclMain Esecuzione di: [ Executing: [RclMain&Cronologia dei dati History dataRclMain4Indicizzazione in corso: Indexing in progress: RclMainPuliscoPurgeRclMain.Risultati della ricerca Query resultsRclMain&Database espansioniStemdbRclMainBQuesta ricerca non e' piu' attiva"This search is not active any moreRclMainSconosciutoUnknownRclMainAttenzioneWarningRclMainberrore nel recupero delle lingue per l'espansione#error retrieving stemming languagesRclMainmultimedialimediaRclMain altriotherRclMain.&Informazioni su Recoll &About Recoll RclMainBase"Ricerca &Avanzata&Advanced Search RclMainBaseJ&Cancella la cronologia dei documenti&Erase document history RclMainBase &File&File RclMainBase &Aiuto&Help RclMainBase&Preferenze &Preferences RclMainBase,&Parametri ordinamento&Sort parameters RclMainBase&Strumenti&Tools RclMainBase&Manuale utente &User manual RclMainBaseLMostra la finestra di Ricerca avanzataAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBaseLVisualizza la cronologia dei documentiDocument History RclMainBase*C&ronologia documentiDocument &History RclMainBase &EsciE&xit RclMainBase:Configurazione indici esterniExternal index dialog RclMainBasePrima pagina First page RclMainBaseFVai alla prima pagina dei risultatiGo to first page of results RclMainBasePagina seguente Next page RclMainBasePagina seguenteNext page of results RclMainBase"Pagina precedente Previous page RclMainBase"Pagina precedentePrevious page of results RclMainBase RecollRecoll RclMainBaseVConfigurazione dei parametri di ordinamentoSort parameters RclMainBase"&Esplora l'indiceTerm &explorer RclMainBase@Strumento di esplorazione indiceTerm explorer tool RclMainBase Aggiorna &indice Update &index RclMainBaseDataDate RecollModelNome file File name RecollModel: (mostra dettagli di ricerca) (show query)ResList<<p><b>Nessun risultato</b><br>

No results found
ResList0Cronologia dei documentiDocument historyResListRisultati DocumentsResListSuccessivoNextResListPrecedentePreviousResList Dettagli ricerca Query detailsResList&Lista dei risultati Result listResList,Documento inaccessibleUnavailable documentResListperforResList totale di almenoout of at leastResList Tutti All termsSSearchQualsiasiAny termSSearch:Stringa di ricerca malformataBad query stringSSearchNome file File nameSSearch Memoria esaurita Out of memorySSearch8Linguaggio di interrogazioneQuery languageSSearchCancellaClear SSearchBase Ctrl+SCtrl+S SSearchBase0Cancella voce di ricercaErase search entry SSearchBaseSSearchBase SSearchBase SSearchBase CercaSearch SSearchBaseInizia ricerca Start query SSearchBaseNome file File name SearchClauseWNumero di parole che possono frapporsi tra i termini di ricerca indicatiHNumber of additional words that may be interspersed with the chosen ones SearchClauseWSeleziona il tipo di ricerca da effettuare con i termini indicati>Select the type of query that will be performed with the words SearchClauseWSuccessivoNextSnippets CercaSearch SnippetsWEsploraBrowseSpecIdxW&Chiudi&Close SpellBase&Espandi &Expand  SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBase0Esplorazione dei termini Term Explorer SpellBase0Nessun epansione trovataNo expansion foundSpellW(Espressione regolareRegexpSpellW&Ortografia/FoneticaSpelling/PhoneticSpellW.Espansione grammaticaleStem expansionSpellWTermineTermSpellWCaratteri jolly WildcardsSpellWpImpossibile formare la lista di espansione per la lingua#error retrieving stemming languagesSpellW Tutti All terms UIPrefsDialogQualsiasiAny term UIPrefsDialogNome file File name UIPrefsDialog8Linguaggio di interrogazioneQuery language UIPrefsDialogvLa directory selezionata non sembra essera un indice Xapian;The selected directory does not appear to be a Xapian index UIPrefsDialogXLa directory selezionata e' gia' nella lista3The selected directory is already in the index list UIPrefsDialog<Questo e' l'indice principale!This is the main/local index! UIPrefsDialogImpossibile formare la lista delle lingue per l'espansione grammaticale#error retrieving stemming languages UIPrefsDialog ChiudiCloseViewActionBase>Applicazione di visualizzazioneNative ViewersViewActionBaseChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW&Annulla&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBaseUna ricerca per [vino rosso] (2 parole) sara' completata come [vino O rosso O (vino FRASE 2 rosso)]. Questo dovrebbe dare la precedenza ai risultati che contengono i termini esattamente come sono stati scritti.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBaseSeleziona tutti Activate AlluiPrefsDialogBaseAggiungi indice Add indexuiPrefsDialogBase"Applica modifiche Apply changesuiPrefsDialogBasejAggiungi automaticamente frase alle ricerche semplici+Automatically add phrase to simple searchesuiPrefsDialogBase"Deseleziona tuttiDeactivate AlluiPrefsDialogBase"Annulla modificheDiscard changesuiPrefsDialogBaseDevo sintetizzare un riassunto anche se il documento sembra ne abbia uno?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBase>Devo cercare di costruire i riassunti per le voci nell'elenco dei risultati usando il contesto dei termini di ricerca? 
Puo' essere lento per grossi documenti..zDo we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents.uiPrefsDialogBaseHCostruisci dinamicamente i riassuntiDynamically build abstractsuiPrefsDialogBaseIndici esterniExternal IndexesuiPrefsDialogBaseHelvetica-10 Helvetica-10uiPrefsDialogBaseDimensione massima del testo da evidenziare nell'anteprima (megabytes)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBase<Numero di risultati per pagina"Number of entries in a result pageuiPrefsDialogBaseApre una finestra di dialogo per selezionare i fonts della lista dei risultati-Opens a dialog to select the result list fontuiPrefsDialogBasedRicorda lo stato dell'impostazione di ordinamento.Remember sort activation state.uiPrefsDialogBasetRimuovi dalla lista. Non ha effetto sull'indice del disco.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase&Rimuovi selezionatiRemove selecteduiPrefsDialogBaseJSostituisci i riassunti dei documenti Replace abstracts from documentsuiPrefsDialogBaseRipristinaResetuiPrefsDialogBaseVRipristina i font della lista dei risultati1Resets the result list font to the system defaultuiPrefsDialogBase@Fonts per la lista dei risultatiResult list fontuiPrefsDialogBase0Parametri per la ricercaSearch parametersuiPrefsDialogBase^Inizia aprendo la finestra di ricerca avanzata.'Start with advanced search dialog open.uiPrefsDialogBase6Linguaggio per l'espansioneStemming languageuiPrefsDialogBaseZNumero di parole di contesto per il riassunto Synthetic abstract context wordsuiPrefsDialogBaseBNumero caratteri per il riassunto$Synthetic abstract size (characters)uiPrefsDialogBaseTesti di lunghezza superiore a questa non vengono evidenziati nella preview (troppo lento).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBase&Commuta selezionatiToggle selecteduiPrefsDialogBase$Interfaccia utenteUser interfaceuiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_ru.ts0000644000175000017500000073446413566424763014314 00000000000000 AdvSearch All clauses Всем условиям Any clause Любому условию texts тексты spreadsheets таблицы presentations презентации media медиа messages сообщения other другое Bad multiplier suffix in size filter Неверный множитель в фильтре размера text текст spreadsheet таблица presentation презентация message сообщение AdvSearchBase Advanced search Сложный поиск Restrict file types Ограничить типы файлов Save as default Сделать параметром по умолчанию Searched file types Искать среди All ----> Все ----> Sel -----> Выделенные ----> <----- Sel <----- Выделенные <----- All <----- Все Ignored file types Игнорируемые типы файлов Enter top directory for search Указать имя каталога верхнего уровня для поиска Browse Обзор Restrict results to files in subtree: Ограничить результаты поиска файлами в подкаталоге: Start Search Начать поиск Search for <br>documents<br>satisfying: Искать <br>документы,<br>удовлетворяющие: Delete clause Удалить условие Add clause Добавить условие Check this to enable filtering on file types Фильтровать по типам файлов By categories По категориям Check this to use file categories instead of raw mime types Использовать категории, а не типы MIME Close Закрыть All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. 
Все заполненные поля справа будут объединены логическим И («Все условия») или ИЛИ («Любое условие»). <br>В полях типа «Любые», «Все» или «Без» допустимы сочетания простых слов и фразы, заключённые в двойные кавычки.<br>Пустые поля игнорируются. Invert Обратить Minimum size. You can use k/K,m/M,g/G as multipliers Минимальный размер. Допускается использование множителей к/К, м/М, г/Г Min. Size Минимум Maximum size. You can use k/K,m/M,g/G as multipliers Максимальный размер. Допускается использование множителей к/К, м/М, г/Г Max. Size Максимум Filter Фильтр From с To по Check this to enable filtering on dates Включить фильтрование по дате Filter dates Фильтровать по дате Find Поиск Check this to enable filtering on sizes Включить фильтрование по размеру Filter sizes Фильтровать по размеру ConfIndexW Can't write configuration file Невозможно записать файл конфигурации Global parameters Общие параметры Local parameters Частные параметры Search parameters Параметры поиска Top directories Каталоги верхнего уровня The list of directories where recursive indexing starts. Default: your home. Список каталогов, где начинается рекурсивное индексирование. По умолчанию: домашний каталог. Skipped paths Пропускать These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Полный путь к директории, которая не будет затрагиваться при индексировании. <br>Может содержать маски. Записи должны совпадать с путями, которые видит индексатор (например, если topdirs включает «/home/me», а «/home» на самом деле ведёт к «/usr/home», правильной записью skippedPath будет «/home/me/tmp*», а не «/usr/home/me/tmp*») Stemming languages Языки со словоформами The languages for which stemming expansion<br>dictionaries will be built. Языки, для которых будут построены<br>словари однокоренных слов. Log file name Файл журнала The file where the messages will be written.<br>Use 'stderr' for terminal output Файл, куда будут записываться сообщения.<br>Используйте 'stderr' для вывода в терминал Log verbosity level Уровень подробности журнала This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Это значение определяет подробность поступающих сообщений,<br>от ошибок до отладочных данных. Index flush megabytes interval Интервал сброса данных индекса (МБ) This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Это значение определяет количество данных, индексируеммых между сбросами на диск.<br>Помогает контролировать использование памяти индексатором. Значение по умолчанию: 10МБ Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. Процент занятого пространства на диске — общее пространство диска, занятое не только индексом, — при котором индексирование завершится ошибкой и прекратится.<br>По умолчанию значение 0 снимает любые ограничения. No aspell usage Не использовать aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
Отключает использование aspell для создания вариантов написания в обозревателе терминов.<br> Полезно, если aspell отсутствует или не работает. Aspell language Язык aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Язык словаря aspell. Выгладит как «en» или «ru»...<br>Если значение не установлено, для его расчёта будeт использованs параметры системы (локаль). Чтобы узнать, какие параметры доступны в системе, наберите «aspell config» и проверьте, какие .dat-файлы содержатся в каталоге 'data-dir'. Database directory name Каталог базы данных The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Имя каталога, в котором хранится индекс<br>Путь указывается относительно каталога конфигурации и не является абсолютным. По умолчанию: «xapiandb». Unac exceptions Исключения unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Это исключения для механизма unac, который по умолчанию отбрасывает все диакритические знаки и проводит каноническую декомпозицию. Можно переопределить механизм удаления надстрочных знаков для отдельных символов или добавить правила декомпозиции (например, для лигатур). В каждой, отделённой запятой записи первый символ является исходным, а остальные — его интерпретация. Process the WEB history queue Обрабатывать просмотренные веб-страницы Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Включает индексирование страниц, просмотренных в Firefox.<br>(требуется установка дополнения Recoll для Firefox) Web page store directory name Имя каталога хранения веб-страниц The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Имя каталога хранения просмотренных веб-страниц.<br>Путь указывается относительно каталога конфигурации и не является абсолютным. Max. size for the web store (MB) Максимальный размер веб-хранилища (МБ) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Записи будут удалены при достижении максимального размера хранилища.<br>Целесообразно увеличить размер, так как уменьшение значения не повлечёт усечение сущесвующего файла (в результате приведёт только к расходованию пространства впустую). Automatic diacritics sensitivity Автоматический учёт диакритических знаков <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Автоматически включает учёт диакритических знаков, если строка поиска содержит диакритические знаки (кроме unac_except_trans). В противном случае используйте язык запросов и модификатор <i>D</i> для учёта диакритических знаков. 
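The English source strings in this part of recoll_ru.ts describe indexing settings that live in the recoll.conf file of the configuration directory (top directories, skipped paths, stemming languages, log file and verbosity, index flush interval, and so on). A hand-written illustrative sketch of such a file follows, using parameter names as documented for recoll.conf; the values are invented examples, not defaults:

    # Where recursive indexing starts
    topdirs = ~/Documents ~/projects
    # Directories the indexer should not enter (wildcards allowed)
    skippedPaths = ~/projects/*/build ~/Documents/tmp*
    # File or directory names to skip
    skippedNames = *.o *.so core
    # Languages for which stemming expansion dictionaries are built
    indexstemminglanguages = english french
    # Log destination and verbosity
    logfilename = stderr
    loglevel = 3
    # Megabytes of indexed text between flushes to disk
    idxflushmb = 10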
Automatic character case sensitivity Автоматический учёт регистра <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Автоматически включает учёт регистра, если строка поиска содержит заглавные буквы (кроме первой буквы). В противном случае используйте язык запросов и модификатор <i>C</i> учёта регистра. Maximum term expansion count Максимальное число однокоренных слов <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Максимальное число однокоренных слов для одного слова (например, при использовании масок). Стандартное значение 10 000 является разумным и поможет избежать ситуаций, когда запрос кажется зависшим при переборе списка слов. Maximum Xapian clauses count Максимальное число Xapian-предложений <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Максимальное число элементарных условий, добавляемых к запросу Xapian. В некоторых случаях результат поиска однокоренных слов может быть избыточным и занять слишком большой объём памяти. Стандартное значение в 100 000 достаточно для большинства случаев и подходит для современных аппаратных конфигураций. ConfSubPanelW Only mime types Только MIME-типы An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Исчерпывающий перечень индексируемых типов MIME.<br>Другие типы индексироваться не будут. Обычно пуст и неактивен Exclude mime types Исключить MIME-типы Mime types not to be indexed Типы MIME, индексирование которых проводиться не будет Max. compressed file size (KB) Максимальный размер сжатого файла (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Это значение устанавливает предельный размер сжатых файлов, которые будут обрабатываться. Значение -1 снимает ограничение, 0 отключает распаковку. Max. text file size (MB) Максимальный размер текстового файла (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Это значение устанавливает предельный размер текстовых файлов, которые будут обрабатываться. Значение -1 снимает ограничение. Рекомендуется использовать для исключения файлов журнала большого размера из процесса индексирования. Text file page size (KB) Размер страницы текстового файла (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Если это значение установлено (т.е. не равно -1), то при индексировании текстовые файлы разбиваются на блоки соответствующего размера. Данный параметр полезен при выполнении поиска в очень больших текстовых файлах (например, файлах журналов). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. 
Работа внешних фильтров, длящаяся дольше указанного времени, будет прервана. Применяется для редких случаев (например, с фильтром postscript), когда возникает зацикливание фильтра при обработке какого-то документа. Установите значение -1, чтобы снять ограничение. Global Общее CronToolW Cron Dialog Настройка заданий Cron <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Индексирование <span style=" font-weight:600;">Recoll</span> по расписанию (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Каждое поле может содержать маску (*), единичное числовое значение, разделённый запятыми список (1,3,5) или диапазон чисел (1-7). Эти поля будут использованы <span style=" font-style:italic;">как есть</span> в файле crontab, также можно указать необходимые параметры в самом файле, см. 
crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Например, если ввести знак <span style=" font-family:'Courier New,courier';">*</span> в поле <span style=" font-style:italic;">«Дни недели»</span>, <span style=" font-family:'Courier New,courier';">12,19</span> — в поле <span style=" font-style:italic;">«Часы»</span> и <span style=" font-family:'Courier New,courier';">15</span> — в поле <span style=" font-style:italic;">«Минуты»</span>, индексирование будет производиться ежедневно в 12:15 и 19:15.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Расписание с очень частыми запусками может оказаться менее эффективным, чем индексирование в реальном времени.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Дни недели (* или 0-7, 0 или 7 — воскресенье) Hours (* or 0-23) Часы (* или 0-23) Minutes (0-59) Минуты (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Для остановки автоматического идексирования по расписанию нажмите <span style=" font-style:italic;">«Отключить»</span>, для запуска — <span style=" font-style:italic;">«Включить»</span>, для отмены внесённых изменений — <span style=" font-style:italic;">«Отмена»</span>.</p></body></html> Enable Включить Disable Отключить It seems that manually edited entries exist for recollindex, cannot edit crontab Похоже, что для recollindex есть вручную исправленные записи, редактирование crontab невозможно Error installing cron entry. Bad syntax in fields ? Ошибка установки записи cron. Неверный синтаксис полей? 
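For the concrete example given in the dialog text above (Days of week "*", Hours "12,19", Minutes "15"), the installed crontab entry would have roughly the following shape, using the standard crontab(5) field order of minute, hour, day of month, month, day of week, then the command; the exact command line written by the tool may carry additional options or environment settings:

    15 12,19 * * * recollindex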
EditDialog Dialog Диалог EditTrans Source path Исходный путь Local path Локальный путь Config error Ошибка конфигурации Original path Изначальный путь EditTransBase Path Translations Корректировка путей Setting path translations for Задать корректировку для Select one or several file types, then use the controls in the frame below to change how they are processed Выберите типы файлов и используйте кнопки управления ниже, чтобы изменить порядок обработки файлов Add Добавить Delete Удалить Cancel Отмена Save Сохранить FirstIdxDialog First indexing setup Настройка первого индексирования <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Похоже, что индекс для этой конфигурации не существует.</span><br /><br />Для индексирования только домашнего каталога с набором умолчаний нажмите кнопку <span style=" font-style:italic;">«Запустить индексирование»</span>. Детальную настройку можно будет провести позже. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Если требуется больше контроля, воспользуйтесь приведёнными ниже ссылками для настройки параметров и расписания индексирования.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Перейти к этим инструментам позднее можно через меню <span style=" font-style:italic;">«Настройка»</span>.</p></body></html> Indexing configuration Настройка индексирования This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Здесь можно указать, какие каталоги требуется индексировать, а также настроить такие параметры, как пути к файлам-исключениям или их имена, используемые по умолчанию кодировки и т.д. Indexing schedule Расписание индексирования This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Здесь можно выбрать режим индексирования: по расписанию или в реальном времени, а также настроить расписание индексирования (с использованием cron). Start indexing now Запустить индексирование FragButs %1 not found. %1 не найден. %1: %2 %1: %2 Query Fragments Фрагменты запроса IdxSchedW Index scheduling setup Настройка расписания индексирования <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Индексирование <span style=" font-weight:600;">Recoll</span> может работать постоянно, индексируя изменяющиеся файлы, или запускаться дискретно через определённые промежутки времени. 
</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Рекомендуется ознакомиться с руководством пользователя программы, чтобы выбрать наиболее подходящий режим работы (нажмите F1 для вызова справки). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Этот инструмент позволяет выбрать, будет ли индексирование производиться по расписанию или в реальном времени при входе в систему (или оба варианта сразу, что вряд ли имеет смысл). </p></body></html> Cron scheduling Расписание запуска cron The tool will let you decide at what time indexing should run and will install a crontab entry. Этот инструмент позволяет выбрать, в какое время запускать индексирование, а также сделать запись в crontab. Real time indexing start up Запуск индексирования в реальном времени Decide if real time indexing will be started when you log in (only for the default index). Здесь можно выбрать, нужно ли начинать индексирование в реальном времени при входе в систему (только для индекса по умолчанию). ListDialog Dialog Диалог GroupBox у MS используется перевод "Группа" (A standard Windows control that groups a set of controls) GroupBox Main No db directory in configuration Каталог БД в конфигурации не указан Could not open database in Невозможно открыть БД в . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. . Нажмите "Отменить", если хотите исправить файл конфигурации до начала индексирования, или "OK" для продолжения. Configuration problem (dynconf Проблема конфигурации (dynconf "history" file is damaged or un(read)writeable, please check or remove it: Файл истории ("history") повреждён или не читается/не записывается, проверьте или удалите его: "history" file is damaged, please check or remove it: Файл истории повреждён, проверьте или удалите его: Preview Form Tab 1 &Search for: &Искать: &Next &Следующий &Previous &Предыдущий Match &Case &С учётом регистра Open Открыть Clear Очистить Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. 
Could not fetch stored text Creating preview text Создание текста для просмотра Loading preview text into editor Загрузка текста в редактор Cannot create temporary directory Невозможно создать временный каталог Cancel Отмена Close Tab Закрыть вкладку Missing helper program: Отсутствует вспомогательное приложение: Can't turn doc into internal representation for Невозможно сконвертировать документ во внутреннее представление для Cannot create temporary directory: Невозможно создать временный каталог: Error while loading file Ошибка загрузки файла PreviewTextEdit Show fields Показать поля Show main text Показать основной текст Print Печать Open document Print Current Preview Печать текущего вида Show image Показать изображение Select All Выделить всё Copy Копировать Save document to file Сохранить документ в файл Fold lines Линии сгиба Preserve indentation Сохранять отступы QObject Global parameters Общие параметры Local parameters Частные параметры <b>Customised subtrees <b>Пользовательские подкаталоги The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Список подкаталогов индексируемого дерева,<br>к которым должны применяться особые параметры. По умолчанию: пусто. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Следующие ниже параметры применяются или к каталогу верхнего уровня, если в списке выше ничего не выбрано<br>или выбрана пустая строка, или только для выбранного подкаталога.<br>Добавлять и удалять каталоги можно при помощи кнопок +/-. Skipped names Пропускать These are patterns for file or directory names which should not be indexed. Шаблоны имён файлов или каталогов, имена которых не следует индексировать. Default character set Кодировка по умолчанию This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Эта кодировка будет использована при чтении файлов, в которых таковая не указывается явно (например, чисто текстовых файлов).<br>Обычно значение пусто, тогда оно извлекается из окружения (локали). Follow symbolic links Открывать символические ссылки Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Открывать символические ссылки при индексировании. По умолчанию действие не выполняется во избежание дублированного индексирования Index all file names Индексировать все имена файлов Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Индексировать имена файлов, содержимое которых невозможно определить или обработать (неизвестный или неподдерживаемый тип MIME). По умолчанию включено Beagle web history Web-история Beagle Search parameters Параметры поиска Web history Просмотренные веб-страницы <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. Default<br>character set Кодировка<br>по умолчанию Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. 
Кодировка, которая будет использована при чтении файлов, в которых кодировка не указана явно, например, чисто текстовых файлов.<br>Значение по умолчанию не установлено и берётся из параметров системы (локали). Ignored endings Игнорировать окончания These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). Окончания имен файлов, которые будут индексироваться только по имени (без попыток определения MIME-типа, без разворачивания файла, без индексирования содержимого). QWidget Create or choose save directory Создать или выбрать каталог сохранения Choose exactly one directory Выберите только один каталог Could not read directory: Невозможно прочитать каталог: Unexpected file name collision, cancelling. Неожиданный конфликт имён файлов, отмена действия. Cannot extract document: Невозможно извлечь документ: &Preview &Просмотр &Open О&ткрыть Open With Открыть с помощью Run Script Запустить выполнение сценария Copy &File Name Копировать &имя файла Copy &URL Копировать &URL &Write to File &Записать в файл Save selection to files Сохранить выделение в файлы Preview P&arent document/folder &Просмотр родительского документа/каталога &Open Parent document/folder &Открыть родительский документ/каталог Find &similar documents Найти &похожие документы Open &Snippets window Открыть окно &фрагментов Show subdocuments / attachments Показать вложенные документы QxtConfirmationMessage Do not show again. Больше не показывать. RTIToolW Real time indexing automatic start Автоматический запуск индексирования в реальном времени <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Индексирование при помощи <span style=" font-weight:600;">Recoll</span> может быть настроено как сервис, обновляющий индекс одновременно с изменением файлов, то есть в реальном времени. При этом постоянное обновление индекса будет происходить за счёт непрерывного использования системных ресурсов.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Запускать службу индексирования одновременно с сеансом рабочего стола. Also start indexing daemon right now. Также запустить прямо сейчас службу индексирования. 
Replacing: Замена: Replacing file Замена файла Can't create: Невозможно создать: Warning Предупреждение Could not execute recollindex Не удалось запустить recollindex Deleting: Удаление: Deleting file Удаление файла Removing autostart Отмена автозапуска Autostart file deleted. Kill current process too ? Файл автозапуска удалён. Прекратить текущий процесс? RclMain Query results Результаты запроса Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> Cannot retrieve document info from database Невозможно извлечь сведения о документе из БД Warning Предупреждение Can't create preview window Невозможно создать окно просмотра Executing: [ Выполняется: [ About Recoll О программе Document history Просмотренные документы History data История Indexing in progress: Идёт индексирование: Files Файлы Purge Очистка Stemdb Stemdb Closing Закрытие Unknown Неизвестно This search is not active any more Этот поиск больше не активен Bad viewer command line for %1: [%2] Please check the mimeconf file Неверная командная строка программы просмотра %1: [%2] Проверьте файл mimeconf Cannot extract document or create temporary file Невозможно извлечь документ или создать временный файл (no stemming) (без однокоренных слов) (all languages) (все языки) error retrieving stemming languages ошибка получения списка языков Update &Index Обновить &индекс Indexing interrupted Индексирование прервано documents document files file errors error total files) Stop &Indexing О&становить индексирование Can't start query: Ошибка старта запроса: All Все media медиа message сообщение other другое presentation презентация spreadsheet таблица text текст sorted сортированные filtered фильтрованные External applications/commands needed and not found for indexing your file types: Отсутствующие внешние приложения/команды, требуемые для индексирования ваших файлов: No helpers found missing Все вспомогательные приложения доступны Missing helper programs Вспомогательные приложения отсутствуют Save file dialog Сохранить файл Choose a file name to save under Выберите имя файла для сохранения Document category filter Фильтр категории документа No external viewer configured for mime type [ Не настроена внешняя программа для просмотра mime-типа [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Программа просмотра, указанная в mimeview для %1: %2, не найдена. Открыть диалог настройки? Can't access file: Невозможно получить доступ к файлу: Can't uncompress file: Невозможно распаковать файл: Save file Сохранение файл Result count (est.) Кол-во результатов (расчётное) Query details Подробности запроса Could not open external index. Db not open. Check external indexes list. Не удалось открыть внешний индекс. БД не открыта. Проверьте список внешних индексов. No results found Поиск не дал результатов None Отсутствует Updating Обновление Done Готово Monitor Монитор Indexing failed Не удалось выполнить индексирование Indexing done Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): The current indexing process was not started from this interface. 
Click Ok to kill it anyway, or Cancel to leave it alone Для запуска текущего процесса индексирования был использован другой интерфейс. Нажмите «OK» для прекращения процесса или «Отмена» для его продолжения Erasing index Стирание индекса Reset the index and start from scratch ? Сбросить индекс и начать заново? Can't update index: internal error Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Идёт обработка запроса.<br>Из-за ограничений библиотеки<br>отмена действия приведёт к закрытию приложения Error Ошибка Index not open Индекс не открыт Index query error Ошибка запроса Indexed Mime Types Индексируемые MIME-типы Content has been indexed for these mime types: Проиндексировано содержание для следующих MIME-типов: Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Индекс для этого файла устарел. Не буду рисковать показывать не тот фрагмент. Нажмите OK для обновления индекса для этого файла, затем повторите запрос. Или же Отменить. Can't update index: indexer running Невозможно обновить индекс: индексатор уже запущен Indexed MIME Types Проиндексированные MIME-типы Bad viewer command line for %1: [%2] Please check the mimeview file Ошибка командной строки программы просмотра %1: [%2] Проверьте файл mimeview Viewer command line for %1 specifies both file and parent file value: unsupported В командной строке программы просмотра %1 указан как сам файл, так и родительский файл: команда не поддерживается Cannot find parent document Невозможно найти родительский документ Indexing did not run yet Индексирование пока не запущено External applications/commands needed for your file types and not found, as stored by the last indexing pass in Внешние приложения/команды, требуемые для индексирования файлов, не найдены, как указано в результатах последнего индексирования в Index not up to date for this file. Refusing to risk showing the wrong entry. Индекс для этого файла устарел. Отказываюсь рисковать показывать неправильную запись. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Нажмите ОК, чтобы обновить индекс для файла, и перезапустите очередь по окончании. Или нажмите Отмена. Indexer running so things should improve when it's done Индексация выполняется, так что по завершении положение должно улучшиться Sub-documents and attachments Вложенные документы Document filter Фильтр документов Index not up to date for this file. Refusing to risk showing the wrong entry. Индекс для этого файла устарел. Возможно получение ошибочных результатов, поэтому действие производиться не будет. Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. Нажмите ОК, чтобы обновить индекс для данного файла, и повторите запрос по окончании индексирования. The indexer is running so things should improve when it's done. Индексация выполняется, по завершении ситуация должна улучшиться. Duplicate documents Дублирующиеся документы These Urls ( | ipath) share the same content: Данные URL ( | ipath) имеют одинаковое содержимое: Bad desktop app spec for %1: [%2] Please check the desktop file Неверная спецификация для настольного приложения %1: [%2] Проверьте файл .desktop The current indexing process was not started from this interface, can't kill it Для запуска текущего процесса индексирования был использован другой интерфейс. 
Прекратить выполнение процесса невозможно Bad paths Неверные пути Bad paths in configuration file: Неверные пути в файле конфигурации: Selection patterns need topdir Для шаблонов отбора требуется topdir Selection patterns can only be used with a start directory Шаблоны отбора могут быть использованы только c корневым каталогом No search Результаты поиска отсутствуют No preserved previous search Отсутствуют сохранённые результаты предыдущего поиска Choose file to save Выбор файла для сохранения Saved Queries (*.rclq) Сохраненные запросы (*.rclq) Write failed Ошибка записи Could not write to file Не удалось выполнить запись в файл Read failed Ошибка чтения Could not open file: Не удалось открыть файл: Load error Ошибка загрузки Could not load saved query Не удалось загрузить сохранённый запрос Index scheduling Расписание индексирования Sorry, not available under Windows for now, use the File menu entries to update the index К сожалению, работа с Windows в данный момент невозможна. Используйте пункты меню «Файл» для обновления индекса Disabled because the real time indexer was not compiled in. Отключено, так как не был скомпилирован индексатор данных в реальном времени. This configuration tool only works for the main index. Данный инструмент настройки применим только к основныму индексу. No information: initial indexing not yet performed. Content has been indexed for these MIME types: Can't set synonyms file (parse error?) Невозможно установить файл синономов (ошибка анализа?) The document belongs to an external index which I can't update. Документ относится к внешнему индексу, который невозможно обновить. Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session). Нажмите «Отмена» для возврата к списку. <br>Нажмите «Игнорировать», чтобы открыть просмотр (и запомнить выбор для данного сеанса). Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Открывается временная копия. Изменения будут утеряны, если их не сохранить<br/>в постоянном местоположении. Do not show this warning next time (use GUI preferences to restore). Больше не показывать (для восстановления значений используйте окно настройки интерфейса). Index locked Индекс заблокирован Unknown indexer state. Can't access webcache file. Неизвестный статус индексатора. Невозможно получить доступ к файлу веб-кэша. Indexer is running. Can't access webcache file. Идёт индексирование. Невозможно получить доступ к файлу веб-кэша. with additional message: с дополнительным сообщением: Non-fatal indexing message: Сообщение о некритичной ошибке индексирования: Types list empty: maybe wait for indexing to progress? Список типов пуст: подождать продолжения выполнения индексирования? 
Viewer command line for %1 specifies parent file but URL is http[s]: unsupported В командной строке программы просмотра %1 указан родительский файл, а в URL — сетевой протокол http[s]: команда не поддерживается Tools Инструменты Results Результаты RclMainBase Recoll Recoll &File &Файл &Tools &Инструменты &Preferences &Настройка &Help &Справка Search tools Инструменты поиска Result list Список результатов E&xit &Выход &About Recoll &О программе &User manual &Руководство пользователя Document &History &Просмотренные документы Document History Просмотренные документы &Advanced Search &Сложный поиск Advanced/complex Search Сложный поиск &Sort parameters &Параметры сортировки Sort parameters Параметры сортировки Next page Следующая страница Next page of results Следующая страница результатов Previous page Предыдущая страница Previous page of results Предыдущая страница результатов &Query configuration &Конфигурация Ctrl+Q Ctrl+Q Update &index Обновить &индекс Term &explorer Обозреватель &терминов Анализатор &терминов Term explorer tool Инструмент обзора терминов External index dialog Настройка внешнего индекса &Erase document history &Очистить список просмотренных документов First page Первая страница Go to first page of results Перейти к первой странице результатов &Indexing configuration Настройки ин&дексирования All все &Show missing helpers &Показать недостающие внешние программы PgDown PgDown PgUp PgUp &Full Screen Во весь &экран F11 F11 Full Screen Во весь экран &Erase search history Очистить историю &поиска sortByDateAsc sortByDateAsc Sort by dates from oldest to newest Сортировать по дате от старых к новым sortByDateDesc sortByDateDesc Sort by dates from newest to oldest Сортировать по дате от новых к старым Show Query Details Показать сведения о запросе Show results as table Показать результаты в виде таблицы &Rebuild index Пере&строить индекс &Show indexed types Показать индексируемые &типы Shift+PgUp Shift-PgUp &Indexing schedule &Расписание индексирования E&xternal index dialog Настройка &внешнего индекса &Index configuration Настройка &индекса &GUI configuration Настройка и&нтерфейса &Results Список &результатов Sort by date, oldest first Сортировать по дате от старых к новым Sort by date, newest first Сортировать по дате от новых к старым Show as table Показать в виде таблицы Show results in a spreadsheet-like table Показать результаты в виде таблицы Save as CSV (spreadsheet) file Сохранить в CSV-файл Saves the result into a file which you can load in a spreadsheet Сохранить результаты в текстовый файл с разделителями, открываемый как таблица Next Page Следующая страница Previous Page Предыдущая страница First Page Первая страница Query Fragments Фрагменты запроса With failed files retrying С повторной обработкой файлов с ошибками Next update will retry previously failed files При следующем обновлении будут повторно обработаны файлы с ошибками Indexing &schedule &Расписание индексирования Enable synonyms Учитывать синонимы Save last query Сохранить последний запрос Load saved query Загрузить сохранённый запрос Special Indexing Специальное индексирование Indexing with special options Индексирование с особыми параметрами &View &Вид Missing &helpers Недостающие &вспомогательные приложения Indexed &MIME types Проиндексированные &MIME-типы Index &statistics &Статистика индекса Webcache Editor Редактор веб-кэша Trigger incremental pass Запустить пошаговый проход RclTrayIcon Restore Восстановить Quit Выход RecollModel File name Имя файла Mime type Тип MIME Date Дата Abstract Содержимое 
Author Автор Document size Размер документа Document date Дата документа File size Размер файла File date Дата файла Keywords Ключевые слова Original character set Исходная кодировка Relevancy rating Релевантность Title Заголовок URL URL Mtime Изменено Date and time Дата и время Can't sort by inverse relevance Ipath Ipath MIME type Тип MIME ResList Result list Список результатов Unavailable document Документ недоступен Previous Предыдущий Next Следующий <p><b>No results found</b><br> <p><b>Поиск не дал результатов</b><br> &Preview &Просмотр Find &similar documents Найти &подобные документы Query details Сведения о запросе (show query) (показать запрос) Copy &File Name Скопировать &имя файла Copy &URL Копировать &URL filtered фильтрованное sorted сортированное Document history Просмотренные документы Preview Просмотр Open Открыть <p><i>Alternate spellings (accents suppressed): </i> <p><i>Варианты написания (без диакритических знаков): </i> &Write to File &Сохранить в файл Preview P&arent document/folder &Предпросмотр родительского документа/каталога &Open Parent document/folder &Открыть родительский документ/каталог &Open О&ткрыть Documents Документы out of at least из минимум for для <p><i>Alternate spellings: </i> <p><i>Варианты написания: </i> Duplicate documents Дублированные документы These Urls ( | ipath) share the same content: Данные URL ( | индексные пути) имеют одно и то же содержимое: Result count (est.) Кол-во результатов (расчётное) Snippets Фрагменты ResTable &Reset sort &Сбросить сортировку &Delete column &Удалить столбец Add " Добавить столбец " " column " Save table to CSV file Сохранить таблицу в CSV-файл Can't open/create file: Невозможно открыть/создать файл: &Preview &Просмотр &Open О&ткрыть Copy &File Name Скопировать &имя файла Copy &URL Копировать &URL &Write to File &Сохранить в файл Find &similar documents Найти &подобные документы Preview P&arent document/folder &Предпросмотр родительского документа/каталога &Open Parent document/folder &Открыть родительский документ/каталог &Save as CSV &Сохранить как CSV Add "%1" column Добавить столбец «%1» ResTableDetailArea &Preview &Просмотр &Open О&ткрыть Copy &File Name Скопировать &имя файла Copy &URL Копировать &URL &Write to File &Сохранить в файл Find &similar documents Найти &подобные документы Preview P&arent document/folder &Предпросмотр родительского документа/каталога &Open Parent document/folder &Открыть родительский документ/каталог ResultPopup &Preview &Просмотр &Open О&ткрыть Copy &File Name Скопировать &имя файла Copy &URL Копировать &URL &Write to File &Сохранить в файл Save selection to files Сохранить выделение в файлы Preview P&arent document/folder &Предпросмотр родительского документа/каталога &Open Parent document/folder &Открыть родительский документ/каталог Find &similar documents Найти &подобные документы Open &Snippets window Открыть окно &Фрагменты Show subdocuments / attachments Показать вложенные документы SSearch Any term Любое слово All terms Все слова File name Имя файла Completions Дополнения Select an item: Выберите: Too many completions Слишком много дополнений Query language Язык запроса Bad query string Ошибка в строке запроса Out of memory Недостаточно памяти Enter query language expression. 
Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Введите выражение на языке запросов. Шпаргалка:<br> <i>слово1 слово2</i> : 'слово1' и 'слово2' в любом поле.<br> <i>поле:слово1</i> : 'слово1' в поле 'поле'.<br> Стандартные поля/синонимы:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Псевдо-поля: dir, mime/format, type/rclcat, date.<br> Примеры интервалов между двумя датами: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>слово1 слово2 OR слово3</i> : слово1 И (слово2 ИЛИ слово3).<br> В действительности, скобки не разрешены.<br> <i>"слово1 слово2"</i> : фраза (должна встречаться именно в таком виде). Модификаторы:<br> <i>"слово1 слово2"p</i> : неупорядоченный поиск с расстоянием (<i><b>p</b>roximity</i>) по умолчанию.<br> Используйте ссылку <b>показать запрос</b>, когда сомневаетесь в результате, и смотрите подробности в руководстве (&lt;F1>). Enter file name wildcard expression. Укажите маску имени файла. Enter search terms here. Type ESC SPC for completions of current term. Укажите искомые слова. Введите Esc-пробел для автозаполнения. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Введите фразу на языке запроса. Подсказка:<br> <i>слово1 слово2</i> : «слово1» и «слово2» в любом поле.<br> <i>поле:слово1</i> : «слово1» в поле «поле».<br> Стандартные названия/синонимы названий полей:<br> название/тема/подпись, автор/от, получатель/кому, имя файла, расширение.<br> Псевдо-поля: dir, mime/формат, тип/rclcat, дата, размер.<br> Два примера обозначения временного интервала: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>слово1 слово2 ИЛИ слово3</i> : слово1 И (слово2 ИЛИ слово3).<br> Допускается использование кавычек.<br> <i>«слово1 слово2»</i> : фраза (требуется полное совпадение). Возможные модификаторы:<br> <i>«слово1 слово2»p</i> : неупорядоченный поиск с заданным по умолчанию расстоянием между словами.<br> Перейдите по ссылке <b>Показать запрос</b>, если не уверены в результате. Более подробную информацию можно получить из руководства пользователя (&lt;F1>). Enter search terms here. 
Stemming languages for stored query: Языки со словоформами для сохранённого запроса: differ from current preferences (kept) отличаются от текущих параметров (сохранено) Auto suffixes for stored query: Автоматически подставляемые суффиксы для сохранённого запроса: External indexes for stored query: Внешние индексы для сохранённого запроса: Autophrase is set but it was unset for stored query Автоматически подставляемая фраза задана, но для сохранённого запроса она задана не была Autophrase is unset but it was set for stored query Автоматически подставляемая фраза не задана, но для сохранённого запроса она была задана SSearchBase SSearchBase SSearchBase Clear Очистить Ctrl+S Ctrl+S Show query history Erase search entry Очистить поле поиска Search Поиск Start query Начать поиск Enter search terms here. Type ESC SPC for completions of current term. Укажите искомые слова. Введите Esc-пробел для автозаполнения. Choose search type. Выберите тип поиска. SearchClauseW Any of these с любыми словами All of these со всеми словами None of these без этих слов This phrase фраза Terms in proximity слова вблизи File name matching имя файла Select the type of query that will be performed with the words Выберите, какой тип запроса по словам будет произведён Number of additional words that may be interspersed with the chosen ones Количество слов между выбранными словами No field Поле не используется Any Любое All Все None Без Phrase Фраза Proximity Расстояние между словами File name Имя файла Snippets Snippets Фрагменты about:blank about:blank Find: Найти: Next Следующий Prev Предыдущий SnippetsW Search Искать Sort By Relevance Sort By Page <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> <p>К сожалению, точные совпадения с заданными параметрами не найдены. Возможно, документ слишком большой, и генератор фрагментов дал сбой...</p> SortForm Date Дата Mime type Тип MIME SortFormBase Sort Criteria Критерии сортировки Sort the Сортировать most relevant results by: наиболее похожих результатов по: Descending убыванию Close Закрыть Apply Применить SpecIdxW Special Indexing Специальное индексирование Do not retry previously failed files. Не обрабатывать файлы с ошибками повторно. Retry previously failed files. Else only modified or failed files will be processed. Или будут обрабатываться только изменённые файлы или файлы с ошибками. Erase selected files data before indexing. Стирать сведения о выбранных файлах перед началом индексирования. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Каталог для рекурсивного индексирования. Должен находиться внутри стандартной проиндексированной области,<br> как указано в файле настройки (topdirs). Browse Обзор Start directory. Must be part of the indexed tree. Use full indexed area if empty. Start directory (else use regular topdirs): Корневой каталог (или использовать стандартный topdirs): Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Оставить поле пустым для выбора всех файлов. Можно использовать несколько разделённых пробелом шаблонов.<br>Шаблоны, включающие в себя пробел, должны быть взяты в двойные кавычки.<br>Можно использовать, только если задан корневой каталог для индексирования. 
Selection patterns: Шаблоны отбора: Top indexed entity Проиндексированный элемент верхнего уровня SpellBase Term Explorer Обозреватель терминов &Expand &Однокоренные слова Alt+E Alt-E &Close &Закрыть Alt+C Alt+C Term Слово No db info. Сведения из БД отсутствуют. Match Учитывать Case регистр Accents диакритические знаки SpellW Wildcards Маски Regexp Регулярное выражение Spelling/Phonetic Написание/произношение Aspell init failed. Aspell not installed? Не получилось инициализировать Aspell. Он установлен? Aspell expansion error. Ошибка раскрывания aspell. Stem expansion Однокоренные слова error retrieving stemming languages ошибка получения списка языков No expansion found Однокоренных слов не найдено Term Слово Doc. / Tot. В документе / Всего Index: %1 documents, average length %2 terms Индекс: %1 документ(ов), средняя длина %2 элемент(ов) Index: %1 documents, average length %2 terms.%3 results Индекс: %1 документ(ов), средняя длина %2 слов(о). %3 результат(ов) %1 results %1 результат(ов) List was truncated alphabetically, some frequent В списке приведены сокращённые формы в алфавитном порядке, некоторые часто повторяющиеся terms may be missing. Try using a longer root. слова могут отсутствовать. Попробуйте использовать более длинный корень. Show index statistics Показать статистику индекса Number of documents Число документов Average terms per document Среднее кол-во слов в документе Smallest document length Минимальная длина документа Longest document length Максимальная длина документа Database directory size Размер каталога базы данных MIME types: MIME-типы: Item Элемент Value Значение Smallest document length (terms) Наименьшая длина документа (слов) Longest document length (terms) Наибольшая длина документа (кол-во слов) Results from last indexing: Результаты последнего индексирования: Documents created/updated Создано/обновлено документов Files tested Проверено файлов Unindexed files Непроиндексированных файлов List files which could not be indexed (slow) Перечислить файы, которые не удалось проиндексировать (медленно) Spell expansion error. Ошибка поиска однокоренных слов. UIPrefsDialog The selected directory does not appear to be a Xapian index Выбранный каталог не является каталогом индексов Xapian This is the main/local index! Этот индекс является главным/локальным! 
The selected directory is already in the index list Этот каталог уже указан в списке индексов Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Выберите каталог индекса Xapian (например, /home/приятель/.recoll/xapiandb) error retrieving stemming languages ошибка получения списка языков Choose Выбрать Result list paragraph format (erase all to reset to default) Формат абзаца в списке результатов (очистите для сброса к значениям по умолчанию) Result list header (default is empty) Заголовок списка результатов (по умолчанию пуст) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Выберите каталог конфигурации Recoll или каталог индексов Xapian (например, /home/me/.recoll или /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read Вероятно, выбранный каталог является каталогом конфигурации Recoll, но чтение конфигуарции невозможно At most one index should be selected Следует выбрать не больше одного индекса Cant add index with different case/diacritics stripping option Невозможно добавить индекс с другими настройками учёта регистра и диакритических знаков Default QtWebkit font Шрифт QtWebkit по умолчанию Any term Любое слово All terms Все слова File name Имя файла Query language Язык запроса Value from previous program exit Значение из предыдущего запуска программы UIPrefsDialogBase User interface Интерфейс Number of entries in a result page Количество записей на страницу результатов Result list font Шрифт списка результатов Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Открывает диалог выбора шрифта списка результатов Reset Вернуть Resets the result list font to the system default Устанавливает шрифт списка результатов в обычный Auto-start simple search on whitespace entry. Начинать простой поиск по вводу пробела. Start with advanced search dialog open. Открывать диалог сложного поиска при запуске. Start with sort dialog open. Открывать диалог сортировки при запуске. Search parameters Параметры поиска Stemming language Язык словоформ Dynamically build abstracts Динамическое выделение конспекта Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Следует ли пытаться построить конспект из найденных документов, опираясь на контекст ключевых слов? Может быть медленным для больших документов. Replace abstracts from documents Замещать конспект, содержащийся в документах Do we synthetize an abstract even if the document seemed to have one? Создавать ли конспект, если документ уже имеет его? Synthetic abstract size (characters) Размер создаваемого конспекта (в символах) Synthetic abstract context words Контекстных слов External Indexes Внешние индексы Add index Добавить индекс Select the xapiandb directory for the index you want to add, then click Add Index Выберите нужный каталог с индексом Xapian и нажмите "Добавить индекс" Browse Просмотр &OK &OK Apply changes Применить изменения &Cancel &Отмена Discard changes Отменить изменения Result paragraph<br>format string Строка форматирования<br>блока результатов Automatically add phrase to simple searches Автоматически добавлять фразу при простом поиске A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. 
Поиск [rolling stones] (два слова) будет изменён на [rolling или stones или (rolling phrase 2 stones)]. Это может поднять результаты, для которых слова следуют именно в том порядке, как введены. User preferences Предпочтения Use desktop preferences to choose document editor. Использовать десктопные настройки для выбора редактора документов. External indexes Внешние индексы Toggle selected Переключить выделенные Activate All Включить все Remove selected Удалить выделенные Remove from list. This has no effect on the disk index. Удалить из списка. Индекс на диске без изменений. Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Определяет формат каждого блока списка результатов. Используйте html-формат qt и замены в стиле printf:<br>%A аннотация<br> %D дата<br> %I название пиктограммы<br> %K ключевые слова (если есть)<br> %L ссылки предварительного просмотра и редактирования<br> %M тип MIME<br> %N количество результатов<br> %R релевантность<br> %S размер<br> %T заголовок<br> %U URL<br> Remember sort activation state. Запомнить состояние сортировки. Maximum text size highlighted for preview (megabytes) Максимальный объём текста с выделением при просмотре (Мб) Texts over this size will not be highlighted in preview (too slow). Тексты большего размера не будут подсвечиваться при предварительном просмотре (медленно). Highlight color for query terms Цвет выделения ключевых слов Deactivate All Выключить все Prefer Html to plain text for preview. Предпочитать HTML тексту для предпросмотра If checked, results with the same content under different names will only be shown once. Показывать результаты с тем же содержанием под разными именами не более одного раза Hide duplicate results. Прятать дубликаты Choose editor applications Выбор редакторов Display category filter as toolbar instead of button panel (needs restart). Показывать фильтр категории документа в виде выпадающего списка, а не панели с кнопками (требуется перезапуск). ViewAction Changing actions with different current values Изменение действий с различными текущими значениями Mime type Тип MIME Command Команда MIME type Тип MIME Desktop Default Взять из окружения Changing entries with different current values Изменение записей с различными текущими значениями ViewActionBase File type Тип файла Action Действие Select one or several file types, then click Change Action to modify the program used to open them Выберите один или несколько типов файлов, затем нажмите "Сменить действие" для изменения программы, работающей с ними Change Action Сменить действие Close Закрыть Native Viewers Встроенные просмотрщики Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. Выберите один или несколько MIME-типов и нажмите"Сменить действие"<br>Также Вы можете закрыть этот диалог и установить флаг "Использовать десктопные настройки"<br> в основной панели, чтобы использовать десктопные настройки вместо данного списка. Select one or several mime types then use the controls in the bottom frame to change how they are processed. Выберите MIME-типы и используйте кнопки в нижней рамке, чтобы изменить порядок обработки файлов. 
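The "Result paragraph format string" tooltip above lists the printf-like substitutions available (%A abstract, %D date, %I icon image name, %K keywords, %L preview/edit links, %M MIME type, %N result number, %R relevance, %S size, %T title, %U URL). A hedged sketch of a custom paragraph format built only from those documented placeholders; this is an example, not the shipped default:

    <!-- illustrative result-list paragraph format -->
    <table><tr>
      <td><img src="%I"></td>
      <td>%R %S <b>%T</b><br>%M &nbsp; %D &nbsp; <i>%U</i><br>%A %K<br>%L</td>
    </tr></table>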
Use Desktop preferences by default Использовать параметры настройки окружения по умолчанию Select one or several file types, then use the controls in the frame below to change how they are processed Выберите типы файлов и используйте кнопки, расположенные в рамке ниже, чтобы изменить порядок обработки файлов Exception to Desktop preferences Исключения для параметров настройки окружения Action (empty -> recoll default) Действие (пусто -> по умолчанию) Apply to current selection Применить к выделению Recoll action: Действие recoll: current value текущее значение Select same Выделить такие же <b>New Values:</b> <b>Новые значения:</b> Webcache Webcache editor Редактор веб-кэша Search regexp Поиск по регулярному выражению WebcacheEdit Copy URL Копировать URL Unknown indexer state. Can't edit webcache file. Неизвестный статус индексатора. Невозможно редактировать файл веб-кэша. Indexer is running. Can't edit webcache file. Идёт индексирование. Невозможно редактировать файл веб-кэша. Delete selection Удалить выделенные Webcache was modified, you will need to run the indexer after closing this window. Содержимое веб-кэша былыо изменено, после закрытия этого окна необходимо запустить индексирование. WebcacheModel MIME MIME Url Url confgui::ConfBeaglePanelW Steal Beagle indexing queue Воспользоваться очередью индексирования Beagle Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) Beagle НЕ должен выполняться. Включить обработку очереди Beagle для индексирования Web-истории Firefox.<br>(для этого следует также установить плагин Beagle для Firefox) Web cache directory name Каталог Веб-кэша The name for a directory where to store the cache for visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Имя каталога, в котором хранится кэш посещенных Веб-страниц.<br>Путь указывается относительно каталога с конфигурацией и не является абсолютным. Max. size for the web cache (MB) Предел размера Веб-кэша (MB) Entries will be recycled once the size is reached При достижении указанного размера кэша старые записи будут удаляться Web page store directory name Имя каталога хранения веб-страниц The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Имя каталога хранения просмотренных веб-страниц.<br>Путь указывается относительно каталога конфигурации и не является абсолютным. Max. size for the web store (MB) Максимальный размер веб-хранилища (МБ) Process the WEB history queue Обрабатывать просмотренные веб-страницы Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Включает индексирование страниц, просмотренных в Firefox.<br>(требуется установка дополнения Recoll для Firefox) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Записи будут удалены при достижении максимального размера хранилища.<br>Целесообразно увеличить размер, так как уменьшение значения не повлечёт усечение сущесвующего файла (в результате приведёт только к расходованию пространства впустую). 
confgui::ConfIndexW Can't write configuration file Невозможно записать файл конфигурации confgui::ConfParamFNW Browse Просмотр Choose Выбрать confgui::ConfParamSLW + + Add entry - - Delete selected entries ~ Edit selected entries confgui::ConfSearchPanelW Automatic diacritics sensitivity Автоматический учёт диакритических знаков <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Автоматически включает учёт диакритических знаков, если строка поиска содержит диакритические знаки (кроме unac_except_trans). В противном случае используйте язык запросов и модификатор <i>D</i> для учёта диакритических знаков. Automatic character case sensitivity Автоматический учёт регистра <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Автоматически включает учёт регистра, если строка поиска содержит заглавные буквы (кроме первой буквы). В противном случае используйте язык запросов и модификатор <i>C</i> учёта регистра. Maximum term expansion count Максимальное число однокоренных слов <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Максимальное число однокоренных слов для одного слова (например, при использовании масок). Стандартное значение 10 000 является разумным и поможет избежать ситуаций, когда запрос кажется зависшим при переборе списка слов. Maximum Xapian clauses count Максимальное число Xapian-предложений <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Максимальное число элементарных условий, добавляемых к запросу Xapian. В некоторых случаях результат поиска однокоренных слов может быть избыточным и занять слишком большой объём памяти. Стандартное значение в 100 000 достаточно для большинства случаев и подходит для современных аппаратных конфигураций. confgui::ConfSubPanelW Global Общее Max. compressed file size (KB) Максимальный размер сжатого файла (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Это значение устанавливает предельный размер сжатых файлов, которые будут обрабатываться. Значение -1 снимает ограничение, 0 отключает распаковку. Max. text file size (MB) Максимальный размер текстового файла (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Это значение устанавливает предельный размер текстовых файлов, которые будут обрабатываться. Значение -1 снимает ограничение. Рекомендуется использовать для исключения файлов журнала большого размера из процесса индексирования. Text file page size (KB) Размер страницы текстового файла (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Если это значение установлено (т.е. 
не равно -1), то при индексировании текстовые файлы разбиваются на блоки соответствующего размера. Данный параметр полезен при выполнении поиска в очень больших текстовых файлах (например, файлах журналов). Max. filter exec. time (S) Максимальное время работы фильтра (сек) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. Внешние фильтры, выполняющиеся дольше указанного предельного времени работы, принудительно завершаются. Это может помочь в тех редких случаях, когда фильтр (например, postscript) зацикливается при обработке некоторого документа. Значение, равное -1, выключает проверку времени работы. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Работа внешних фильтров, длящаяся дольше указанного времени, будет прервана. Применяется для редких случаев (например, с фильтром postscript), когда возникает зацикливание фильтра при обработке какого-то документа. Установите значение -1, чтобы снять ограничение. Only mime types Только MIME-типы An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Исчерпывающий перечень индексируемых типов MIME.<br>Другие типы индексироваться не будут. Обычно пуст и неактивен Exclude mime types Исключить MIME-типы Mime types not to be indexed Типы MIME, индексирование которых проводиться не будет confgui::ConfTopPanelW Top directories Каталоги верхнего уровня The list of directories where recursive indexing starts. Default: your home. Список каталогов, где начинается рекурсивное индексирование. По умолчанию: домашний каталог. Skipped paths Пропускать Stemming languages Языки со словоформами The languages for which stemming expansion<br>dictionaries will be built. Языки, для которых будут построены<br>словари однокоренных слов. Log file name Файл журнала The file where the messages will be written.<br>Use 'stderr' for terminal output Файл, куда будут записываться сообщения.<br>Используйте 'stderr' для вывода в терминал Log verbosity level Уровень подробности журнала This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Это значение определяет подробность поступающих сообщений,<br>от ошибок до отладочных данных. Index flush megabytes interval Интервал сброса данных индекса (МБ) Max disk occupation (%) Максимальное использование диска (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Процент занятости диска, при котором индексирование будет прервано (во избежание заполнения доступного места).<br>Обычно: 0 (отключает проверку). No aspell usage Не использовать aspell Aspell language Язык aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Язык словаря aspell. Обычно вроде 'en' или 'ru'...<br>Если значение не указано, будет использовано окружение (локаль), что обычно работает. Чтобы посмотреть, какие варианты доступны в системе, наберите 'aspell config' и проверьте, какие .dat-файлы есть в каталоге 'data-dir'. 
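The aspell tooltips above suggest typing 'aspell config' and looking for .dat files under the data-dir directory to see which dictionaries are installed. A small shell sketch of that check, assuming aspell is installed (dictionary locations vary between systems):

    # list the installed aspell dictionary data files, per the hint above
    aspell config | grep data-dir          # show the configured data directory
    ls "$(aspell config data-dir)"/*.dat   # the available language .dat files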
Database directory name Каталог базы данных The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Имя каталога, в котором хранится индекс<br>Относительный путь берётся от каталога конфигурации. Обычно 'xapiandb'. Use system's 'file' command Использовать системную 'file' Use the system's 'file' command if internal<br>mime type identification fails. Использовать системную команду 'file' <br>при сбое внутреннего определителя типов MIME. These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Имена каталогов, которые индексирование обойдёт.<br>Может содержать шаблоны. Обязано подходить к путям, которые видит индексатор (например, если topdirs включает '/home/me' и '/home' является ссылкой на '/usr/home', то правильная запись должна быть '/home/me/tmp*', а не '/usr/home/me/tmp*') This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Это значение определяет количество данных, индексируеммых между сбросами на диск.<br>Помогает контролировать использование памяти индексатором. Значение по умолчанию: 10МБ Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Отключает использование aspell для создания вариантов написания в обозревателе терминов.<br> Полезно, если aspell отсутствует или не работает. The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Язык словаря aspell. Выгладит как «en» или «ru»...<br>Если значение не установлено, для его расчёта будeт использованs параметры системы (локаль). Чтобы узнать, какие параметры доступны в системе, наберите «aspell config» и проверьте, какие .dat-файлы содержатся в каталоге 'data-dir'. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Имя каталога, в котором хранится индекс<br>Путь указывается относительно каталога конфигурации и не является абсолютным. По умолчанию: «xapiandb». Unac exceptions Исключения unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Это исключения для механизма unac, который по умолчанию отбрасывает все диакритические знаки и проводит каноническую декомпозицию. Можно переопределить механизм удаления надстрочных знаков для отдельных символов или добавить правила декомпозиции (например, для лигатур). В каждой, отделённой запятой записи первый символ является исходным, а остальные — его интерпретация. These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. 
The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Полный путь к директории, которая не будет затрагиваться при индексировании. <br>Может содержать маски. Записи должны совпадать с путями, которые видит индексатор (например, если topdirs включает «/home/me», а «/home» на самом деле ведёт к «/usr/home», правильной записью skippedPath будет «/home/me/tmp*», а не «/usr/home/me/tmp*») Max disk occupation (%, 0 means no limit) Максимальный объём использования диска (%, 0 — без ограничений) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. Процент занятого пространства на диске — общее пространство диска, занятое не только индексом, — при котором индексирование завершится ошибкой и прекратится.<br>По умолчанию значение 0 снимает любые ограничения. uiPrefsDialogBase User preferences Предпочтения User interface Интерфейс пользователя Number of entries in a result page Количество записей на странице результатов If checked, results with the same content under different names will only be shown once. Показывать результаты с одинаковым содержанием под разными именами не более одного раза. Hide duplicate results. Скрывать повторяющиеся результаты. Highlight color for query terms Цвет выделения ключевых слов Result list font Шрифт списка результатов Opens a dialog to select the result list font Открыть диалог выбора шрифта списка результатов Helvetica-10 Helvetica-10 Resets the result list font to the system default Установка используемого по умолчанию системного шрифта для списка результатов Reset Сброс Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Определяет формат каждого блока списка результатов. Используйте html-формат qt и замены в стиле printf:<br>%A аннотация<br> %D дата<br> %I название пиктограммы<br> %K ключевые слова (если есть)<br> %L ссылки предварительного просмотра и редактирования<br> %M тип MIME<br> %N количество результатов<br> %R релевантность<br> %S размер<br> %T заголовок<br> %U URL<br> Result paragraph<br>format string Строка форматирования<br>блока результатов Texts over this size will not be highlighted in preview (too slow). Тексты большего размера не будут подсвечиваться при просмотре (медленно). Maximum text size highlighted for preview (megabytes) Максимальный размер текста, подсвечиваемого при просмотре (Мб) Use desktop preferences to choose document editor. Использовать десктопные настройки для выбора редактора документов. Choose editor applications Выбор приложений-редакторов Display category filter as toolbar instead of button panel (needs restart). Показывать фильтр категории документа в виде выпадающего списка, а не панели с кнопками (требуется перезапуск). Auto-start simple search on whitespace entry. Начинать простой поиск по вводу пробела. Start with advanced search dialog open. Открывать диалог сложного поиска при запуске. Start with sort dialog open. Открывать диалог сортировки при запуске. Remember sort activation state. Запомнить порядок сортировки результатов. Prefer Html to plain text for preview. 
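The confgui::ConfTopPanelW tooltips above describe topdirs (where recursive indexing starts), skippedPaths (wildcard patterns for paths the indexer must not enter, matched against the paths the indexer actually sees) and the unac exceptions list (space-separated entries where the first character is the source and the rest its replacement). A hedged recoll.conf sketch using those parameter names with placeholder values only, not the defaults:

    # illustrative ~/.recoll/recoll.conf fragment
    topdirs = ~/Documents ~/mail
    skippedPaths = ~/Documents/tmp* ~/mail/spam
    unac_except_trans = ßss œoe æae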
Предпочитать HTML простому тексту при предварительном просмотре. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Suppress all beeps. Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weight). Search parameters Параметры поиска Stemming language Язык со словоформами A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Поиск [rolling stones] (два слова) будет изменён на [rolling или stones или (rolling phrase 2 stones)]. Это позволит повысить приоритет поиска результатов, в которых слова следуют именно в указанном порядке. Automatically add phrase to simple searches Автоматически объединять слова во фразу при простом поиске Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Создавать фрагменты для результатов поиска с использованием контекста слов запроса? Процесс может оказаться медленным для больших документов. Dynamically build abstracts Динамическое создание фрагментов Do we synthesize an abstract even if the document seemed to have one? Создавать фрагмент, даже если документ уже содержит собственный? Replace abstracts from documents Заменять фрагменты документов Synthetic abstract size (characters) Размер создаваемого фрагмента (кол-во символов) Synthetic abstract context words Кол-во слов вокруг слов поиска во фрагментах The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Список слов, которые будут автоматически преобразованы в расширение файла вида ext:xxx в запросе. Query language magic file name suffixes. Суффиксы имён файлов, автоматически распознаваемые в языке запросов. Enable Включить External Indexes Внешние индексы Toggle selected Переключить выделенные Activate All Включить все Deactivate All Отключить все Remove from list. This has no effect on the disk index. Удалить из списка. Индекс на диске останется без изменений. Remove selected Удалить выделенные Click to add another index directory to the list Нажмите, чтобы добавить ещё одну папку с индексом в список Add index Добавить индекс Apply changes Применить изменения &OK &ОК Discard changes Отменить изменения &Cancel &Отмена Abstract snippet separator Разделитель фрагментов в результатах Use <PRE> tags instead of <BR>to display plain text as html. Использовать теги <PRE> вместо <BR>для отображения простого текста как html. Lines in PRE text are not folded. Using BR loses indentation. Строки в PRE-тексте не переносятся. При использовании BR теряются отступы. Style sheet Таблица стилей Opens a dialog to select the style sheet file Открыть диалог выбора файла стилей Choose Выбрать Resets the style sheet to default Сброс таблицы стилей к значению по умолчанию Lines in PRE text are not folded. Using BR loses some indentation. Строки текста PRE не переносятся. Использование BR теряет часть отступов. Use <PRE> tags instead of <BR>to display plain text as html in preview. Использовать теги <PRE> вместо <BR> для отображения обычного текста как HTML при предварительном просмотре. 
Result List Список результатов Edit result paragraph format string Редактировать строку формата абзаца результатов Edit result page html header insert Редактировать вставку HTML-заголовка списка результатов Date format (strftime(3)) Формат даты (по strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Порог частоты в процентах, выше которого слова в автофразе не используются. Часто появляющиеся слова представляют основную проблему обработки фраз. Пропуск слов увеличивает стек фразы и уменьшает эффективность функции автофразы. Значение по умолчанию — 2 (процента). Autophrase term frequency threshold percentage Порог частоты появления слов в автофразе в процентах Plain text to HTML line style Стиль отображения простого текста в HTML Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Строки тега PRE не переносятся. Тег BR теряет часть отступов. Тег PRE-Wrap может дать желаемый результат. <BR> <BR> (перевод строки) <PRE> <PRE> <PRE> + wrap <PRE> + wrap Disable Qt autocompletion in search entry. Отключить Qt-автозавершение при заполнении строки поиска. Search as you type. Искать по мере ввода. Paths translations Корректировка путей Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Щёлкните, чтобы добавить другой каталог индекса в список. Можно выбрать каталог конфигурации Recoll или индекс Xapian. Snippets window CSS file CSS-файл для окна «Фрагменты» Opens a dialog to select the Snippets window CSS style sheet file Открыть диалог выбора таблицы стилей CSS для окна «Фрагменты» Resets the Snippets window style Сброс стиля окна «Фрагменты» Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Стиль отображения фильтров: в виде кнопок-переключателей, поле со списком на панели инструментов или меню. Document filter choice style: Стиль отображения фильтров: Buttons Panel Панель кнопок Toolbar Combobox Поле со списком Menu Меню Show system tray icon. Показывать значок в системном лотке. Close to tray instead of exiting. Скрывать в лотке вместо выхода. Start with simple search mode Открывать диалог простого поиска при запуске User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Применить пользовательский стиль окна фрагментов.<br> Примечание: вставка заголовка страницы результатов также включена в заголовок окна фрагментов. Synonyms file Файл синонимов Show warning when opening temporary file. Показывать предупреждение при открытии временного файла. Highlight CSS style for query terms CSS-стиль подсветки слов запроса Recoll - User Preferences Recoll — Пользовательская настройка Set path translations for the selected index or for the main one if no selection exists. Задать корректировку путей для выбранного или главного индекса, если ничего не выбрано. Activate links in preview. Активировать ссылки в просмотре. Make links inside the preview window clickable, and start an external browser when they are clicked. Делать ссылки внутри окна просмотра активными и открывать их в браузере по щелчку. 
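For illustration, the index-configuration strings translated above (database directory, skipped paths, flush interval, aspell, unac exceptions) correspond to plain entries in the recoll.conf file. The sketch below is only an example: parameter names such as dbdir, skippedPaths, idxflushmb, noaspell and aspellLanguage are quoted from memory and should be checked against the recoll.conf documentation, and all values are invented; only topdirs and unac_except_trans are named explicitly in the strings themselves.
  # ~/.recoll/recoll.conf -- illustrative sketch, not shipped defaults
  topdirs = ~/Documents ~/Mail
  # A non-absolute dbdir is taken relative to the configuration directory
  dbdir = xapiandb
  # Directories the indexer will not enter; wildcards allowed, and entries must
  # match the paths as the indexer sees them (see the skippedPath note above)
  skippedPaths = /home/me/tmp* /home/me/.cache
  # Megabytes of indexed data between flushes to disk (default 10)
  idxflushmb = 10
  # Aspell-based spelling approximations in the term explorer
  noaspell = 0
  aspellLanguage = en
  # unac exceptions: space-separated entries, first character is the source,
  # the rest is its decomposition (e.g. ligatures)
  unac_except_trans = ßss œoe æae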
recoll-1.26.3/qtgui/i18n/recoll_pl.ts0000644000175000017500000030107513303776057014260 00000000000000 AdvSearch All clauses Każdy warunek Any clause Któryś warunek media multimedia other pozostałe Bad multiplier suffix in size filter Błędna jednostka we filtrze rozmiaru text tekstowe spreadsheet arkusze presentation prezentacje message wiadomości AdvSearchBase Advanced search Dokładne szukanie Search for <br>documents<br>satisfying:</br></br> Znajdź <br>dokumenty<br>spełniacjące:</br></br> Delete clause Usuń warunek Add clause Dodaj warunek Restrict file types Określ typ pliku Check this to enable filtering on file types Zaznacz, by określić typ pliku By categories Jako kategoria Check this to use file categories instead of raw mime types Zaznacz, by użyć kategorii Save as default Zapamiętaj wybrane typy Searched file types Przeszukaj plik All ----> Wszystkie ----> Sel -----> Zaznaczone -----> <----- Sel <----- Zaznaczone <----- All <----- Wszystkie Ignored file types Pomiń pliki Enter top directory for search Podaj szczyt katalogu szukania Browse Przeglądaj Restrict results to files in subtree: Tylko pliki LEŻĄCE W katalogu: Start Search Szukaj Close Zamknij All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored.</br></br> Podane warunki (pola z prawej strony) będą użyte razem (dla zaznaczonego "Każdy warunek") lub oddzielnie (dla zaznaczonego "Któryś warunek"). <br>Pola "Któreś", "Wszystkie" i "Żadne" przyjmują pojedyńcze wyrazy lub frazy w cudzysłowiu.<br>Pola bez danych są ignorowane.</br></br> Invert LEŻĄCE POZA Minimum size. You can use k/K,m/M,g/G as multipliers Dopuszczalne jednostki: k/K, m/M, g/G Min. Size Większy od: Maximum size. You can use k/K,m/M,g/G as multipliers Dopuszczalne jednostki: k/K, m/M, g/G Max. Size Mniejszy od: Filter Filtry From Po: To Przed: Check this to enable filtering on dates Zaznacz, by określić datę Filter dates Po dacie Find Znajdź Check this to enable filtering on sizes Zaznacz, by określić rozmiar Filter sizes Po rozmiarze CronToolW Cron Dialog Ustaw cykl (CRON) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksuj cyklicznie (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Każde pole może zawierać wieloznacznik (*), pojdyńczą wartość, listę po przecinku (1,3,5) oraz zakres (1-7). Tak samo<span style=" font-style:italic;">jak</span>gdyby to był plik Crontab. Dlatego możliwe jest użycie składni Crontab. 
(zobacz crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Przykładowo wpisując <span style=" font-family:'Courier New,courier';">*</span> w <span style=" font-style:italic;">"Dni tygodnia", </span><span style=" font-family:'Courier New,courier';">12,19</span> w <span style=" font-style:italic;">"Godziny"</span> oraz <span style=" font-family:'Courier New,courier';">15</span> w <span style=" font-style:italic;">"Minuty"</span> uruchomili byśmy indeksowanie (recollindex) każdego dnia o 00:15 oraz 19:15</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Indeksowanie cykliczne (nawet te bardzo częste) jest mniej efektowne niż indeksowanie w czasie rzeczywistym.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Dni tygodnia (* or 0-7, 0 lub 7 to Niedziela) Hours (* or 0-23) Godziny (* lub 0-23) Minutes (0-59) Minuty (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Kliknij <span style=" font-style:italic;">Wyłącz</span>, aby zatrzymać automatyczne indeksowanie. <span style=" font-style:italic;">Włącz</span>, aby je rozpocząć. <span style=" font-style:italic;">Anuluj</span>, aby utrzymać obecny stan.</p></body></html> Enable Włącz Disable Wyłącz It seems that manually edited entries exist for recollindex, cannot edit crontab Nie można zmienić crontab. Wygląda na to, że istnieją ręczne wpisy dla recollindex. Error installing cron entry. Bad syntax in fields ? Błąd przy rejestrowaniu cyklu. Błędna składnia w polach? 
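For reference, the schedule used as an example in the dialog text above ('*' for days of week, '12,19' for hours, '15' for minutes) corresponds roughly to a crontab line of the following shape. This is only a sketch: the entry the tool actually installs may differ, for instance by setting environment variables before calling recollindex.
  # minute  hour   day-of-month  month  day-of-week  command
  15        12,19  *             *      *            recollindex
With this entry, cron starts recollindex every day at 12:15 and 19:15.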
EditDialog Dialog Okno dialogowe EditTrans Source path Ścieżka źródłowa Local path Ścieżka lokalna Config error Błąd konfiguracji Original path Ścieżka oryginalna EditTransBase Path Translations Ścieżka tłumaczenia Setting path translations for Ustawienie ścieżki translacji dla Select one or several file types, then use the controls in the frame below to change how they are processed Wybierz jeden lub kilka typów pliku, następnie wskaż w ramce poniżej jak mają zostać przetworzone Add Dodaj Delete Usuń Cancel Anuluj Save Zapisz FirstIdxDialog First indexing setup Początkowa konfiguracja indeksowania <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Indeks dla tej konfiguracji nie istnieje.</span><br /><br />Jeśli tylko chcesz indeksować swój katalog domowy użwyając fabrcznych ustawień, wciśnij przycisk <span style=" font-style:italic;">Rozpocznij indeksowanie </span>. Szczegóły możesz ustawić również później. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Jeśli chesz mieć większą kontrolę, użyj następujących odnośników w celu konfiguracji indeksowania oraz jego harmonogramu.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">To samo możesz również otrzymać poźniej wybierając <span style=" font-style:italic;">Ustawienia</span> z menu.</p></body></html> Indexing configuration Konfiguracja indeksowania This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Tutaj możesz wybrać katalogi do indeksowania, oraz inne parametry tj. wyłączenie ścieżek plików czy ich nazw, domyślny zestaw znaków, etc. Indexing schedule Harmonogram indeksowania This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Tutaj możesz wybrać między indeksowaniem w kolejce, a indeksowaniem nabierząco, jak i ustaleniem automatycznej kolejki indeksowania (dzięki Cron) Start indexing now Rozpocznij indeksowanie IdxSchedW Index scheduling setup Konfiguracja harmonogramu indeksowania <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Indeksowanie <span style=" font-weight:600;">Recoll</span> może być uruchomione na stałe (indeksując każdą zmianę) lub w określonych cyklach.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Instrukcja obsługi (EN) może pomóc wybrać rozwiązanie dla Ciebie (wciśnij F1). 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Narzędzie to pomoże Ci zaplanować indeksowanie cykliczne lub wybierzesz indeksowanie "na bieżąco" po zalogowaniu (lub jedno i drugie, co rzadko jest sendowne).</p></body></html> Cron scheduling Planowanie z użyciem Cron The tool will let you decide at what time indexing should run and will install a crontab entry. Tutaj zdecydujesz o jakim czasie indeksowanie ma być uruchamiane (po przez wpis do crontab). Real time indexing start up Uruchom indeksowanie "na bieżąco" Decide if real time indexing will be started when you log in (only for the default index). Pozwala uruchomić indeksowanie po zalogowaniu. ListDialog Dialog Okno dialogowe GroupBox Grupa Main No db directory in configuration Brak katalogu dla bazy danych w konfiguracji "history" file is damaged or un(read)writeable, please check or remove it: Plik "history" jest uszkodzony lub brak możliwości jego odczytu/zapisu, zmień to lub go usuń: Preview Close Tab Zamknij kartę Cancel Anuluj Missing helper program: Brak programu usprawniającego: Can't turn doc into internal representation for Nie mogę przemienić dokumentu na władny format Creating preview text Tworzę podgląd tekstu Loading preview text into editor Ładuję podgląd tekstu do edytora &Search for: &Szukaj: &Next &Następny &Previous &Poprzedni Clear Wyczyść Match &Case Sprawdzaj &wielkość liter Error while loading file Błąd ładowania pliku PreviewTextEdit Show fields Pokaż pola Show main text Pokaż tekst główny Print Drukuj Print Current Preview Drukuj obecny podgląd Show image Pokaż obraz Select All Zaznacz wszystko Copy Kopiuj Save document to file Zapisz dokument do pliku Fold lines Zwiń linie Preserve indentation Zachowaj wcięcia QObject Global parameters Parametry globalne Local parameters Parametry lokalne <b>Customised subtrees</b> <b>Dostosowana gałąź</b> The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty.</br> Lista podkatalogów w zaindeksowanej hierarchii <br> dla których część parametrów musi być ustalona ponownie. Domyślnie: pusty.</br> <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons.</br></br></i> <i>Poniższe parametry tyczą są podświetlonego katalogu z powyższej listy.<br> Jeśli powyższa lista jest pusta lub podświetla pustą linię poniższe ustawienia tyczą się najpłytszego katalogu.<br>Możesz dodać lub usunąć katalog klikając przyciski +/-</br></br></i> Skipped names Wykluczenia These are patterns for file or directory names which should not be indexed. Tutaj ustawiasz reguły wykluczające indeksowanie plików i katalogów. Default character set Domyślny zestaw znaków This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used.</br> To jest zestaw znaków służący do odczytu plików i nie jest tożsamy z ustawieniami wewnętrznymi (np: czyste pliki tesktowe)<br>Domyślnie jest pusty, a użyta jest wartość ze środowiska NLS.</br> Follow symbolic links Idź za dowiązaniami symbolicznymi Follow symbolic links while indexing. 
The default is no, to avoid duplicate indexing Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Indeksując, idź za dowiązaniami symbolicznymi. Domyślnia wartość to NIE, chroni przed zduplikowanymi indeksami. Index all file names Indeksuj wszystkie nazwy plików Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indeksuj nazwy plików dla których zawartość nie może być rozpoznana lub przetworzona (Nie lub nieobsługiwany typ MIME). Domyślnie Tak. Search parameters Parametry szukania Web history Historia sieci RTIToolW Real time indexing automatic start Automatyczny start indeksowania w czasie rzeczywistym <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksacja może być uruchomiona w tle (daemon), aktualizując indeks nabierząco. Zyskujesz zawsze aktualny indeks, tracąc część zasobów systemowych.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Uruchom indeksowanie w tle razem ze startem komputera. Also start indexing daemon right now. Dodatkowo natychmiast uruchom indeksowanie w tle. Replacing: Podmiana: Replacing file Podmiana pliku Can't create: Nie mogę utworzyć: Warning Ostrzeżenie Could not execute recollindex Nie można wykonać recollindex Deleting: Usuwanie: Deleting file Usuwanie pliku Removing autostart Usuwanie autostartu Autostart file deleted. Kill current process too ? Usunięto plik autostartu. Zamknąć również bieżący proces? RclMain (no stemming) wyłącz ciosanie (ang. stemming) (all languages) (każdy język) error retrieving stemming languages Błąd pobierania "reguł ciosania" (ang. 
stemming languages) Indexing in progress: Indeksowanie w tracie: Purge Wyczyść Stemdb Stemdb Closing Zamykanie Unknown Nieznane Query results Wynik zapytania Cannot retrieve document info from database Brak możliwości pobrania informacji o dokumencie z bazy danych Warning Ostrzeżenie Can't create preview window Nie można utworzyć okna podglądu This search is not active any more To wyszukanie przestało być aktywne Cannot extract document or create temporary file Nie można wypakować dokumentu lub stworzyć plik tymczasowy Executing: [ Wykonuję: [ About Recoll Karta Recoll History data Historia danych Document history Historia dokumentów Update &Index Odśwież &Indeks Stop &Indexing Zatrzymaj &Indeksowanie All Wszystko media multimedia message wiadomości other pozostałe presentation prezentacje spreadsheet arkusze text tekstowe sorted posortowane filtered przefiltrowane No helpers found missing Wszystkie rozszerzenia znaleziono Missing helper programs Brakujące rozszerzenia Document category filter Filtr kategorii dokumentu No external viewer configured for mime type [ Brak skonfigurowanej zewnętrzenej przeglądarki typów MIME [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Brak przeglądarki dla typu MIME %1: %2 . Chcesz to ustawić teraz? Can't access file: Nie mogę uzyskać dostępu do pliku: Can't uncompress file: Nie mogę wypakować pliku: Save file Zapisz plik Result count (est.) Liczba wyników (szac.) Could not open external index. Db not open. Check external indexes list. Nie mogę otworzyc zewnętrznego indeksu. Nie otwarta baza danych. Sprawdź listę zewnętrznych indeksów. No results found Brak wyników None Nic Updating Odświeżanie Done Zakończone Monitor Sprawdzanie Indexing failed Porażka indeksowania The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Obecny proces indeksowania uruchomiono z innego okna. Kliknij Ok, by zamknąć proces. Erasing index Usuwanie indeksu Reset the index and start from scratch ? Ponownie spisać indeks od zera? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program</br></br> Trwa odpytywanie.<br>z powodu ograniczeń biblioteki indeksowania,<br>anulowanie zamknie program</br></br> Error Błąd Index not open Indeks jest zamknięty Index query error Błąd odpytania indeksu Content has been indexed for these mime types: Zawartość została zaindeksowana dla tych typów MIME: Can't update index: indexer running Nie mogę zaktualizować indeksu: pracujący indekser Indexed MIME Types Zaindeksowane typy MIME Bad viewer command line for %1: [%2] Please check the mimeview file Błędna komenda przeglądarki dla typu %1: [%2] Sprawdź plik widoku MIME Viewer command line for %1 specifies both file and parent file value: unsupported Polecenie czytnika dla %1 podaje zarówno plik jak i wartość pliku rodzica: niewspierane Cannot find parent document Nie można odszukać rodzica dokumentu Indexing did not run yet Indeksowanie nie zostało jeszcze uruchomione External applications/commands needed for your file types and not found, as stored by the last indexing pass in Brak zewnętrznych aplikacji|komend wymaganych przez twoje typy plików. Index not up to date for this file. Refusing to risk showing the wrong entry. Indeks tego pliku jest nieaktualny. Odmawiam podania błędnych wyników. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. 
Kliknij Ok by uaktualnić indeks tego pliku, po zakończeniu ponów zapytanie lub Anuluj. Indexer running so things should improve when it's done Indeksowanie w trakcie, spodziewana poprawa po zakończeniu. Sub-documents and attachments Poddokumenty i załączniki RclMainBase Recoll Recoll Search tools Narzędzia wyszukiwania Result list Wyniki &File &Plik &Tools &Narzędzia &Preferences &Ustawienia &Help &Pomoc E&xit &Zakończ Ctrl+Q Ctrl+Q Update &index &Aktualizacja indeksu &Erase document history &Usuń historię dokumentu &About Recoll &Karta Recoll &User manual &Instrukcja Document &History &Historia dokumentu Document History Historia Dokumentu &Advanced Search &Zaawansowane szukanie Advanced/complex Search Złożone szukanie &Sort parameters Parametry &sortowania Sort parameters Parametry sortowania Term &explorer Przejrzyj &terminy Term explorer tool Przeglądanie terminów Next page Następna strona Next page of results Następna strona wyników First page Pierwsza strona Go to first page of results Przejdź do pierwszej strony wyników Previous page Poprzednia strona Previous page of results Poprzednia strona wyników External index dialog Zewnętrzny indeks All Wszystko &Show missing helpers Pokaż &brakujących pomocników PgDown PgDown PgUp PgUp &Full Screen Pełen &Ekran F11 F11 Full Screen Pełen ekran &Erase search history &Usuń historię szukania Sort by dates from oldest to newest Sortuj po dacie: od najstarszego Sort by dates from newest to oldest Sortuj po dacie: od najnowszego Show Query Details Pokaż szczegóły zapytania &Rebuild index &Odnów indeks &Show indexed types Pokaż zaindeksowane &typy Shift+PgUp Shift+PgUp &Indexing schedule &Plan indeksowania E&xternal index dialog Zewnętrzny indeks &Index configuration &Konfiguracja indeksu &GUI configuration Konfiguracja &GUI &Results &Wyniki Sort by date, oldest first Sortuj po dacie: od najstarszego Sort by date, newest first Sortuj po dacie: od najnowszego Show as table Pokaż jako tabelę Show results in a spreadsheet-like table Pokaż wyniki jako arkusz Save as CSV (spreadsheet) file Zapisz jako plik CSV (arkusz) Saves the result into a file which you can load in a spreadsheet Zapisz wyniki do pliku czytelnego przez arkusz Next Page Następna strona Previous Page Poprzednia strona First Page Pierwsza strona RecollModel Abstract Abstrakcja Author Autor Document size Rozmiar dokumentu Document date Data dokumentu File size Rozmiar pliku File name Nazwa pliku File date Data pliku Keywords Słowa klucze Original character set Oryginalny zestaw znaków Relevancy rating Trafność Title Tytuł URL URL Mtime Czas modyfikacji Date Data Date and time Data i czas Ipath Ipath MIME type Typ MIME ResList Result list Lista wyników (show query) (Pokaż zapytanie) Document history Historia dokumentu <p><b>No results found</b><br /></p> <p><b>Brak wyników</b><br /></p> Previous Poprzedni Next Następny Unavailable document Dokument niedostępny Preview Podgląd Open Otwórz <p><i>Alternate spellings (accents suppressed): </i></p> <p><i>Alternatywna pisownia (ignorowane akcenty): </i></p> Documents Dokumenty out of at least z co najmniej for dla <p><i>Alternate spellings: </i></p> <p><i>Alternatywna pisownia: </i></p> Duplicate documents Duplikaty dokumentów These Urls ( | ipath) share the same content: Te URLe ( | ipath) mają tę samą zawartość: Result count (est.) 
Liczba wyników (oszacowana) Query details Szczegóły zapytania ResTable &Reset sort &Reset sortowania &Delete column &Usuń kolumnę Save table to CSV file Zapisz tabelę jako plik CSV Can't open/create file: Nie można otworzyć|utworzyć pliku: &Save as CSV &Zapisz jako CSV Add "%1" column Dodaj "%1" kolumnę ResultPopup &Preview &Podgląd &Open &Otwórz Copy &File Name &Kopiuj nazwę pliku Copy &URL Kopiuj &URL &Write to File Zapisz &do pliku Save selection to files Zapisz zaznaczenie do pliku Preview P&arent document/folder Podgląd rodzica dokumentu|katalogu &Open Parent document/folder &Otwórz dokument|katalog rodzica Find &similar documents Znajdź &podobne dokumenty Open &Snippets window Otwórz okno &snipetów Show subdocuments / attachments Pokaż poddokumenty|załączniki SSearch Any term Któryś termin All terms Każdy termin File name Nazwa pliku Query language Język zapytań Bad query string Błędne zapytanie Out of memory Brak pamięci Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval examples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (<F1>) for more detail. </br></br></br></br></br></br></br></br></br></br></br> Wprowadź wyrażenie pytające. Ściąga:<br> <i>termin1 termin2</i> : 'termin1' i 'termin2' w którymś polu.<br> <i>pole:termin1</i> : 'termin1' w polu 'pole'.<br> Standardowe pola nazwy|synonimy:<br> tytuł/przedmiot/napis, autor/od, odbiorca/do, nazwa pliku, ext.<br> Pseudo-pola: katalog, mime/format, typ/rclcat, data.<br> Przykład przedziału dat: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>termin1 termin2 LUB termin3</i> : termin1 ORAZ (termin2 LUB termin3).<br> Nawiasy aktualnie niedozwolone.<br> <i>"termin1 termin2"</i> : fraza (wystąpienie dokładne). Dostępne modyfikatory:<br> <i>"termin1 termin2"p</i> : nieuporządkowane sąsiednie szukanie z domyślną odległością.<br> Użyj <b>Pokaż zapytanie</b> jeśli jesteś niepewny wyników i sprawdź je z instrukcją (<F1>) . </br></br></br></br></br></br></br></br></br></br></br> Enter file name wildcard expression. Wprowadź wieloznakowe (wildcard) wyrażenie nazwy pliku Enter search terms here. Type ESC SPC for completions of current term. Wprowadź tutaj szukane terminy. Wpisz ESC SPC by uzupełnić bieżący termin. SSearchBase SSearchBase SSearchBase Clear Wyczyść Ctrl+S Ctrl+S Erase search entry Usuń szukany wpis Search Szukaj Start query Start zapytania Enter search terms here. Type ESC SPC for completions of current term. Wprowadź tutaj szukane terminy. Wpisz ESC SPC by uzupełnić bieżący termin. Choose search type. Wybierz typ szukania. 
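A few concrete searches built from the query-language cheat sheet above (all field values are invented for illustration):
  strategy plan OR roadmap      -> strategy AND (plan OR roadmap)
  author:smith ext:pdf          -> 'smith' in the author field, limited to .pdf files
  dir:/home/me/projects report  -> 'report' in documents under that directory
  date:2009-03-01/P2M           -> documents dated within two months of 2009-03-01
  "rolling stones"p             -> unordered proximity search with the default distance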
SearchClauseW Select the type of query that will be performed with the words Wybierz typ zapytania, który będzie użyty z wyrazami Number of additional words that may be interspersed with the chosen ones Liczba dodatkowych wyrazów, które mogą być przeplatane z wybranymi No field Bez pola Any Któryś All Każdy None Żaden Phrase Fraza Proximity Sąsiedztwo File name Nazwa pliku Snippets Snippets Snipety Find: Znajdź: Next Następny Prev Poprzedni SnippetsW Search Szukaj SpellBase Term Explorer Przegląd terminów &Expand &Rozszerz Alt+E Alt+E &Close &Zamknij Alt+C Alt+C No db info. Brak informacji bd. Match Dopasowanie Case Wielkość znaków (Case) Accents Akcenty SpellW Wildcards Wieloznaczniki (wildcards) Regexp Wyrażenie regułowe (regexp) Stem expansion Roszerzenie rdzenia (Stem expansion) Spelling/Phonetic Pisownia/Fonetyczność error retrieving stemming languages Błąd pobierania "reguł ciosania" (ang. stemming languages) Aspell init failed. Aspell not installed? Nieudany start Aspell. Nie zainstalowano Aspell? Aspell expansion error. Błąd rozszerzenia Aspell. No expansion found Nieznalezione rozszerzenie Term Termin Doc. / Tot. Dok. / Razem Index: %1 documents, average length %2 terms.%3 results Indeks: %1 dokumenty, średnia długość %2 terminów.%3 wyników %1 results %1 wyników List was truncated alphabetically, some frequent Lista obcięta alfabetycznie, część częsta terms may be missing. Try using a longer root. Terminy mogą zginąć. Użyj dłuższego rdzenia Show index statistics Pokaż statystyki indeksowania Number of documents Liczba dokumentów Average terms per document Średnia terminów na dokument Smallest document length Najmniejsza długość dokumentu Longest document length Największa długość dokumentu Database directory size Rozmiar katalogu bazy danych MIME types: Typy MIME: Item Element Value Wartość UIPrefsDialog error retrieving stemming languages Błąd pobierania "reguł ciosania" (ang. stemming languages) The selected directory does not appear to be a Xapian index Wybrany katalog nie wygląda jak indeks Xapian This is the main/local index! To jest główny|lokalny indeks! The selected directory is already in the index list Wybrany słownik już należy do indeksu Choose Wybierz Result list paragraph format (erase all to reset to default) Format paragrafu listy wyników (usuń wszystko by wróćić do domyślnych) Result list header (default is empty) Nagłówek listy wyników (domyślnie pusty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Wybierz katalog konfiguracji recoll lub katalog indeksu xapian (np.: /home/ja/.recoll lub /home/ja/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read Wybrany katalog wygląda jak katalog konfiguracji Recoll, jednakże kofiguracja nie może być przeczytana At most one index should be selected Co najwyżej jeden indeks powinnien być wyberany Cant add index with different case/diacritics stripping option Nie można dodać indeksu z opcją różnej wielkości-liter/znakach-diakrytycznych ViewAction Command Komenda MIME type Typ Mime Desktop Default Domyślnie ustawienia pulpitu Changing entries with different current values Zmiana wpisów o różne obecne wartości ViewActionBase Native Viewers Systemowy czytnik Close Zamknij Select one or several mime types then use the controls in the bottom frame to change how they are processed. 
Wybierz jedno lub kilka typów MIME po czym określ jak mają być przetwarzane używając kontrolek na dole ramki Use Desktop preferences by default Użyj domyślnie ustawień Pulpitu Select one or several file types, then use the controls in the frame below to change how they are processed Wybierz jeden lub kilka typów pliku, następnie wskaż w ramce poniżej jak mają zostać przetworzone Exception to Desktop preferences Wyjątki dla ustawień Pulpitu Action (empty -> recoll default) Czyń (pusty -> recoll domyślnie) Apply to current selection Użyj dla obecnego wyboru Recoll action: Recoll zachowanie: current value obecna wartość Select same Wybierz to samo <b>New Values:</b> <b>Nowa wartość:</b> confgui::ConfBeaglePanelW Entries will be recycled once the size is reached Wpisy będą odnowione gdy osiągnie rozmiar Web page store directory name Nazwa katalogu dla trzymania stron web The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.</br> Nazwa katalogu w którym trzymane są kopie odwiedzonych stron.<br>Nieabsolutna ścieżka jest brana względnie do katalogu konfiguracji.</br> Max. size for the web store (MB) Maks. rozmiar dla schowka webowego (MB) Process the WEB history queue Przejdź do kolejki historii web Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin)</br> Włącz ineksowanie odwiedzonych stron w Firefox.<br>(Wymagana instalacja dodatku Firefox Recoll)</br> confgui::ConfIndexW Can't write configuration file Nie można pisać w pliku konfiguracji confgui::ConfParamFNW Choose Wybierz confgui::ConfParamSLW + + - - confgui::ConfSearchPanelW Automatic diacritics sensitivity Automatyczna czułość na diakrytyki <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.</p> <p>Automatycznie uruchom czułość diakrytyków jeśli szukana fraza zawiera "ogonki" (nie w unac_except_trans). Inaczej musisz użyć języka zapytań oraz modyfikator<i>D</i> by wskazać czułość diakrytyków.</p> Automatic character case sensitivity Automatyczna czułość wielkości znaków <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.</p> <p>Automatycznie uruchom rozróżnianie wielkości znaków jeśli wpis ma wielkie litery (poza pierwszym znakiem). Inaczej musisz użyć języka zapytań oraz modyfikatora <i>C</i> by wskazać rozróżnianie wielkości liter.</p> Maximum term expansion count Maksymalna liczba rozszerzeń terminu <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.</p> <p>Maksymalna liczba rozszerzeń dla pojedyńczego terminu (np.: używając wieloznaczników). Domyślne 10 000 jest wartością rozsądną oraz strzeże przed zawieszeniem zapytania podczas gdy przeszukiwana jest lista terminów</p> Maximum Xapian clauses count Maksymalna liczba klauzuli Xapian <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. 
The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.</p> <p>Maksymalna liczba początkowych klauzuli dodawanych do zapytania Xapian. W niektórych przypadkach, wynik rozszerzeń terminu może być pomnożony, zwiększając zużycie pamięci. Domyślne 100 000 powinno być dostatecznie wysokie dla większości przypadków oraz działające na obecnych konfiguracjach sprzętowych.</p> confgui::ConfSubPanelW Global Globalnie Max. compressed file size (KB) Maks. rozmiar skompresowanego pliku (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Wartość progowa od której skompresowane pliki przestają być przetwarzane. Brak limitu to -1, 0 wyłącza przetwarzanie plików skompresowanych. Max. text file size (MB) Maks. rozmiar plików tekstowych (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Wartość progowa po której pliki tekstowe przestają być przetwarzane. Brak limitu to -1. Używaj do wykluczenia gigantycznych plików dziennika systemowego (logs). Text file page size (KB) Rozmiar strony pliku tekstowego (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Indeksując dzieli plik tekstowy na podane kawałki (jeśli różne od -1). Pomocne przy szukaniu w wielkich plikach (np.: dzienniki systemowe). Max. filter exec. time (S) Maks. czas filtrowania (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Przerywa po tym czasie zewnętrzne filtrowanie. Dla rzadkich przypadków (np.: postscript) kiedy dokument może spowodować zapętlenie filtrowania. Brak limitu to -1. confgui::ConfTopPanelW Top directories Szczytowe katalogi The list of directories where recursive indexing starts. Default: your home. Lista katalogów rekursywnego indeksowania. Domyślnie: Twój katalog domowy. Skipped paths Wykluczone ścieżki These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')</br> Te katalogi są wykluczone z indeksowania.<br> Dozwolone wieloznaczniki. Muszą odpowiadać ścieżkom znanym indekserowi (np.: jeśli szczytowy katalog zawiera "/home/ja" i "/home" jest linkiem do "/usr/home", to poprawna ścieżka to "/home/ja/tmp*", natomiast błędna to "/usr/home/ja/tmp*")</br> Stemming languages Reguły ciosania (ang. 
stemming languages) The languages for which stemming expansion<br>dictionaries will be built.</br> Języki dla których słownik rozszerzenia ciosania<br> (stemming) będzie zbudowany.</br> Log file name Nazwa pliku dziennika (logs) The file where the messages will be written.<br>Use 'stderr' for terminal output</br> Plik w którym zapisywane są komunikaty.<br>Użyj "stderr" by skorzystać z konsoli</br> Log verbosity level Poziom stężenia komunikatu This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.</br> Wartość ta ustawia ilość komunikatów,<br>od prostych błędów aż po mnogie informacje diagnostyczne.</br> Index flush megabytes interval Interwał (megabajty) opróżniania indeksowania This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB </br> Ustawia próg indeksowania danych zanim zostaną wysłane na dysk.<br>Odpowiada za kontrolowanie zużycia pamięci przez indekser. Domyślnie: 10MB</br> Max disk occupation (%) Maks. zużycie dysku (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default).</br> Procent zużycia dysku po którym indeksowanie zostanie przerwane (chroni przed zapełnieniem dysku).<br>0 oznacz brak limitu (domyślnie).</br> No aspell usage Brak użycia Aspell Aspell language Język Aspell Database directory name Nazwa katalogu bazy danych Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. </br> Wyłącz używanie Aspell do tworzenia przybliżeń w narzędziu przeglądania terminów.<br> Użyteczne, gdy brak Aspell lub jest zepsuty. </br> The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. </br> Język dla katalogu Aspell, wyglądający jak "en" lub "pl" ..<br>Gdy brak, wartość środowiska NLS zostanie użyta (zwykle działa). By sprawdzić, co posiadasz zainstalowane, wpisz "aspell config" po czym znajdź pliki .dat w katalogu "data-dir". </br> The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.</br> Nazwa katalogu przechowania indeksu<br>Nieabsolutna ścieżka jest brana względnie do katalogu konfiguracji. Domyślnie jest to "xapiandb".</br> Unac exceptions Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.</p> <p>To są wyjątki mechaniki unac., która domyślnie usuwa wszystkie diakrytyki oraz wykonują dekompozycję kanoniczną. Możliwe nadpisanie nieakcentowania dla pewnych znaków, w zależności od twojego języka oraz wyszczególnienie dodatkowych dekompozycji, np.: ligatur. 
Dla każdego wpisu (oddzielony spacjami) pierwszy znak to źródło, pozostałe to tłumaczenie</p> uiPrefsDialogBase User preferences Ustawienia User interface Wygląd Number of entries in a result page Liczba wyników na stronie If checked, results with the same content under different names will only be shown once. Wyświetl tylko raz gdy tak sama zawartość (choć różne nazwy) Hide duplicate results. Ukryj duplikaty w wynikach. Highlight color for query terms Podświetl terminy z zapytania Result list font Czcionka listy wyników Opens a dialog to select the result list font Otwiera okno wyboru czcionek Helvetica-10 Helvetica-10 Resets the result list font to the system default Reset czcionki wyników do domyślnej Reset Reset Texts over this size will not be highlighted in preview (too slow). Teksty powyżej tego rozmiaru będą ukryte w podglądzie (zbyt wolne). Maximum text size highlighted for preview (megabytes) Maks. rozmiar tekstu dla wyróżnienia w podglądzie (MB) Choose editor applications Wybierz edytor aplikacji Display category filter as toolbar instead of button panel (needs restart). Wyświetl filtr kategorii jako pasek zamiast panelu (wymagany restart). Auto-start simple search on whitespace entry. Proste szukanie gdy użyto biłych znaków we wpisie. Start with advanced search dialog open. Rozpocznij oknem zaawansowanego szukania. Remember sort activation state. Pamiętaj stan sortowania. Prefer Html to plain text for preview. Użyj HTML (zamiast czysty tekst) dla podglądu. Search parameters Parametry szukania Stemming language Język ciosania A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Wyszukanie dla [rolling stones] (2 terminy) zostanie zamienione na [rolling or stones or (rolling phrase 2 stones)]. To powinno dać pierwszeństwo wynikom, dokładnie tak jak zostały wpisane. Automatically add phrase to simple searches Automatycznie dodaj frazę do szukania prostego Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Mam budować streszczenie dla wyników po przez użycie kontekstu teminów zapytania? Może zwalniać dla dużych dokumentów. Dynamically build abstracts Buduj streszczenia dynamicznie Do we synthetize an abstract even if the document seemed to have one? Tworzyć sztuczne streszczenie nawet jeśli dokument ma własne? Replace abstracts from documents Zamień streszczenia z dokumentów Synthetic abstract size (characters) Rozmiar sztucznego streszczenia (w znakach) Synthetic abstract context words Kontekstowe wyrazy sztucznego streszczenia The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Wyrazy z listy zostaną automatycznie zmienione w klauzule ext:xxx we wpisach języka zapytań. Query language magic file name suffixes. Magiczne przyrostki nazw plików języka zapytań Enable Włącz External Indexes Zewnętrzne indeksy Toggle selected Odwróc zaznaczenie Activate All Aktywuj wszystko Deactivate All Deaktywuj wszystko Remove from list. This has no effect on the disk index. Usuń z listy. Brak skutku dla indeksu na dysku. 
Remove selected Usuń zaznaczenie Add index Dodaj indeks Apply changes Zastosuj zmiany &OK &Ok Discard changes Porzuć zmiany &Cancel &Anuluj Abstract snippet separator Oddzielacz snipetu streszczenia Style sheet Arkusz stylów Opens a dialog to select the style sheet file Otwiera okno wyboru arkusza stylów Choose Wybierz Resets the style sheet to default Reset arkusza stylów do domyślnych Result List Lista wyników Edit result paragraph format string Zmień format paragrafu dla wyniku Edit result page html header insert Zmień nagłówek HTML dla strony wyników Date format (strftime(3)) Format daty (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Próg częstotliowści procentowej dla której terminy wew. autofrazy nie są używane. Częste terminy są powodem słabej wydajności fraz. Pominięte terminy zwiększają rozlużnienie frazy oraz zmniejszanją wydajność autofrazy. Domyślna wartość to 2 (%). Autophrase term frequency threshold percentage Procentowy próg częstości dla terminu Autofrazy Plain text to HTML line style Styl linii czystego tekstu do HTML Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Linie w PRE nie są zwijane. Użycie BR zaciera wcięcia. PRE + Zawijaj styl może być tym co szukasz. <BR /> <BR /> <PRE /> <PRE /> <PRE> + wrap</PRE> <PRE> + zawijaj</PRE> Disable Qt autocompletion in search entry. Wyłącz podpowiedź Qt dla wpisu szukania Search as you type. Szukaj podczas pisania. Paths translations Ścieżki tłumaczeń Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Kliknij by dodać kolejny katalog do listy. Możesz wybrać zarówno katalog konfiguracji Recoll jak i indeks Xapian. Snippets window CSS file Okno snippetów CSS Opens a dialog to select the Snippets window CSS style sheet file Otwórz okno by wybrać snipet CSS Resets the Snippets window style Reset stylu okna recoll-1.26.3/qtgui/i18n/recoll_xx.qm0000644000175000017500000000002013545064515014252 00000000000000 AdvSearch All clauses 全部条件 Any clause 任意条件 texts 文本 spreadsheets 电子表格 presentations 演示文稿 media 多媒体文件 messages 邮件 other 其它 Bad multiplier suffix in size filter 文件尺寸过滤器的后缀单位不正确 text 文本文件 spreadsheet 电子表格 presentation 演示文档 message 邮件 AdvSearchBase Advanced search 高端搜索 Search for <br>documents<br>satisfying: 搜索<br>满足以下条件<br>的文档: Delete clause 删除条件 Add clause 添加条件 Restrict file types 限定文件类型 Check this to enable filtering on file types 选中这个,以便针对文件类型进行过滤 By categories 按大类来过滤 Check this to use file categories instead of raw mime types 选中这个,以便使用较大的分类,而不使用具体的文件类型 Save as default 保存为默认值 Searched file types 将被搜索的文件类型 All ----> 移动全部→ Sel -----> 移动选中项→ <----- Sel ←移动选中项 <----- All ←移动全部 Ignored file types 要忽略的文件类型 Enter top directory for search 输入要搜索的最上层目录 Browse 浏览 Restrict results to files in subtree: 将结果中的文件限定在此子目录树中: Start Search 开始搜索 Close 关闭 All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. 右边的所有非空字段都会按照逻辑与(“全部条件”选项)或逻辑或(“任意条件”选项)来组合。<br>“任意”“全部”和“无”三种字段类型都接受输入简单词语和双引号引用的词组的组合。<br>空的输入框会被忽略。 Invert 反转过滤条件 Minimum size. 
You can use k/K,m/M,g/G as multipliers 最小尺寸。你可使用k/K、m/M、g/G作为单位 Min. Size 最小尺寸 Maximum size. You can use k/K,m/M,g/G as multipliers 最大尺寸。你可使用k/K、m/M、g/G作为单位 Max. Size 最大尺寸 Filter 过滤 From To Check this to enable filtering on dates 选中这个,以便针对日期进行过滤 Filter dates 过滤日期 Find 查找 Check this to enable filtering on sizes 选中这个,以便针对文件尺寸进行过滤 Filter sizes 过滤尺寸 ConfIndexW Can't write configuration file 无法写入配置文件 Global parameters 全局参数 Local parameters 局部参数 Search parameters 搜索参数 Top directories 顶级目录 The list of directories where recursive indexing starts. Default: your home. 索引从这个列表中的目录开始,递归地进行。默认:你的家目录。 Skipped paths 略过的路径 These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages 词根语言 The languages for which stemming expansion<br>dictionaries will be built. 将会针对这些语言<br>构造词根扩展词典。 Log file name 记录文件名 The file where the messages will be written.<br>Use 'stderr' for terminal output 程序输出的消息会被保存到这个文件。<br>使用'stderr'以表示将消息输出到终端 Log verbosity level 记录的话痨级别 This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. 这个值调整的是输出的消息的数量,<br>其级别从仅输出报错信息到输出一大堆调试信息。 Index flush megabytes interval 刷新索引的间隔,兆字节 This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 这个值调整的是,当积累咯多少索引数据时,才将数据刷新到硬盘上去。<br>用来控制索引进程的内存占用情况。默认为10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage 不使用aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 禁止在词语探索器中使用aspell来生成拼写相近的词语。<br>在没有安装aspell或者它工作不正常时使用这个选项。 Aspell language Aspell语言 The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Database directory name 数据库目录名 The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. Process the WEB history queue Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name 网页储存目录名 The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. 用来储存复制过来的已访问网页的目录名。<br>如果使用相对路径,则会相对于配置目录的路径进行处理。 Max. size for the web store (MB) 网页存储的最大尺寸(MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). 
Automatic diacritics sensitivity <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. Automatic character case sensitivity <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. Maximum term expansion count <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. Maximum Xapian clauses count <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) 压缩文件最大尺寸(KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. 尺寸大于这个值的压缩文件不会被处理。设置成-1以表示不加任何限制,设置成0以表示根本不处理压缩文件。 Max. text file size (MB) 文本文件最大尺寸(MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. 尺寸大于这个值的文本文件不会被处理。设置成-1以表示不加限制。 其作用是从索引中排除巨型的记录文件。 Text file page size (KB) 文本文件单页尺寸(KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). 如果设置咯这个值(不等于-1),则文本文件会被分割成这么大的块,并且进行索引。 这是用来搜索大型文本文件的(例如记录文件)。 Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Global 全局 CronToolW Cron Dialog 计划任务对话框 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T19:47:37" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T19:56:53" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } .T3 { font-style:italic; } .T4 { font-family:Courier New,courier; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 批量索引计划任务(cron) </p><p class="P1">每个字段都可以包括一个通配符(*)、单个数字值、逗号分隔的列表(1,3,5)和范围(1-7)。更准确地说,这些字段会被<span class="T3">按原样</span>输出到crontab 文件中,因此这里可以使用crontab 的所有语法,参考crontab(5)。</p><p class="P1"><br/>例如,在<span class="T3">日期</span>中输入<span class="T4">*</span>,<span class="T3">小时</span>中输入<span class="T4">12,19</span>,<span class="T3">分钟</span>中输入<span class="T4">15 </span>的话,会在每天的12:15 AM 和7:15 PM启动recollindex。</p><p class="P1">一个频繁执行的计划任务,其性能可能比不上实时索引。</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) 星期日(*或0-7,0或7是指星期天) Hours (* or 0-23) 小时(*或0-23) Minutes (0-59) 分钟(0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:08:00" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:11:47" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> 
@page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1">点击<span class="T2">禁用</span>以停止进行自动化的批量索引,点击<span class="T2">启用</span>以启用此功能,点击<span class="T2">取消</span>则不改变任何东西。</p></body></html> Enable 启用 Disable 禁用 It seems that manually edited entries exist for recollindex, cannot edit crontab 看起来已经有手动编辑过的recollindex条目了,因此无法编辑crontab Error installing cron entry. Bad syntax in fields ? 插入cron条目时出错。请检查语法。 EditDialog Dialog 对话框 EditTrans Source path Local path Config error Original path EditTransBase Path Translations Setting path translations for Select one or several file types, then use the controls in the frame below to change how they are processed Add Delete Cancel 取消 Save FirstIdxDialog First indexing setup 第一次索引设置 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:14:44" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:23:13" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-weight:bold; } .T4 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T2">未找到对应于此配置实例的索引数据。</span><br/><br/>如果你只想以一组合理的默认参数来索引你的家目录的话,就直接按<span class="T4">立即开始索引</span>按钮。以后还可以调整配置参数的。</p><p class="P1">如果你想调整某些东西的话,就使用下面的链接来调整其中的索引配置和定时计划吧。</p><p class="P1">这些工具可在以后通过<span class="T4">选项</span>菜单访问。</p></body></html> Indexing configuration 索引配置 This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. 在这里可以调整你想要对其进行索引的目录,以及其它参数,例如:要排除和路径或名字、默认字符集…… Indexing schedule 定时索引任务 This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). 在这里可以选择是要进行批量索引还是实时索引,还可以设置一个自动化的定时(使用cron)批量索引任务。 Start indexing now 立即开始索引 FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup 定时索引设置 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:27:11" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:30:49" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 索引程序可持续运行并且在文件发生变化时对其进行索引,也可以间隔一定时间运行一次。</p><p class="P1">你可以读一下手册,以便更好地做出抉择(按F1)。</p><p class="P1">这个工具可帮助你设置一个自动进行批量索引的定时任务,或者设置成当你登录时便启动实时索引(或者两者同时进行,当然那几乎没有意义)。</p></body></html> Cron scheduling 定时任务 The tool will let you decide at what time indexing should run and will install a crontab entry. 这个工具帮助你确定一个让索引运行的时间,它会插入一个crontab条目。 Real time indexing start up 实时索引设置 Decide if real time indexing will be started when you log in (only for the default index). 
作出决定,是否要在登录时便启动实时索引(只对默认索引有效)。 ListDialog Dialog 对话框 GroupBox 分组框 Main No db directory in configuration 配置实例中没有数据库目录 "history" file is damaged or un(read)writeable, please check or remove it: "history"文件被损坏,或者不可(读)写,请检查一下或者删除它: "history" file is damaged, please check or remove it: Preview Close Tab 关闭标签页 Cancel 取消 Missing helper program: 缺少辅助程序: Can't turn doc into internal representation for 无法为此文件将文档转换成内部表示方式: Creating preview text 正在创建预览文本 Loading preview text into editor 正在将预览文本载入到编辑器中 &Search for: 搜索(&S): &Next 下一个(&N) &Previous 上一个(&P) Clear 清空 Match &Case 匹配大小写(&C) Cannot create temporary directory: 无法创建临时目录: Error while loading file 文件载入出错 Form Tab 1 Open 打开 Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields 显示字段 Show main text 显示主文本 Print 打印 Print Current Preview 打印当前预览文本 Show image 显示图片 Select All 全选 Copy 复制 Save document to file 将文档保存到文件 Fold lines 自动换行 Preserve indentation 保留缩进符 Open document QObject Global parameters 全局参数 Local parameters 局部参数 <b>Customised subtrees <b>自定义的子目录树 The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. 这是已索引的目录树中的一些子目录组成的列表<br>,它们的某些参数需要重定义。默认:空白。 <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>以下的参数,当你在上面的列表中不选中任何条目或者选中一个空行时,<br>就是针对顶级目录起作用的,否则便是对选中的子目录起作用的。<br>你可以点击+/-按钮,以便添加或删除目录。 Skipped names 要略过的文件名 These are patterns for file or directory names which should not be indexed. 具有这些模式的文件或目录不会被索引。 Default character set 默认字符集 This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. 这是用来读取那些未标明自身的字符集的文件时所使用的字符集,例如纯文本文件。<br>默认值是空,会使用系统里的自然语言环境参数中的值。 Follow symbolic links 跟踪符号链接 Follow symbolic links while indexing. The default is no, to avoid duplicate indexing 在索引时跟踪符号链接。默认是不跟踪的,以避免重复索引 Index all file names 对所有文件名进行索引 Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true 对那些无法判断或处理其内容(未知类型或其类型不被支持)的文件的名字进行索引。默认为是 Beagle web history Beagle网页历史 Search parameters 搜索参数 Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. 
Cannot extract document: &Preview 预览(&P) &Open 打开(&O) Open With Run Script Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Save selection to files Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) Find &similar documents 查找类似的文档(&s) Open &Snippets window Show subdocuments / attachments QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start 实时索引自动启动 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T21:00:38" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T21:02:43" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 索引程序可以以守护进程的方式运行,在文件发生变化时便实时更新索引。这样你的索引一直是与文件同步的,但是会占用一定的系统资源。</p></body></html> Start indexing daemon with my desktop session. 在我的桌面会话启动时便启动索引进程。 Also start indexing daemon right now. 同时此次也立即启动索引进程。 Replacing: 正在替换: Replacing file 正在替换文件 Can't create: 无法创建: Warning 警告 Could not execute recollindex 无法执行recollindex Deleting: 正在删除: Deleting file 正在删除文件 Removing autostart 正在删除自动启动项 Autostart file deleted. Kill current process too ? 自动启动文件已经删除。也要杀死当前进程吗? RclMain (no stemming) (不进行词根计算) (all languages) (对全部语言进行词根计算) error retrieving stemming languages 提取词根语言时出错 Indexing in progress: 正在索引: Purge 删除 Stemdb Stem数据库 Closing 正在关闭 Unknown 未知 Query results 查询结果 Cannot retrieve document info from database 无法从数据库获取文档信息 Warning 警告 Can't create preview window 无法创建预览窗口 This search is not active any more 这个查询已经不是活跃的了 Bad viewer command line for %1: [%2] Please check the mimeconf file 针对%1的查看命令[%2]配置出错 请检查mimeconf文件 Cannot extract document or create temporary file 无法提取文档或创建临时文件 Executing: [ 正在执行:[ About Recoll Recoll说明 History data 历史数据 Document history 文档历史 Update &Index 更新索引(&I) Stop &Indexing 停止索引(&I) All 全部 media 多媒体文件 message 邮件 other 其它 presentation 演示文档 spreadsheet 电子表格 text 文本文件 sorted 已排序 filtered 已过滤 External applications/commands needed and not found for indexing your file types: 需要用来辅助对你的文件进行索引,却又找不到的外部程序/命令: No helpers found missing 目前不缺少任何辅助程序 Missing helper programs 未找到的辅助程序 Document category filter 文档分类过滤器 No external viewer configured for mime type [ 针对此种文件类型没有配置外部查看器[ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? 没有找到mimeview中为%1: %2配置的查看器。 是否要打开选项对话框? Can't access file: 无法访问文件: Can't uncompress file: 无法解压缩此文件: Save file 保存文件 Result count (est.) 结果数(估计值) Query details 查询语句细节 Could not open external index. Db not open. Check external indexes list. 无法打开外部索引。数据库未打开。请检查外部索引列表。 No results found 未找到结果 None Updating 正在更新 Done 已完成 Monitor 监视器 Indexing failed 索引失败 The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone 当前索引进程不是由此界面启动的。点击确定以杀死它,或者点击取消以让它自由运行 Erasing index 正在删除索引 Reset the index and start from scratch ? 从头重新开始索引吗? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program 查询正在进行中。<br>由于索引库的某些限制,<br>取消的话会导致程序退出 Error 错误 Index not open 索引未打开 Index query error 索引查询出错 Content has been indexed for these mime types: 已经为这些文件类型索引其内容: Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. 
此文件的索引已过时。程序拒绝显示错误的条目。请点击确定以更新此文件的索引,等待索引完成之后再查询。或者,取消。 Can't update index: indexer running 无法更新索引:索引程序已在运行 Indexed MIME Types 已索引的文件类型 Bad viewer command line for %1: [%2] Please check the mimeview file Viewer command line for %1 specifies both file and parent file value: unsupported Cannot find parent document External applications/commands needed for your file types and not found, as stored by the last indexing pass in Sub-documents and attachments Document filter The indexer is running so things should improve when it's done. Duplicate documents These Urls ( | ipath) share the same content: Bad desktop app spec for %1: [%2] Please check the desktop file Indexing interrupted Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
RclMainBase Recoll Recoll Search tools 搜索工具 Result list 结果列表 &File 文件(&F) &Tools 工具(&T) &Preferences 选项(&P) &Help 帮助(&H) E&xit 退出(&x) Ctrl+Q Ctrl+Q Update &index 更新索引(&i) &Erase document history 删除文档历史(&E) &About Recoll Recoll说明(&A) &User manual 用户手册(&U) Document &History 文档历史(&H) Document History 文档历史 &Advanced Search 高端搜索(&A) Advanced/complex Search 高端/复杂搜索 &Sort parameters 排序参数(&S) Sort parameters 排序参数 Term &explorer 词语探索器(&e) Term explorer tool 词语探索器 Next page 下一页 Next page of results 下一页结果 First page 第一页 Go to first page of results 跳转到结果的第一页 Previous page 上一页 Previous page of results 上一页结果 &Query configuration 查询配置(&Q) External index dialog 外部索引对话框 &Indexing configuration 索引配置(&I) All 全部 &Show missing helpers 显示缺少的辅助程序列表(&S) PgDown 向下翻页 PgUp 向上翻页 &Full Screen 全屏(&F) F11 F11 Full Screen 全屏 &Erase search history 删除搜索历史(&E) sortByDateAsc 按日期升序排列 Sort by dates from oldest to newest 按日期排列,最旧的在前面 sortByDateDesc 按日期降序排列 Sort by dates from newest to oldest 按日期排列,最新的在前面 Show Query Details 显示查询语句细节 Show results as table 以表格的形式显示结果 &Rebuild index 重新构造索引(&R) &Show indexed types 显示已索引的文件类型(&S) Shift+PgUp Shift+向上翻页 &Indexing schedule 定时索引(&I) E&xternal index dialog 外部索引对话框(&x) &Index configuration &GUI configuration &Results Sort by date, oldest first Sort by date, newest first Show as table Show results in a spreadsheet-like table Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel Abstract 摘要 Author 作者 Document size 文档尺寸 Document date 文档日期 File size 文件尺寸 File name 文件名 File date 文件日期 Keywords 关键词 Original character set 原字符集 Relevancy rating 相关度 Title 标题 URL 路径 Mtime 修改时间 Date 日期 Date and time 日期及时间 Ipath 内部路径 MIME type 文件类型 Can't sort by inverse relevance ResList Result list 结果列表 (show query) (显示查询语句细节) &Preview 预览(&P) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) Find &similar documents 查找类似的文档(&s) Document history 文档历史 <p><b>No results found</b><br> <p><b>未找到结果</b><br> Previous 上一页 Next 下一页 Unavailable document 无法访问文档 Preview 预览 Open 打开 <p><i>Alternate spellings (accents suppressed): </i> <p><i>其它拼写形式(忽视口音):</i> &Write to File 写入文件(&W) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) &Open 打开(&O) Documents out of at least 个文档,最少共有 for 个文档,查询条件: <p><i>Alternate spellings: </i> Result count (est.) 
结果数(估计值) Query details 查询语句细节 Snippets ResTable &Reset sort 重置排序条件(&R) &Delete column 删除此列(&D) Save table to CSV file 将表格保存成CSV文件 Can't open/create file: 无法打开/创建文件: &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Find &similar documents 查找类似的文档(&s) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) &Save as CSV 保存为CSV(&S) Add "%1" column 添加"%1"列 ResTableDetailArea &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Find &similar documents 查找类似的文档(&s) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) ResultPopup &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) Find &similar documents 查找类似的文档(&s) SSearch Any term 任一词语 All terms 全部词语 File name 文件名 Query language 查询语言 Bad query string 查询语言格式不正确 Out of memory 内存不足 Too many completions 有太多与之相关的补全选项啦 Completions 补全选项 Select an item: 选择一个条目: Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. 
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-23T08:43:25" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-23T09:07:39" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .Standard { font-size:12pt; font-family:Nimbus Roman No9 L; writing-mode:page; } .T1 { font-style:italic; } .T2 { font-style:italic; } .T4 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="Standard">输入查询语言表达式。简要说明:<br/><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2</span> : '词语1'和'词语2'同时出现在任意字段中。<br/><span class="T2">字段</span><span class="T1">:</span><span class="T2">词语</span><span class="T1">1</span> : '词语1'出现在字段'字段'中。<br/>标准字段名/同义名:<br/>title/subject/caption、author/from、recipient/to、filename、ext。<br/>伪字段名:dir、mime/format、type/rclcat、date。<br/>日期段的两个示例:2009-03-01/2009-05-20 2009-03-01/P2M。<br/><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2 OR </span><span class="T2">词语</span><span class="T1">3</span> : 词语1 <span class="T4">与</span> (词语2 <span class="T4">或</span> 词语3)。<br/>不允许用真正的括号来表示逻辑关系。<br/><span class="T1">"</span><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2"</span> : 词组(必须按原样出现)。可用的修饰词:<br/><span class="T1">"</span><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2"p</span> : 以默认距离进行的无序近似搜索。<br/>有疑问时可使用<span class="T4">显示查询语句细节</span>链接来查看查询语句的细节,另外请查看手册(&lt;F1&gt;)以了解更多内容。</p></body></html> Enter file name wildcard expression. 输入文件名通配符表达式。 Enter search terms here. Type ESC SPC for completions of current term. 在此输入要搜索的词语。按Esc 空格来查看针对当前词语的补全选项。 Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear 清空 Ctrl+S Ctrl+S Erase search entry 删除搜索条目 Search 搜索 Start query 开始查询 Enter search terms here. Type ESC SPC for completions of current term. 在此输入要搜索的词语。按Esc 空格来查看针对当前词语的补全选项。 Choose search type. 
选择搜索类型。 Show query history SearchClauseW Select the type of query that will be performed with the words 选择要对右边的词语进行的查询类型 Number of additional words that may be interspersed with the chosen ones 允许在选中的词语之间出现的额外词语的个数 No field 不限字段 Any 任意 All 全部 None Phrase 词组 Proximity 近似 File name 文件名 Snippets Snippets Find: Next 下一页 Prev SnippetsW Search 搜索 <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse 浏览 Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer 词语探索器 &Expand 展开(&E) Alt+E Alt+E &Close 关闭(&C) Alt+C Alt+C No db info. 未找到数据库信息。 Match Case Accents SpellW Wildcards 通配符 Regexp 正则表达式 Stem expansion 词根扩展 Spelling/Phonetic 拼写/发音检查 error retrieving stemming languages 提取词根语言时出错 Aspell init failed. Aspell not installed? Aspell初始化失败。是否未安装Aspell? Aspell expansion error. Aspell扩展出错。 No expansion found 未找到扩展 Term 词语 Doc. / Tot. 文档数/总数 Index: %1 documents, average length %2 terms 索引:%1个文档,平均长度为%2个词语 Index: %1 documents, average length %2 terms.%3 results %1 results List was truncated alphabetically, some frequent terms may be missing. Try using a longer root. Show index statistics Number of documents Average terms per document Database directory size MIME types: Item Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages 提取词根语言时出错 The selected directory does not appear to be a Xapian index 选中的目录不是Xapian索引 This is the main/local index! 这是主要/本地索引! 
The selected directory is already in the index list 选中的目录已经在索引列表中 Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) 选择xapian索引目录(例如:/home/buddy/.recoll/xapiandb) Choose 选择 Result list paragraph format (erase all to reset to default) Result list header (default is empty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read At most one index should be selected Cant add index with different case/diacritics stripping option Default QtWebkit font Any term 任一词语 All terms 全部词语 File name 文件名 Query language 查询语言 Value from previous program exit ViewAction Changing actions with different current values 正在针对不同的当前值而改变动作 Command 命令 MIME type 文件类型 Desktop Default Changing entries with different current values ViewActionBase Native Viewers 本地查看器 Select one or several file types, then click Change Action to modify the program used to open them 选中一个或多个文件类型,然后点击“修改动作”来修改用来打开这些文件的程序 Change Action 修改动作 Close 关闭 Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. 选中一个或多个文件类型祟点击“修改动作”<br>或者可以关闭这个对话框,而在主面板中选中“使用桌面默认设置”<br>那样就会无视这个列表而使用桌面的默认设置。 Select one or several mime types then use the controls in the bottom frame to change how they are processed. Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Recoll action: current value Select same <b>New Values:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfBeaglePanelW Steal Beagle indexing queue 窃取Beagle索引队列 Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) 不可运行Beagle。启用对beagle队列的处理,以索引火狐网页历史。<br>(你还需要安装火狐Beagle插件) Entries will be recycled once the size is reached 当尺寸达到设定值时,这些条目会被循环使用 Web page store directory name 网页储存目录名 The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. 用来储存复制过来的已访问网页的目录名。<br>如果使用相对路径,则会相对于配置目录的路径进行处理。 Max. size for the web store (MB) 网页存储的最大尺寸(MB) confgui::ConfIndexW Can't write configuration file 无法写入配置文件 confgui::ConfParamFNW Choose 选择 confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSubPanelW Global 全局 Max. compressed file size (KB) 压缩文件最大尺寸(KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. 尺寸大于这个值的压缩文件不会被处理。设置成-1以表示不加任何限制,设置成0以表示根本不处理压缩文件。 Max. text file size (MB) 文本文件最大尺寸(MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. 尺寸大于这个值的文本文件不会被处理。设置成-1以表示不加限制。 其作用是从索引中排除巨型的记录文件。 Text file page size (KB) 文本文件单页尺寸(KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. 
This will help searching very big text files (ie: log files). 如果设置咯这个值(不等于-1),则文本文件会被分割成这么大的块,并且进行索引。 这是用来搜索大型文本文件的(例如记录文件)。 Max. filter exec. time (S) 过滤器的最长执行时间(S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. 外部过滤器的执行时间如果超过这个值,则会被强行中断。在罕见的情况下,某些文档(例如postscript)会导致过滤器陷入死循环。设置成-1以表示不加限制。 confgui::ConfTopPanelW Top directories 顶级目录 The list of directories where recursive indexing starts. Default: your home. 索引从这个列表中的目录开始,递归地进行。默认:你的家目录。 Skipped paths 略过的路径 These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') 索引进程不会进入具有这些名字的目录。<br>可以包含通配符。必须匹配索引进程自身所见到的路径(例如:如果topdirs包含'/home/me',而实际上'/home'是到'/usr/home'的链接,则正确的skippedPath条目应当是'/home/me/tmp*',而不是'/usr/home/me/tmp*') Stemming languages 词根语言 The languages for which stemming expansion<br>dictionaries will be built. 将会针对这些语言<br>构造词根扩展词典。 Log file name 记录文件名 The file where the messages will be written.<br>Use 'stderr' for terminal output 程序输出的消息会被保存到这个文件。<br>使用'stderr'以表示将消息输出到终端 Log verbosity level 记录的话痨级别 This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. 这个值调整的是输出的消息的数量,<br>其级别从仅输出报错信息到输出一大堆调试信息。 Index flush megabytes interval 刷新索引的间隔,兆字节 This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 这个值调整的是,当积累咯多少索引数据时,才将数据刷新到硬盘上去。<br>用来控制索引进程的内存占用情况。默认为10MB Max disk occupation (%) 最大硬盘占用率(%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). 当硬盘的占用率达到这个数时,索引会失败并且停止(以避免塞满你的硬盘)。<br>设为0则表示不加限制(这是默认值)。 No aspell usage 不使用aspell Aspell language Aspell语言 The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. aspell词典的语言。表示方式是'en'或'fr'……<br>如果不设置这个值,则会使用系统环境中的自然语言设置信息,而那个通常是正确的。要想查看你的系统中安装咯哪些语言的话,就执行'aspell config',再在'data-dir'目录中找.dat文件。 Database directory name 数据库目录名 The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. 用来储存索引数据的目录的名字<br>如果使用相对路径,则路径会相对于配置目录进行计算。默认值是'xapiandb'。 Use system's 'file' command 使用系统里的'file'命令 Use the system's 'file' command if internal<br>mime type identification fails. 当内部的文件类型识别功能失效时<br>使用系统里的'file'命令。 Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 禁止在词语探索器中使用aspell来生成拼写相近的词语。<br>在没有安装aspell或者它工作不正常时使用这个选项。 uiPrefsDialogBase User preferences 用户选项 User interface 用户界面 Number of entries in a result page 一个结果页面中显示的结果条数 If checked, results with the same content under different names will only be shown once. 如果选中这个,则拥有相同文件内容的不同文件名只会显示一个。 Hide duplicate results. 
隐藏重复结果。 Highlight color for query terms 查询词语的高亮颜色 Result list font 结果列表字体 Opens a dialog to select the result list font 打开一个对话框,以选择用于结果列表的字体 Helvetica-10 文泉驿微米黑-12 Resets the result list font to the system default 将结果列表中的字体重设为系统默认值 Reset 重置 Texts over this size will not be highlighted in preview (too slow). 超过这个长度的文本不会在预览窗口里高亮显示(太慢)。 Maximum text size highlighted for preview (megabytes) 在预览中对其进行高亮显示的最大文本尺寸(兆字节) Use desktop preferences to choose document editor. 使用桌面系统的设置来选择文档编辑器。 Choose editor applications 选择编辑器程序 Display category filter as toolbar instead of button panel (needs restart). 将文件类型过滤器显示成工具条,而不是按钮面板(需要重启程序)。 Auto-start simple search on whitespace entry. 输入空格时自动开始进行简单搜索。 Start with advanced search dialog open. 启动时打开高端搜索对话框。 Remember sort activation state. 记住排序状态。 Prefer Html to plain text for preview. 预览中优先使用Html。 Search parameters 搜索参数 Stemming language 词根语言 A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. 对[滚 石] (2个词语)的搜索会变成[滚 or 石 or (滚 2个词语 石)]。 对于那些搜索词语在其中按照原样出现的结果,其优先级会高一些。 Automatically add phrase to simple searches 自动将词组添加到简单搜索中 Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. 是否要使用查询词语周围的上下文来构造结果列表条目中的摘要? 对于大的文档可能会很慢。 Dynamically build abstracts 动态构造摘要 Do we synthetize an abstract even if the document seemed to have one? 即使文档本身拥有一个摘要,我们仍然自行合成摘要信息? Replace abstracts from documents 取代文档中自带的摘要 Synthetic abstract size (characters) 合成摘要长度(字符个数) Synthetic abstract context words 合成摘要上下文 The words in the list will be automatically turned to ext:xxx clauses in the query language entry. 这个列表中的词语会在查询语言输入框里自动变成ext:xxx语句。 Query language magic file name suffixes. 查询语言神奇文件名后缀。 Enable 启用 External Indexes 外部索引 Toggle selected 切换选中项 Activate All 全部激活 Deactivate All 全部禁用 Remove from list. This has no effect on the disk index. 从列表中删除。这不会对硬盘上的索引造成损害。 Remove selected 删除选中项 Click to add another index directory to the list 点击这里,以将另一个索引目录添加到列表中 Add index 添加索引 Apply changes 使改变生效 &OK 确定(&O) Discard changes 放弃这些改变 &Cancel 取消(&C) Abstract snippet separator 摘要中的片段的分隔符 Style sheet 样式单 Opens a dialog to select the style sheet file 打开一个对话框,以选择样式单文件 Choose 选择 Resets the style sheet to default 将样式单重置为默认值 Lines in PRE text are not folded. Using BR loses some indentation. PRE中的文字不会换行。使用BR的话会使一些缩进失效。 Use <PRE> tags instead of <BR>to display plain text as html in preview. 在将纯文本显示成html预览的时候,使用<PRE>标签,而不是<BR>标签。 Result List 结果列表 Edit result paragraph format string 编辑结果段落的格式字符串 Edit result page html header insert 编辑结果页面的html头部插入项 Date format (strftime(3)) 日期格式(strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). 这是一个频率阈值,超过这个值的话,我们就不会把词语放到自动词组中。 高频词语是词组中性能问题的主要来源。 略过的词语会增加词组的空缺值,因此会降低自动词组功能的效率。 默认值是2(百分比)。 Autophrase term frequency threshold percentage 自动词组频率阈值百分比 Plain text to HTML line style Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. <BR> <PRE> <PRE> + wrap Disable Qt autocompletion in search entry. Paths translations Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. 
Snippets window CSS file
Opens a dialog to select the Snippets window CSS style sheet file
Resets the Snippets window style
Decide if document filters are shown as radio buttons, toolbar combobox, or menu.
Document filter choice style:
Buttons Panel
Toolbar Combobox
Menu
Show system tray icon.
Close to tray instead of exiting.
Start with simple search mode
User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header.
Synonyms file
Show warning when opening temporary file.
Highlight CSS style for query terms
Recoll - User Preferences
Set path translations for the selected index or for the main one if no selection exists.
Activate links in preview.
Make links inside the preview window clickable, and start an external browser when they are clicked.
Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue...
Start search on completer popup activation.
Maximum number of snippets displayed in the snippets window
Sort snippets by page number (default: by weigth).
Suppress all beeps.
recoll-1.26.3/qtgui/i18n/recoll_zh_CN.qm [binary compiled Qt translation file (.qm); binary content omitted]
recoll-1.26.3/qtgui/i18n/recoll_hu.ts



    AdvSearch
    
        All clauses
        Minden feltétel
    
    
        Any clause
        Bármely feltétel
    
    
        media
        Média
    
    
        other
        Egyéb
    
    
        Bad multiplier suffix in size filter
        Hibás sokszorozó utótag a méretszűrőben!
    
    
        text
        Szöveg
    
    
        spreadsheet
        Munkafüzet
    
    
        presentation
        Prezentáció
    
    
        message
        Üzenet
    


    AdvSearchBase
    
        Advanced search
        Összetett keresés
    
    
        Search for <br>documents<br>satisfying:
        A keresés módja:
    
    
        Delete clause
        Feltétel törlése
    
    
        Add clause
        Új feltétel
    
    
        Restrict file types
        Fájltípus
    
    
        Check this to enable filtering on file types
        A találatok szűrése a megadott fájltípusokra
    
    
        By categories
        Kategória
    
    
        Check this to use file categories instead of raw mime types
        A találatok szűrése MIME típus helyett fájlkategóriára
    
    
        Save as default
        Mentés alapértelmezettként
    
    
        Searched file types
        Keresett fájltípusok
    
    
        All ---->
        Mind ----->
    
    
        Sel ----->
        Kijelölt ----->
    
    
        <----- Sel
        <----- Kijelölt
    
    
        <----- All
        <----- Mind
    
    
        Ignored file types
        Kizárt fájltípusok
    
    
        Enter top directory for search
        A keresés kezdő könyvtárának megadása
    
    
        Browse
        Tallózás
    
    
        Restrict results to files in subtree:
        Keresés az alábbi könyvtárból indulva:
    
    
        Start Search
        A keresés indítása
    
    
        Close
        Bezárás
    
    
        All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored.
        A jobb oldali nem üres mezők „Minden feltétel” választásakor ÉS, „Bármely feltétel” választásakor VAGY kapcsolatban lesznek.<br>A „Bármely szó”, „Minden szó” és az „Egyik sem” típusú mezőkben szavak és idézőjelbe tett részmondatok kombinációja adható meg.<br>Az üres mezők figyelmen kívül lesznek hagyva.
    
    
        Invert
        Megfordítás
    
    
        Minimum size. You can use k/K,m/M,g/G as multipliers
        Minimális méret, sokszorozó utótag lehet a k/K, m/M, g/G
    
    
        Min. Size
        legalább
    
    
        Maximum size. You can use k/K,m/M,g/G as multipliers
        Maximális méret, sokszorozó utótag lehet a k/K, m/M, g/G
    
    
        Max. Size
        legfeljebb
    
    
        Filter
        Szűrők
    
    
        From
        ettől
    
    
        To
        eddig
    
    
        Check this to enable filtering on dates
        A találatok szűrése a fájlok dátuma alapján
    
    
        Filter dates
        Dátum
    
    
        Find
        Keresés
    
    
        Check this to enable filtering on sizes
        A találatok szűrése a fájlok mérete alapján
    
    
        Filter sizes
        Méret
    


    CronToolW
    
        Cron Dialog
        Cron időzítő
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelő időzítése (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Minden mezőben megadható csillag (*), szám, számok listája (1,3,5) vagy számtartomány (1-7). Általánosabban, a mezők jelentése ugyanaz,  <span style=" font-style:italic;">mint</span> a crontab fájlban, és a teljes crontab szintaxis használható, lásd a crontab(5) kézikönyvlapot.</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Például <span style=" font-family:'Courier New,courier';">*</span>-ot írva a <span style=" font-style:italic;">naphoz, </span><span style=" font-family:'Courier New,courier';">12,19</span>-et az <span style=" font-style:italic;">órához</span> és <span style=" font-family:'Courier New,courier';">15</span>-öt a <span style=" font-style:italic;">perchez</span>, a recollindex minden nap 12:15-kor és du. 7:15-kor fog elindulni.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Túl gyakori ütemezés helyett célszerűbb lehet a valós idejű indexelés engedélyezése.</p></body></html>
    
    
        Days of week (* or 0-7, 0 or 7 is Sunday)
        A hét napja (* vagy 0-7, 0 vagy 7 a vasárnap)
    
    
        Hours (* or 0-23)
        Óra (* vagy 0-23)
    
    
        Minutes (0-59)
        Perc (0-59)
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-style:italic;">Kikapcsolás</span> megszünteti, a <span style=" font-style:italic;">Bekapcsolás</span> aktiválja az időzített indexelést, a <span style=" font-style:italic;">Mégsem</span> nem változtat a beállításon.</p></body></html>
    
    
        Enable
        Bekapcsolás
    
    
        Disable
        Kikapcsolás
    
    
        It seems that manually edited entries exist for recollindex, cannot edit crontab
        Úgy tűnik, egy kézi bejegyzése van a recollindexnek, nem sikerült a crontab szerkesztése!
    
    
        Error installing cron entry. Bad syntax in fields ?
        Hiba a cron bejegyzés hozzáadásakor! Rossz szintaxis a mezőkben?
    


    EditDialog
    
        Dialog
        Párbeszédablak
    


    EditTrans
    
        Source path
        Eredeti elérési út
    
    
        Local path
        Helyi elérési út
    
    
        Config error
        Beállítási hiba
    
    
        Original path
        Eredeti elérési út
    


    EditTransBase
    
        Path Translations
        Elérési út átalakítása
    
    
        Setting path translations for 
        Elérési út-átalakítás ehhez: 
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Kijelölhető egy vagy több elérési út is
    
    
        Add
        Hozzáadás
    
    
        Delete
        Törlés
    
    
        Cancel
        Mégsem
    
    
        Save
        Mentés
    


    FirstIdxDialog
    
        First indexing setup
        Az indexelés beállítása első induláskor
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">A jelenlegi beállításokhoz még nem tartozik index.</span><br /><br />A saját mappa indexelése javasolt alapbeállításokkal az <span style=" font-style:italic;">Indexelés indítása most</span> gombbal indítható. A beállítások később módosíthatók.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Az alábbi hivatkozások az indexelés finomhangolására és időzítésére szolgálnak.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ezek a lehetőségek később a <span style=" font-style:italic;">Beállítások</span> menüből is elérhetők.</p></body></html>
    
    
        Indexing configuration
        Az indexelés beállításai
    
    
        This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.
        Megadható az indexelendő könyvtárak köre és egyéb paraméterek, például kizárt elérési utak vagy fájlnevek, alapértelmezett betűkészlet stb.
    
    
        Indexing schedule
        Az időzítés beállításai
    
    
        This will let you chose between batch and real-time indexing, and set up an automatic  schedule for batch indexing (using cron).
        Lehetőség van ütemezett indításra és valós idejű indexelésre, az előbbi időzítése is beállítható (a cron segítségével).
    
    
        Start indexing now
        Indexelés indítása most
    


    FragButs
    
        %1 not found.
        A fájl nem található: %1.
    
    
        %1:
 %2
        %1:
 %2
    
    
        Query Fragments
        Statikus szűrők
    


    IdxSchedW
    
        Index scheduling setup
        Az indexelés időzítése
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelő futhat folyamatosan, így a fájlok változásakor az index is azonnal frissül, vagy indulhat meghatározott időközönként.</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A kézikönyv segítséget nyújt a két eljárás közül a megfelelő kiválasztásához (F1).</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Lehetőség van az időzített indexelés ütemezésére, vagy a valós idejű indexelő automatikus indítására bejelentkezéskor (vagy mindkettőre, bár ez ritkán célszerű).</p></body></html>
    
    
        Cron scheduling
        Cron időzítő
    
    
        The tool will let you decide at what time indexing should run and will install a crontab entry.
        Az indexelés kezdő időpontjainak beállítása egy crontab bejegyzés által.
    
    
        Real time indexing start up
        Valós idejű indexelés indítása
    
    
        Decide if real time indexing will be started when you log in (only for the default index).
        A valós idejű indexelés indítása bejelentkezéskor (csak az alapértelmezett indexhez).
    


    ListDialog
    
        Dialog
        Párbeszédablak
    
    
        GroupBox
        GroupBox
    


    Main
    
        No db directory in configuration
        Nincs adatbáziskönyvtár a beállítófájlban
    
    
        "history" file is damaged or un(read)writeable, please check or remove it: 
        Az előzmények fájlja sérült vagy nem lehet írni/olvasni, ellenőrizni vagy törölni kell: 
    


    Preview
    
        Close Tab
        Lap bezárása
    
    
        Cancel
        Mégsem
    
    
        Missing helper program: 
        Hiányzó segédprogram:
    
    
        Can't turn doc into internal representation for 
        Nem sikerült értelmezni: 
    
    
        Creating preview text
        Előnézet létrehozása
    
    
        Loading preview text into editor
        Az előnézet betöltése a megjelenítőbe
    
    
        &Search for:
        Kere&sés:
    
    
        &Next
        &Következő
    
    
        &Previous
        &Előző
    
    
        Clear
        Törlés
    
    
        Match &Case
        Kis- és &nagybetűk
    
    
        Error while loading file
        Hiba a fájl betöltése közben!
    


    PreviewTextEdit
    
        Show fields
        Mezők
    
    
        Show main text
        Tartalom
    
    
        Print
        Nyomtatás
    
    
        Print Current Preview
        A jelenlegi nézet nyomtatása
    
    
        Show image
        Kép
    
    
        Select All
        Mindent kijelöl
    
    
        Copy
        Másolás
    
    
        Save document to file
        Mentés fájlba
    
    
        Fold lines
        Sortörés
    
    
        Preserve indentation
        Eredeti tördelés
    


    QObject
    
        Global parameters
        Általános beállítások
    
    
        Local parameters
        Helyi beállítások
    
    
        <b>Customised subtrees
        <b>Egyedi alkönyvtárak
    
    
        The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty.
        Az indexelt hierarchián belüli alkönyvtárak listája,<br> melyekre eltérő beállítások vonatkoznak. Alapértelmezetten üres.
    
    
        <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons.
        <i>Ha a fenti listából semmi vagy egy üres sor van kijelölve, úgy a következő jellemzők<br>az indexelendő legfelső szintű, egyébként a kijelölt mappára vonatkoznak.<br>A +/- gombokkal lehet a listához könyvtárakat adni vagy onnan törölni.
    
    
        Skipped names
        Kizárt nevek
    
    
        These are patterns for file or directory  names which should not be indexed.
        Mintával megadható fájl- és könyvtárnevek, melyeket nem kell indexelni
    
    
        Follow symbolic links
        Szimbolikus linkek követése
    
    
        Follow symbolic links while indexing. The default is no, to avoid duplicate indexing
        Indexeléskor kövesse a szimbolikus linkeket.<br>Alapértelmezetten ki van kapcsolva, elkerülendő a dupla indexelést.
    
    
        Index all file names
        Minden fájlnév indexelése
    
    
        Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true
        A Recoll számára ismeretlen típusú vagy értelmezhetetlen fájlok nevét is indexelje.<br>Alapértelmezetten engedélyezve van.
    
    
        Search parameters
        Keresési beállítások
    
    
        Web history
        Webes előzmények
    
    
        Default<br>character set
        Alapértelmezett<br>karakterkódolás
    
    
        Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used.
        A karakterkódolásról információt nem tároló fájlok (például egyszerű szöveges fájlok) kódolása.<br>Alapértelmezetten nincs megadva, és a nyelvi környezet (NLS) alapján lesz beállítva.
    
    
        Ignored endings
        Kizárt kiterjesztések
    
    
        These are file name endings for files which will be indexed by name only 
(no MIME type identification attempt, no decompression, no content indexing).
        Az ilyen fájlnévvégződésű fájlok csak a nevük alapján indexelendők
(nem történik MIME típusfelismerés, kicsomagolás és tartalomindexelés sem).
    


    QWidget
    
        Create or choose save directory
        Mentési könyvtár megadása
    
    
        Choose exactly one directory
        Csak pontosan egy könyvtár adható meg!
    
    
        Could not read directory: 
        A könyvtár nem olvasható: 
    
    
        Unexpected file name collision, cancelling.
        A fájl már létezik, ezért ki lesz hagyva.
    
    
        Cannot extract document: 
        Nem sikerült kicsomagolni a fájlt: 
    
    
        &Preview
        &Előnézet
    
    
        &Open
        &Megnyitás
    
    
        Open With
        Megnyitás ezzel:
    
    
        Run Script
        Szkript futtatása
    
    
        Copy &File Name
        &Fájlnév másolása
    
    
        Copy &URL
        &URL másolása
    
    
        &Write to File
        Menté&s fájlba
    
    
        Save selection to files
        A kijelölés mentése fájlba
    
    
        Preview P&arent document/folder
        A szülő előné&zete
    
    
        &Open Parent document/folder
        A szülő megnyi&tása
    
    
        Find &similar documents
        &Hasonló dokumentum keresése
    
    
        Open &Snippets window
        Ér&demi részek
    
    
        Show subdocuments / attachments
        Aldokumentumok / csatolmányok
    


    QxtConfirmationMessage
    
        Do not show again.
        Ne jelenjen meg újra.
    


    RTIToolW
    
        Real time indexing automatic start
        A valós idejű indexelés automatikus indítása
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelője indítható szolgáltatásként, így az index minden fájlváltozáskor azonnal frissül. Előnye a mindig naprakész index, de folyamatosan igénybe veszi az erőforrásokat.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
    
    
        Start indexing daemon with my desktop session.
        Az indexelő szolgáltatás indítása a munkamenettel
    
    
        Also start indexing daemon right now.
        Az indexelő szolgáltatás indítása most
    
    
        Replacing: 
        Csere: 
    
    
        Replacing file
        Fájl cseréje
    
    
        Can't create: 
        Nem sikerült létrehozni: 
    
    
        Warning
        Figyelmeztetés
    
    
        Could not execute recollindex
        A recollindex indítása sikertelen
    
    
        Deleting: 
        Törlés: 
    
    
        Deleting file
        Fájl törlése
    
    
        Removing autostart
        Az autostart kikapcsolása
    
    
        Autostart file deleted. Kill current process too ?
        Az autostart fájl törölve lett. A most futó indexelőt is le kell állítani?
    


    RclMain
    
        (no stemming)
        (nincs szótőképzés)
    
    
        (all languages)
        (minden nyelv)
    
    
        error retrieving stemming languages
        hiba a szótőképzés nyelvének felismerésekor
    
    
        Indexing in progress: 
        Az indexelés folyamatban: 
    
    
        Purge
        törlés
    
    
        Stemdb
        szótövek adatbázisa
    
    
        Closing
        lezárás
    
    
        Unknown
        ismeretlen
    
    
        Query results
        A keresés eredménye
    
    
        Cannot retrieve document info from database
        Nem sikerült az adatbázisban információt találni a dokumentumról.
    
    
        Warning
        Figyelmeztetés
    
    
        Can't create preview window
        Nem sikerült létrehozni az előnézetet
    
    
        This search is not active any more
        Ez a keresés már nem aktív.
    
    
        Cannot extract document or create temporary file
        Nem sikerült a kicsomagolás vagy az ideiglenes fájl létrehozása.
    
    
        Executing: [
        Végrehajtás: [
    
    
        About Recoll
        A Recoll névjegye
    
    
        History data
        Előzményadatok
    
    
        Document history
        Előzmények
    
    
        Update &Index
        &Index frissítése
    
    
        Stop &Indexing
        Indexelé&s leállítása
    
    
        All
        Mind
    
    
        media
        Média
    
    
        message
        Üzenet
    
    
        other
        Egyéb
    
    
        presentation
        Prezentáció
    
    
        spreadsheet
        Munkafüzet
    
    
        text
        Szöveg
    
    
        sorted
        rendezett
    
    
        filtered
        szűrt
    
    
        No helpers found missing
        Nincs hiányzó segédprogram.
    
    
        Missing helper programs
        Hiányzó segédprogramok
    
    
        No external viewer configured for mime type [
        Nincs külső megjelenítő beállítva ehhez a MIME típushoz [
    
    
        The viewer specified in mimeview for %1: %2 is not found.
Do you want to start the  preferences dialog ?
        A mimeview fájlban megadott megjelenítő ehhez: %1: %2 nem található.
Megnyissuk a beállítások ablakát?
    
    
        Can't access file: 
        A fájl nem elérhető: 
    
    
        Can't uncompress file: 
        Nem sikerült kicsomagolni a fájlt: 
    
    
        Save file
        Fájl mentése
    
    
        Result count (est.)
        Találatok száma (kb.)
    
    
        Could not open external index. Db not open. Check external indexes list.
        Egy külső index megnyitása nem sikerült. Ellenőrizni kell a külső indexek listáját.
    
    
        No results found
        Nincs találat
    
    
        None
        semmi
    
    
        Updating
        frissítés
    
    
        Done
        kész
    
    
        Monitor
        figyelés
    
    
        Indexing failed
        Sikertelen indexelés
    
    
        The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone
        A jelenleg futó indexelő nem erről a felületről lett indítva.<br>Az OK gombbal kilőhető, a Mégsem gombbal meghagyható.
    
    
        Erasing index
        Index törlése
    
    
        Reset the index and start from scratch ?
        Indulhat az index törlése és teljes újraépítése?
    
    
        Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program
        A keresés folyamatban van.<br>Az indexelő korlátozásai miatt<br>megszakításkor a program kilép.
    
    
        Error
        Hiba
    
    
        Index not open
        Nincs megnyitott index
    
    
        Index query error
        Indexlekérdezési hiba
    
    
        Content has been indexed for these mime types:
        Az alábbi MIME típusok szerepelnek az indexben:
    
    
        Can't update index: indexer running
        Nem sikerült frissíteni az indexet: az indexelő már fut.
    
    
        Indexed MIME Types
        Indexelt MIME típusok
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeview file
        Hibás a megjelenítő parancssor ehhez: %1: [%2]
Ellenőrizni kell a mimeview fájlt!
    
    
        Viewer command line for %1 specifies both file and parent file value: unsupported
        %1 megjelenítő parancssora fájlt és szülőt is megad: ez nem támogatott.
    
    
        Cannot find parent document
        Nem található a szülődokumentum.
    
    
        Indexing did not run yet
        Az indexelő jelenleg nem fut.
    
    
        External applications/commands needed for your file types and not found, as stored by the last indexing pass in 
        Az alábbi külső alkalmazások/parancsok hiányoznak a legutóbbi indexelés során keletkezett napló alapján -----> 
    
    
        Sub-documents and attachments
        Aldokumentumok és csatolmányok
    
    
        Document filter
        Dokumentumszűrő
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry. 
        A fájl bejegyzése az indexben elavult. Esetlegesen téves adatok megjelenítése helyett kihagyva. 
    
    
        Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. 
        Az OK-ra kattintva frissíthető a fájl indexbejegyzése, ennek végeztével újra kell futtatni a keresést.
    
    
        The indexer is running so things should improve when it's done. 
        Az indexelő fut, ennek végeztére a dolgok még helyreállhatnak.
    
    
        Duplicate documents
        Másodpéldányok
    
    
        These Urls ( | ipath) share the same content:
        Ezek az URL-ek (| ipath) azonos tartalmúak:
    
    
        Bad desktop app spec for %1: [%2]
Please check the desktop file
        Hibás alkalmazásbeállítás ehhez: %1: [%2]
Ellenőrizni kell az asztali beállítófájlt!
    
    
        Indexing interrupted
        Az indexelés megszakadt.
    
    
        The current indexing process was not started from this interface, can't kill it
        A jelenleg futó indexelő nem erről a felületről lett indítva, nem állítható le.
    
    
        Bad paths
        Hibás elérési utak
    
    
        Bad paths in configuration file:

        Hibás elérési utak a beállítófájlban: 
    
    
        Selection patterns need topdir
        A mintához kezdő könyvtár szükséges
    
    
        Selection patterns can only be used with a start directory
        Minta használatához kezdő könyvtárt is meg kell adni.
    
    
        No search
        Nincs keresés
    
    
        No preserved previous search
        Nincs előzőleg mentett keresés
    
    
        Choose file to save
        Mentés ide
    
    
        Saved Queries (*.rclq)
        Mentett keresések (*.rclq)
    
    
        Write failed
        Sikertelen írásművelet
    
    
        Could not write to file
        A fájl írása sikertelen
    
    
        Read failed
        Sikertelen olvasás
    
    
        Could not open file: 
        Nem sikerült megnyitni a fájlt: 
    
    
        Load error
        Betöltési hiba
    
    
        Could not load saved query
        Nem sikerült betölteni a mentett keresést
    
    
        Index scheduling
        Az időzítés beállításai
    
    
        Sorry, not available under Windows for now, use the File menu entries to update the index
        Sajnos Windows rendszeren még nem vehető igénybe, a Fájl menüből lehet frissíteni az indexet.
    
    
        Disabled because the real time indexer was not compiled in.
        Nem elérhető, mert a valós idejű indexelés nincs a programba fordítva.
    
    
        This configuration tool only works for the main index.
        Ez a beállítóeszköz csak az elsődleges indexszel használható.
    
    
        Can't set synonyms file (parse error?)
        Nem lehet betölteni a szinonimafájlt (értelmezési hiba?)
    
    
        The document belongs to an external index which I can't update. 
        A dokumentum külső indexhez tartozik, mely innen nem frissíthető.
    
    
        Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session).
        Visszatérés a listához: Mégsem.<br>Az előnézet megnyitása mindenképp (és megjegyzés erre a munkamenetre): Mellőzés.
    
    
        Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location.
        Egy ideiglenes másolat lesz megnyitva. A módosítások<br/>megőrzéséhez a fájlt el kell menteni máshová.
    
    
        Do not show this warning next time (use GUI preferences to restore).
        Ne jelenjen meg többször (a GUI beállításaiban visszaállítható).
    
    
        Index locked
        Az index zárolva van
    
    
        Unknown indexer state. Can't access webcache file.
        Az indexelő állapota ismeretlen. A webes gyorstár nem hozzáférhető.
    
    
        Indexer is running. Can't access webcache file.
        Az indexelő fut. A webes gyorstár nem hozzáférhető.
    


    RclMainBase
    
        Recoll
        Recoll
    
    
        &File
        &Fájl
    
    
        &Tools
        &Eszközök
    
    
        &Preferences
        &Beállítások
    
    
        &Help
        &Súgó
    
    
        E&xit
        &Kilépés
    
    
        Ctrl+Q
        Ctrl+Q
    
    
        Update &index
        Az &index frissítése
    
    
        &Erase document history
        &Előzmények törlése
    
    
        &About Recoll
        A Recoll &névjegye
    
    
        &User manual
        &Felhasználói kézikönyv
    
    
        Document &History
        &Előzmények
    
    
        Document  History
        Előzmények
    
    
        &Advanced Search
        Összetett &keresés
    
    
        Advanced/complex  Search
        Összetett keresés
    
    
        &Sort parameters
        &Rendezési beállítások
    
    
        Sort parameters
        Rendezési beállítások
    
    
        Term &explorer
        &Szóvizsgáló
    
    
        Term explorer tool
        Szóvizsgáló
    
    
        Next page
        Következő oldal
    
    
        Next page of results
        Következő oldal
    
    
        First page
        Első oldal
    
    
        Go to first page of results
        Első oldal
    
    
        Previous page
        Előző oldal
    
    
        Previous page of results
        Előző oldal
    
    
        External index dialog
        Külső indexek
    
    
        PgDown
        PgDown
    
    
        PgUp
        PgUp
    
    
        &Full Screen
        &Teljes képernyő
    
    
        F11
        F11
    
    
        Full Screen
        Teljes képernyő
    
    
        &Erase search history
        Keresé&si előzmények törlése
    
    
        Sort by dates from oldest to newest
        Növekvő rendezés dátum szerint
    
    
        Sort by dates from newest to oldest
        Csökkenő rendezés dátum szerint
    
    
        Show Query Details
        A keresés részletei
    
    
        &Rebuild index
        Index új&raépítése
    
    
        Shift+PgUp
        Shift+PgUp
    
    
        E&xternal index dialog
        &Külső indexek
    
    
        &Index configuration
        &Indexelés
    
    
        &GUI configuration
        &Felhasználói felület
    
    
        &Results
        &Találatok
    
    
        Sort by date, oldest first
        Növekvő rendezés dátum szerint
    
    
        Sort by date, newest first
        Csökkenő rendezés dátum szerint
    
    
        Show as table
        Táblázatos nézet
    
    
        Show results in a spreadsheet-like table
        A találatok megjelenítése táblázatban
    
    
        Save as CSV (spreadsheet) file
        Mentés CSV (strukturált szöveg) fájlba
    
    
        Saves the result into a file which you can load in a spreadsheet
        A találatok mentése egy táblázatkezelővel megnyitható fájlba
    
    
        Next Page
        Következő oldal
    
    
        Previous Page
        Előző oldal
    
    
        First Page
        Első oldal
    
    
        Query Fragments
        Statikus szűrők
    
    
            With failed files retrying
            A sikerteleneket újra
    
    
        Next update will retry previously failed files
        A következő frissítéskor újra próbálja a sikertelenül indexelt fájlokat
    
    
        Indexing &schedule
        Időzí&tés
    
    
        Enable synonyms
        Szinonimák engedélyezése
    
    
        Save last query
        A legutóbbi keresés mentése
    
    
        Load saved query
        Mentett keresés betöltése
    
    
        Special Indexing
        Egyedi indexelés
    
    
        Indexing with special options
        Indexelés egyedi beállításokkal
    
    
        &View
        &Nézet
    
    
        Missing &helpers
        &Hiányzó segédprogramok
    
    
        Indexed &MIME types
        Indexelt &MIME típusok
    
    
        Index &statistics
        &Statisztika
    
    
        Webcache Editor
        Webes gyorstár szerkesztése
    


    RclTrayIcon
    
        Restore
        A Recoll megjelenítése
    
    
        Quit
        Kilépés
    


    RecollModel
    
        Abstract
        Tartalmi kivonat
    
    
        Author
        Szerző
    
    
        Document size
        A dokumentum mérete
    
    
        Document date
        A dokumentum dátuma
    
    
        File size
        A fájl mérete
    
    
        File name
        Fájlnév
    
    
        File date
        A fájl dátuma
    
    
        Keywords
        Kulcsszavak
    
    
        Original character set
        Eredeti karakterkódolás
    
    
        Relevancy rating
        Relevancia
    
    
        Title
        Cím
    
    
        URL
        URL
    
    
        Mtime
        Módosítás ideje
    
    
        Date
        Dátum
    
    
        Date and time
        Dátum és idő
    
    
        Ipath
        Belső elérési út
    
    
        MIME type
        MIME típus
    


    ResList
    
        Result list
        Találati lista
    
    
        (show query)
        (a&nbsp;keresés&nbsp;részletei)
    
    
        Document history
        Előzmények
    
    
        <p><b>No results found</b><br>
        <p><b>Nincs találat.</b><br>
    
    
        Previous
        Előző
    
    
        Next
        Következő
    
    
        Unavailable document
        Elérhetetlen dokumentum
    
    
        Preview
        Előnézet
    
    
        Open
        Megnyitás
    
    
        <p><i>Alternate spellings (accents suppressed): </i>
        <p><i>Alternatív írásmód (ékezetek nélkül): </i>
    
    
        Documents
        Találatok a lapon:
    
    
        out of at least
         • Az összes találat:
    
    
        for
        
    
    
        <p><i>Alternate spellings: </i>
        <p><i>Alternatív írásmód: </i>
    
    
        Result count (est.)
        Találatok száma (kb.)
    
    
        Query details
        A keresés részletei
    
    
        Snippets
        Érdemi részek
    


    ResTable
    
        &Reset sort
        &Rendezés alaphelyzetbe
    
    
        &Delete column
        Oszlop &törlése
    
    
        Save table to CSV file
        A táblázat mentése CSV fájlba
    
    
        Can't open/create file: 
        Nem sikerült megnyitni/létrehozni: 
    
    
        &Save as CSV
        &Mentés CSV fájlba
    
    
        Add "%1" column
        „%1” oszlop hozzáadása
    


    SSearch
    
        Any term
        Bármely szó
    
    
        All terms
        Minden szó
    
    
        File name
        Fájlnév
    
    
        Query language
        Keresőnyelv
    
    
        Bad query string
        Hibás keresőkifejezés
    
    
        Out of memory
        Elfogyott a memória
    
    
        Enter file name wildcard expression.
        A fájlnév megadásához helyettesítő karakterek is használhatók
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Ide kell írni a keresőszavakat.
ESC SZÓKÖZ billentyűsorozat: a szó lehetséges kiegészítéseit ajánlja fel.
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br>
 Two date interval exemples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  You can use parentheses to make things clearer.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        Keresőnyelvi kifejezés megadása. Segítség:<br>
<i>szo1 szo2</i> : 'szo1' és 'szo2' bármely mezőben.<br>
<i>mezo:szo1</i> : 'szo1' a 'mezo' nevű mezőben.<br>
 Szabványos mezőnevek/szinonimák:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pszeudomezők: dir, mime/format, type/rclcat, date, size.<br>
 Példa dátumtartományra: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>szo1 szo2 OR szo3</i> : szo1 AND (szo2 OR szo3).<br>
  A jobb olvashatóság érdekében használhatók zárójelek.<br>
<i>"szo1 szo2"</i> : részmondat (pontosan így kell előfordulnia). Lehetséges módosítók:<br>
<i>"szo1 szo2"p</i> : szavak egymáshoz közel, bármilyen sorrendben, alapértelmezett távolsággal.<br>
<b>A keresés részletei</b> segíthet feltárni a nem várt találatok okát. Részletesebb leírás a kézikönyvben (&lt;F1>) található.

    
    
        Stemming languages for stored query: 
        A mentett keresés szótőképző nyelve: 
    
    
         differ from current preferences (kept)
         eltér a jelenlegi beállítástól (megtartva).
    
    
        Auto suffixes for stored query: 
        A mentett keresés automatikus toldalékolása: 
    
    
        External indexes for stored query: 
        A mentett keresés külső indexe: 
    
    
        Autophrase is set but it was unset for stored query
        Az „automatikus részmondat” be van kapcsolva, de a keresés mentésekor tiltva volt.
    
    
        Autophrase is unset but it was set for stored query
        Az „automatikus részmondat” ki van kapcsolva, de a keresés mentésekor engedélyezve volt.
    


    SSearchBase
    
        SSearchBase
        SSearchBase
    
    
        Clear
        Törlés
    
    
        Ctrl+S
        Ctrl+S
    
    
        Erase search entry
        A keresőmező törlése
    
    
        Search
        Keresés
    
    
        Start query
        A keresés indítása
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Ide kell írni a keresőszavakat.
ESC SZÓKÖZ billentyűsorozat: a szó lehetséges kiegészítéseit ajánlja fel.
    
    
        Choose search type.
        A keresés módjának kiválasztása
    


    SearchClauseW
    
        Select the type of query that will be performed with the words
        A megadott szavakkal végzett keresés típusának kiválasztása
    
    
        Number of additional words that may be interspersed with the chosen ones
        A keresett szavak között található további szavak megengedett száma
    
    
        No field
        Nincs mező
    
    
        Any
        Bármely szó
    
    
        All
        Mind
    
    
        None
        semmi
    
    
        Phrase
        Részmondat
    
    
        Proximity
        Távolság
    
    
        File name
        Fájlnév
    


    Snippets
    
        Snippets
        Érdemi részek
    
    
        Find:
        Keresés:
    
    
        Next
        Következő
    
    
        Prev
        Előző
    


    SnippetsW
    
        Search
        Keresés
    
    
        <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p>
        <p>Sajnos a megadott határok között nincs pontos egyezés. Talán túl nagy a dokumentum, és a feldolgozó elakadt...</p>
    


    SpecIdxW
    
        Special Indexing
        Egyedi indexelés
    
    
        Do not retry previously failed files.
        A korábban sikertelenül indexelt fájlok kihagyása
    
    
        Else only modified or failed files will be processed.
        Egyébként csak a módosult vagy korábban sikertelenül indexelt fájlok lesznek feldolgozva
    
    
        Erase selected files data before indexing.
        A kijelölt fájlok tárolt adatainak törlése indexelés előtt
    
    
        Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs).
        A könyvtár, melyet rekurzívan indexelni kell.<br>A beállítófájlban megadott kezdő könyvtáron (topdir) belül kell lennie.
    
    
        Browse
        Tallózás
    
    
        Start directory (else use regular topdirs):
        Kezdő könyvtár (üresen a rendes kezdő könyvtár):
    
    
        Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set.
        Az összes fájl feldolgozásához üresen kell hagyni. Szóközökkel elválasztva több shell típusú minta is megadható.<br>A szóközt tartalmazó mintákat kettős idézőjellel kell védeni.<br>Csak kezdő könyvtár megadásával együtt használható.
    
    
        Selection patterns:
        Kijelölés mintával:
    
    
        Top indexed entity
        Az indexelendő kezdő könyvtár
    


    SpellBase
    
        Term Explorer
        Szóvizsgáló
    
    
        &Expand 
        &Listázás
    
    
        Alt+E
        Alt+E
    
    
        &Close
        &Bezárás
    
    
        Alt+C
        Alt+C
    
    
        No db info.
        Nincs információ az adatbázisról.
    
    
        Match
        Egyéb beállítások
    
    
        Case
        Kis- és nagybetű
    
    
        Accents
        Ékezetek
    


    SpellW
    
        Wildcards
        Helyettesítő karakterek
    
    
        Regexp
        Reguláris kifejezés
    
    
        Stem expansion
        Szótő és toldalékok
    
    
        Spelling/Phonetic
        Írásmód/fonetika
    
    
        error retrieving stemming languages
        hiba a szótőképzés nyelvének felismerésekor
    
    
        Aspell init failed. Aspell not installed?
        Az aspell indítása nem sikerült. Telepítve van?
    
    
        Aspell expansion error. 
        Aspell toldalékolási hiba.
    
    
        No expansion found
        Nincsenek toldalékok.
    
    
        Term
        Szó
    
    
        Doc. / Tot.
        Dok. / Össz.
    
    
        Index: %1 documents, average length %2 terms.%3 results
        Index: %1 dokumentum, átlagosan %2 szó. %3 találat.
    
    
        %1 results
        %1 találat
    
    
        List was truncated alphabetically, some frequent 
        Ez egy rövidített, betűrend szerinti lista, gyakori 
    
    
        terms may be missing. Try using a longer root.
        szavak hiányozhatnak. Javallott hosszabb szógyök megadása.
    
    
        Show index statistics
        Indexstatisztika
    
    
        Number of documents
        A dokumentumok száma
    
    
        Average terms per document
        A szavak átlagos száma dokumentumonként
    
    
        Database directory size
        Az adatbázis mérete
    
    
        MIME types:
        MIME típusok:
    
    
        Item
        Megnevezés
    
    
        Value
        Érték
    
    
        Smallest document length (terms)
        A szavak száma a legrövidebb dokumentumban
    
    
        Longest document length (terms)
        A szavak száma a leghosszabb dokumentumban
    
    
        Results from last indexing:
        A legutóbbi indexelés eredménye:
    
    
          Documents created/updated
          létrehozott/frissített dokumentum
    
    
          Files tested
          vizsgált fájl
    
    
          Unindexed files
          nem indexelt fájl
    


    UIPrefsDialog
    
        error retrieving stemming languages
        hiba a szótőképzés nyelvének felismerésekor
    
    
        The selected directory does not appear to be a Xapian index
        A kijelölt könyvtár nem tartalmaz Xapian indexet.
    
    
        This is the main/local index!
        Ez a fő-/helyi index!
    
    
        The selected directory is already in the index list
        A kijelölt könyvtár már szerepel az indexben.
    
    
        Choose
        Tallózás
    
    
        Result list paragraph format (erase all to reset to default)
        A találati lista bekezdésformátuma (törléssel visszaáll az alapértelmezettre)
    
    
        Result list header (default is empty)
        A találati lista fejléce (alapértelmezetten üres)
    
    
        Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb)
        A Recoll beállításainak vagy a Xapian indexnek a könyvtára (pl.: /home/felhasznalo/.recoll vagy /home/felhasznalo/.recoll/xapiandb)
    
    
        The selected directory looks like a Recoll configuration directory but the configuration could not be read
        A kijelölt könyvtárban egy olvashatatlan Recoll beállítás található.
    
    
        At most one index should be selected
        Csak egy indexet lehet kijelölni.
    
    
        Cant add index with different case/diacritics stripping option
        Eltérő kis- és nagybetű-, ill. ékezetkezelésű index nem adható hozzá.
    
    
        Default QtWebkit font
        Alapértelmezett QtWebkit betűkészlet
    
    
        Any term
        Bármely szó
    
    
        All terms
        Minden szó
    
    
        File name
        Fájlnév
    
    
        Query language
        Keresőnyelv
    
    
        Value from previous program exit
        A legutóbb használt
    


    ViewAction
    
        Command
        Parancs
    
    
        MIME type
        MIME típus
    
    
        Desktop Default
        Asztali alapértelmezés
    
    
        Changing entries with different current values
        A cserélendő mezők értéke eltér egymástól.
    


    ViewActionBase
    
        Native Viewers
        Dokumentumtípusok megjelenítői
    
    
        Close
        Bezárás
    
    
        Select one or several mime types then use the controls in the bottom frame to change how they are processed.
        Egy vagy több MIME típus kijelölése után az alsó keretben állítható be az adott típusokhoz elvárt művelet.
    
    
        Use Desktop preferences by default
        Az asztali alapértelmezés alkalmazása
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Egy vagy több fájltípus kijelölése után az alsó keretben állítható be az adott típusokhoz elvárt művelet
    
    
        Exception to Desktop preferences
        Eltérés az asztali beállításoktól
    
    
        Action (empty -> recoll default)
        Művelet (üres -> Recoll alapértelmezés)
    
    
        Apply to current selection
        Alkalmazás a kijelöltekre
    
    
        Recoll action:
        Recoll művelet:
    
    
        current value
        jelenlegi érték
    
    
        Select same
        Azonosak kijelölése
    
    
        <b>New Values:</b>
        <b>Új érték:</b>
    


    Webcache
    
        Webcache editor
        Webes gyorstár szerkesztése
    
    
        Search regexp
        Keresés reguláris kifejezéssel
    


    WebcacheEdit
    
        Copy URL
        URL másolása
    
    
        Unknown indexer state. Can't edit webcache file.
        Az indexelő állapota ismeretlen. A webes gyorstár nem szerkeszthető.
    
    
        Indexer is running. Can't edit webcache file.
        Az indexelő fut. A webes gyorstár nem szerkeszthető.
    
    
        Delete selection
        A kijelöltek törlése
    
    
        Webcache was modified, you will need to run the indexer after closing this window.
        A webes gyorstár módosult. Ezen ablak bezárása után indítani kell az indexelőt.
    


    WebcacheModel
    
        MIME
        MIME
    
    
        Url
        URL
    


    confgui::ConfBeaglePanelW
    
        Web page store directory name
        A weblapokat tároló könyvtár neve
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        A látogatott weblapok másolatát tároló könyvtár neve.<br>Relatív elérési út a beállításokat tároló könyvtárhoz képest értendő.
    
    
        Max. size for the web store (MB)
        A webes tároló max. mérete (MB)
    
    
        Process the WEB history queue
        A webes előzmények feldolgozása
    
    
        Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin)
        A Firefoxszal látogatott oldalak indexelése<br>(a Firefox Recoll kiegészítőjét is telepíteni kell)
    
    
        Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).
        A méret elérésekor a legkorábbi bejegyzések törlődnek.<br>Csak a növelésnek van haszna, mivel csökkentéskor a már létező fájl nem lesz kisebb (csak egy része állandóan kihasználatlan marad).
    


    confgui::ConfIndexW
    
        Can't write configuration file
        A beállítófájl írása sikertelen
    


    confgui::ConfParamFNW
    
        Choose
        Tallózás
    


    confgui::ConfParamSLW
    
        +
        +
    
    
        -
        -
    


    confgui::ConfSearchPanelW
    
        Automatic diacritics sensitivity
        Automatikus ékezetérzékenység
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        <p>Automatikusan különbözőnek tekinti az ékezetes betűket az ékezet nélküli párjuktól, ha tartalmaz ékezetes betűt a kifejezés (az unac_except_trans kivételével). Egyébként a keresőnyelv <i>D</i> módosítójával érhető el ugyanez.
    
    
        Automatic character case sensitivity
        Kis- és nagybetűk automatikus megkülönböztetése
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        <p>Automatikusan különbözőnek tekinti a kis-és nagybetűket, ha az első karakter kivételével bárhol tartalmaz nagybetűt a kifejezés. Egyébként a keresőnyelv <i>C</i> módosítójával érhető el ugyanez.
    
    
        Maximum term expansion count
        A toldalékok maximális száma
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        <p>Egy szó toldalékainak maximális száma (pl. helyettesítő karakterek használatakor). Az alapértelmezett 10 000 elfogadható érték, és elkerülhető vele a felhasználói felület időleges válaszképtelensége is.
    
    
        Maximum Xapian clauses count
        A Xapian feltételek maximális száma
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        <p>Egy Xapian kereséshez felhasználható elemi feltételek maximális száma. Néha a szavak toldalékolása szorzó hatású, ami túlzott memóriahasználathoz vezethet. Az alapértelmezett 100 000 a legtöbb esetben elegendő, de nem is támaszt különleges igényeket a hardverrel szemben.
    


    confgui::ConfSubPanelW
    
        Global
        Minden könyvtárra vonatkozik
    
    
        Max. compressed file size (KB)
        A tömörített fájlok max. mérete (KB)
    
    
        This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        A tömörített fájlok indexbe kerülésének határértéke.
-1 esetén nincs korlát.
0 esetén soha nem történik kicsomagolás.
    
    
        Max. text file size (MB)
        Szövegfájl max. mérete (MB)
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        A szövegfájlok indexbe kerülésének határértéke.
-1 esetén nincs korlát.
Az óriásira nőtt naplófájlok feldolgozása kerülhető el így.
    
    
        Text file page size (KB)
        Szövegfájl lapmérete (KB)
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text  files (ie: log files).
        Ha be van állítva (nem egyenlő -1), akkor a szövegfájlok indexelése ilyen méretű darabokban történik.
Ez segítséget nyújt a nagyon nagy méretű szövegfájlokban (pl. naplófájlok) való kereséshez.
    
    
        Max. filter exec. time (S)
        A szűrő max. futási ideje (s)
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        A túl hosszú ideig futó külső szűrők leállítása
Néha előfordul (pl. postscript esetén), hogy a szűrő végtelen ciklusba kerül.
-1 esetén nincs korlát.

    
    
        Only mime types
        MIME típusok
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        Az indexelendő MIME típusok listája.<br>Csak ezek a típusok kerülnek az indexbe. Rendesen üres és inaktív.
    
    
        Exclude mime types
        Kizárt MIME típusok
    
    
        Mime types not to be indexed
        Ezek a MIME típusok kimaradnak az indexelésből
    


    confgui::ConfTopPanelW
    
        Top directories
        Kezdő könyvtárak
    
    
        The list of directories where recursive indexing starts. Default: your home.
        A megadott könyvtárak rekurzív indexelése. Alapértelmezett értéke a saját könyvtár.
    
    
        Skipped paths
        Kizárt elérési utak
    
    
        These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Az indexelő által mellőzendő könyvtárak nevei.<br>Használhatók a helyettesítő karakterek. Csak az indexelő hatókörébe eső elérési utakat lehet megadni (pl.: ha a kezdő könyvtár a „/home/felhasznalo” és a „/home” egy link a „/usr/home”-ra, akkor helyes elérési út a „/home/felhasznalo/tmp*”, de nem az a „/usr/home/felhasznalo/tmp*”).
    
    
        Stemming languages
        A szótőképzés nyelve
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Ezen nyelvekhez készüljön szótövező és -toldalékoló szótár
    
    
        Log file name
        A naplófájl neve
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        Az üzenetek kiírásának a helye.<br>A „stderr” a terminálra küldi az üzeneteket.
    
    
        Log verbosity level
        A naplózás szintje
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Az üzenetek mennyiségének szabályozása,<br>a hibaüzenetekre szorítkozótól a részletes hibakeresésig.
    
    
        Index flush megabytes interval
        Indexírási intervallum (MB)
    
    
        This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Az az adatmennyiség, melyet két lemezre írás között az indexelő feldolgoz.<br>Segíthet kézben tartani a memóriafoglalást. Alapértelmezett: 10MB
    
    
        Max disk occupation (%)
        Max. lemezhasználat (%)
    
    
        This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default).
        Százalékos lemezfoglalás, melyen túllépve az indexelő nem működik tovább (megelőzendő az összes szabad hely elfoglalását).<br>0 esetén nincs korlát (alapértelmezett).
    
    
        No aspell usage
        Az aspell mellőzése
    
    
        Aspell language
        Az aspell nyelve
    
    
        Database directory name
        Az adatbázis könyvtárneve
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        A szóvizsgálóban az aspell használatának mellőzése a hasonló szavak keresésekor.<br>Hasznos, ha az aspell nincs telepítve vagy nem működik.
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Az aspell szótár nyelve. pl. „en” vagy „hu”...<br>Ha nincs megadva, akkor az NLS környezet alapján lesz beállítva, ez általában megfelelő. A rendszerre telepített nyelveket az „aspell config” parancs kiadása után a „data-dir” könyvtárban található .dat fájlokból lehet megtudni.
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Az indexet tartalmazó könyvtár neve.<br>Relatív elérési út a beállítási könyvtárhoz képest értendő. Alapértelmezett: „xapiandb”.
    
    
        Unac exceptions
        Unac kivételek
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        <p>Az unac alapértelmezetten eltávolít minden ékezetet és szétbontja a ligatúrákat. Az itt megadott kivételekkel lehetőség van adott karakterek esetén tiltani a műveletet, ha a használt nyelv ezt szükségessé teszi. Ezen kívül előírhatók további felbontandó karakterek is. Az egyes elemeket egymástól szóközzel kell elválasztani. Egy elem első karaktere az eredetit, a további karakterek a várt eredményt határozzák meg.
    


    uiPrefsDialogBase
    
        User preferences
        Beállítások
    
    
        User interface
        Felhasználói felület
    
    
        Number of entries in a result page
        A találatok száma laponként
    
    
        If checked, results with the same content under different names will only be shown once.
        A különböző nevű, de azonos tartalmú találatokból csak egy jelenjen meg
    
    
        Hide duplicate results.
        Többszörös találatok elrejtése
    
    
        Highlight color for query terms
        A keresőszavak kiemelésének színe
    
    
        Result list font
        A találati lista betűkészlete
    
    
        Opens a dialog to select the result list font
        A találati lista betűkészletének kiválasztása
    
    
        Helvetica-10
        Helvetica-10
    
    
        Resets the result list font to the system default
        A találati lista betűkészletének rendszerbeli alapértelmezésére állítása
    
    
        Reset
        Alaphelyzet
    
    
        Texts over this size will not be highlighted in preview (too slow).
        Ezen méret felett az előnézetben nem alkalmaz kiemelést (túl lassú)
    
    
        Maximum text size highlighted for preview (megabytes)
        Az előnézeti kiemelés korlátja (megabyte)
    
    
        Choose editor applications
        A társítások beállítása
    
    
        Auto-start simple search on whitespace entry.
        Automatikus keresés szóköz hatására
    
    
        Start with advanced search dialog open.
        Az összetett keresés ablaka is legyen nyitva induláskor
    
    
        Remember sort activation state.
        A rendezési állapot mentése
    
    
        Prefer Html to plain text for preview.
        Az előnézetben HTML egyszerű szöveg helyett
    
    
        Search parameters
        Keresési beállítások
    
    
        Stemming language
        A szótőképzés nyelve
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        Ha például a keresőkifejezés a [rolling stones] (két szó), akkor helyettesítődik
a [rolling OR stones OR (rolling PHRASE 2 stones)] kifejezéssel.
 Így előbbre kerülnek azok a találatok, melyek a keresett szavakat
pontosan úgy tartalmazzák, ahogyan meg lettek adva.
    
    
        Automatically add phrase to simple searches
        Az egyszerű keresés automatikus bővítése részmondattal
    
    
        Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.
        Próbáljon-e tartalmi kivonatot készíteni a keresőszavak alapján a találati lista elemeihez?
Nagy dokumentumok esetén lassú lehet.
    
    
        Dynamically build abstracts
        Dinamikus kivonatolás
    
    
        Do we synthetize an abstract even if the document seemed to have one?
        Kivonatoljon akkor is, ha a dokumentum már rendelkezik ezzel?
    
    
        Replace abstracts from documents
        A kivonat cseréje
    
    
        Synthetic abstract size (characters)
        A kivonat mérete (karakter)
    
    
        Synthetic abstract context words
        A kivonat környező szavainak száma
    
    
        The words in the list will be automatically turned to ext:xxx clauses in the query language entry.
        Szavak listája, melyek keresőszóként megadva
automatikusan ext:xxx keresőnyelvi kifejezéssé alakíttatnak
    
    
        Query language magic file name suffixes.
        Keresőnyelvi mágikus fájlnévkiterjesztések
    
    
        Enable
        Bekapcsolás
    
    
        External Indexes
        Külső indexek
    
    
        Toggle selected
        A kijelölt váltása
    
    
        Activate All
        Mindet bekapcsol
    
    
        Deactivate All
        Mindet kikapcsol
    
    
        Remove from list. This has no effect on the disk index.
        Törlés a listából. Az index a lemezről nem törlődik.
    
    
        Remove selected
        A kijelölt törlése
    
    
        Add index
        Index hozzáadása
    
    
        Apply changes
        A változtatások alkalmazása
    
    
        &OK
        &OK
    
    
        Discard changes
        A változtatások elvetése
    
    
        &Cancel
        &Mégsem
    
    
        Abstract snippet separator
        A kivonat elemeinek elválasztója
    
    
        Style sheet
        Stíluslap
    
    
        Opens a dialog to select the style sheet file
        A megjelenés stílusát leíró fájl kiválasztása
    
    
        Choose
        Tallózás
    
    
        Resets the style sheet to default
        A stílus visszaállítása az alapértelmezettre
    
    
        Result List
        Találati lista
    
    
        Edit result paragraph format string
        A találatok bekezdésformátuma
    
    
        Edit result page html header insert
        A találatok lapjának fejlécformátuma
    
    
        Date format (strftime(3))
        Dátumformátum (strftime(3))
    
    
        Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). 
        Egy olyan gyakorisági határérték, mely felett az adott szavak kihagyandók a részmondatokból.
Részmondatkereséskor a gyakori szavak a teljesítménybeli problémák fő okai.
A kihagyott szavak lazítják a részek közti kapcsolatot és gyengítik az automatikus részmondat hatásfokát.
Az alapértelmezett érték 2 (százalék).
    
    
        Autophrase term frequency threshold percentage
        Az automatikus részmondatok százalékos gyakorisági határértéke
    
    
        Plain text to HTML line style
        Az egyszerű szövegből alkotott HTML sor stílusa
    
    
        Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.
        A PRE tagok közti sorok nincsenek törve.
BR tag estén a behúzások elveszhetnek.
A PRE+wrap valószínűleg a legjobb választás.
    
    
        <BR>
        <BR>
    
    
        <PRE>
        <PRE>
    
    
        <PRE> + wrap
        <PRE> + wrap
    
    
        Disable Qt autocompletion in search entry.
        A Qt automatikus kiegészítésének tiltása a keresőmezőben
    
    
        Search as you type.
        Keresés minden leütéskor
    
    
        Paths translations
        Elérési út átalakítása
    
    
        Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.
        További index felvétele a listára. Egy Recoll beállítási könyvtárat vagy egy Xapian indexkönyvtárat kell megadni.
    
    
        Snippets window CSS file
        CSS az <i>Érdemi részek</i> ablakhoz
    
    
        Opens a dialog to select the Snippets window CSS style sheet file
        Az <i>Érdemi részek</i> ablak tartalmának stílusát leíró fájl kiválasztása
    
    
        Resets the Snippets window style
        Az <i>Érdemi részek</i> ablak stílusának alaphelyzetbe állítása
    
    
        Decide if document filters are shown as radio buttons, toolbar combobox, or menu.
        A szűrők megjeleníthetők rádiógombokkal, legördülő listában az eszköztáron vagy menüben
    
    
        Document filter choice style:
        A szűrőválasztó stílusa:
    
    
        Buttons Panel
        Rádiógombok
    
    
        Toolbar Combobox
        Legördülő lista
    
    
        Menu
        Menü
    
    
        Show system tray icon.
        Ikon az értesítési területen
    
    
        Close to tray instead of exiting.
        Bezárás az értesítési területre kilépés helyett
    
    
        Start with simple search mode
        Az egyszerű keresés módja induláskor
    
    
        User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header.
        Az <i>Érdemi részek</i> ablak tartalmára alkalmazandó stílus.<br>A találati lista fejléce az <i>Érdemi részek</i> ablakban is megjelenik.
    
    
        Synonyms file
        Szinonimafájl
    
    
        Show warning when opening temporary file.
        Ideiglenes fájlok megnyitásakor figyelmeztetés
    


recoll-1.26.3/qtgui/i18n/recoll_hu.qm0000644000175000017500000032625113545064515014250 00000000000000
Fields with no data are ignored. AdvSearchBaseTallzsBrowse AdvSearchBaseKategria By categories AdvSearchBaseVA tallatok szqrse a fjlok dtuma alapjn'Check this to enable filtering on dates AdvSearchBaseXA tallatok szqrse a megadott fjltpusokra,Check this to enable filtering on file types AdvSearchBaseVA tallatok szqrse a fjlok mrete alapjn'Check this to enable filtering on sizes AdvSearchBaselA tallatok szqrse MIME tpus helyett fjlkategrira;Check this to use file categories instead of raw mime types AdvSearchBaseBezrsClose AdvSearchBase Felttel trlse Delete clause AdvSearchBaseJA keress kezdQ knyvtrnak megadsaEnter top directory for search AdvSearchBase SzqrQkFilter AdvSearchBase Dtum Filter dates AdvSearchBase Mret Filter sizes AdvSearchBaseKeressFind AdvSearchBase ettQlFrom AdvSearchBase$Kizrt fjltpusokIgnored file types AdvSearchBaseMegfordtsInvert AdvSearchBaselegfeljebb Max. Size AdvSearchBasepMaximlis mret, sokszoroz uttag lehet a k/K, m/M, g/G4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaselegalbb Min. Size AdvSearchBasepMinimlis mret, sokszoroz uttag lehet a k/K, m/M, g/G4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBaseFjltpusRestrict file types AdvSearchBaseLKeress az albbi knyvtrbl indulva:%Restrict results to files in subtree: AdvSearchBase4Ments alaprtelmezettkntSave as default AdvSearchBase A keress mdja:'Search for
documents
satisfying: AdvSearchBase(Keresett fjltpusokSearched file types AdvSearchBaseKijellt -----> Sel -----> AdvSearchBase$A keress indtsa Start Search AdvSearchBase eddigTo AdvSearchBase<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelQ idQztse (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Minden mezQben megadhat csillag (*), szm, szmok listja (1,3,5) vagy szmtartomny (1-7). ltalnosabban, a mezQk jelentse ugyanaz, <span style=" font-style:italic;">mint</span> a crontab fjlban, s a teljes crontab szintaxis hasznlhat, lsd a crontab(5) kziknyvlapot.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Pldul <span style=" font-family:'Courier New,courier';">*</span>-ot rva a <span style=" font-style:italic;">naphoz, </span><span style=" font-family:'Courier New,courier';">12,19</span>-et az <span style=" font-style:italic;">rhoz</span> s <span style=" font-family:'Courier New,courier';">15</span>-t a <span style=" font-style:italic;">perchez</span>, a recollindex minden nap 12:15-kor s du. 7:15-kor fog elindulni.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Tl gyakori temezs helyett clszerqbb lehet a vals idejq indexels engedlyezse.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 AM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolWV<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-style:italic;">Kikapcsols</span> megsznteti, a <span style=" font-style:italic;">Bekapcsols</span> aktivlja az idQztett indexelst, a <span style=" font-style:italic;">Mgsem</span> nem vltoztat a belltson.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWCron idQztQ Cron Dialog CronToolWZA ht napja (* vagy 0-7, 0 vagy 7 a vasrnap))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWKikapcsolsDisable CronToolWBekapcsolsEnable CronToolWHiba a cron bejegyzs hozzadsakor! Rossz szintaxis a mezQkben?3Error installing cron entry. Bad syntax in fields ? CronToolW"ra (* vagy 0-23)Hours (* or 0-23) CronToolWgy tqnik, egy kzi bejegyzse van a recollindexnek, nem sikerlt a crontab szerkesztse!PIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWPerc (0-59)Minutes (0-59) CronToolWPrbeszdablakDialog EditDialogBelltsi hiba Config error EditTrans Helyi elrsi t Local path EditTrans$Eredeti elrsi t Original path EditTrans$Eredeti elrsi t Source path EditTransHozzadsAdd EditTransBase MgsemCancel EditTransBase TrlsDelete EditTransBase,Elrsi t talaktsaPath Translations EditTransBase MentsSave EditTransBaseNKijellhetQ egy vagy tbb elrsi t iskSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBase:Elrsi t-talakts ehhez: Setting path translations for  EditTransBase R<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">A jelenlegi belltsokhoz mg nem tartozik index.</span><br /><br />A sajt mappa indexelse javasolt alapbelltsokkal az <span style=" font-style:italic;">Indexels indtsa most</span> gombbal indthat. A belltsok ksQbb mdosthatk.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Az albbi hivatkozsok az indexels finomhangolsra s idQztsre szolglnak.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ezek a lehetQsgek ksQbb a <span style=" font-style:italic;">Belltsok</span> menbQl is elrhetQk.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialogNAz indexels belltsa elsQ indulskorFirst indexing setupFirstIdxDialog0Az indexels belltsaiIndexing configurationFirstIdxDialog.Az idQzts belltsaiIndexing scheduleFirstIdxDialog.Indexels indtsa mostStart indexing nowFirstIdxDialogMegadhat az indexelendQ knyvtrak kre s egyb paramterek, pldul kizrt elrsi utak vagy fjlnevek, alaprtelmezett betqkszlet stb.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialogLehetQsg van temezett indtsra s vals idejq indexelsre, az elQbbi idQztse is bellthat (a cron segtsgvel).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog2A fjl nem tallhat: %1. %1 not found.FragButs%1: %2%1: %2FragButsStatikus szqrQkQuery FragmentsFragButs <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelQ futhat folyamatosan, gy a fjlok vltozsakor az index is azonnal frissl, vagy indulhat meghatrozott idQkznknt.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A kziknyv segtsget nyjt a kt eljrs kzl a megfelelQ kivlasztshoz (F1).</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">LehetQsg van az idQztett indexels temezsre, vagy a vals idejq indexelQ automatikus indtsra bejelentkezskor (vagy mindkettQre, br ez ritkn clszerq).</p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedWCron idQztQCron scheduling IdxSchedWA vals idejq indexels indtsa bejelentkezskor (csak az alaprtelmezett indexhez).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedW,Az indexels idQztseIndex scheduling setup IdxSchedW<Vals idejq indexels indtsaReal time indexing start up IdxSchedWAz indexels kezdQ idQpontjainak belltsa egy crontab bejegyzs ltal._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedWPrbeszdablakDialog ListDialogGroupBoxGroupBox ListDialogAz elQzmnyek fjlja srlt vagy nem lehet rni/olvasni, ellenQrizni vagy trlni kell: K"history" file is damaged or un(read)writeable, please check or remove it: MainRNincs adatbzisknyvtr a belltfjlban No db directory in configurationMain&KvetkezQ&NextPreview &ElQzQ &PreviousPreviewKere&ss: &Search for:Preview2Nem sikerlt rtelmezni: 0Can't turn doc into internal representation for Preview MgsemCancelPreview TrlsClearPreviewLap bezrsa Close TabPreview(ElQnzet ltrehozsaCreating preview textPreview:Hiba a fjl betltse kzben!Error while loading filePreviewJAz elQnzet betltse a megjelentQbe Loading preview text into editorPreview$Kis- s &nagybetqk Match &CasePreview*Hinyz segdprogram:Missing helper program: PreviewMsolsCopyPreviewTextEditSortrs Fold linesPreviewTextEdit Eredeti trdelsPreserve indentationPreviewTextEditNyomtatsPrintPreviewTextEdit8A jelenlegi nzet nyomtatsaPrint Current PreviewPreviewTextEditMents fjlbaSave document to filePreviewTextEditMindent kijell Select AllPreviewTextEdit MezQk Show fieldsPreviewTextEditKp Show imagePreviewTextEditTartalomShow main textPreviewTextEdit,<b>Egyedi alknyvtrakCustomised subtreesQObject<i>Ha a fenti listbl semmi vagy egy res sor van kijellve, gy a kvetkezQ jellemzQk<br>az indexelendQ legfelsQ szintq, egybknt a kijellt mappra vonatkoznak.<br>A +/- gombokkal lehet a listhoz knyvtrakat adni vagy onnan trlni.The parameters that follow are set either at the top level, if nothing
or an empty line is selected in the listbox above, or for the selected subdirectory.
You can add or remove directories by clicking the +/- buttons.QObjectnA karakterkdolsrl informcit nem trol fjlok (pldul egyszerq szveges fjlok) kdolsa.<br>Alaprtelmezetten nincs megadva, s a nyelvi krnyezet (NLS) alapjn lesz belltva.Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObjectDAlaprtelmezett<br>karakterkdolsDefault
character setQObject6Szimbolikus linkek kvetseFollow symbolic linksQObjectIndexelskor kvesse a szimbolikus linkeket.<br>Alaprtelmezetten ki van kapcsolva, elkerlendQ a dupla indexelst.TFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject*ltalnos belltsokGlobal parametersQObject*Kizrt kiterjesztsekIgnored endingsQObject2Minden fjlnv indexelseIndex all file namesQObjectA Recoll szmra ismeretlen tpus vagy rtelmezhetetlen fjlok nevt is indexelje.<br>Alaprtelmezetten engedlyezve van.}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject"Helyi belltsokLocal parametersQObject(Keressi belltsokSearch parametersQObjectKizrt nevek Skipped namesQObjectAz indexelt hierarchin belli alknyvtrak listja,<br> melyekre eltrQ belltsok vonatkoznak. Alaprtelmezetten res.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectAz ilyen fjlnvvgzQdsq fjlok csak a nevk alapjn indexelendQk (nem trtnik MIME tpusfelismers, kicsomagols s tartalomindexels sem).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObjectMintval megadhat fjl- s knyvtrnevek, melyeket nem kell indexelniLThese are patterns for file or directory names which should not be indexed.QObject Webes elQzmnyek Web historyQObject&Megnyits&OpenQWidget&A szlQ megnyi&tsa&Open Parent document/folderQWidget&ElQnzet&PreviewQWidgetMent&s fjlba&Write to FileQWidgetFNem sikerlt kicsomagolni a fjlt: Cannot extract document: QWidgetLCsak pontosan egy knyvtr adhat meg!Choose exactly one directoryQWidget"&Fjlnv msolsaCopy &File NameQWidget&URL msolsa Copy &URLQWidget4A knyvtr nem olvashat: Could not read directory: QWidget2Mentsi knyvtr megadsaCreate or choose save directoryQWidget8&Hasonl dokumentum keresseFind &similar documentsQWidgetr&demi rszekOpen &Snippets windowQWidget Megnyits ezzel: Open WithQWidget$A szlQ elQn&zetePreview P&arent document/folderQWidget"Szkript futtatsa Run ScriptQWidget4A kijells mentse fjlbaSave selection to filesQWidget:Aldokumentumok / csatolmnyokShow subdocuments / attachmentsQWidgetRA fjl mr ltezik, ezrt ki lesz hagyva.+Unexpected file name collision, cancelling.QWidget*Ne jelenjen meg jra.Do not show again.QxtConfirmationMessage`<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A <span style=" font-weight:600;">Recoll</span> indexelQje indthat szolgltatsknt, gy az index minden fjlvltozskor azonnal frissl. ElQnye a mindig napraksz index, de folyamatosan ignybe veszi az erQforrsokat.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolWLAz indexelQ szolgltats indtsa most%Also start indexing daemon right now.RTIToolWAz autostart fjl trlve lett. A most fut indexelQt is le kell lltani?2Autostart file deleted. Kill current process too ?RTIToolW2Nem sikerlt ltrehozni: Can't create: RTIToolWBA recollindex indtsa sikertelenCould not execute recollindexRTIToolWFjl trlse Deleting fileRTIToolWTrls:  Deleting: RTIToolWXA vals idejq indexels automatikus indtsa"Real time indexing automatic startRTIToolW2Az autostart kikapcsolsaRemoving autostartRTIToolWFjl cserjeReplacing fileRTIToolWCsere:  Replacing: RTIToolWbAz indexelQ szolgltats indtsa a munkamenettel.Start indexing daemon with my desktop session.RTIToolWFigyelmeztetsWarningRTIToolW(minden nyelv)(all languages)RclMain&(nincs sztQkpzs) (no stemming)RclMain"A Recoll nvjegye About RecollRclMainMindAllRclMainHibs alkalmazsbellts ehhez:%1: [%2] EllenQrizni kell az asztali belltfjlt!?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMain$Hibs elrsi utak Bad pathsRclMainLHibs elrsi utak a belltfjlban: !Bad paths in configuration file: RclMainHibs a megjelentQ parancssor ehhez: %1: [%2] EllenQrizni kell a mimeview fjlt!CBad viewer command line for %1: [%2] Please check the mimeview fileRclMain*A fjl nem elrhetQ: Can't access file: RclMainJNem sikerlt ltrehozni az elQnzetetCan't create preview windowRclMainpNem lehet betlteni a szinonmafjlt (rtelmezsi hiba?)&Can't set synonyms file (parse error?)RclMainFNem sikerlt kicsomagolni a fjlt: Can't uncompress file: RclMainpNem sikerlt frissteni az indexet: az indexelQ mr fut.#Can't update index: indexer runningRclMainNem sikerlt a kicsomagols vagy az ideiglenes fjl ltrehozsa.0Cannot extract document or create temporary fileRclMain@Nem tallhat a szlQdokumentum.Cannot find parent documentRclMainNem sikerlt az adatbzisban informcit tallni a dokumentumrl.+Cannot retrieve document info from databaseRclMainMents ideChoose file to saveRclMainVisszatrs a listhoz: Mgsem.<b>Az elQnzet megnyitsa mindenkpp (s megjegyzs erre a munkamenetre): MellQzs.pClick Cancel to return to the list.
Click Ignore to show the preview anyway (and remember for this session).RclMainAz OK-ra kattintva frissthetQ a fjl indexbejegyzse, ennek vgeztvel jra kell futtatni a keresst.jClick Ok to update the index for this file, then you will need to re-run the query when indexing is done. RclMainlezrsClosingRclMain^Az albbi MIME tpusok szerepelnek az indexben:.Content has been indexed for these mime types:RclMainRNem sikerlt betlteni a mentett keresstCould not load saved queryRclMainEgy klsQ index megnyitsa nem sikerlt. EllenQrizni kell a klsQ indexek listjt.HCould not open external index. Db not open. Check external indexes list.RclMain@Nem sikerlt megnyitni a fjlt: Could not open file: RclMain.A fjl rsa sikertelenCould not write to fileRclMainNem elrhetQ, mert a vals idejq indexels nincs a programba fordtva.;Disabled because the real time indexer was not compiled in.RclMainNe jelenjen meg tbbszr (a GUI belltsaiban visszallthat).DDo not show this warning next time (use GUI preferences to restore).RclMainDokumentumszqrQDocument filterRclMainElQzmnyekDocument historyRclMainkszDoneRclMainMsodpldnyokDuplicate documentsRclMainIndex trlse Erasing indexRclMainHibaErrorRclMainVgrehajts: [ Executing: [RclMainAz albbi klsQ alkalmazsok/parancsok hinyoznak a legutbbi indexels sorn keletkezett napl alapjn -----> pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMainElQzmnyadatok History dataRclMain(Az index zrolva van Index lockedRclMain,Nincs megnyitott indexIndex not openRclMainA fjl bejegyzse az indexben elavult. Esetlegesen tves adatok megjelentse helyett kihagyva. NIndex not up to date for this file. Refusing to risk showing the wrong entry. RclMain*Indexlekrdezsi hibaIndex query errorRclMain.Az idQzts belltsaiIndex schedulingRclMain*Indexelt MIME tpusokIndexed MIME TypesRclMainfAz indexelQ fut. A webes gyorstr nem hozzfrhetQ./Indexer is running. Can't access webcache file.RclMain:Az indexelQ jelenleg nem fut.Indexing did not run yetRclMain(Sikertelen indexelsIndexing failedRclMain4Az indexels folyamatban: Indexing in progress: RclMain0Az indexels megszakadt.Indexing interruptedRclMainBetltsi hiba Load errorRclMain,Hinyz segdprogramokMissing helper programsRclMainfigyelsMonitorRclMainrNincs klsQ megjelentQ belltva ehhez a MIME tpushoz [-No external viewer configured for mime type [RclMain6Nincs hinyz segdprogram.No helpers found missingRclMain<Nincs elQzQleg mentett keressNo preserved previous searchRclMainNincs tallatNo results foundRclMainNincs keress No searchRclMain semmiNoneRclMainEgy ideiglenes msolat lesz megnyitva. A mdostsok<br/>megQrzshez a fjlt el kell menteni mshov.`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMain trlsPurgeRclMainA keress folyamatban van.<br>Az indexelQ korltozsai miatt<br>megszaktskor a program kilp.eQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain&A keress eredmnye Query resultsRclMain$Sikertelen olvass Read failedRclMain`Indulhat az index trlse s teljes jraptse?(Reset the index and start from scratch ?RclMain*Tallatok szma (kb.)Result count (est.)RclMainFjl mentse Save fileRclMain4Mentett keressek (*.rclq)Saved Queries (*.rclq)RclMainjMinta hasznlathoz kezdQ knyvtrt is meg kell adni.:Selection patterns can only be used with a start directoryRclMainFA minthoz kezdQ knyvtr szksgesSelection patterns need topdirRclMainSajnos Windows rendszeren mg nem vehetQ ignybe, a Fjl menbQl lehet frissteni az indexet.YSorry, not available under Windows for now, use the File menu entries to update the indexRclMain&sztvek adatbzisaStemdbRclMain*Indexel&s lelltsaStop &IndexingRclMain<Aldokumentumok s csatolmnyokSub-documents and attachmentsRclMainA jelenleg fut indexelQ nem errQl a felletrQl lett indtva, nem llthat le.OThe current indexing process was not started from this interface, can't kill itRclMainA jelenleg fut indexelQ nem errQl a felletrQl lett indtva.<br>Az OK gombbal kilQhetQ, a Mgsem gombbal meghagyhat.yThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainA dokumentum klsQ indexhez tartozik, mely innen nem frissthetQ.@The document belongs to an external index which I can't update. RclMain|Az indexelQ fut, ennek vgeztre a dolgok mg helyrellhatnak.@The indexer is running so things should improve when it's done. RclMainA mimeview fjlban megadott megjelentQ ehhez: %1: %2 nem tallhat. Megnyissuk a belltsok ablakt?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainVEzek az URL-ek (| ipath) azonos tartalmak:-These Urls ( | ipath) share the same content:RclMainzEz a bellteszkz csak az elsQdleges indexszel hasznlhat.6This configuration tool only works for the main index.RclMain6Ez a keress mr nem aktv."This search is not active any moreRclMainismeretlenUnknownRclMainAz indexelQ llapota ismeretlen. A webes gyorstr nem hozzfrhetQ.2Unknown indexer state. 
Can't access webcache file.RclMain"&Index frisstse Update &IndexRclMainfrisstsUpdatingRclMain%1 megjelentQ parancssora fjlt s szlQt is megad: ez nem tmogatott.QViewer command line for %1 specifies both file and parent file value: unsupportedRclMainFigyelmeztetsWarningRclMain,Sikertelen rsmqvelet Write failedRclMainVhiba a sztQkpzs nyelvnek felismersekor#error retrieving stemming languagesRclMain szqrtfilteredRclMain MdiamediaRclMain zenetmessageRclMain EgybotherRclMainPrezentci presentationRclMainrendezettsortedRclMainMunkafzet spreadsheetRclMain SzvegtextRclMain2 A sikerteleneket jra With failed files retrying RclMainBase$A Recoll &nvjegye &About Recoll RclMainBase$sszetett &keress&Advanced Search RclMainBase&&ElQzmnyek trlse&Erase document history RclMainBase8Keres&si elQzmnyek trlse&Erase search history RclMainBase &Fjl&File RclMainBase &Teljes kpernyQ &Full Screen RclMainBase*&Felhasznli fellet&GUI configuration RclMainBase &Sg&Help RclMainBase&Indexels&Index configuration RclMainBase&Belltsok &Preferences RclMainBase$Index j&raptse&Rebuild index RclMainBase&Tallatok&Results RclMainBase,&Rendezsi belltsok&Sort parameters RclMainBase&Eszkzk&Tools RclMainBase.&Felhasznli kziknyv &User manual RclMainBase &Nzet&View RclMainBase"sszetett keressAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBaseElQzmnyekDocument History RclMainBase&ElQzmnyekDocument &History RclMainBase&KilpsE&xit RclMainBase&KlsQ indexekE&xternal index dialog RclMainBase0Szinonmk engedlyezseEnable synonyms RclMainBaseKlsQ indexekExternal index dialog RclMainBaseF11F11 RclMainBaseElsQ oldal First Page RclMainBaseElsQ oldal First page RclMainBaseTeljes kpernyQ Full Screen RclMainBaseElsQ oldalGo to first page of results RclMainBase&StatisztikaIndex &statistics RclMainBase,Indexelt &MIME tpusokIndexed &MIME types RclMainBaseIdQz&tsIndexing &schedule RclMainBase>Indexels egyedi belltsokkalIndexing with special options RclMainBase2Mentett keress betltseLoad saved query RclMainBase.&Hinyz segdprogramokMissing &helpers RclMainBaseKvetkezQ oldal Next Page RclMainBaseKvetkezQ oldal Next page RclMainBaseKvetkezQ oldalNext page of results RclMainBaseA kvetkezQ frisstskor jra prblja a sikertelenl indexelt fjlokat.Next update will retry previously failed files RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBaseElQzQ oldal Previous Page RclMainBaseElQzQ oldal Previous page RclMainBaseElQzQ oldalPrevious page of results RclMainBaseStatikus szqrQkQuery Fragments RclMainBase RecollRecoll RclMainBaseLMents CSV (strukturlt szveg) fjlbaSave as CSV (spreadsheet) file RclMainBase6A legutbbi keress mentseSave last query RclMainBasexA tallatok mentse egy tblzatkezelQvel megnyithat fjlba@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift+PgUp Shift+PgUp RclMainBase&A keress rszleteiShow Query Details RclMainBase Tblzatos nzet Show as table RclMainBaseJA tallatok megjelentse tblzatban(Show results in a spreadsheet-like table RclMainBase>CskkenQ rendezs dtum szerintSort by date, newest first RclMainBase<NvekvQ rendezs dtum szerintSort by date, oldest first RclMainBase>CskkenQ rendezs dtum szerint#Sort by dates from newest to oldest RclMainBase<NvekvQ rendezs dtum szerint#Sort by dates from oldest to newest RclMainBase*Rendezsi belltsokSort parameters RclMainBase Egyedi indexelsSpecial Indexing RclMainBase&SzvizsglTerm &explorer RclMainBaseSzvizsglTerm explorer tool RclMainBase(Az &index frisstse Update &index RclMainBase6Webes gyorstr szerkesztseWebcache Editor RclMainBaseKilpsQuit RclTrayIcon,A Recoll 
megjelentseRestore RclTrayIcon Tartalmi kivonatAbstract RecollModel SzerzQAuthor RecollModel DtumDate RecollModelDtum s idQ Date and time RecollModel&A dokumentum dtuma Document date RecollModel&A dokumentum mrete Document size RecollModelA fjl dtuma File date RecollModelFjlnv File name RecollModelA fjl mrete File size RecollModel BelsQ elrsi tIpath RecollModelKulcsszavakKeywords RecollModelMIME tpus MIME type RecollModelMdosts idejeMtime RecollModel.Eredeti karakterkdolsOriginal character set RecollModelRelevanciaRelevancy rating RecollModelCmTitle RecollModelURLURL RecollModel>(a&nbsp;keress&nbsp;rszletei) (show query)ResList8<p><b>Nincs tallat.</b><br>

No results found
ResList`<p><i>Alternatv rsmd (kezetek nlkl): </i>4

Alternate spellings (accents suppressed): ResList<<p><i>Alternatv rsmd: </i>

Alternate spellings: ResListElQzmnyekDocument historyResList$Tallatok a lapon: DocumentsResListKvetkezQNextResListMegnyitsOpenResListElQnzetPreviewResList ElQzQPreviousResList&A keress rszletei Query detailsResList*Tallatok szma (kb.)Result count (est.)ResListTallati lista Result listResListrdemi rszekSnippetsResList.Elrhetetlen dokumentumUnavailable documentResListforResList* " Az sszes tallat:out of at leastResListOszlop &trlse&Delete columnResTable.&Rendezs alaphelyzetbe &Reset sortResTable$&Ments CSV fjlba &Save as CSVResTable, %1  oszlop hozzadsaAdd "%1" columnResTableFNem sikerlt megnyitni/ltrehozni: Can't open/create file: ResTable:A tblzat mentse CSV fjlbaSave table to CSV fileResTableX eltr a jelenlegi belltstl (megtartva).' differ from current preferences (kept)SSearchMinden sz All termsSSearchBrmely szAny termSSearchZA mentett keress automatikus toldalkolsa:  Auto suffixes for stored query: SSearchAz automatikus rszmondat  be van kapcsolva, de a keress mentsekor tiltva volt.3Autophrase is set but it was unset for stored querySSearchAz automatikus rszmondat  ki van kapcsolva, de a keress mentsekor engedlyezve volt.3Autophrase is unset but it was set for stored querySSearch*Hibs keresQkifejezsBad query stringSSearchzA fjlnv megadshoz helyettestQ karakterek is hasznlhatk$Enter file name wildcard expression.SSearchKeresQnyelvi kifejezs megadsa. Segtsg:<br> <i>szo1 szo2</i> : 'szo1' s 'szo2' brmely mezQben.<br> <i>mezo:szo1</i> : 'szo1' a 'mezo' nevq mezQben.<br> Szabvnyos mezQnevek/szinonmk:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> PszeudmezQk: dir, mime/format, type/rclcat, date, size.<br> Pda dtumtartomnyra: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>szo1 szo2 OR szo3</i> : szo1 AND (szo2 OR szo3).<br> A jobb olvashatsg rdekben hasznlhatk zrjelek.<br> <i>"szo1 szo2"</i> : rszmondat (pontosan gy kell elQfordulnia). Lehetsges mdostk:<br> <i>"szo1 szo2"p</i> : szavak egymshoz kzel, brmilyen sorrendben, alaprtelmezett tvolsggal.<br> <b>A keress rszletei</b> segthet feltrni a nem vrt tallatok okt. Rszletesebb lers a kziknyvben (&lt;F1>) tallhat. Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchIde kell rni a keresQszavakat. ESC SZKZ billentyqsorozat: a sz lehetsges kiegsztseit ajnlja fel.FEnter search terms here. Type ESC SPC for completions of current term.SSearch@A mentett keress klsQ indexe: #External indexes for stored query: SSearchFjlnv File nameSSearch&Elfogyott a memria Out of memorySSearchKeresQnyelvQuery languageSSearchJA mentett keress sztQkpzQ nyelve: %Stemming languages for stored query: SSearch>A keress mdjnak kivlasztsaChoose search type. SSearchBase TrlsClear SSearchBase Ctrl+SCtrl+S SSearchBaseIde kell rni a keresQszavakat. ESC SZKZ billentyqsorozat: a sz lehetsges kiegsztseit ajnlja fel.FEnter search terms here. Type ESC SPC for completions of current term. SSearchBase(A keresQmezQ trlseErase search entry SSearchBaseSSearchBase SSearchBase SSearchBaseKeressSearch SSearchBase$A keress indtsa Start query SSearchBaseMindAll SearchClauseWBrmely szAny SearchClauseWFjlnv File name SearchClauseWNincs mezQNo field SearchClauseW semmiNone SearchClauseWA keresett szavak kztt tallhat tovbbi szavak megengedett szmaHNumber of additional words that may be interspersed with the chosen ones SearchClauseWRszmondatPhrase SearchClauseWTvolsg Proximity SearchClauseWvA megadott szavakkal vgzett keress tpusnak kivlasztsa>Select the type of query that will be performed with the words SearchClauseWKeress:Find:SnippetsKvetkezQNextSnippets ElQzQPrevSnippetsrdemi rszekSnippetsSnippets<p>Sajnos a megadott hatrok kztt nincs pontos egyezs. Taln tl nagy a dokumentum, s a feldolgoz elakadt...</p>

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsWKeressSearch SnippetsWTallzsBrowseSpecIdxWA knyvtr, melyet rekurzvan indexelni kell.<br>A belltfjlban megadott kezdQ knyvtron (topdir) bell kell lennie.Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxWbA korbban sikertelenl indexelt fjlok kihagysa%Do not retry previously failed files.SpecIdxWEgybknt csak a mdosult vagy korbban sikertelenl indexelt fjlok lesznek feldolgozva5Else only modified or failed files will be processed.SpecIdxWtA kijellt fjlok trolt adatainak trlse indexels elQtt*Erase selected files data before indexing.SpecIdxWAz sszes fjl feldolgozshoz resen kell hagyni. Szkzkkel elvlasztva tbb shell tpus minta is megadhat.<br>A szkzt tartalmaz mintkat kettQs idzQjellel kell vdeni.<br>Csak kezdQ knyvtr megadsval egytt hasznlhat.Leave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxW&Kijells mintval:Selection patterns:SpecIdxW Egyedi indexelsSpecial IndexingSpecIdxW`KezdQ knyvtr (resen a rendes kezdQ knyvtr):+Start directory (else use regular topdirs):SpecIdxW:Az indexelendQ kezdQ knyvtrTop indexed entitySpecIdxW&Bezrs&Close SpellBase&Listzs&Expand  SpellBasekezetekAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBaseKis-s nagybetqCase SpellBase"Egyb belltsokMatch SpellBaseBNincs informci az adatbzisrl. No db info. SpellBaseSzvizsgl Term Explorer SpellBaseF ltrehozott/frisstett dokumentum Documents created/updatedSpellW vizsglt fjl Files testedSpellW& nem indexelt fjl Unindexed filesSpellW%1 tallat %1 resultsSpellW4Aspell toldalkolsi hiba.Aspell expansion error. SpellW^Az aspell indtsa nem sikerlt. Teleptve van?)Aspell init failed. Aspell not installed?SpellWNA szavak tlagos szma dokumentumonkntAverage terms per documentSpellW&Az adatbzis mreteDatabase directory sizeSpellWDok. / ssz. Doc. / Tot.SpellWfIndex: %1 dokumentum, tlagosan %2 sz. %3 tallat.7Index: %1 documents, average length %2 terms.%3 resultsSpellWMegnevezsItemSpellWhEz egy rvidtett, betqrend szerinti lista, gyakori 1List was truncated alphabetically, some frequent SpellWTA szavak szma a leghosszabb dokumentumbanLongest document length (terms)SpellWMIME tpusok: MIME types:SpellW*Nincsenek toldalkok.No expansion foundSpellW(A dokumentumok szmaNumber of documentsSpellW&Regulris kifejezsRegexpSpellW@A legutbbi indexels eredmnye:Results from last indexing:SpellW IndexstatisztikaShow index statisticsSpellWTA szavak szma a legrvidebb dokumentumban Smallest document length (terms)SpellW rsmd/fonetikaSpelling/PhoneticSpellW&SztQ s toldalkokStem expansionSpellWSzTermSpellW rtkValueSpellW.HelyettestQ karakterek WildcardsSpellWVhiba a sztQkpzs nyelvnek felismersekor#error retrieving stemming languagesSpellWtszavak hinyozhatnak. Javallott hosszabb szgyk megadsa..terms may be missing. Try using a longer root.SpellWMinden sz All terms UIPrefsDialogBrmely szAny term UIPrefsDialogBCsak egy indexet lehet kijellni.$At most one index should be selected UIPrefsDialogEltrQ kis-s nagybetq-, ill. 
kezetkezelsq index nem adhat hozz.>Cant add index with different case/diacritics stripping option UIPrefsDialogTallzsChoose UIPrefsDialogHAlaprtelmezett QtWebkit betqkszletDefault QtWebkit font UIPrefsDialogFjlnv File name UIPrefsDialogKeresQnyelvQuery language UIPrefsDialogbA tallati lista fejlce (alaprtelmezetten res)%Result list header (default is empty) UIPrefsDialogA tallati lista bekezdsformtuma (trlssel visszall az alaprtelmezettre)j rtk:</b>New Values:ViewActionBaseNMqvelet (res -> Recoll alaprtelmezs) Action (empty -> recoll default)ViewActionBase2Alkalmazs a kijelltekreApply to current selectionViewActionBaseBezrsCloseViewActionBaseBEltrs az asztali belltsoktl Exception to Desktop preferencesViewActionBase<Dokumentumtpusok megjelentQiNative ViewersViewActionBaseRecoll mqvelet:Recoll action:ViewActionBaseNKijellhetQ egy vagy tbb elrsi t iskSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseEgy vagy tbb MIME tpus kijellse utn az als keretben llthat be az adott tpusokhoz elvrt mqvelet.lSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBase&Azonosak kijellse Select sameViewActionBaseJAz asztali alaprtelmezs alkalmazsa"Use Desktop preferences by defaultViewActionBasejelenlegi rtk current valueViewActionBase<Keress regulris kifejezssel Search regexpWebcache6Webes gyorstr szerkesztseWebcache editorWebcacheURL msolsaCopy URL WebcacheEdit(A kijelltek trlseDelete selection WebcacheEdithAz indexelQ fut. A webes gyorstr nem szerkeszthetQ.-Indexer is running. Can't edit webcache file. WebcacheEditAz indexelQ llapota ismeretlen. A webes gyorstr nem szerkeszthetQ.0Unknown indexer state. Can't edit webcache file. WebcacheEditA webes gyorstr mdosult. Ezen ablak bezrsa utn indtani kell az indexelQt.RWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelURLUrl WebcacheModelA Firefoxszal ltogatott oldalak indexelse<br>(a Firefox Recoll kiegsztQjt is telepteni kell)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin)confgui::ConfBeaglePanelW|A mret elrsekor a legkorbbi bejegyzsek trlQdnek.<br>Csak a nvelsnek van haszna, mivel cskkentskor a mr ltezQ fjl nem lesz kisebb (csak egy rsze llandan kihasznlatlan marad).Entries will be recycled once the size is reached.
Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).confgui::ConfBeaglePanelW>A webes trol max. mrete (MB) Max. size for the web store (MB)confgui::ConfBeaglePanelW>A webes elQzmnyek feldolgozsaProcess the WEB history queueconfgui::ConfBeaglePanelWA ltogatott weblapok msolatt trol knyvtr neve.<br>Relatv elrsi t a belltsokat trol knyvtrhoz kpest rtendQ.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory.confgui::ConfBeaglePanelWBA weblapokat trol knyvtr neveWeb page store directory nameconfgui::ConfBeaglePanelW>A belltfjl rsa sikertelenCan't write configuration fileconfgui::ConfIndexWTallzsChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW<p>Automatikusan klnbzQnek tekinti a kis-s nagybetqket, ha az elsQ karakter kivtelvel brhol tartalmaz nagybetqt a kifejezs. Egybknt a keresQnyelv <i>C</i> mdostjval rhetQ el ugyanez.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity.confgui::ConfSearchPanelW<p>Automatikusan klnbzQnek tekinti az kezetes betqket az kezet nlkli prjuktl, ha tartalmaz kezetes betqt a kifejezs (az unac_except_trans kivtelvel). Egybknt a keresQnyelv <i>D</i> mdostjval rhetQ el ugyanez.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity.confgui::ConfSearchPanelW<p>Egy sz toldalkainak maximlis szma (pl. helyettestQ karakterek hasznlatakor). Az alaprtelmezett 10 000 elfogadhat rtk, s elkerlhetQ vele a felhasznli fellet idQleges vlaszkptelensge is.

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.confgui::ConfSearchPanelWEgy Xapian keresshez felhasznlhat elemi felttelek maximlis szma. Nha a szavak toldalkolsa szorz hats, ami tlzott memriahasznlathoz vezethet. Az alaprtelmezett 100 000 a legtbb esetben elegendQ, de nem is tmaszt klnleges ignyeket a hardverrel szemben.5

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.confgui::ConfSearchPanelW\Kis-s nagybetqk automatikus megklnbztetse$Automatic character case sensitivityconfgui::ConfSearchPanelW:Automatikus kezetrzkenysg Automatic diacritics sensitivityconfgui::ConfSearchPanelWFA Xapian felttelek maximlis szmaMaximum Xapian clauses countconfgui::ConfSearchPanelW8A toldalkok maximlis szmaMaximum term expansion countconfgui::ConfSearchPanelWAz indexelendQ MIME tpusok listja.<br>Csak ezek a tpusok kerlnek az indexbe. Rendesen res s inaktv.eAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactiveconfgui::ConfSubPanelW&Kizrt MIME tpusokExclude mime typesconfgui::ConfSubPanelW,A tl hossz ideig fut klsQ szqrQk lelltsa Nha elQfordul (pl. postscript esetn), hogy a szqrQ vgtelen ciklusba kerl. -1 esetn nincs korlt. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. confgui::ConfSubPanelW8Minden knyvtrra vonatkozikGlobalconfgui::ConfSubPanelWHa be van lltva (nem egyenlQ -1), akkor a szvegfjlok indexelse ilyen mretq darabokban trtnik. Ez segtsget nyjt a nagyon nagy mretq szvegfjlokban (pl. naplfjlok) val keresshez.If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files).confgui::ConfSubPanelWHA tmrtett fjlok max. mrete (KB)Max. compressed file size (KB)confgui::ConfSubPanelW:A szqrQ max. futsi ideje (s)Max. filter exec. time (S)confgui::ConfSubPanelW6Szvegfjl max. mrete (MB)Max. text file size (MB)confgui::ConfSubPanelW\Ezek a MIME tpusok kimaradnak az indexelsbQlMime types not to be indexedconfgui::ConfSubPanelWMIME tpusokOnly mime typesconfgui::ConfSubPanelW2Szvegfjl lapmrete (KB)Text file page size (KB)confgui::ConfSubPanelWA tmrtett fjlok indexbe kerlsnek hatrrtke. -1 esetn nincs korlt. 0 esetn soha nem trtnik kicsomagols.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.confgui::ConfSubPanelWA szvegfjlok indexbe kerlsnek hatrrtke. -1 esetn nincs korlt. Az risira nQtt naplfjlok feldolgozsa kerlhetQ el gy.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index.confgui::ConfSubPanelWF<p>Az unac alaprtelmezetten eltvolt minden kezetet s sztbontja a ligatrkat. Az itt megadott kivtelekkel lehetQsg van adott karakterek esetn tiltani a mqveletet, ha a hasznlt nyelv ezt szksgess teszi. Ezen kvl elQrhatk tovbbi felbontand karakterek is. Az egyes elemeket egymstl szkzzel kell elvlasztani. Egy elem elsQ karaktere az eredetit, a tovbbi karakterek a vrt eredmnyt hatrozzk meg.l

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.confgui::ConfTopPanelW Az aspell nyelveAspell languageconfgui::ConfTopPanelW2Az adatbzis knyvtrneveDatabase directory nameconfgui::ConfTopPanelWA szvizsglban az aspell hasznlatnak mellQzse a hasonl szavak keressekor.<br>Hasznos, ha az aspell nincs teleptve vagy nem mqkdik.Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work. confgui::ConfTopPanelW6Indexrsi intervallum (MB)Index flush megabytes intervalconfgui::ConfTopPanelW A naplfjl neve Log file nameconfgui::ConfTopPanelW$A naplzs szintjeLog verbosity levelconfgui::ConfTopPanelW.Max. lemezhasznlat (%)Max disk occupation (%)confgui::ConfTopPanelW&Az aspell mellQzseNo aspell usageconfgui::ConfTopPanelW&Kizrt elrsi utak Skipped pathsconfgui::ConfTopPanelW(A sztQkpzs nyelveStemming languagesconfgui::ConfTopPanelWAz zenetek kirsnak a helye.<br>A stderr  a terminlra kldi az zeneteket.PThe file where the messages will be written.
Use 'stderr' for terminal outputconfgui::ConfTopPanelW,Az aspell sztr nyelve. pl. en  vagy hu ...<br>Ha nincs megadva, akkor az NLS krnyezet alapjn lesz belltva, ez ltalban megfelelQ. A rendszerre teleptett nyelveket az aspell config  parancs kiadsa utn a data-dir  knyvtrban tallhat .dat fjlokbl lehet megtudni.3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. confgui::ConfTopPanelWtEzen nyelvekhez kszljn sztvezQ s -toldalkol sztrIThe languages for which stemming expansion
dictionaries will be built.confgui::ConfTopPanelWA megadott knyvtrak rekurzv indexelse. Alaprtelmezett rtke a sajt knyvtr.LThe list of directories where recursive indexing starts. Default: your home.confgui::ConfTopPanelWAz indexet tartalmaz knyvtr neve.<br>Relatv elrsi t a belltsi knyvtrhoz kpest rtendQ. Alaprtelmezett: xapiandb .The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.confgui::ConfTopPanelWAz indexelQ ltal mellQzendQ knyvtrak nevei.<br>Hasznlhatk a helyettestQ karakterek. Csak az indexelQ hatkrbe esQ elrsi utakat lehet megadni (pl.: ha a kezdQ knyvtr a /home/felhasznalo  s a /home  egy link a /usr/home -ra, akkor helyes elrsi t a /home/felhasznalo/tmp* , de nem az a /usr/home/felhasznalo/tmp* ).#These are names of directories which indexing will not enter.
May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')confgui::ConfTopPanelWLSzzalkos lemezfoglals, melyen tllpve az indexelQ nem mqkdik tovbb (megelQzendQ az sszes szabad hely elfoglalst).<br>0 esetn nincs korlt (alaprtelmezett).This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).
0 means no limit (this is the default).confgui::ConfTopPanelWAz az adatmennyisg, melyet kt lemezre rs kztt az indexelQ feldolgoz.<br>Segthet kzben tartani a memriafoglalst. Alaprtelmezett: 10MBThis value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB confgui::ConfTopPanelWAz zenetek mennyisgnek szablyozsa,<br>a hibazenetekre szortkoztl a rszletes hibakeressig.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data.confgui::ConfTopPanelW KezdQ knyvtrakTop directoriesconfgui::ConfTopPanelWUnac kivtelekUnac exceptionsconfgui::ConfTopPanelW&Mgsem&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
uiPrefsDialogBase <PRE>

uiPrefsDialogBase<PRE> + wrap
 + wrapuiPrefsDialogBaseHa pldul a keresQkifejezs a [rolling stones] (kt sz), akkor helyettestQdik
a [rolling OR stones OR (rolling PHRASE 2 stones)] kifejezssel.
gy elQbbre kerlnek azok a tallatok, meylek a keresett szavakat
pontosan gy tartalmazzk, ahogyan meg lettek adva.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBase@A kivonat elemeinek elvlasztjaAbstract snippet separatoruiPrefsDialogBase Mindet bekapcsolActivate AlluiPrefsDialogBase Index hozzadsa	Add indexuiPrefsDialogBase6A vltoztatsok alkalmazsa
Apply changesuiPrefsDialogBaseFAutomatikus keress szkz hatsra-Auto-start simple search on whitespace entry.uiPrefsDialogBaselAz egyszerq keress automatikus bQvtse rszmondattal+Automatically add phrase to simple searchesuiPrefsDialogBase|Az automatikus rszmondatok szzalkos gyakorisgi hatrrtke.Autophrase term frequency threshold percentageuiPrefsDialogBaseRdigombok
Buttons PaneluiPrefsDialogBaseTallzsChooseuiPrefsDialogBase.A trstsok belltsaChoose editor applicationsuiPrefsDialogBaseTovbbi index felvtele a listra. Egy Recoll belltsi knyvtrat vagy egy Xapian indexknyvtrat kell megadni.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBase^Bezrs az rtestsi terletre kilps helyett!Close to tray instead of exiting.uiPrefsDialogBase6Dtumformtum (strftime(3))Date format (strftime(3))uiPrefsDialogBase Mindet kikapcsolDeactivate AlluiPrefsDialogBaseA szqrQk megjelenthetQk rdigombokkal, legrdlQ listban az eszkztron vagy menbenQDecide if document filters are shown as radio buttons, toolbar combobox, or menu.uiPrefsDialogBasepA Qt automatikus kiegsztsnek tiltsa a keresQmezQben*Disable Qt autocompletion in search entry.uiPrefsDialogBase0A vltoztatsok elvetseDiscard changesuiPrefsDialogBasezKivonatoljon akkor is, ha a dokumentum mr rendelkezik ezzel?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBasePrbljon-e tartalmi kivonatot kszteni a keresQszavak alapjn a tallati lista elemeihez?
Nagy dokumentumok esetn lass lehet.zDo we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBase0A szqrQvlaszt stlusa:Document filter choice style:uiPrefsDialogBase*Dinamikus kivonatolsDynamically build abstractsuiPrefsDialogBaseHA tallatok lapjnak fejlcformtuma#Edit result page html header insertuiPrefsDialogBase:A tallatok bekezdsformtuma#Edit result paragraph format stringuiPrefsDialogBaseBekapcsolsEnableuiPrefsDialogBaseKlsQ indexekExternal IndexesuiPrefsDialogBaserEgy olyan gyakorisgi hatrrtk, mely felett az adott szavak kihagyandk a rszmondatokbl.
Rszmondatkeresskor a gyakori szavak a teljestmnybeli problmk fQ okai.
A kihagyott szavak laztjk a rszek kzti kapcsolatot s gyengtik az automatikus rszmondat hatsfokt.
Az alaprtelmezett rtk 2 (szzalk).Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBase<Tbbszrs tallatok elrejtseHide duplicate results.uiPrefsDialogBaseBA keresQszavak kiemelsnek szneHighlight color for query termsuiPrefsDialogBaseA klnbzQ nevq, de azonos tartalm tallatokbl csak egy jelenjen megXIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseA PRE tagok kzti sorok nincsenek trve.
BR tag estn a behzsok elveszhetnek.
A PRE+wrap valsznqleg a legjobb vlaszts.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBaseRAz elQnzeti kiemels korltja (megabyte)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseMenMenuuiPrefsDialogBase6A tallatok szma laponknt"Number of entries in a result pageuiPrefsDialogBaseAz <i>rdemi rszek</i> ablak tartalmnak stlust ler fjl kivlasztsaAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBaseZA tallati lista betqkszletnek kivlasztsa-Opens a dialog to select the result list fontuiPrefsDialogBaseZA megjelens stlust ler fjl kivlasztsa-Opens a dialog to select the style sheet fileuiPrefsDialogBase,Elrsi t talaktsaPaths translationsuiPrefsDialogBase^Az egyszerq szvegbQl alkotott HTML sor stlusaPlain text to HTML line styleuiPrefsDialogBaseVAz elQnzetben HTML egyszerq szveg helyett&Prefer Html to plain text for preview.uiPrefsDialogBaseTKeresQnyelvi mgikus fjlnvkiterjesztsek(Query language magic file name suffixes.uiPrefsDialogBase6A rendezsi llapot mentseRemember sort activation state.uiPrefsDialogBasehTrls a listbl. Az index a lemezrQl nem trlQdik.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase$A kijellt trlseRemove selecteduiPrefsDialogBase"A kivonat cserje Replace abstracts from documentsuiPrefsDialogBaseAlaphelyzetResetuiPrefsDialogBase~Az <i>rdemi rszek</i> ablak stlusnak alaphelyzetbe lltsa Resets the Snippets window styleuiPrefsDialogBaseA tallati lista betqkszletnek rendszerbeli alaprtelmezsre lltsa1Resets the result list font to the system defaultuiPrefsDialogBaseXA stlus visszalltsa az alaprtelmezettre!Resets the style sheet to defaultuiPrefsDialogBaseTallati listaResult ListuiPrefsDialogBase:A tallati lista betqkszleteResult list fontuiPrefsDialogBase0Keress minden letskorSearch as you type.uiPrefsDialogBase(Keressi belltsokSearch parametersuiPrefsDialogBase8Ikon az rtestsi terletenShow system tray icon.uiPrefsDialogBase\Ideiglenes fjlok megnyitsakor figyelmeztets)Show warning when opening temporary file.uiPrefsDialogBaseHCSS az <i>rdemi rszek</i> ablakhozSnippets window CSS fileuiPrefsDialogBasenAz sszetett keress ablaka is legyen nyitva indulskor'Start with advanced search dialog open.uiPrefsDialogBaseHAz egyszerq keress mdja indulskorStart with simple search modeuiPrefsDialogBase(A sztQkpzs nyelveStemming languageuiPrefsDialogBaseStluslapStyle sheetuiPrefsDialogBaseSzinonmafjl
Synonyms fileuiPrefsDialogBaseFAz kivonat krnyezQ szavainak szma Synthetic abstract context wordsuiPrefsDialogBase6A kivonat mrete (karakter)$Synthetic abstract size (characters)uiPrefsDialogBaseEzen mret felett az elQnzetben nem alkalmaz kiemelst (tl lass)CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseSzavak listja, melyek keresQszknt megadva
automatikusan ext:xxx keresQnyelvi kifejezss alakttatnakbThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase$A kijellt vltsaToggle selecteduiPrefsDialogBaseLegrdlQ listaToolbar ComboboxuiPrefsDialogBase(Felhasznli felletUser interfaceuiPrefsDialogBaseBelltsokUser preferencesuiPrefsDialogBaseAz <i>rdemi rszek</i> ablak tartalmra alkalmazand stlus.<br>A tallati lista fejlce az <i>rdemi rszek</i> ablakban is megjelenik.User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_ru.qm0000644000175000017500000033175013566424763014272 00000000000000VEVE:fkf"lx=qty]SjSE&$—+2'H% (i*%\*0/+L+f!+/>1:dAVnFBr,GG H6H`H6HJ+J+8sJ6 J6bLbMz=Ph SHSZ4T5WTsWXZ0[ %[f3=\Segw+j^ssv8vuvAzϳzjϥ!Ra 0+́^fJ jfS`$R 2&4(S_nUԟnNgt: En'..a3 8( O.+1*0,+s;sQn>.cKDo2_N,XMXuO`^ hwlt:nYwO wwa[HQvr*QLcjxC+"ͺF: L<=SI6$؅K K}p#\v#YvTw 5w 56w 5:Bw 5XwU"Eis".65;U ֳS6GMf3WAͼuggnH׸~;UـvUesg9 !D&~'.N/=d-?d$G$ΛJUYJUY!YO[u _nL|JEuʷʗ>ʗ ^ cLxYx*Jg_=c A%UAB l V'l<.8d81M;[I<%LcNBaX4e 2'lPJvhCwTWI(Ppb"3 Z,IxIbn*n?fWeflfu  8,xWwWJG¾y%t\y% ŠlM΄: #BX|IWs/̔ i$\i= -Zo-"5)T>jPB9$xBShp#J`XMz$8" 9-օ\UKidp hru9!v2vƒ՜{^i!T#K4*U %Cnd<7$sjn{^-[XXnDРw);BCsR!v],#++^3RQ7I^<~TkCjFW#:FN{H:"R_gu7ap,f ypiTM |N7wϗ"n <H݈M l|Gnk`ÓtÓt>ȍɆt]8w)Mi= 0#TO En@%<)4*snw],y&;:uJ¢0JnpQYl}};E'c05gjcchO94@JʶP #!COc 8䴥Ѡ?I- TI k Nk H" -(x" 7Cp 9Zy8 ;3 D" K ]# cC) k lM{  qDK=  PY { 9K ÛCE ü> *3 32 e t i? Dw *N ͹ :^VW c 5 *Ru +</ 6ۿ >Vg G.~ S`2 `P ` aE cE d8` y Im IK s) 6 VT-/ C "W   6  E  ԅ ^œ yet% TH V ,xC =! KjH Xb X h Ld H t Σ \ r) ڻ ٷ ۷ uZ ?X  Vd9 4_ d YF_ 'И +bC .ʢ# / 49 97 9ɝ= L*% P֙Ӥ RVL T#| V. \iCj ], `F  h%L vD {lB !Y !Y# W |   Ҭt }݂ e h i# z  S q ~* M Nbw G m< #D_ 'R - 7Q\ 8n F OEW X^B ]y ]da ^ ^F u0 y\ y~ 3( ȩw u75 uXE P P 5dm 5 s 7! i u ՒwC H ` Q5. £R qѾ'{if%nî.ʢh/.3Uhm6G8bPW9Q<{XQ~>SY~s[s\te3vg3hx5p~|? @y!qZcm&c|.|Bx' lLiA5< CA;>28O< All clauses AdvSearchN1><C CA;>28N Any clause AdvSearchH525@=K9 <=>68B5;L 2 D8;LB@5 @07<5@0$Bad multiplier suffix in size filter AdvSearch <5480media AdvSearchA>>1I5=85message AdvSearch 4@C3>5other AdvSearch?@575=B0F8O presentation AdvSearchB01;8F0 spreadsheet AdvSearchB01;8FK spreadsheets AdvSearch B5:ABtext AdvSearch B5:ABKtexts AdvSearch<----- A5 <----- All AdvSearchBase"<----- K45;5==K5 <----- Sel AdvSearchBase >1028BL CA;>285 Add clause AdvSearchBase!;>6=K9 ?>8A:Advanced search AdvSearchBaseA5 ----> All ----> AdvSearchBaseA5 70?>;=5==K5 ?>;O A?@020 1C4CB >1J548=5=K ;>38G5A:8<  (A5 CA;>28O) 8;8  (N1>5 CA;>285). <br> ?>;OE B8?0 N1K5, A5 8;8 57 4>?CAB8<K A>G5B0=8O ?@>ABKE A;>2 8 D@07K, 70:;NGQ==K5 2 42>9=K5 :02KG:8.<br>CABK5 ?>;O 83=>@8@CNBAO.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBase 17>@Browse AdvSearchBase> :0B53>@8O< By categories AdvSearchBase::;NG8BL D8;LB@>20=85 ?> 40B5'Check this to enable filtering on dates AdvSearchBase6$8;LB@>20BL ?> B8?0< D09;>2,Check this to enable filtering on file types AdvSearchBase@:;NG8BL D8;LB@>20=85 ?> @07<5@C'Check this to enable filtering on sizes AdvSearchBaseLA?>;L7>20BL :0B53>@88, 0 =5 B8?K MIME;Check this to use file categories instead of raw mime types AdvSearchBase0:@KBLClose AdvSearchBase#40;8BL CA;>285 Delete clause AdvSearchBase^#:070BL 8<O :0B0;>30 25@E=53> C@>2=O 4;O ?>8A:0Enter top directory for search AdvSearchBase $8;LB@Filter AdvSearchBase&$8;LB@>20BL ?> 40B5 Filter dates AdvSearchBase,$8;LB@>20BL ?> @07<5@C Filter sizes AdvSearchBase >8A:Find AdvSearchBaseAFrom AdvSearchBase03=>@8@C5<K5 B8?K D09;>2Ignored file types AdvSearchBase1@0B8BLInvert AdvSearchBase0:A8<C< Max. Size AdvSearchBase0:A8<0;L=K9 @07<5@. >?CA:05BAO 8A?>;L7>20=85 <=>68B5;59 :/, </, 3/4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase8=8<C< Min. Size AdvSearchBase8=8<0;L=K9 @07<5@. >?CA:05BAO 8A?>;L7>20=85 <=>68B5;59 :/, </, 3/4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase,3@0=8G8BL B8?K D09;>2Restrict file types AdvSearchBasef3@0=8G8BL @57C;LB0BK ?>8A:0 D09;0<8 2 ?>4:0B0;>35:%Restrict results to files in subtree: AdvSearchBase>!45;0BL ?0@0<5B@>< ?> C<>;G0=8NSave as default AdvSearchBaseRA:0BL <br>4>:C<5=BK,<br>C4>2;5B2>@ONI85:'Search for
documents
satisfying: AdvSearchBaseA:0BL A@548Searched file types AdvSearchBase K45;5==K5 ----> Sel -----> AdvSearchBase0G0BL ?>8A: Start Search AdvSearchBase?>To AdvSearchBase|<p>2B><0B8G5A:8 2:;NG05B CGQB @538AB@0, 5A;8 AB@>:0 ?>8A:0 A>45@68B 703;02=K5 1C:2K (:@><5 ?5@2>9 1C:2K).  ?@>B82=>< A;CG05 8A?>;L7C9B5 O7K: 70?@>A>2 8 <>48D8:0B>@ <i>C</i> CGQB0 @538AB@0.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity. ConfIndexW<p>2B><0B8G5A:8 2:;NG05B CGQB 480:@8B8G5A:8E 7=0:>2, 5A;8 AB@>:0 ?>8A:0 A>45@68B 480:@8B8G5A:85 7=0:8 (:@><5 unac_except_trans).  ?@>B82=>< A;CG05 8A?>;L7C9B5 O7K: 70?@>A>2 8 <>48D8:0B>@ <i>D</i> 4;O CGQB0 480:@8B8G5A:8E 7=0:>2.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity. ConfIndexW<p>0:A8<0;L=>5 G8A;> >4=>:>@5==KE A;>2 4;O >4=>3> A;>20 (=0?@8<5@, ?@8 8A?>;L7>20=88 <0A>:). !B0=40@B=>5 7=0G5=85 10 000 O2;O5BAO @07C<=K< 8 ?><>65B 871560BL A8BC0F89, :>340 70?@>A :065BAO 7028AH8< ?@8 ?5@51>@5 A?8A:0 A;>2.

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. ConfIndexWf<p>0:A8<0;L=>5 G8A;> M;5<5=B0@=KE CA;>289, 4>102;O5<KE : 70?@>AC Xapian.  =5:>B>@KE A;CG0OE @57C;LB0B ?>8A:0 >4=>:>@5==KE A;>2 <>65B 1KBL 871KB>G=K< 8 70=OBL A;8H:>< 1>;LH>9 >1JQ< ?0<OB8. !B0=40@B=>5 7=0G5=85 2 100 000 4>AB0B>G=> 4;O 1>;LH8=AB20 A;CG052 8 ?>4E>48B 4;O A>2@5<5==KE 0??0@0B=KE :>=D83C@0F89.5

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfIndexW<p>-B> 8A:;NG5=8O 4;O <5E0=87<0 unac, :>B>@K9 ?> C<>;G0=8N >B1@0AK205B 2A5 480:@8B8G5A:85 7=0:8 8 ?@>2>48B :0=>=8G5A:CN 45:><?>78F8N. >6=> ?5@5>?@545;8BL <5E0=87< C40;5=8O =04AB@>G=KE 7=0:>2 4;O >B45;L=KE A8<2>;>2 8;8 4>1028BL ?@028;0 45:><?>78F88 (=0?@8<5@, 4;O ;830BC@).  :064>9, >B45;Q==>9 70?OB>9 70?8A8 ?5@2K9 A8<2>; O2;O5BAO 8AE>4=K<, 0 >AB0;L=K5  53> 8=B5@?@5B0F8O.l

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. ConfIndexW/7K: aspellAspell language ConfIndexW82B><0B8G5A:89 CGQB @538AB@0$Automatic character case sensitivity ConfIndexWR2B><0B8G5A:89 CGQB 480:@8B8G5A:8E 7=0:>2 Automatic diacritics sensitivity ConfIndexWJ52>7<>6=> 70?8A0BL D09; :>=D83C@0F88Can't write configuration file ConfIndexW&0B0;>3 107K 40==KEDatabase directory name ConfIndexWB:;NG05B 8A?>;L7>20=85 aspell 4;O A>740=8O 20@80=B>2 =0?8A0=8O 2 >1>7@520B5;5 B5@<8=>2.<br> >;57=>, 5A;8 aspell >BACBAB2C5B 8;8 =5 @01>B05B. Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexW:;NG05B 8=45:A8@>20=85 AB@0=8F, ?@>A<>B@5==KE 2 Firefox.<br>(B@51C5BAO CAB0=>2:0 4>?>;=5=8O Recoll 4;O Firefox)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin) ConfIndexW0?8A8 1C4CB C40;5=K ?@8 4>AB865=88 <0:A8<0;L=>3> @07<5@0 E@0=8;8I0.<br>&5;5A>>1@07=> C25;8G8BL @07<5@, B0: :0: C<5=LH5=85 7=0G5=8O =5 ?>2;5GQB CA5G5=85 ACI5A2CNI53> D09;0 (2 @57C;LB0B5 ?@8254QB B>;L:> : @0AE>4>20=8N ?@>AB@0=AB20 2?CABCN).Entries will be recycled once the size is reached.
Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). ConfIndexW1I85 ?0@0<5B@KGlobal parameters ConfIndexWF=B5@20; A1@>A0 40==KE 8=45:A0 ()Index flush megabytes interval ConfIndexW"'0AB=K5 ?0@0<5B@KLocal parameters ConfIndexW$09; 6C@=0;0 Log file name ConfIndexW6#@>25=L ?>4@>1=>AB8 6C@=0;0Log verbosity level ConfIndexWL0:A8<0;L=K9 @07<5@ 251-E@0=8;8I0 () Max. size for the web store (MB) ConfIndexWJ0:A8<0;L=>5 G8A;> Xapian-?@54;>65=89Maximum Xapian clauses count ConfIndexWH0:A8<0;L=>5 G8A;> >4=>:>@5==KE A;>2Maximum term expansion count ConfIndexW,5 8A?>;L7>20BL aspellNo aspell usage ConfIndexWN1@010BK20BL ?@>A<>B@5==K5 251-AB@0=8FKProcess the WEB history queue ConfIndexW 0@0<5B@K ?>8A:0Search parameters ConfIndexW@>?CA:0BL Skipped paths ConfIndexW*/7K:8 A> A;>2>D>@<0<8Stemming languages ConfIndexW$09;, :C40 1C4CB 70?8AK20BLAO A>>1I5=8O.<br>A?>;L7C9B5 'stderr' 4;O 2K2>40 2 B5@<8=0;PThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexW</7K: A;>20@O aspell. K3;048B :0: en 8;8 ru...<br>A;8 7=0G5=85 =5 CAB0=>2;5=>, 4;O 53> @0AGQB0 1C4eB 8A?>;L7>20=s ?0@0<5B@K A8AB5<K (;>:0;L). 'B>1K C7=0BL, :0:85 ?0@0<5B@K 4>ABC?=K 2 A8AB5<5, =015@8B5 aspell config 8 ?@>25@LB5, :0:85 .dat-D09;K A>45@60BAO 2 :0B0;>35 'data-dir'. 3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.  ConfIndexW/7K:8, 4;O :>B>@KE 1C4CB ?>AB@>5=K<br>A;>20@8 >4=>:>@5==KE A;>2.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexW!?8A>: :0B0;>3>2, 345 =0G8=05BAO @5:C@A82=>5 8=45:A8@>20=85. > C<>;G0=8N: 4><0H=89 :0B0;>3.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexW<O :0B0;>30 E@0=5=8O ?@>A<>B@5==KE 251-AB@0=8F.<br>CBL C:07K205BAO >B=>A8B5;L=> :0B0;>30 :>=D83C@0F88 8 =5 O2;O5BAO 01A>;NB=K<.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexW$<O :0B0;>30, 2 :>B>@>< E@0=8BAO 8=45:A<br>CBL C:07K205BAO >B=>A8B5;L=> :0B0;>30 :>=D83C@0F88 8 =5 O2;O5BAO 01A>;NB=K<. > C<>;G0=8N: xapiandb.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. ConfIndexW>;=K9 ?CBL : 48@5:B>@88, :>B>@0O =5 1C45B 70B@03820BLAO ?@8 8=45:A8@>20=88. <br>>65B A>45@60BL <0A:8. 0?8A8 4>;6=K A>2?040BL A ?CBO<8, :>B>@K5 2848B 8=45:A0B>@ (=0?@8<5@, 5A;8 topdirs 2:;NG05B /home/me, 0 /home =0 A0<>< 45;5 254QB : /usr/home, ?@028;L=>9 70?8ALN skippedPath 1C45B /home/me/tmp*, 0 =5 /usr/home/me/tmp*)BThese are pathnames of directories which indexing will not enter.
Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') ConfIndexW@>F5=B 70=OB>3> ?@>AB@0=AB20 =0 48A:5  >1I55 ?@>AB@0=AB2> 48A:0, 70=OB>5 =5 B>;L:> 8=45:A><,  ?@8 :>B>@>< 8=45:A8@>20=85 7025@H8BAO >H81:>9 8 ?@5:@0B8BAO.<br>> C<>;G0=8N 7=0G5=85 0 A=8<05B ;N1K5 >3@0=8G5=8O.This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.
The default value of 0 removes any limit. ConfIndexWX-B> 7=0G5=85 >?@545;O5B :>;8G5AB2> 40==KE, 8=45:A8@C5<<KE <564C A1@>A0<8 =0 48A:.<br>><>305B :>=B@>;8@>20BL 8A?>;L7>20=85 ?0<OB8 8=45:A0B>@><. =0G5=85 ?> C<>;G0=8N: 10 This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexW-B> 7=0G5=85 >?@545;O5B ?>4@>1=>ABL ?>ABC?0NI8E A>>1I5=89,<br>>B >H81>: 4> >B;04>G=KE 40==KE.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexW00B0;>38 25@E=53> C@>2=OTop directories ConfIndexWA:;NG5=8O unacUnac exceptions ConfIndexWB<O :0B0;>30 E@0=5=8O 251-AB@0=8FWeb page store directory name ConfIndexWAG5@?K20NI89 ?5@5G5=L 8=45:A8@C5<KE B8?>2 MIME.<br>@C385 B8?K 8=45:A8@>20BLAO =5 1C4CB. 1KG=> ?CAB 8 =50:B825=eAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactive ConfSubPanelW&A:;NG8BL MIME-B8?KExclude mime types ConfSubPanelW 01>B0 2=5H=8E D8;LB@>2, 4;OI0OAO 4>;LH5 C:070==>3> 2@5<5=8, 1C45B ?@5@20=0. @8<5=O5BAO 4;O @54:8E A;CG052 (=0?@8<5@, A D8;LB@>< postscript), :>340 2>7=8:05B 70F8:;820=85 D8;LB@0 ?@8 >1@01>B:5 :0:>3>-B> 4>:C<5=B0. #AB0=>28B5 7=0G5=85 -1, GB>1K A=OBL >3@0=8G5=85. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.  ConfSubPanelW 1I55Global ConfSubPanelWA;8 MB> 7=0G5=85 CAB0=>2;5=> (B.5. =5 @02=> -1), B> ?@8 8=45:A8@>20=88 B5:AB>2K5 D09;K @071820NBAO =0 1;>:8 A>>B25BAB2CNI53> @07<5@0. 0==K9 ?0@0<5B@ ?>;575= ?@8 2K?>;=5=88 ?>8A:0 2 >G5=L 1>;LH8E B5:AB>2KE D09;0E (=0?@8<5@, D09;0E 6C@=0;>2).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWL0:A8<0;L=K9 @07<5@ A60B>3> D09;0 (KB)Max. compressed file size (KB) ConfSubPanelWR0:A8<0;L=K9 @07<5@ B5:AB>2>3> D09;0 (MB)Max. text file size (MB) ConfSubPanelWl"8?K MIME, 8=45:A8@>20=85 :>B>@KE ?@>2>48BLAO =5 1C45BMime types not to be indexed ConfSubPanelW ">;L:> MIME-B8?KOnly mime types ConfSubPanelWJ 07<5@ AB@0=8FK B5:AB>2>3> D09;0 (KB)Text file page size (KB) ConfSubPanelW$-B> 7=0G5=85 CAB0=02;8205B ?@545;L=K9 @07<5@ A60BKE D09;>2, :>B>@K5 1C4CB >1@010BK20BLAO. =0G5=85 -1 A=8<05B >3@0=8G5=85, 0 >B:;NG05B @0A?0:>2:C.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelW-B> 7=0G5=85 CAB0=02;8205B ?@545;L=K9 @07<5@ B5:AB>2KE D09;>2, :>B>@K5 1C4CB >1@010BK20BLAO. =0G5=85 -1 A=8<05B >3@0=8G5=85.  5:><5=4C5BAO 8A?>;L7>20BL 4;O 8A:;NG5=8O D09;>2 6C@=0;0 1>;LH>3> @07<5@0 87 ?@>F5AA0 8=45:A8@>20=8O.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">=45:A8@>20=85 <span style=" font-weight:600;">Recoll</span> ?> @0A?8A0=8N (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">064>5 ?>;5 <>65B A>45@60BL <0A:C (*), 548=8G=>5 G8A;>2>5 7=0G5=85, @0745;Q==K9 70?OBK<8 A?8A>: (1,3,5) 8;8 480?07>= G8A5; (1-7). -B8 ?>;O 1C4CB 8A?>;L7>20=K <span style=" font-style:italic;">:0: 5ABL</span> 2 D09;5 crontab, B0:65 <>6=> C:070BL =5>1E>48<K5 ?0@0<5B@K 2 A0<>< D09;5, A<. 
crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />0?@8<5@, 5A;8 225AB8 7=0: <span style=" font-family:'Courier New,courier';">*</span> 2 ?>;5 <span style=" font-style:italic;">=8 =545;8</span>, <span style=" font-family:'Courier New,courier';">12,19</span>  2 ?>;5 <span style=" font-style:italic;">'0AK</span> 8 <span style=" font-family:'Courier New,courier';">15</span>  2 ?>;5 <span style=" font-style:italic;">8=CBK</span>, 8=45:A8@>20=85 1C45B ?@>872>48BLAO 5654=52=> 2 12:15 8 19:15.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"> 0A?8A0=85 A >G5=L G0ABK<8 70?CA:0<8 <>65B >:070BLAO <5=55 MDD5:B82=K<, G5< 8=45:A8@>20=85 2 @50;L=>< 2@5<5=8.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 AM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">;O >AB0=>2:8 02B><0B8G5A:>3> 845:A8@>20=8O ?> @0A?8A0=8N =06<8B5 <span style=" font-style:italic;">B:;NG8BL</span>, 4;O 70?CA:0  <span style=" font-style:italic;">:;NG8BL</span>, 4;O >B<5=K 2=5AQ==KE 87<5=5=89  <span style=" font-style:italic;">B<5=0</span>.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolW,0AB@>9:0 7040=89 Cron Cron Dialog CronToolWZ=8 =545;8 (* 8;8 0-7, 0 8;8 7  2>A:@5A5=L5))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWB:;NG8BLDisable CronToolW:;NG8BLEnable CronToolWnH81:0 CAB0=>2:8 70?8A8 cron. 525@=K9 A8=B0:A8A ?>;59?3Error installing cron entry. Bad syntax in fields ? CronToolW"'0AK (* 8;8 0-23)Hours (* or 0-23) CronToolW>E>65, GB> 4;O recollindex 5ABL 2@CG=CN 8A?@02;5==K5 70?8A8, @540:B8@>20=85 crontab =52>7<>6=>PIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolW8=CBK (0-59)Minutes (0-59) CronToolW 80;>3Dialog EditDialog&H81:0 :>=D83C@0F88 Config error EditTrans>:0;L=K9 ?CBL Local path EditTrans 7=0G0;L=K9 ?CBL Original path EditTransAE>4=K9 ?CBL Source path EditTrans>1028BLAdd EditTransBase B<5=0Cancel EditTransBase#40;8BLDelete EditTransBase&>@@5:B8@>2:0 ?CB59Path Translations EditTransBase!>E@0=8BLSave EditTransBaseK15@8B5 B8?K D09;>2 8 8A?>;L7C9B5 :=>?:8 C?@02;5=8O =865, GB>1K 87<5=8BL ?>@O4>: >1@01>B:8 D09;>2kSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBase2040BL :>@@5:B8@>2:C 4;O Setting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">>E>65, GB> 8=45:A 4;O MB>9 :>=D83C@0F88 =5 ACI5AB2C5B.</span><br /><br />;O 8=45:A8@>20=8O B>;L:> 4><0H=53> :0B0;>30 A =01>@>< C<>;G0=89 =06<8B5 :=>?:C <span style=" font-style:italic;">0?CAB8BL 8=45:A8@>20=85</span>. 5B0;L=CN =0AB@>9:C <>6=> 1C45B ?@>25AB8 ?>765. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A;8 B@51C5BAO 1>;LH5 :>=B@>;O, 2>A?>;L7C9B5AL ?@8254Q==K<8 =865 AAK;:0<8 4;O =0AB@>9:8 ?0@0<5B@>2 8 @0A?8A0=8O 8=45:A8@>20=8O.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">5@59B8 : MB8< 8=AB@C<5=B0< ?>74=55 <>6=> G5@57 <5=N <span style=" font-style:italic;">0AB@>9:0</span>.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialog@0AB@>9:0 ?5@2>3> 8=45:A8@>20=8OFirst indexing setupFirstIdxDialog00AB@>9:0 8=45:A8@>20=8OIndexing configurationFirstIdxDialog2 0A?8A0=85 8=45:A8@>20=8OIndexing scheduleFirstIdxDialog00?CAB8BL 8=45:A8@>20=85Start indexing nowFirstIdxDialogl45AL <>6=> C:070BL, :0:85 :0B0;>38 B@51C5BAO 8=45:A8@>20BL, 0 B0:65 =0AB@>8BL B0:85 ?0@0<5B@K, :0: ?CB8 : D09;0<-8A:;NG5=8O< 8;8 8E 8<5=0, 8A?>;L7C5<K5 ?> C<>;G0=8N :>48@>2:8 8 B.4.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialog(45AL <>6=> 2K1@0BL @568< 8=45:A8@>20=8O: ?> @0A?8A0=8N 8;8 2 @50;L=>< 2@5<5=8, 0 B0:65 =0AB@>8BL @0A?8A0=85 8=45:A8@>20=8O (A 8A?>;L7>20=85< cron).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog%1 =5 =0945=. %1 not found.FragButs%1: %2%1: %2FragButs"$@03<5=BK 70?@>A0Query FragmentsFragButs <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">=45:A8@>20=85 <span style=" font-weight:600;">Recoll</span> <>65B @01>B0BL ?>AB>O==>, 8=45:A8@CO 87<5=ONI85AO D09;K, 8;8 70?CA:0BLAO 48A:@5B=> G5@57 >?@545;Q==K5 ?@><56CB:8 2@5<5=8. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"> 5:><5=4C5BAO >7=0:><8BLAO A @C:>2>4AB2>< ?>;L7>20B5;O ?@>3@0<<K, GB>1K 2K1@0BL =081>;55 ?>4E>4OI89 @568< @01>BK (=06<8B5 F1 4;O 2K7>20 A?@02:8). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">-B>B 8=AB@C<5=B ?>72>;O5B 2K1@0BL, 1C45B ;8 8=45:A8@>20=85 ?@>872>48BLAO ?> @0A?8A0=8N 8;8 2 @50;L=>< 2@5<5=8 ?@8 2E>45 2 A8AB5<C (8;8 >10 20@80=B0 A@07C, GB> 2@O4 ;8 8<55B A<KA;). </p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedW. 0A?8A0=85 70?CA:0 cronCron scheduling IdxSchedW45AL <>6=> 2K1@0BL, =C6=> ;8 =0G8=0BL 8=45:A8@>20=85 2 @50;L=>< 2@5<5=8 ?@8 2E>45 2 A8AB5<C (B>;L:> 4;O 8=45:A0 ?> C<>;G0=8N).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedWF0AB@>9:0 @0A?8A0=8O 8=45:A8@>20=8OIndex scheduling setup IdxSchedWP0?CA: 8=45:A8@>20=8O 2 @50;L=>< 2@5<5=8Real time indexing start up IdxSchedW-B>B 8=AB@C<5=B ?>72>;O5B 2K1@0BL, 2 :0:>5 2@5<O 70?CA:0BL 8=45:A8@>20=85, 0 B0:65 A45;0BL 70?8AL 2 crontab._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedW 80;>3Dialog ListDialogGroupBoxGroupBox ListDialogf$09; 8AB>@88 ?>2@564Q=, ?@>25@LB5 8;8 C40;8B5 53>: 6"history" file is damaged, please check or remove it: MainF0B0;>3  2 :>=D83C@0F88 =5 C:070= No db directory in configurationMain&!;54CNI89&NextPreview&@54K4CI89 &PreviousPreview&A:0BL: &Search for:Preview52>7<>6=> A:>=25@B8@>20BL 4>:C<5=B 2> 2=CB@5==55 ?@54AB02;5=85 4;O 0Can't turn doc into internal representation for Preview B<5=0CancelPreviewG8AB8BLClearPreview:!>740=85 B5:AB0 4;O ?@>A<>B@0Creating preview textPreview403@C7:0 B5:AB0 2 @540:B>@ Loading preview text into editorPreview$&! CGQB>< @538AB@0 Match &CasePreviewPBACBAB2C5B 2A?><>30B5;L=>5 ?@8;>65=85: Missing helper program: PreviewB:@KBLOpenPreview>?8@>20BLCopyPreviewTextEdit8=88 A3810 Fold linesPreviewTextEdit"!>E@0=OBL >BABC?KPreserve indentationPreviewTextEdit 5G0BLPrintPreviewTextEdit(5G0BL B5:CI53> 2840Print Current PreviewPreviewTextEdit2!>E@0=8BL 4>:C<5=B 2 D09;Save document to filePreviewTextEditK45;8BL 2AQ Select AllPreviewTextEdit>:070BL ?>;O Show fieldsPreviewTextEdit(>:070BL 87>1@065=85 Show imagePreviewTextEdit.>:070BL >A=>2=>9 B5:ABShow main textPreviewTextEdit><b>>;L7>20B5;LA:85 ?>4:0B0;>38Customised subtreesQObject>48@>2:0, :>B>@0O 1C45B 8A?>;L7>20=0 ?@8 GB5=88 D09;>2, 2 :>B>@KE :>48@>2:0 =5 C:070=0 O2=>, =0?@8<5@, G8AB> B5:AB>2KE D09;>2.<br>=0G5=85 ?> C<>;G0=8N =5 CAB0=>2;5=> 8 15@QBAO 87 ?0@0<5B@>2 A8AB5<K (;>:0;8).Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObject2>48@>2:0<br>?> C<>;G0=8NDefault
character setQObject<B:@K20BL A8<2>;8G5A:85 AAK;:8Follow symbolic linksQObjectB:@K20BL A8<2>;8G5A:85 AAK;:8 ?@8 8=45:A8@>20=88. > C<>;G0=8N 459AB285 =5 2K?>;=O5BAO 2> 871560=85 4C1;8@>20==>3> 8=45:A8@>20=8OTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject,3=>@8@>20BL >:>=G0=8OIgnored endingsQObject<=45:A8@>20BL 2A5 8<5=0 D09;>2Index all file namesQObject,=45:A8@>20BL 8<5=0 D09;>2, A>45@68<>5 :>B>@KE =52>7<>6=> >?@545;8BL 8;8 >1@01>B0BL (=58725AB=K9 8;8 =5?>445@68205<K9 B8? MIME). > C<>;G0=8N 2:;NG5=>}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject@>?CA:0BL Skipped namesQObject!?8A>: ?>4:0B0;>3>2 8=45:A8@C5<>3> 45@520,<br>: :>B>@K< 4>;6=K ?@8<5=OBLAO >A>1K5 ?0@0<5B@K. > C<>;G0=8N: ?CAB>.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectH:>=G0=8O 8<5= D09;>2, :>B>@K5 1C4CB 8=45:A8@>20BLAO B>;L:> ?> 8<5=8 (157 ?>?KB>: >?@545;5=8O MIME-B8?0, 157 @072>@0G820=8O D09;0, 157 8=45:A8@>20=8O A>45@68<>3>).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObject(01;>=K 8<Q= D09;>2 8;8 :0B0;>3>2, 8<5=0 :>B>@KE =5 A;54C5B 8=45:A8@>20BL.LThese are patterns for file or directory names which should not be indexed.QObject&B:@KBL&OpenQWidgetL&B:@KBL @>48B5;LA:89 4>:C<5=B/:0B0;>3&Open Parent document/folderQWidget&@>A<>B@&PreviewQWidget &0?8A0BL 2 D09;&Write to FileQWidget:52>7<>6=> 872;5GL 4>:C<5=B: Cannot extract document: QWidget8K15@8B5 B>;L:> >48= :0B0;>3Choose exactly one directoryQWidget*>?8@>20BL &8<O D09;0Copy &File NameQWidget>?8@>20BL &URL Copy &URLQWidget<52>7<>6=> ?@>G8B0BL :0B0;>3: Could not read directory: QWidgetL!>740BL 8;8 2K1@0BL :0B0;>3 A>E@0=5=8OCreate or choose save directoryQWidget009B8 &?>E>685 4>:C<5=BKFind &similar documentsQWidget0B:@KBL >:=> &D@03<5=B>2Open &Snippets windowQWidget"B:@KBL A ?><>ILN Open WithQWidgetT&@>A<>B@ @>48B5;LA:>3> 4>:C<5=B0/:0B0;>30Preview P&arent document/folderQWidget:0?CAB8BL 2K?>;=5=85 AF5=0@8O Run ScriptQWidget6!>E@0=8BL 2K45;5=85 2 D09;KSave selection to filesQWidget8>:070BL 2;>65==K5 4>:C<5=BKShow subdocuments / attachmentsQWidgetd5>6840==K9 :>=D;8:B 8<Q= D09;>2, >B<5=0 459AB28O.+Unexpected file name collision, cancelling.QWidget*>;LH5 =5 ?>:07K20BL.Do not show again.QxtConfirmationMessage<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">=45:A8@>20=85 ?@8 ?><>I8 <span style=" font-weight:600;">Recoll</span> <>65B 1KBL =0AB@>5=> :0: A5@28A, >1=>2;ONI89 8=45:A >4=>2@5<5==> A 87<5=5=85< D09;>2, B> 5ABL 2 @50;L=>< 2@5<5=8. @8 MB>< ?>AB>O==>5 >1=>2;5=85 8=45:A0 1C45B ?@>8AE>48BL 70 AGQB =5?@5@K2=>3> 8A?>;L7>20=8O A8AB5<=KE @5AC@A>2.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolWf"0:65 70?CAB8BL ?@O<> A59G0A A;C61C 8=45:A8@>20=8O.%Also start indexing daemon right now.RTIToolWh$09; 02B>70?CA:0 C40;Q=. @5:@0B8BL B5:CI89 ?@>F5AA?2Autostart file deleted. Kill current process too ?RTIToolW(52>7<>6=> A>740BL: Can't create: RTIToolW@5 C40;>AL 70?CAB8BL recollindexCould not execute recollindexRTIToolW#40;5=85 D09;0 Deleting fileRTIToolW#40;5=85:  Deleting: RTIToolWn2B><0B8G5A:89 70?CA: 8=45:A8@>20=8O 2 @50;L=>< 2@5<5=8"Real time indexing automatic startRTIToolW$B<5=0 02B>70?CA:0Removing autostartRTIToolW0<5=0 D09;0Replacing fileRTIToolW0<5=0:  Replacing: RTIToolW0?CA:0BL A;C61C 8=45:A8@>20=8O >4=>2@5<5==> A A50=A>< @01>G53> AB>;0..Start indexing daemon with my desktop session.RTIToolW@54C?@5645=85WarningRTIToolW< A 4>?>;=8B5;L=K< A>>1I5=85<:  with additional message: RclMain(2A5 O7K:8)(all languages)RclMain.(157 >4=>:>@5==KE A;>2) (no stemming)RclMain ?@>3@0<<5 About RecollRclMainA5AllRclMain525@=0O A?5F8D8:0F8O 4;O =0AB>;L=>3> ?@8;>65=8O %1: [%2] @>25@LB5 D09; .desktop?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMain525@=K5 ?CB8 Bad pathsRclMainH81:0 :><0=4=>9 AB@>:8 ?@>3@0<<K ?@>A<>B@0 %1: [%2] @>25@LB5 D09; mimeviewCBad viewer command line for %1: [%2] Please check the mimeview fileRclMainH52>7<>6=> ?>;CG8BL 4>ABC? : D09;C: Can't access file: RclMainB52>7<>6=> A>740BL >:=> ?@>A<>B@0Can't create preview windowRclMainl52>7<>6=> CAB0=>28BL D09; A8=>=><>2 (>H81:0 0=0;870?)&Can't set synonyms file (parse error?)RclMain:52>7<>6=> @0A?0:>20BL D09;: Can't uncompress file: RclMaind52>7<>6=> >1=>28BL 8=45:A: 8=45:A0B>@ C65 70?CI5=#Can't update index: indexer runningRclMainl52>7<>6=> 872;5GL 4>:C<5=B 8;8 A>740BL 2@5<5==K9 D09;0Cannot extract document or create temporary fileRclMainL52>7<>6=> =09B8 @>48B5;LA:89 4>:C<5=BCannot find parent documentRclMainZ52>7<>6=> 872;5GL A2545=8O > 4>:C<5=B5 87 +Cannot retrieve document info from databaseRclMain4K1>@ D09;0 4;O A>E@0=5=8OChoose file to saveRclMain0:@KB85ClosingRclMainN5 C40;>AL 703@C78BL A>E@0=Q==K9 70?@>ACould not load saved queryRclMain5 C40;>AL >B:@KBL 2=5H=89 8=45:A.  =5 >B:@KB0. @>25@LB5 A?8A>: 2=5H=8E 8=45:A>2.HCould not open external index. Db not open. Check external indexes list.RclMain25 C40;>AL >B:@KBL D09;: Could not open file: RclMainD5 C40;>AL 2K?>;=8BL 70?8AL 2 D09;Could not write to fileRclMainB:;NG5=>, B0: :0: =5 1K; A:><?8;8@>20= 8=45:A0B>@ 40==KE 2 @50;L=>< 2@5<5=8.;Disabled because the real time indexer was not compiled in.RclMain>;LH5 =5 ?>:07K20BL (4;O 2>AAB0=>2;5=8O 7=0G5=89 8A?>;L7C9B5 >:=> =0AB@>9:8 8=B5@D59A0).DDo not show this warning next time (use GUI preferences to restore).RclMain"$8;LB@ 4>:C<5=B>2Document filterRclMain.@>A<>B@5==K5 4>:C<5=BKDocument historyRclMain >B>2>DoneRclMain.C1;8@CNI85AO 4>:C<5=BKDuplicate documentsRclMain !B8@0=85 8=45:A0 Erasing indexRclMain H81:0ErrorRclMainK?>;=O5BAO: [ Executing: [RclMain=5H=85 ?@8;>65=8O/:><0=4K, B@51C5<K5 4;O 8=45:A8@>20=8O D09;>2, =5 =0945=K, :0: C:070=> 2 @57C;LB0B0E ?>A;54=53> 8=45:A8@>20=8O 2 pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMainAB>@8O History dataRclMain&=45:A 701;>:8@>20= Index lockedRclMainH81:0 70?@>A0Index query errorRclMain2 0A?8A0=85 8=45:A8@>20=8OIndex schedulingRclMain8@>8=45:A8@>20==K5 MIME-B8?KIndexed MIME TypesRclMain4QB 8=45:A8@>20=85. 52>7<>6=> ?>;CG8BL 4>ABC? : D09;C 251-:MH0./Indexer is running. 
Can't access webcache file.RclMainF5 C40;>AL 2K?>;=8BL 8=45:A8@>20=85Indexing failedRclMain*4QB 8=45:A8@>20=85: Indexing in progress: RclMain.=45:A8@>20=85 ?@5@20=>Indexing interruptedRclMainH81:0 703@C7:8 Load errorRclMainLA?><>30B5;L=K5 ?@8;>65=8O >BACBAB2CNBMissing helper programsRclMain>=8B>@MonitorRclMainp5 =0AB@>5=0 2=5H=OO ?@>3@0<<0 4;O ?@>A<>B@0 mime-B8?0 [-No external viewer configured for mime type [RclMainNA5 2A?><>30B5;L=K5 ?@8;>65=8O 4>ABC?=KNo helpers found missingRclMainjBACBAB2CNB A>E@0=Q==K5 @57C;LB0BK ?@54K4CI53> ?>8A:0No preserved previous searchRclMain0>8A: =5 40; @57C;LB0B>2No results foundRclMain: 57C;LB0BK ?>8A:0 >BACBAB2CNB No searchRclMain^!>>1I5=85 > =5:@8B8G=>9 >H81:5 8=45:A8@>20=8O: Non-fatal indexing message: RclMainBACBAB2C5BNoneRclMainB:@K205BAO 2@5<5==0O :>?8O. 7<5=5=8O 1C4CB CB5@O=K, 5A;8 8E =5 A>E@0=8BL<br/>2 ?>AB>O==>< <5AB>?>;>65=88.`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMainG8AB:0PurgeRclMain4QB >1@01>B:0 70?@>A0.<br>7-70 >3@0=8G5=89 181;8>B5:8<br>>B<5=0 459AB28O ?@8254QB : 70:@KB8N ?@8;>65=8OeQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain$ 57C;LB0BK 70?@>A0 Query resultsRclMainH81:0 GB5=8O Read failedRclMain@!1@>A8BL 8=45:A 8 =0G0BL 70=>2>?(Reset the index and start from scratch ?RclMain<>;-2> @57C;LB0B>2 (@0AGQB=>5)Result count (est.)RclMain 57C;LB0BKResultsRclMain!>E@0=5=85 D09; Save fileRclMain8!>E@0=5==K5 70?@>AK (*.rclq)Saved Queries (*.rclq)RclMain(01;>=K >B1>@0 <>3CB 1KBL 8A?>;L7>20=K B>;L:> c :>@=52K< :0B0;>3><:Selection patterns can only be used with a start directoryRclMainH;O H01;>=>2 >B1>@0 B@51C5BAO topdirSelection patterns need topdirRclMain A>60;5=8N, @01>B0 A Windows 2 40==K9 <><5=B =52>7<>6=0. A?>;L7C9B5 ?C=:BK <5=N $09; 4;O >1=>2;5=8O 8=45:A0YSorry, not available under Windows for now, use the File menu entries to update the indexRclMain StemdbStemdbRclMain4&AB0=>28BL 8=45:A8@>20=85Stop &IndexingRclMain&;>65==K5 4>:C<5=BKSub-documents and attachmentsRclMain*;O 70?CA:0 B5:CI53> ?@>F5AA0 8=45:A8@>20=8O 1K; 8A?>;L7>20= 4@C3>9 8=B5@D59A. 06<8B5 OK 4;O ?@5:@0I5=8O ?@>F5AA0 8;8 B<5=0 4;O 53> ?@>4>;65=8OyThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMain>:C<5=B >B=>A8BAO : 2=5H=5<C 8=45:AC, :>B>@K9 =52>7<>6=> >1=>28BL. @The document belongs to an external index which I can't update. RclMain=45:A0F8O 2K?>;=O5BAO, ?> 7025@H5=88 A8BC0F8O 4>;6=0 C;CGH8BLAO. @The indexer is running so things should improve when it's done. RclMain@>3@0<<0 ?@>A<>B@0, C:070==0O 2 mimeview 4;O %1: %2, =5 =0945=0. B:@KBL 480;>3 =0AB@>9:8?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMaind0==K5 URL ( | ipath) 8<5NB >48=0:>2>5 A>45@68<>5:-These Urls ( | ipath) share the same content:RclMain0==K9 8=AB@C<5=B =0AB@>9:8 ?@8<5=8< B>;L:> : >A=>2=K<C 8=45:AC.6This configuration tool only works for the main index.RclMain8-B>B ?>8A: 1>;LH5 =5 0:B825="This search is not active any moreRclMain=AB@C<5=BKToolsRclMain!?8A>: B8?>2 ?CAB: ?>4>640BL ?@>4>;65=8O 2K?>;=5=8O 8=45:A8@>20=8O?6Types list empty: maybe wait for indexing to progress?RclMain58725AB=>UnknownRclMain58725AB=K9 AB0BCA 8=45:A0B>@0. 52>7<>6=> ?>;CG8BL 4>ABC? : D09;C 251-:MH0.2Unknown indexer state. Can't access webcache file.RclMain 1=>28BL &8=45:A Update &IndexRclMain1=>2;5=85UpdatingRclMain :><0=4=>9 AB@>:5 ?@>3@0<<K ?@>A<>B@0 %1 C:070= :0: A0< D09;, B0: 8 @>48B5;LA:89 D09;: :><0=40 =5 ?>445@68205BAOQViewer command line for %1 specifies both file and parent file value: unsupportedRclMain :><0=4=>9 AB@>:5 ?@>3@0<<K ?@>A<>B@0 %1 C:070= @>48B5;LA:89 D09;, 0 2 URL  A5B52>9 ?@>B>:>; http[s]: :><0=40 =5 ?>445@68205BAOPViewer command line for %1 specifies parent file but URL is http[s]: unsupportedRclMain@54C?@5645=85WarningRclMainH81:0 70?8A8 Write failedRclMain<>H81:0 ?>;CG5=8O A?8A:0 O7K:>2#error retrieving stemming languagesRclMainD8;LB@>20==K5filteredRclMain <5480mediaRclMainA>>1I5=85messageRclMain 4@C3>5otherRclMain?@575=B0F8O presentationRclMainA>@B8@>20==K5sortedRclMainB01;8F0 spreadsheetRclMain B5:ABtextRclMainX ! 
?>2B>@=>9 >1@01>B:>9 D09;>2 A >H81:0<8 With failed files retrying RclMainBase& ?@>3@0<<5 &About Recoll RclMainBase&!;>6=K9 ?>8A:&Advanced Search RclMainBaseR&G8AB8BL A?8A>: ?@>A<>B@5==KE 4>:C<5=B>2&Erase document history RclMainBase0G8AB8BL 8AB>@8N &?>8A:0&Erase search history RclMainBase &$09;&File RclMainBase> 25AL &M:@0= &Full Screen RclMainBase*0AB@>9:0 8&=B5@D59A0&GUI configuration RclMainBase&!?@02:0&Help RclMainBase$0AB@>9:0 &8=45:A0&Index configuration RclMainBase&0AB@>9:0 &Preferences RclMainBase&5@5&AB@>8BL 8=45:A&Rebuild index RclMainBase&!?8A>: &@57C;LB0B>2&Results RclMainBase*&0@0<5B@K A>@B8@>2:8&Sort parameters RclMainBase&=AB@C<5=BK&Tools RclMainBase2& C:>2>4AB2> ?>;L7>20B5;O &User manual RclMainBase&84&View RclMainBase!;>6=K9 ?>8A:Advanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase.@>A<>B@5==K5 4>:C<5=BKDocument History RclMainBase0&@>A<>B@5==K5 4>:C<5=BKDocument &History RclMainBase &KE>4E&xit RclMainBase60AB@>9:0 &2=5H=53> 8=45:A0E&xternal index dialog RclMainBase$#G8BK20BL A8=>=8<KEnable synonyms RclMainBase40AB@>9:0 2=5H=53> 8=45:A0External index dialog RclMainBaseF11F11 RclMainBase5@20O AB@0=8F0 First Page RclMainBase5@20O AB@0=8F0 First page RclMainBase> 25AL M:@0= Full Screen RclMainBaseJ5@59B8 : ?5@2>9 AB@0=8F5 @57C;LB0B>2Go to first page of results RclMainBase&&!B0B8AB8:0 8=45:A0Index &statistics RclMainBase:@>8=45:A8@>20==K5 &MIME-B8?KIndexed &MIME types RclMainBase4& 0A?8A0=85 8=45:A8@>20=8OIndexing &schedule RclMainBaseH=45:A8@>20=85 A >A>1K<8 ?0@0<5B@0<8Indexing with special options RclMainBase803@C78BL A>E@0=Q==K9 70?@>ALoad saved query RclMainBaseN54>AB0NI85 &2A?><>30B5;L=K5 ?@8;>65=8OMissing &helpers RclMainBase$!;54CNI0O AB@0=8F0 Next Page RclMainBase$!;54CNI0O AB@0=8F0 Next page RclMainBase<!;54CNI0O AB@0=8F0 @57C;LB0B>2Next page of results RclMainBase@8 A;54CNI5< >1=>2;5=88 1C4CB ?>2B>@=> >1@01>B0=K D09;K A >H81:0<8.Next update will retry previously failed files RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBase&@54K4CI0O AB@0=8F0 Previous Page RclMainBase&@54K4CI0O AB@0=8F0 Previous page RclMainBase>@54K4CI0O AB@0=8F0 @57C;LB0B>2Previous page of results RclMainBase"$@03<5=BK 70?@>A0Query Fragments RclMainBase RecollRecoll RclMainBase(!>E@0=8BL 2 CSV-D09;Save as CSV (spreadsheet) file RclMainBase4!>E@0=8BL ?>A;54=89 70?@>ASave last query RclMainBase!>E@0=8BL @57C;LB0BK 2 B5:AB>2K9 D09; A @0745;8B5;O<8, >B:@K205<K9 :0: B01;8F0@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift-PgUp Shift+PgUp RclMainBase6>:070BL A2545=8O > 70?@>A5Show Query Details RclMainBase.>:070BL 2 2845 B01;8FK Show as table RclMainBaseD>:070BL @57C;LB0BK 2 2845 B01;8FK(Show results in a spreadsheet-like table RclMainBaseJ!>@B8@>20BL ?> 40B5 >B =>2KE : AB0@K<Sort by date, newest first RclMainBaseJ!>@B8@>20BL ?> 40B5 >B AB0@KE : =>2K<Sort by date, oldest first RclMainBaseJ!>@B8@>20BL ?> 40B5 >B =>2KE : AB0@K<#Sort by dates from newest to oldest RclMainBaseJ!>@B8@>20BL ?> 40B5 >B AB0@KE : =>2K<#Sort by dates from oldest to newest RclMainBase(0@0<5B@K A>@B8@>2:8Sort parameters RclMainBase4!?5F80;L=>5 8=45:A8@>20=85Special Indexing RclMainBaseV1>7@520B5;L &B5@<8=>2=0;870B>@ &B5@<8=>2Term &explorer RclMainBase4=AB@C<5=B >17>@0 B5@<8=>2Term explorer tool RclMainBase40?CAB8BL ?>H03>2K9 ?@>E>4Trigger incremental pass RclMainBase 1=>28BL &8=45:A Update &index RclMainBase" 540:B>@ 251-:MH0Webcache Editor RclMainBase KE>4Quit RclTrayIcon>AAB0=>28BLRestore RclTrayIcon!>45@68<>5Abstract RecollModel 2B>@Author RecollModel0B0Date RecollModel0B0 8 2@5<O 
Date and time RecollModel0B0 4>:C<5=B0 Document date RecollModel  07<5@ 4>:C<5=B0 Document size RecollModel0B0 D09;0 File date RecollModel<O D09;0 File name RecollModel 07<5@ D09;0 File size RecollModel IpathIpath RecollModel;NG52K5 A;>20Keywords RecollModel"8? MIME MIME type RecollModel7<5=5=>Mtime RecollModel$AE>4=0O :>48@>2:0Original character set RecollModel 5;520=B=>ABLRelevancy rating RecollModel03>;>2>:Title RecollModelURLURL RecollModel"(?>:070BL 70?@>A) (show query)ResListL<p><b>>8A: =5 40; @57C;LB0B>2</b><br>

No results found
ResListt<p><i>0@80=BK =0?8A0=8O (157 480:@8B8G5A:8E 7=0:>2): </i>4

Alternate spellings (accents suppressed): ResList<<p><i>0@80=BK =0?8A0=8O: </i>

Alternate spellings: ResList.@>A<>B@5==K5 4>:C<5=BKDocument historyResList>:C<5=BK DocumentsResList!;54CNI89NextResListB:@KBLOpenResList@>A<>B@PreviewResList@54K4CI89PreviousResList$!2545=8O > 70?@>A5 Query detailsResList<>;-2> @57C;LB0B>2 (@0AGQB=>5)Result count (est.)ResList$!?8A>: @57C;LB0B>2 Result listResList$@03<5=BKSnippetsResList&>:C<5=B =54>ABC?5=Unavailable documentResList4;OforResList87 <8=8<C<out of at leastResList &#40;8BL AB>;15F&Delete columnResTable(&!1@>A8BL A>@B8@>2:C &Reset sortResTable$&!>E@0=8BL :0: CSV &Save as CSVResTable*>1028BL AB>;15F %1Add "%1" columnResTableB52>7<>6=> >B:@KBL/A>740BL D09;: Can't open/create file: ResTable8!>E@0=8BL B01;8FC 2 CSV-D09;Save table to CSV fileResTableZ >B;8G0NBAO >B B5:CI8E ?0@0<5B@>2 (A>E@0=5=>)' differ from current preferences (kept)SSearchA5 A;>20 All termsSSearchN1>5 A;>2>Any termSSearch~2B><0B8G5A:8 ?>4AB02;O5<K5 ACDD8:AK 4;O A>E@0=Q==>3> 70?@>A0:  Auto suffixes for stored query: SSearch2B><0B8G5A:8 ?>4AB02;O5<0O D@070 7040=0, => 4;O A>E@0=Q==>3> 70?@>A0 >=0 7040=0 =5 1K;03Autophrase is set but it was unset for stored querySSearch2B><0B8G5A:8 ?>4AB02;O5<0O D@070 =5 7040=0, => 4;O A>E@0=Q==>3> 70?@>A0 >=0 1K;0 7040=03Autophrase is unset but it was set for stored querySSearch.H81:0 2 AB@>:5 70?@>A0Bad query stringSSearch4#:068B5 <0A:C 8<5=8 D09;0.$Enter file name wildcard expression.SSearch2548B5 D@07C =0 O7K:5 70?@>A0. >4A:07:0:<br> <i>A;>2>1 A;>2>2</i> : A;>2>1 8 A;>2>2 2 ;N1>< ?>;5.<br> <i>?>;5:A;>2>1</i> : A;>2>1 2 ?>;5 ?>;5.<br> !B0=40@B=K5 =0720=8O/A8=>=8<K =0720=89 ?>;59:<br> =0720=85/B5<0/?>4?8AL, 02B>@/>B, ?>;CG0B5;L/:><C, 8<O D09;0, @0AH8@5=85.<br> A524>-?>;O: dir, mime/D>@<0B, B8?/rclcat, 40B0, @07<5@.<br> 20 ?@8<5@0 >1>7=0G5=8O 2@5<5==>3> 8=B5@20;0: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>A;>2>1 A;>2>2  A;>2>3</i> : A;>2>1  (A;>2>2  A;>2>3).<br> >?CA:05BAO 8A?>;L7>20=85 :02KG5:.<br> <i>A;>2>1 A;>2>2</i> : D@070 (B@51C5BAO ?>;=>5 A>2?045=85). >7<>6=K5 <>48D8:0B>@K:<br> <i>A;>2>1 A;>2>2p</i> : =5C?>@O4>G5==K9 ?>8A: A 7040==K< ?> C<>;G0=8N @0AAB>O=85< <564C A;>20<8.<br> 5@5948B5 ?> AAK;:5 <b>>:070BL 70?@>A</b>, 5A;8 =5 C25@5=K 2 @57C;LB0B5. >;55 ?>4@>1=CN 8=D>@<0F8N <>6=> ?>;CG8BL 87 @C:>2>4AB20 ?>;L7>20B5;O (&lt;F1>). Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchT=5H=85 8=45:AK 4;O A>E@0=Q==>3> 70?@>A0: #External indexes for stored query: SSearch<O D09;0 File nameSSearch&54>AB0B>G=> ?0<OB8 Out of memorySSearch/7K: 70?@>A0Query languageSSearch`/7K:8 A> A;>2>D>@<0<8 4;O A>E@0=Q==>3> 70?@>A0: %Stemming languages for stored query: SSearch(K15@8B5 B8? ?>8A:0.Choose search type. SSearchBaseG8AB8BLClear SSearchBase Ctrl+SCtrl+S SSearchBase(G8AB8BL ?>;5 ?>8A:0Erase search entry SSearchBaseSSearchBase SSearchBase SSearchBase >8A:Search SSearchBase0G0BL ?>8A: Start query SSearchBaseA5All SearchClauseW N1>5Any SearchClauseW<O D09;0 File name SearchClauseW(>;5 =5 8A?>;L7C5BAONo field SearchClauseW57None SearchClauseWP>;8G5AB2> A;>2 <564C 2K1@0==K<8 A;>20<8HNumber of additional words that may be interspersed with the chosen ones SearchClauseW $@070Phrase SearchClauseW0 0AAB>O=85 <564C A;>20<8 Proximity SearchClauseWlK15@8B5, :0:>9 B8? 70?@>A0 ?> A;>20< 1C45B ?@>87254Q=>Select the type of query that will be performed with the words SearchClauseW 09B8:Find:Snippets!;54CNI89NextSnippets@54K4CI89PrevSnippets$@03<5=BKSnippetsSnippets <p> A>60;5=8N, B>G=K5 A>2?045=8O A 7040==K<8 ?0@0<5B@0<8 =5 =0945=K. >7<>6=>, 4>:C<5=B A;8H:>< 1>;LH>9, 8 35=5@0B>@ D@03<5=B>2 40; A1>9...</p>

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsW A:0BLSearch SnippetsW 17>@BrowseSpecIdxW,0B0;>3 4;O @5:C@A82=>3> 8=45:A8@>20=8O. >;65= =0E>48BLAO 2=CB@8 AB0=40@B=>9 ?@>8=45:A8@>20==>9 >1;0AB8,<br> :0: C:070=> 2 D09;5 =0AB@>9:8 (topdirs).Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxW;8 1C4CB >1@010BK20BLAO B>;L:> 87<5=Q==K5 D09;K 8;8 D09;K A >H81:0<8.5Else only modified or failed files will be processed.SpecIdxW!B8@0BL A2545=8O > 2K1@0==KE D09;0E ?5@54 =0G0;>< 8=45:A8@>20=8O.*Erase selected files data before indexing.SpecIdxWAB028BL ?>;5 ?CABK< 4;O 2K1>@0 2A5E D09;>2. >6=> 8A?>;L7>20BL =5A:>;L:> @0745;Q==KE ?@>15;>< H01;>=>2.<br>(01;>=K, 2:;NG0NI85 2 A51O ?@>15;, 4>;6=K 1KBL 27OBK 2 42>9=K5 :02KG:8.<br>>6=> 8A?>;L7>20BL, B>;L:> 5A;8 7040= :>@=52>9 :0B0;>3 4;O 8=45:A8@>20=8O.Leave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxW(01;>=K >B1>@0:Selection patterns:SpecIdxW4!?5F80;L=>5 8=45:A8@>20=85Special IndexingSpecIdxWT@>8=45:A8@>20==K9 M;5<5=B 25@E=53> C@>2=OTop indexed entitySpecIdxW&0:@KBL&Close SpellBase(&4=>:>@5==K5 A;>20 &Expand  SpellBase(480:@8B8G5A:85 7=0:8Accents SpellBase Alt+CAlt+C SpellBase Alt-EAlt+E SpellBase@538AB@Case SpellBase#G8BK20BLMatch SpellBase6!2545=8O 87  >BACBAB2CNB. No db info. SpellBase*1>7@520B5;L B5@<8=>2 Term Explorer SpellBase< !>740=>/>1=>2;5=> 4>:C<5=B>2 Documents created/updatedSpellW$ @>25@5=> D09;>2 Files testedSpellW: 5?@>8=45:A8@>20==KE D09;>2 Unindexed filesSpellW %1 @57C;LB0B(>2) %1 resultsSpellW>!@54=55 :>;-2> A;>2 2 4>:C<5=B5Average terms per documentSpellW6 07<5@ :0B0;>30 107K 40==KEDatabase directory sizeSpellW& 4>:C<5=B5 / A53> Doc. / Tot.SpellW=45:A: %1 4>:C<5=B(>2), A@54=OO 4;8=0 %2 A;>2(>). %3 @57C;LB0B(>2)7Index: %1 documents, average length %2 terms.%3 resultsSpellW-;5<5=BItemSpellW5@5G8A;8BL D09K, :>B>@K5 =5 C40;>AL ?@>8=45:A8@>20BL (<54;5==>),List files which could not be indexed (slow)SpellW A?8A:5 ?@82545=K A>:@0IQ==K5 D>@<K 2 0;D028B=>< ?>@O4:5, =5:>B>@K5 G0AB> ?>2B>@ONI85AO 1List was truncated alphabetically, some frequent SpellWP081>;LH0O 4;8=0 4>:C<5=B0 (:>;-2> A;>2)Longest document length (terms)SpellWMIME-B8?K: MIME types:SpellW84=>:>@5==KE A;>2 =5 =0945=>No expansion foundSpellW '8A;> 4>:C<5=B>2Number of documentsSpellW( 53C;O@=>5 2K@065=85RegexpSpellWJ 57C;LB0BK ?>A;54=53> 8=45:A8@>20=8O:Results from last indexing:SpellW6>:070BL AB0B8AB8:C 8=45:A0Show index statisticsSpellWB08<5=LH0O 4;8=0 4>:C<5=B0 (A;>2) Smallest document length (terms)SpellWBH81:0 ?>8A:0 >4=>:>@5==KE A;>2. Spell expansion error. SpellW,0?8A0=85/?@>87=>H5=85Spelling/PhoneticSpellW$4=>:>@5==K5 A;>20Stem expansionSpellW !;>2>TermSpellW=0G5=85ValueSpellW 0A:8 WildcardsSpellW<>H81:0 ?>;CG5=8O A?8A:0 O7K:>2#error retrieving stemming languagesSpellWA;>20 <>3CB >BACBAB2>20BL. >?@>1C9B5 8A?>;L7>20BL 1>;55 4;8==K9 :>@5=L..terms may be missing. Try using a longer root.SpellWA5 A;>20 All terms UIPrefsDialogN1>5 A;>2>Any term UIPrefsDialogP!;54C5B 2K1@0BL =5 1>;LH5 >4=>3> 8=45:A0$At most one index should be selected UIPrefsDialog52>7<>6=> 4>1028BL 8=45:A A 4@C38<8 =0AB@>9:0<8 CGQB0 @538AB@0 8 480:@8B8G5A:8E 7=0:>2>Cant add index with different case/diacritics stripping option UIPrefsDialogK1@0BLChoose UIPrefsDialog6(@8DB QtWebkit ?> C<>;G0=8NDefault QtWebkit font UIPrefsDialog<O D09;0 File name UIPrefsDialog/7K: 70?@>A0Query language UIPrefsDialog`03>;>2>: A?8A:0 @57C;LB0B>2 (?> C<>;G0=8N ?CAB)%Result list header (default is empty) UIPrefsDialog$>@<0B 0170F0 2 A?8A:5 @57C;LB0B>2 (>G8AB8B5 4;O A1@>A0 : 7=0G5=8O< ?> C<>;G0=8N)3 :>=D83C@0F88 Recoll 8;8 :0B0;>3 8=45:A>2 Xapian (=0?@8<5@, /home/me/.recoll 8;8 /home/me/.recoll/xapiandb)nSelect recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) UIPrefsDialognK1@0==K9 :0B0;>3 =5 O2;O5BAO :0B0;>3>< 8=45:A>2 Xapian;The selected directory does not appear to be a Xapian index UIPrefsDialogR-B>B :0B0;>3 C65 C:070= 2 A?8A:5 8=45:A>23The selected directory is already in the index list UIPrefsDialog5@>OB=>, 2K1@0==K9 :0B0;>3 O2;O5BAO :0B0;>3>< :>=D83C@0F88 Recoll, => GB5=85 :>=D83C0@F88 =52>7<>6=>jThe selected directory looks like a Recoll configuration directory but the configuration could not be read UIPrefsDialogN-B>B 8=45:A O2;O5BAO 3;02=K</;>:0;L=K<!This is the main/local index! 
UIPrefsDialogR=0G5=85 87 ?@54K4CI53> 70?CA:0 ?@>3@0<<K Value from previous program exit UIPrefsDialog<>H81:0 ?>;CG5=8O A?8A:0 O7K:>2#error retrieving stemming languages UIPrefsDialogd7<5=5=85 70?8A59 A @07;8G=K<8 B5:CI8<8 7=0G5=8O<8.Changing entries with different current values ViewAction><0=40Command ViewAction$7OBL 87 >:@C65=8ODesktop Default ViewAction"8? MIME MIME type ViewAction,<b>>2K5 7=0G5=8O:</b>New Values:ViewActionBase@59AB285 (?CAB> -> ?> C<>;G0=8N) Action (empty -> recoll default)ViewActionBase*@8<5=8BL : 2K45;5=8NApply to current selectionViewActionBase0:@KBLCloseViewActionBaseZA:;NG5=8O 4;O ?0@0<5B@>2 =0AB@>9:8 >:@C65=8O Exception to Desktop preferencesViewActionBase.AB@>5==K5 ?@>A<>B@I8:8Native ViewersViewActionBase 59AB285 recoll:Recoll action:ViewActionBaseK15@8B5 B8?K D09;>2 8 8A?>;L7C9B5 :=>?:8, @0A?>;>65==K5 2 @0<:5 =865, GB>1K 87<5=8BL ?>@O4>: >1@01>B:8 D09;>2kSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseK15@8B5 MIME-B8?K 8 8A?>;L7C9B5 :=>?:8 2 =86=59 @0<:5, GB>1K 87<5=8BL ?>@O4>: >1@01>B:8 D09;>2.lSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBase"K45;8BL B0:85 65 Select sameViewActionBasenA?>;L7>20BL ?0@0<5B@K =0AB@>9:8 >:@C65=8O ?> C<>;G0=8N"Use Desktop preferences by defaultViewActionBase B5:CI55 7=0G5=85 current valueViewActionBase<>8A: ?> @53C;O@=><C 2K@065=8N Search regexpWebcache" 540:B>@ 251-:MH0Webcache editorWebcache>?8@>20BL URLCopy URL WebcacheEdit$#40;8BL 2K45;5==K5Delete selection WebcacheEditx4QB 8=45:A8@>20=85. 52>7<>6=> @540:B8@>20BL D09; 251-:MH0.-Indexer is running. Can't edit webcache file. WebcacheEdit58725AB=K9 AB0BCA 8=45:A0B>@0. 52>7<>6=> @540:B8@>20BL D09; 251-:MH0.0Unknown indexer state. Can't edit webcache file. WebcacheEdit!>45@68<>5 251-:MH0 1K;K> 87<5=5=>, ?>A;5 70:@KB8O MB>3> >:=0 =5>1E>48<> 70?CAB8BL 8=45:A8@>20=85.RWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelUrlUrl WebcacheModelK1@0BLChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW&B<5=0&CanceluiPrefsDialogBase&O&OKuiPrefsDialogBase*<BR> (?5@52>4 AB@>:8)
uiPrefsDialogBase <PRE>
uiPrefsDialogBase<PRE> + wrap
 + wrapuiPrefsDialogBase>8A: [rolling stones] (420 A;>20) 1C45B 87<5=Q= =0 [rolling 8;8 stones 8;8 (rolling phrase 2 stones)].
-B> ?>72>;8B ?>2KA8BL ?@8>@8B5B ?>8A:0 @57C;LB0B>2, 2 :>B>@KE A;>20 A;54CNB 8<5==> 2 C:070==>< ?>@O4:5.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBaseH 0745;8B5;L D@03<5=B>2 2 @57C;LB0B0EAbstract snippet separatoruiPrefsDialogBase:;NG8BL 2A5Activate AlluiPrefsDialogBase@:B828@>20BL AAK;:8 2 ?@>A<>B@5.Activate links in preview.uiPrefsDialogBase>1028BL 8=45:A	Add indexuiPrefsDialogBase&@8<5=8BL 87<5=5=8O
Apply changesuiPrefsDialogBaset2B><0B8G5A:8 >1J548=OBL A;>20 2> D@07C ?@8 ?@>AB>< ?>8A:5+Automatically add phrase to simple searchesuiPrefsDialogBaseh>@>3 G0AB>BK ?>O2;5=8O A;>2 2 02B>D@075 2 ?@>F5=B0E.Autophrase term frequency threshold percentageuiPrefsDialogBase0=5;L :=>?>:
Buttons PaneluiPrefsDialogBaseK1@0BLChooseuiPrefsDialogBase6K1>@ ?@8;>65=89-@540:B>@>2Choose editor applicationsuiPrefsDialogBase)Q;:=8B5, GB>1K 4>1028BL 4@C3>9 :0B0;>3 8=45:A0 2 A?8A>:. >6=> 2K1@0BL :0B0;>3 :>=D83C@0F88 Recoll 8;8 8=45:A Xapian.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBase>!:@K20BL 2 ;>B:5 2<5AB> 2KE>40.!Close to tray instead of exiting.uiPrefsDialogBase8$>@<0B 40BK (?> strftime(3))Date format (strftime(3))uiPrefsDialogBaseB:;NG8BL 2A5Deactivate AlluiPrefsDialogBase!B8;L >B>1@065=8O D8;LB@>2: 2 2845 :=>?>:-?5@5:;NG0B5;59, ?>;5 A> A?8A:>< =0 ?0=5;8 8=AB@C<5=B>2 8;8 <5=N.QDecide if document filters are shown as radio buttons, toolbar combobox, or menu.uiPrefsDialogBaserB:;NG8BL Qt-02B>7025@H5=85 ?@8 70?>;=5=88 AB@>:8 ?>8A:0.*Disable Qt autocompletion in search entry.uiPrefsDialogBase$B<5=8BL 87<5=5=8ODiscard changesuiPrefsDialogBase|!>74020BL D@03<5=B, 5A;8 >= C65 8<55BAO 4;O 40==>3> 4>:C<5=B0?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBase!>74020BL D@03<5=BK 4;O @57C;LB0B>2 ?>8A:0 A 8A?>;L7>20=85< :>=B5:AB0 A;>2 70?@>A0?
@>F5AA <>65B >:070BLAO <54;5==K< 4;O 1>;LH8E 4>:C<5=B>2.zDo we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBase6!B8;L >B>1@065=8O D8;LB@>2:Document filter choice style:uiPrefsDialogBase@8=0<8G5A:>5 A>740=85 D@03<5=B>2Dynamically build abstractsuiPrefsDialogBasen 540:B8@>20BL 2AB02:C HTML-703>;>2:0 A?8A:0 @57C;LB0B>2#Edit result page html header insertuiPrefsDialogBase^ 540:B8@>20BL AB@>:C D>@<0B0 0170F0 @57C;LB0B>2#Edit result paragraph format stringuiPrefsDialogBase:;NG8BLEnableuiPrefsDialogBase=5H=85 8=45:AKExternal IndexesuiPrefsDialogBase>@>3 G0AB>BK 2 ?@>F5=B0E, 2KH5 :>B>@>3> A;>20 2 02B>D@075 =5 8A?>;L7CNBAO. 
'0AB> ?>O2;ONI85AO A;>20 ?@54AB02;ONB >A=>2=CN ?@>1;5<C >1@01>B:8 D@07. 
@>?CA: A;>2 C25;8G8205B AB5: D@07K 8 C<5=LH05B MDD5:B82=>ABL DC=:F88 02B>D@07K. 
=0G5=85 ?> C<>;G0=8N   2 (?@>F5=B0). Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBaseD!:@K20BL ?>2B>@ONI85AO @57C;LB0BK.Hide duplicate results.uiPrefsDialogBase@CSS-AB8;L ?>4A25B:8 A;>2 70?@>A0#Highlight CSS style for query termsuiPrefsDialogBase>:07K20BL @57C;LB0BK A >48=0:>2K< A>45@60=85< ?>4 @07=K<8 8<5=0<8 =5 1>;55 >4=>3> @070.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBase!B@>:8 B530 PRE =5 ?5@5=>AOBAO. "53 BR B5@O5B G0ABL >BABC?>2. "53 PRE-Wrap <>65B 40BL 65;05<K9 @57C;LB0B.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBase5;0BL AAK;:8 2=CB@8 >:=0 ?@>A<>B@0 0:B82=K<8 8 >B:@K20BL 8E 2 1@0C75@5 ?> I5;G:C.dMake links inside the preview window clickable, and start an external browser when they are clicked.uiPrefsDialogBase|0:A8<0;L=K9 @07<5@ B5:AB0, ?>4A25G8205<>3> ?@8 ?@>A<>B@5 (1)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBase5=NMenuuiPrefsDialogBaseT>;8G5AB2> 70?8A59 =0 AB@0=8F5 @57C;LB0B>2"Number of entries in a result pageuiPrefsDialogBasezB:@KBL 480;>3 2K1>@0 B01;8FK AB8;59 CSS 4;O >:=0 $@03<5=BKAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBase^B:@KBL 480;>3 2K1>@0 H@8DB0 A?8A:0 @57C;LB0B>2-Opens a dialog to select the result list fontuiPrefsDialogBaseDB:@KBL 480;>3 2K1>@0 D09;0 AB8;59-Opens a dialog to select the style sheet fileuiPrefsDialogBase&>@@5:B8@>2:0 ?CB59Paths translationsuiPrefsDialogBaseP!B8;L >B>1@065=8O ?@>AB>3> B5:AB0 2 HTMLPlain text to HTML line styleuiPrefsDialogBaseP@>A<>B@ ?@>AB>3> B5:AB0 2 D>@<0B5 HTML.&Prefer Html to plain text for preview.uiPrefsDialogBase 0A?>7=020=85 B8?0 D09;>2 ?@8 ?><>I8 D09;0 <038G5A:8E G8A5; magic file.(Query language magic file name suffixes.uiPrefsDialogBaseFRecoll   >;L7>20B5;LA:0O =0AB@>9:0Recoll - User PreferencesuiPrefsDialogBaseR0?><=8BL ?>@O4>: A>@B8@>2:8 @57C;LB0B>2.Remember sort activation state.uiPrefsDialogBasev#40;8BL 87 A?8A:0. =45:A =0 48A:5 >AB0=5BAO 157 87<5=5=89.7Remove from list. This has no effect on the disk index.uiPrefsDialogBase$#40;8BL 2K45;5==K5Remove selecteduiPrefsDialogBase:0<5=OBL D@03<5=BK 4>:C<5=B>2 Replace abstracts from documentsuiPrefsDialogBase
!1@>AResetuiPrefsDialogBase8!1@>A AB8;O >:=0 $@03<5=BK Resets the Snippets window styleuiPrefsDialogBase#AB0=>2:0 8A?>;L7C5<>3> ?> C<>;G0=8N A8AB5<=>3> H@8DB0 4;O A?8A:0 @57C;LB0B>21Resets the result list font to the system defaultuiPrefsDialogBaseX!1@>A B01;8FK AB8;59 : 7=0G5=8N ?> C<>;G0=8N!Resets the style sheet to defaultuiPrefsDialogBase$!?8A>: @57C;LB0B>2Result ListuiPrefsDialogBase0(@8DB A?8A:0 @57C;LB0B>2Result list fontuiPrefsDialogBase 0@0<5B@K ?>8A:0Search parametersuiPrefsDialogBase040BL :>@@5:B8@>2:C ?CB59 4;O 2K1@0==>3> 8;8 3;02=>3> 8=45:A0, 5A;8 =8G53> =5 2K1@0=>.XSet path translations for the selected index or for the main one if no selection exists.uiPrefsDialogBaseH>:07K20BL 7=0G>: 2 A8AB5<=>< ;>B:5.Show system tray icon.uiPrefsDialogBasep>:07K20BL ?@54C?@5645=85 ?@8 >B:@KB88 2@5<5==>3> D09;0.)Show warning when opening temporary file.uiPrefsDialogBase:CSS-D09; 4;O >:=0 $@03<5=BKSnippets window CSS fileuiPrefsDialogBaseZB:@K20BL 480;>3 A;>6=>3> ?>8A:0 ?@8 70?CA:5.'Start with advanced search dialog open.uiPrefsDialogBaseXB:@K20BL 480;>3 ?@>AB>3> ?>8A:0 ?@8 70?CA:5Start with simple search modeuiPrefsDialogBase(/7K: A> A;>2>D>@<0<8Stemming languageuiPrefsDialogBase"01;8F0 AB8;59Style sheetuiPrefsDialogBase$09; A8=>=8<>2
Synonyms fileuiPrefsDialogBaseX>;-2> A;>2 2>:@C3 A;>2 ?>8A:0 2> D@03<5=B0E Synthetic abstract context wordsuiPrefsDialogBase^ 07<5@ A>740205<>3> D@03<5=B0 (:>;-2> A8<2>;>2)$Synthetic abstract size (characters)uiPrefsDialogBase"5:ABK 1>;LH53> @07<5@0 =5 1C4CB ?>4A25G820BLAO ?@8 ?@>A<>B@5 (<54;5==>).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBase!?8A>: A;>2, :>B>@K5 1C4CB 02B><0B8G5A:8 ?@5>1@07>20=K 2 @0AH8@5=85 D09;0 2840 ext:xxx 2 70?@>A5.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase,5@5:;NG8BL 2K45;5==K5Toggle selecteduiPrefsDialogBase>;5 A> A?8A:><Toolbar ComboboxuiPrefsDialogBase,=B5@D59A ?>;L7>20B5;OUser interfaceuiPrefsDialogBase(@8<5=8BL ?>;L7>20B5;LA:89 AB8;L >:=0 D@03<5=B>2.<br> @8<5G0=85: 2AB02:0 703>;>2:0 AB@0=8FK @57C;LB0B>2 B0:65 2:;NG5=0 2 703>;>2>: >:=0 D@03<5=B>2.User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBase ) , recoll-1.26.3/qtgui/i18n/recoll_uk.ts0000644000175000017500000043764513566424763014306 00000000000000 AdvSearch All clauses Всі поля Any clause Будь-яке поле texts тексти spreadsheets таблиці presentations презентації media мультимедіа messages повідомлення other інше Bad multiplier suffix in size filter text текст spreadsheet таблиці presentation презентації message повідомлення AdvSearchBase Advanced search Складний пошук Restrict file types Обмежити типи файлів Save as default Зберегти як типові Searched file types Бажані All ----> Всі -----> Sel -----> Виб -----> <----- Sel <----- Виб <----- All <----- Всі Ignored file types Ігноровані Enter top directory for search Шукати тільки у каталозі Browse Перегляд Restrict results to files in subtree: Обмежити пошук по файлах з піддерева: Start Search Шукати Search for <br>documents<br>satisfying: Шукати<br>документи,</br>що задовільняють: Delete clause Прибрати поле Add clause Додати поле Check this to enable filtering on file types Використовувати фільтрацію по типах файлів By categories По категоріях Check this to use file categories instead of raw mime types Використовувати категорії замість типів MIME Close Закрити All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Всі непусті поля буде об'єднано за допомогою АБО ("усі слова") або ТА ("будь-які слова").<br>Поля типу "будь-які слова", "усі слова" та "без цих слів" приймають суміш слів та фраз у подвійних лапках.<br>Поля без даних не беруться до уваги. Invert Minimum size. You can use k/K,m/M,g/G as multipliers Min. Size Maximum size. You can use k/K,m/M,g/G as multipliers Max. Size Filter From To Check this to enable filtering on dates Filter dates Find Check this to enable filtering on sizes Filter sizes ConfIndexW Can't write configuration file Неможливо записати файл конфіґурації Global parameters Загальні параметри Local parameters Місцеві параметри Search parameters Параметри пошуку Top directories Верхні теки The list of directories where recursive indexing starts. Default: your home. Список тек, з яких починається рекурсивне індексування. Типово: домашня тека. Skipped paths Пропускати шляхи These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages Мови зі словоформами The languages for which stemming expansion<br>dictionaries will be built. Мови, для яких буде побудовано<br>словники розкриття словоформ. Log file name Файл журналу The file where the messages will be written.<br>Use 'stderr' for terminal output Файл, куди підуть повідомлення.<br>'stderr' для терміналу Log verbosity level Докладність журналу This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Встановити обсяг повідомлень,<br>від помилок до даних зневадження. Index flush megabytes interval Інтервал скидання індексу (Мб) This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. 
Default 10MB Скільки даних буде проіндексовано між скиданнями індексу на диск.<br>Допомагає контролювати використання пам'яті індексатором. Типово: 10Мб Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Не використовувати aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Вимикає використання aspell для генерації наближень у написання в навіґаторі термінів.<br>Корисне, коли aspell відсутній або зламаний. Aspell language Мова aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Database directory name Тека бази даних The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. Process the WEB history queue Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Max. size for the web store (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. Automatic character case sensitivity <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. Maximum term expansion count <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. Maximum Xapian clauses count <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) Межа розміру стиснених файлів (KB) This value sets a threshold beyond which compressedfiles will not be processed. 
Set to -1 for no limit, to 0 for no decompression ever. Це значення встановлює поріг розміру стиснених файлів, більші за нього не буде опрацьовано. -1 вимикає ліміт, 0 вимикає декомпресію. Max. text file size (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Text file page size (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Global Глобальні CronToolW Cron Dialog <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Hours (* or 0-23) Minutes (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> Enable Disable It seems 
that manually edited entries exist for recollindex, cannot edit crontab Error installing cron entry. Bad syntax in fields ? EditDialog Dialog EditTrans Source path Local path Config error Original path EditTransBase Path Translations Setting path translations for Select one or several file types, then use the controls in the frame below to change how they are processed Add Delete Cancel Відмінити Save FirstIdxDialog First indexing setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> Indexing configuration This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Indexing schedule This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Start indexing now FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> Cron scheduling The tool will let you decide at what time indexing should run and will install a crontab entry. Real time indexing start up Decide if real time indexing will be started when you log in (only for the default index). ListDialog Dialog GroupBox Main No db directory in configuration В конфігурації немає каталогу БД Could not open database in Не можу відкрити базу даних в . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. . Натисніть Відміна, якщо бажаєте відредагувати конфіґурацію до початку індексування, чи OK для продовження. Configuration problem (dynconf Проблема конфігурації (dynconf "history" file is damaged, please check or remove it: Preview &Search for: &Шукати: &Next &Наступне &Previous &Попереднє Clear Стерти Match &Case &Чутливість до реєстру Cannot create temporary directory Не можу створити тимчасову теку Cancel Відмінити Creating preview text Створюю текст для перегляду Loading preview text into editor Завантажую текст перегляду в редактор Close Tab Закрити вкладку Missing helper program: Не знайдено допоміжну програму: Can't turn doc into internal representation for Неможливо перетворити документ на внутрішнє представлення для Form Tab 1 Open Відкрити Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields Показувати поля Show main text Показувати основний текст Print Print Current Preview Show image Select All Copy Save document to file Fold lines Preserve indentation Open document QObject Global parameters Загальні параметри Local parameters Місцеві параметри <b>Customised subtrees <b>Піддерева з налаштуваннями The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Список тек у індексованій ієрархії,<br>для яких деякі параметри потрібно змінити. Типово: пустий. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Нижченаведені параметри змінюються або на верхньому рівні, якщо<br>не вибрано нічого або пустий рядок, або для вибраної теки.<br>Ви можете додати або прибрати теки кнопками +/-. Skipped names Пропускати назви These are patterns for file or directory names which should not be indexed. Шаблони назв файлів або тек, які не буде індексовано. Default character set Типове кодування This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. 
Кодування, яке буде застосовано при читанні файлів, які не вказують таке особливо (наприклад, чисто текстових файлів).<br>Типово невказане, тоді використовується значення з оточення (локалі). Follow symbolic links Розкривати символічні посилання Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Ходити по симлінках при індексації. Типово "ні" для уникнення дублікатів Index all file names Індексувати всі назви файлів Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Індексувати також назви файлів, вміст яких не може бути впізнано чи оброблено (невідомий або непідтримуваний тип MIME). Типово "так" Search parameters Параметри пошуку Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Переглянути &Open Open With Run Script Copy &File Name Копіювати &назву файла Copy &URL Копіювати &URL &Write to File Save selection to files Preview P&arent document/folder &Open Parent document/folder Find &similar documents Знайти &схожі документи Open &Snippets window Show subdocuments / attachments QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Also start indexing daemon right now. Replacing: Replacing file Can't create: Warning Попередження Could not execute recollindex Deleting: Deleting file Removing autostart Autostart file deleted. Kill current process too ? 
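The configuration tooltips above (customised subtrees, skipped names, default character set, follow symbolic links, index all file names) explain that these indexing parameters can be set globally or redefined for selected subdirectories of the indexed area. As a minimal sketch only, assuming the parameter names and the bracketed-path section syntax of the recoll configuration file (topdirs, skippedNames, defaultcharset, followLinks, indexallfilenames; verify against the recoll.conf documentation for your version), such an override might look like:

    # global values, applied from the top of the indexed tree
    topdirs = ~
    skippedNames = *.o *.tmp .git

    # values below apply only inside this subtree
    [~/archive]
    followLinks = 0
    indexallfilenames = 0
    defaultcharset = iso-8859-1

Values set before any bracketed path apply at the top level; a bracketed path opens a section whose settings affect only that subtree.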
RclMain Indexing in progress: Індексується: Query results Результати запиту Cannot retrieve document info from database Не можу здобути документ з бази даних Warning Попередження Can't create preview window Не можу створити вікно перегляду Executing: [ Виконую: [ About Recoll Про Recoll Document history Історія документів History data Дані історії Files Файли Purge Очистити Stemdb База коренів Closing Закриваю Unknown Невідомо This search is not active any more Цей пошук вже неактивний Bad viewer command line for %1: [%2] Please check the mimeconf file Невірний командний рядок для переглядача %1: [%2] Перевірте файл mimeconf Cannot extract document or create temporary file Неможливо здобути документ чи створити тимчасовий файл (no stemming) (без словоформ) (all languages) (всі мови) error retrieving stemming languages помилка здобування списку мов Update &Index Поновити &індекс Indexing interrupted Індексування перервано Stop &Indexing Пе&рервати індексування Can't start query: Неможливо почати запит: All всі media медіа message повідомлення other інше presentation презентації spreadsheet таблиці text текст sorted сортоване filtered фільтроване External applications/commands needed and not found for indexing your file types: Відсутні зовнішні додатки/команди, що потрібні для індексування ваших документів: No helpers found missing Всі додаткові програми наявні Missing helper programs Відсутні додаткові програми Save file dialog Зберегти файл Choose a file name to save under Оберіть ім'я файла для збереження No external viewer configured for mime type [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Can't access file: Can't uncompress file: Save file Result count (est.) Query details Деталі запиту Could not open external index. Db not open. Check external indexes list. No results found None Updating Done Monitor Indexing failed The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Erasing index Reset the index and start from scratch ? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Error Index query error Can't update index: indexer running Indexed MIME Types Bad viewer command line for %1: [%2] Please check the mimeview file Viewer command line for %1 specifies both file and parent file value: unsupported Cannot find parent document External applications/commands needed for your file types and not found, as stored by the last indexing pass in Sub-documents and attachments Document filter The indexer is running so things should improve when it's done. Duplicate documents These Urls ( | ipath) share the same content: Bad desktop app spec for %1: [%2] Please check the desktop file Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. 
Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. RclMainBase Recoll Recoll &File &Файл &Tools &Інструменти &Preferences &Налаштування &Help &Довідка Search tools Інструменти пошуку Result list Список результатів E&xit &Вихід Ctrl+Q Ctrl+Q &About Recoll Про &Recoll &User manual &Довідник користувача Document &History &Історія документів Document History Історія документів &Advanced Search &Складний пошук Advanced/complex Search Складний (поглиблений) пошук &Sort parameters &Параметри сортування Sort parameters Параметри сортування Next page Наступна сторінка Next page of results Наступна сторінка результатів Previous page Попередня сторінка Previous page of results Попередня сторінка результатів &Query configuration Конфіґурація &запиту Update &index &Поновити індекс Term &explorer &Навіґатор термінів Term explorer tool Інструмент для вивчання термінів External index dialog Діалог зовнішнього індексу &Erase document history &Очистити історію документів First page Перша сторінка Go to first page of results Перейти до першої сторінки результатів &Indexing configuration &Конфіґурація індексування All всі &Show missing helpers Відсутні програми PgDown PgUp &Full Screen F11 Full Screen &Erase search history Sort by dates from oldest to newest Sort by dates from newest to oldest Show Query Details &Rebuild index Shift+PgUp E&xternal index dialog &Index configuration &GUI configuration &Results Sort by date, oldest first Sort by date, newest first Show as table Show results in a spreadsheet-like table Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel File name Ім'я файлу Mime type Тип MIME Date Дата Abstract Author Document size Document date File size File date Keywords Original character set Relevancy rating Title URL Mtime Date and time Ipath MIME type Can't sort by inverse relevance ResList Result list Список результатів Unavailable document Документ недосяжний Previous Попередня Next Наступна <p><b>No results found</b><br> <p><b>Не знайдено</b><br> &Preview &Переглянути Find &similar documents Знайти &схожі документи Query details 
Деталі запиту (show query) (показати запит) Copy &File Name Копіювати &назву файла Copy &URL Копіювати &URL filtered фільтроване sorted сортоване Document history Історія документів Preview Перегляд Open Відкрити <p><i>Alternate spellings (accents suppressed): </i> Documents Документи out of at least з принаймні for по <p><i>Alternate spellings: </i> Result count (est.) Snippets ResTable &Reset sort &Delete column Save table to CSV file Can't open/create file: &Preview &Переглянути Copy &File Name Копіювати &назву файла Copy &URL Копіювати &URL Find &similar documents Знайти &схожі документи &Save as CSV Add "%1" column ResTableDetailArea &Preview &Переглянути Copy &File Name Копіювати &назву файла Copy &URL Копіювати &URL Find &similar documents Знайти &схожі документи ResultPopup &Preview &Переглянути Copy &File Name Копіювати &назву файла Copy &URL Копіювати &URL Find &similar documents Знайти &схожі документи SSearch Any term Будь-яке слово All terms Усі слова File name Ім'я файлу Completions Доповнення Select an item: Оберіть: Too many completions Занадто багато доповнень Query language Мова запиту Bad query string Невірний рядок запиту Out of memory Недостатньо пам'яті Enter file name wildcard expression. Enter search terms here. Type ESC SPC for completions of current term. Введіть пошукові слова. Можна використовувати Esc-пробіл для доповнення. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear Стерти Ctrl+S Ctrl+S Erase search entry Стерти вміст рядка запита Search Знайти Start query Почати запит Enter search terms here. Type ESC SPC for completions of current term. Введіть пошукові слова. Можна використовувати Esc-пробіл для доповнення. Choose search type. Оберіть тип пошуку. Show query history SearchClauseW Any of these будь-які слова All of these усі слова None of these без цих слів This phrase фраза Terms in proximity слова поблизу File name matching назва файлу Select the type of query that will be performed with the words Виберіть тип запиту, який буде зроблено по цих словах Number of additional words that may be interspersed with the chosen ones Кількість додаткових слів, що можуть бути між обраними No field Any All всі None Phrase Proximity File name Ім'я файлу Snippets Snippets Find: Next Наступна Prev SnippetsW Search Знайти <p>Sorry, no exact match was found within limits. 
Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SortForm Date Дата Mime type Тип MIME SortFormBase Sort Criteria Критерії сортування Sort the Сортувати most relevant results by: кращих результатів за: Descending спаданням Close Закрити Apply Застосувати SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Перегляд Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer Навіґатор термінів &Expand &Розкрити Alt+E &Close &Закрити Alt+C Alt+C Term Слово No db info. Match Case Accents SpellW Wildcards Шаблони Regexp Регвираз Spelling/Phonetic Напис/звучання Aspell init failed. Aspell not installed? Не вдалося запустити aspell. Воно взагалі встановлене? Aspell expansion error. Помилка розкриття aspell. Stem expansion Розкриття словоформ error retrieving stemming languages помилка здобування списку мов No expansion found Розкриття не знайдене Term Слово Doc. / Tot. Index: %1 documents, average length %2 terms.%3 results %1 results List was truncated alphabetically, some frequent terms may be missing. Try using a longer root. Show index statistics Number of documents Average terms per document Database directory size MIME types: Item Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog The selected directory does not appear to be a Xapian index Обрана тека не схожа на індекс Xapian This is the main/local index! Це основний/локальний індекс! The selected directory is already in the index list Обрана тека вже у списку індексів Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Оберіть теку із індексом Xapian (наприклад, /home/приятель/.recoll/xapiandb) error retrieving stemming languages помилка здобування списку мов Choose Перегляд Result list paragraph format (erase all to reset to default) Result list header (default is empty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read At most one index should be selected Cant add index with different case/diacritics stripping option Default QtWebkit font Any term Будь-яке слово All terms Усі слова File name Ім'я файлу Query language Мова запиту Value from previous program exit UIPrefsDialogBase User interface Інтерфейс Number of entries in a result page Кількість результатів на сторінку Result list font Шрифт списку результатів Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Відкриває діалог вибору шрифту списку результатів Reset Скинути Resets the result list font to the system default Повертає шрифт у типовий системний Auto-start simple search on whitespace entry. Починати простий пошук при введенні пробілу. Start with advanced search dialog open. 
Відкривати діалог складного пошуку при старті. Start with sort dialog open. Відкривати діалог сортування при старті. Search parameters Параметри пошуку Stemming language Мова словоформ Dynamically build abstracts Динамічно будувати конспекти Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Чи намагатися будувати конспекти для результатів пошуку, використовуючі контекст знайдених слів? Може працювати повільно для великих документів. Replace abstracts from documents Заміняти наявні у документах конспекти Do we synthetize an abstract even if the document seemed to have one? Чи робити новий конспект, навіть якщо якийсь вже є в документі? Synthetic abstract size (characters) Розмір синтетичного конспекту (у символах) Synthetic abstract context words Контекстних слів у конспекті External Indexes Зовнішні індекси Add index Додати індекс Select the xapiandb directory for the index you want to add, then click Add Index Оберіть потрібну теку із індексом Xapian та натисніть "Додати індекс" Browse Перегляд &OK &OK Apply changes Застосувати зміни &Cancel &Відміна Discard changes Відмінити зміни Result paragraph<br>format string Рядок форматування<br>блоку результатів Automatically add phrase to simple searches Автоматично додавати фразу до простих пошуків A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Пошук [rolling stones] (2 слова) буде змінено на [rolling or stones or (rolling phrase 2 stones)]. Це може підняти результати, в яких пошукові слова зустрічаються саме в такій послідовності, як в запиті. User preferences Вподобання Use desktop preferences to choose document editor. Використовувати налаштування десктопу щодо редактору документів. External indexes Зовнішні індекси Toggle selected Переключити вибране Activate All Включити все Remove selected Видалити вибране Remove from list. This has no effect on the disk index. Видалити зі списку. Не впливає на дисковий індекс. Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Визначає формат для кожного блоку списку результатів. Використовуйте html-формат qt та схожі на printf заміни:<br>%A анотація<br> %D дата<br> %I назва піктограми<br> %K ключові слова (якщо є)<br> %L посилання перегляду та редагування<br> %M тип MIME<br> %N кількість результатів<br> %R релевантність<br> %S розмір<br> %T назва<br> %U URL<br> Remember sort activation state. Запам'ятати стан сортування. Maximum text size highlighted for preview (megabytes) Максимальний розмір тексту із підсвічуванням (Мб) Texts over this size will not be highlighted in preview (too slow). Тексти із розміром, більшим за вказаний, не буде підсвічено у попередньому перегляді (повільно). Highlight color for query terms Колір виділення ключових слів Deactivate All Виключити все Prefer Html to plain text for preview. Віддавати перевагу HTML над текстом для перегляду. If checked, results with the same content under different names will only be shown once. Якщо увімкнене, результати с таким самим змістом та різними назвами буде показано не більше одного разу. Hide duplicate results. 
Ховати дублікати Choose editor applications Оберіть редактори ViewAction Changing actions with different current values Зміна дій із різними поточними значеннями Mime type Тип MIME Command MIME type Desktop Default Changing entries with different current values ViewActionBase File type Тип файлу Action Дія Select one or several file types, then click Change Action to modify the program used to open them Оберіть один або декілька типів файлів, потім натисніть "Змінити дію", щоб змінити програму для них Change Action Змінити дію Close Закрити Native Viewers Рідні переглядачі Select one or several mime types then use the controls in the bottom frame to change how they are processed. Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Recoll action: current value Select same <b>New Values:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfIndexW Can't write configuration file Неможливо записати файл конфіґурації confgui::ConfParamFNW Browse Перегляд Choose Перегляд confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSubPanelW Global Глобальні Max. compressed file size (KB) Межа розміру стиснених файлів (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Це значення встановлює поріг розміру стиснених файлів, більші за нього не буде опрацьовано. -1 вимикає ліміт, 0 вимикає декомпресію. confgui::ConfTopPanelW Top directories Верхні теки The list of directories where recursive indexing starts. Default: your home. Список тек, з яких починається рекурсивне індексування. Типово: домашня тека. Skipped paths Пропускати шляхи Stemming languages Мови зі словоформами The languages for which stemming expansion<br>dictionaries will be built. Мови, для яких буде побудовано<br>словники розкриття словоформ. Log file name Файл журналу The file where the messages will be written.<br>Use 'stderr' for terminal output Файл, куди підуть повідомлення.<br>'stderr' для терміналу Log verbosity level Докладність журналу This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Встановити обсяг повідомлень,<br>від помилок до даних зневадження. Index flush megabytes interval Інтервал скидання індексу (Мб) Max disk occupation (%) Максимальне використання диску (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Відсоток зайнятого диску, коли індексування буде зупинено (щоб уникнути заповнення доступного простору).<br>Типово: 0 (без ліміту). No aspell usage Не використовувати aspell Aspell language Мова aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Мова словника aspell. 
Має виглядати як 'en' або 'uk'...<br>Якщо не встановлене, буде використане оточення (локаль), що зазвичай робить. Щоб з'ясувати, що маємо на системі, наберіть 'aspell config' та перегляньте файли .dat у теці 'data-dir'. Database directory name Тека бази даних The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Назва теки, де міститься індекс<br>Відносний шлях буде трактовано відносно теки конфіґурації. Типово: 'xapiandb'. Use system's 'file' command Використовувати системну 'file' Use the system's 'file' command if internal<br>mime type identification fails. Використовувати команду 'file' з системи, коли внутрішнє<br>визначення типу MIME дає збій. These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Це назви тек, у які індексування не потрапить.<br> Може містити шаблони. Має співпадати із шляхами, що бачить індексатор (наприклад, якщо topdirs містить '/home/me' та '/home' є посиланням на '/usr/home', то вірний запис буде '/home/me/tmp*', а не '/usr/home/me/tmp*') This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Скільки даних буде проіндексовано між скиданнями індексу на диск.<br>Допомагає контролювати використання пам'яті індексатором. Типово: 10Мб Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Вимикає використання aspell для генерації наближень у написання в навіґаторі термінів.<br>Корисне, коли aspell відсутній або зламаний. uiPrefsDialogBase User preferences Вподобання User interface Інтерфейс Number of entries in a result page Кількість результатів на сторінку If checked, results with the same content under different names will only be shown once. Якщо увімкнене, результати с таким самим змістом та різними назвами буде показано не більше одного разу. Hide duplicate results. Ховати дублікати Highlight color for query terms Колір виділення ключових слів Result list font Шрифт списку результатів Opens a dialog to select the result list font Відкриває діалог вибору шрифту списку результатів Helvetica-10 Helvetica-10 Resets the result list font to the system default Повертає шрифт у типовий системний Reset Скинути Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Визначає формат для кожного блоку списку результатів. Використовуйте html-формат qt та схожі на printf заміни:<br>%A анотація<br> %D дата<br> %I назва піктограми<br> %K ключові слова (якщо є)<br> %L посилання перегляду та редагування<br> %M тип MIME<br> %N кількість результатів<br> %R релевантність<br> %S розмір<br> %T назва<br> %U URL<br> Result paragraph<br>format string Рядок форматування<br>блоку результатів Texts over this size will not be highlighted in preview (too slow). Тексти із розміром, більшим за вказаний, не буде підсвічено у попередньому перегляді (повільно). 
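The uiPrefsDialogBase entries above list the printf-like substitutions accepted in the result paragraph format string (%A abstract, %D date, %I icon image name, %K keywords, %L preview and edit links, %M MIME type, %N result number, %R relevance percentage, %S size, %T title, %U URL) and note that Qt HTML markup may be wrapped around them. Purely as an illustration of how such a string is assembled, and not necessarily the format shipped by default, a custom value could look like:

    <img src="%I" align="left">%R %S %L <b>%T</b><br>%M %D <i>%U</i><br>%A %K

Any of the substitutions can be omitted; letters that are not wanted are simply left out of the string.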
Maximum text size highlighted for preview (megabytes) Максимальний розмір тексту із підсвічуванням (Мб) Use desktop preferences to choose document editor. Використовувати налаштування десктопу щодо редактору документів. Choose editor applications Оберіть редактори Auto-start simple search on whitespace entry. Починати простий пошук при введенні пробілу. Start with advanced search dialog open. Відкривати діалог складного пошуку при старті. Start with sort dialog open. Відкривати діалог сортування при старті. Remember sort activation state. Запам'ятати стан сортування. Prefer Html to plain text for preview. Віддавати перевагу HTML над текстом для перегляду. Search parameters Параметри пошуку Stemming language Мова словоформ A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Пошук [rolling stones] (2 слова) буде змінено на [rolling or stones or (rolling phrase 2 stones)]. Це може підняти результати, в яких пошукові слова зустрічаються саме в такій послідовності, як в запиті. Automatically add phrase to simple searches Автоматично додавати фразу до простих пошуків Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Чи намагатися будувати конспекти для результатів пошуку, використовуючі контекст знайдених слів? Може працювати повільно для великих документів. Dynamically build abstracts Динамічно будувати конспекти Do we synthetize an abstract even if the document seemed to have one? Чи робити новий конспект, навіть якщо якийсь вже є в документі? Replace abstracts from documents Заміняти наявні у документах конспекти Synthetic abstract size (characters) Розмір синтетичного конспекту (у символах) Synthetic abstract context words Контекстних слів у конспекті The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Query language magic file name suffixes. Enable External Indexes Зовнішні індекси Toggle selected Переключити вибране Activate All Включити все Deactivate All Виключити все Remove from list. This has no effect on the disk index. Видалити зі списку. Не впливає на дисковий індекс. Remove selected Видалити вибране Add index Додати індекс Apply changes Застосувати зміни &OK &OK Discard changes Відмінити зміни &Cancel &Відміна Abstract snippet separator Style sheet Opens a dialog to select the style sheet file Choose Перегляд Resets the style sheet to default Result List Edit result paragraph format string Edit result page html header insert Date format (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Autophrase term frequency threshold percentage Plain text to HTML line style Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. <BR> <PRE> <PRE> + wrap Disable Qt autocompletion in search entry. Paths translations Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Snippets window CSS file Opens a dialog to select the Snippets window CSS style sheet file Resets the Snippets window style Decide if document filters are shown as radio buttons, toolbar combobox, or menu. 
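The "Date format (strftime(3))" preference above takes a standard strftime format string for the dates shown in the result list. A few illustrative values (the dates themselves are arbitrary examples):

    %Y-%m-%d %H:%M   ->  2019-11-28 14:05
    %d/%m/%Y         ->  28/11/2019
    %x               ->  the locale's preferred date representation

See strftime(3) for the full set of conversion specifiers.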
Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_nl.ts0000644000175000017500000035415713303776057014267 00000000000000 AdvSearch All clauses Alle termen Any clause Elke term media media other andere Bad multiplier suffix in size filter Geen juist achtervoegsel in grootte filter text tekst spreadsheet spreadsheet presentation presentatie message bericht AdvSearchBase Advanced search geavanceerd zoeken Search for <br>documents<br>satisfying: Zoek naar<br>documenten<br> die bevatten: Delete clause verwijder term Add clause voeg term toe Restrict file types beperk tot bestandstype Check this to enable filtering on file types vink dit aan om filetype filtering te activeren By categories Per categorie Check this to use file categories instead of raw mime types Vink dit aan om bestands catergorie te gebruiken in plaats van raw mime Save as default Sla op als standaard Searched file types Gezochte bestands type All ----> Alle ----> Sel -----> Sel ----> <----- Sel <----- Sel <----- All alle Ignored file types negeer bestandstype Enter top directory for search voer de top bestandsmap in om te doorzoeken Browse doorbladeren Restrict results to files in subtree: Beperk de resultaten tot de bestanden in de subtak Start Search Begin met zoeken Close sluit All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Elk niet lege veld aan de rechterzijde zal worden gecombineerd met En ("Alle clausules" keuze) en Of ("Bepalingen" keuze) voegwoorden. <br> "Elk", "en" Of "Geen" veldtypen kan een mix van eenvoudige woorden en uitdrukkingen tussen dubbele aanhalingstekens te accepteren. <br> Velden zonder gegevens worden genegeerd. Invert omkeren Minimum size. You can use k/K,m/M,g/G as multipliers Minimummaat. U kunt k / K, m / M gebruiken, g / G als multipliers Min. Size Min Grootte Maximum size. You can use k/K,m/M,g/G as multipliers Maximale grootte. U kunt k / K, m / M gebruiken, g / G als multipliers Max. 
Size Max grootte Filter Filter From Van To Tot Check this to enable filtering on dates Vink dit aan om op datum te kunnen filteren Filter dates Filter datums Find Vind Check this to enable filtering on sizes Vink dit aan om te filteren op grootte Filter sizes Filter grootte CronToolW Cron Dialog Cron dialoogvenster <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexeer schema (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"> Een enkele numerieke waarde, door komma's gescheiden lijsten (1,3,5) en reeksen (1-7). 
Meer in het algemeen zullen de velden worden gebruikt <span style=" font-style:italic;">als </span> in het crontab bestand, en het volledige crontab syntax kan worden gebruikt, zie crontab (5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Bijvoorbeeld invoeren <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Dagen, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Uren</span> en <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minuten</span> zal recollindex starten op elke dag om 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Een schema met zeer frequent activering is waarschijnlijk minder efficiënt dan real time indexeren.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Dagen van de week (* of 0-7, of 7 is Zondag) Hours (* or 0-23) Uren (*of 0-23 Minutes (0-59) Minuten (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Klik <span style=" font-style:italic;">Uitzetten</span> om automatisch batch indexeren uit te zetten, <span style=" font-style:italic;">Aanzetten</span> om het te activeren, <span style=" font-style:italic;">Annuleren</span> om niets te doen</p></body></html> Enable Aanzetten Disable Uitzetten It seems that manually edited entries exist for recollindex, cannot edit crontab Het lijkt erop dat met de hand bewerkt ingaves bestaan voor recollindex, kan niet crontab bewerken Error installing cron entry. Bad syntax in fields ? Fout bij het instellen van cron job. Slechte syntax in de ingave? 
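The CronToolW strings above describe the three scheduling fields collected by the dialog (days of week, hours, minutes) and state that they are used as-is inside the crontab file. As a hedged illustration of the resulting crontab(5) line only (the entry actually installed by the dialog may wrap recollindex with additional environment settings), the worked example from the text, * for Days, 12,19 for Hours and 15 for Minutes, corresponds roughly to:

    15 12,19 * * * recollindex

The crontab fields are minute, hour, day of month, month, day of week, followed by the command. Note that cron uses a 24-hour clock, so hour 12 fires at 12:15 in the afternoon rather than 12:15 AM.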
EditDialog Dialog Dialoog EditTrans Source path bronpad Local path lokaal pad Config error Configuratie fout Original path Oorspronkelijk pad EditTransBase Path Translations Pad vertalingen Setting path translations for zet vertalingspad voor Select one or several file types, then use the controls in the frame below to change how they are processed Selecteer één of meerdere bestandstypen, gebruik dan de bediening in het kader hieronder om te veranderen hoe ze worden verwerkt Add toevoegen Delete Verwijderen Cancel Annuleer Save Bewaar FirstIdxDialog First indexing setup Setup van eerste indexering <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Het blijkt dat de index voor deze configuratie niet bestaat.</span><br /><br />Als u gewoon uw home directory wilt indexeren met een set van redelijke standaardinstellingen, drukt u op de<span style=" font-style:italic;">Start indexeer nu</span>knop. 
Je zult in staat zijn om de details later aan te passen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Als u meer controle wil, gebruik dan de volgende links om de indexering configuratie en het schema aan te passen.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Deze tools kunnen later worden geopend vanuit het<span style=" font-style:italic;">Voorkeuren</span> menu.</p></body></html> Indexing configuration Configuratie inedexering This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Dit laat u de mappen die u wilt indexeren, en andere parameters aan passen, zoals uitgesloten bestandspaden of namen, standaard character sets, enz. Indexing schedule Indexerings schema This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Dit zal u laten kiezen tussen batch en real-time indexering, en het opzetten van een automatisch schema voor batch indexeren (met behulp van cron) Start indexing now Begin nu met indexering FragButs %1 not found. %1 niet gevonden. %1: %2 %1: %2 Query Fragments Zoekterm fragmenten IdxSchedW Index scheduling setup indexing schema setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>indexering kan permanent draaien, het indexeren van bestanden als ze veranderen, of lopen op vaste intervallen.</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Het lezen van de handleiding kan helpen om te beslissen tussen deze benaderingen (druk op F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Deze tool kan u helpen bij het opzetten van een schema om batch indexeren runs te automatiseren, of het starten van real time indexeren wanneer u zich aanmeldt (of beide, dat is echter zelden zinvol). </p></body></html> Cron scheduling Cron schema The tool will let you decide at what time indexing should run and will install a crontab entry. Deze tool zal u laten beslissen op welk tijdstip het indexeren moet worden uitgevoerd en zal een crontab installeren. Real time indexing start up Real time indexering opstart Decide if real time indexing will be started when you log in (only for the default index). Beslis of real time indexeren wordt gestart wanneer u inlogt (alleen voor de standaard-index). ListDialog Dialog Dialoog venster GroupBox GroepVenster Main No db directory in configuration Geen db bestand in configuratie "history" file is damaged or un(read)writeable, please check or remove it: Het "Geschiedenis" bestand is beschadigd of on(lees)schrijfbaar geworden, graag controleren of verwijderen: Preview Close Tab Sluit tab Cancel Annuleer Missing helper program: Help programma ontbreekt Can't turn doc into internal representation for Kan doc omzetten in een interne representatie Creating preview text preview tekst aan het maken Loading preview text into editor Preview tekst in editor aan het laden &Search for: &Zoek naar: &Next &Volgende &Previous &Vorige Clear Wissen Match &Case Hoofd/kleine letter Error while loading file Fout bij het laden van bestand PreviewTextEdit Show fields Toon veld Show main text Toon hoofd tekst Print Druk af Print Current Preview Druk huidige Preview af Show image Toon afbeelding Select All Selecteer alles Copy Kopieer Save document to file Bewaar document als bestand Fold lines Vouw lijnen Preserve indentation Behoud inspringing QObject Global parameters Globale parameters Local parameters Lokale parameters <b>Customised subtrees <b>Aangepaste substructuur The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. De lijst van de submappen in de geïndexeerde hiërarchie <br> waar sommige parameters moeten worden geherdefinieerd. Standaard: leeg. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. 
<i>De parameters die volgen zijn ingesteld, hetzij op het hoogste niveau, als er niets <br>of een lege regel is geselecteerd in de keuzelijst boven, of voor de geselecteerde submap.<br> U kunt mappen toevoegen of verwijderen door op de +/- knoppen te klikken. Skipped names Overgeslagen namen These are patterns for file or directory names which should not be indexed. Dit zijn patronen voor bestand of de mappen namen die niet mogen worden geïndexeerd. Follow symbolic links Volg symbolische links Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Volg symbolische links tijdens het indexeren. De standaard is niet volgen, om dubbele indexering te voorkomen Index all file names Indexeer alle bestandsnamen Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indexeer de namen van bestanden waarvan de inhoud niet kan worden geïdentificeerd of verwerkt (geen of niet-ondersteunde MIME-type). standaard true Search parameters Zoek parameters Web history Web geschiedenis Default<br>character set Standaard<br>karakter set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Tekenset die wordt gebruikt voor het lezen van bestanden die het intern tekenset niet worden herkend, bijvoorbeeld pure tekstbestanden. Ondernemingen De standaard waarde is leeg en de waarde van de NLS-omgeving wordt gebruikt. Ignored endings Genegeerde eindes These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). Dit zijn bestandsnaam eindes voor bestanden die zullen worden geïndexeerd door alleen de naam (geen MIME-type identificatie poging, geen decompressie, geen inhoud indexering). QWidget Create or choose save directory Maak of kies een bestandsnaam om op te slaan Choose exactly one directory Kies exact een map Could not read directory: kon map niet lezen Unexpected file name collision, cancelling. Onverwachte bestandsnaam botsing, annuleren. Cannot extract document: Kan het document niet uitpakken &Preview &Preview &Open &Openen Open With Open met Run Script Voer script uit Copy &File Name Kopieer &Bestands Naam Copy &URL Kopieer &URL &Write to File &Schijf naar Bestand Save selection to files Bewaar selektie naar bestanden Preview P&arent document/folder Preview B&ovenliggende document/map &Open Parent document/folder &Open Bovenliggend document/map Find &similar documents Vindt &gelijksoortige documenten Open &Snippets window Open &Knipsel venster Show subdocuments / attachments Toon subdocumenten / attachments QxtConfirmationMessage Do not show again. Niet nogmaals tonen RTIToolW Real time indexing automatic start Automatisch Starten realtime-indexeren <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. 
You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>indexering kan worden ingesteld om te draaien als een daemon, het bijwerken van de index als bestanden veranderen, in real time. Je krijgt dan een altijd up-to-date index, maar systeembronnen worden permanent gebruikt.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Start met indexeren bij mijn desktop-sessie Also start indexing daemon right now. start nu ook de indexatie daemon Replacing: Vervanging Replacing file Vervang bestand Can't create: Kan niet aanmaken Warning Waarschuwing Could not execute recollindex Kon recollindex niet starten Deleting: Verwijderen Deleting file Verwijder bestand Removing autostart Verwijder autostart Autostart file deleted. Kill current process too ? Autostart ongedaan gemaakt proces ook stoppen ? RclMain (no stemming) Geen taal (all languages) alle talen error retrieving stemming languages Fout bij het ophalen van de stam talen Indexing in progress: Indexering is bezig Purge Wissen Stemdb Stemdb Closing Sluiten Unknown Onbekend Query results Zoekresultaat Cannot retrieve document info from database kan info van het document uit database niet lezen Warning Waarschuwing Can't create preview window kan preview venster niet maken This search is not active any more Deze zoekopdracht is niet meer aktief Cannot extract document or create temporary file kan het document niet uitpakken of een tijdelijk bestand maken Executing: [ Uitvoeren: [ About Recoll Over Recoll History data Geschiedenis data Document history Document geschiedenis Update &Index Indexeren &bijwerken Stop &Indexing Stop &Indexing All Alle media media message bericht other anders presentation presentatie spreadsheet spreadsheet text tekst sorted gesorteerd filtered gefilterd No helpers found missing Alle hulpprogrammas zijn aanwezig Missing helper programs Missende hulp programmas No external viewer configured for mime type [ Geen externe viewer voor dit mime type geconfigureerd [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? De viewer gespecificeerd in mimeview voor %1: %2 is niet gevonden Wilt u het dialoogvenster voorkeuren openen? Can't access file: Geen toegang tot het bestand Can't uncompress file: Kan het bestand niet uitpakken Save file Bestand opslaan Result count (est.) Telresultaat(est.) Could not open external index. Db not open. Check external indexes list. kon externe index niet openen. Db niet geopend. Controleer externe indexlijst No results found Geen resultaten gevonden None Geen Updating Bijwerken Done afgerond Monitor Monitoren Indexing failed Indexering mislukt The current indexing process was not started from this interface. 
Click Ok to kill it anyway, or Cancel to leave it alone Het huidige indexering proces werdt niet gestart vanaf deze interface. Klik Ok om het toch te stoppen, of annuleren om het zo te laten Erasing index Wis index Reset the index and start from scratch ? De index resetten en geheel opnieuw beginnen? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Bezig met opdracht <br>Vanwege beperkingen van de indexeerder zal bij,<br>stop het programma in zijn geheel sluiten! Error Fout Index not open Index is niet open Index query error Index vraag fout Content has been indexed for these mime types: De inhoud is bijgewerkt voor deze mime types Can't update index: indexer running kan het index niet bijwerken:indexeren is al aktief Indexed MIME Types Geindexeerd MIME Types Bad viewer command line for %1: [%2] Please check the mimeview file Verkeerde command line voor viewer %1:[%2'] controleer mimeview van bestand Viewer command line for %1 specifies both file and parent file value: unsupported Viewer command line voor %1 specificeerd zowel het bestandtype als het parentfile type waarde: niet ondersteund Cannot find parent document kan parent van document niet vinden Indexing did not run yet Indexering is nog niet bezig External applications/commands needed for your file types and not found, as stored by the last indexing pass in Externe toepassingen / commandos die nodig zijn voor dit bestandstype en niet gevonden, zoals opgeslagen in de laatste indexerings poging Sub-documents and attachments Sub-documenten en attachments Document filter Document filter Index not up to date for this file. Refusing to risk showing the wrong entry. Index voor dit bestand is niet op tu date. geweigerd om verkeerde inforamtie te tonen te riskeren Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. Klik Ok om de index voor dit bestand bij te werken, daarna moet u de opdracht opnieuw uitvoeren na het indexeren The indexer is running so things should improve when it's done. De indexeerder is bezig dus er zou een verbetering moeten optreden als hij klaar is. 
Duplicate documents Vermenigvuldig documenten These Urls ( | ipath) share the same content: Deze Urls (ipath) hebben dezelfde inhoud: Bad desktop app spec for %1: [%2] Please check the desktop file Verkeerde desktop snelkoppeling for %1:[%2] Graag de desktop snelkoppeling controleren Indexing interrupted Indexering onderbroken The current indexing process was not started from this interface, can't kill it Het huidige indexerings proces werdt niet gestart vanaf deze interface, kan het niet stoppen Bad paths Pad verkeerd Bad paths in configuration file: Verkeerd pad in configuratie bestand Selection patterns need topdir Patronen selecteren vraagt een begin folder Selection patterns can only be used with a start directory Patronen selecteren kan alleen gebruikt worden met een start folder No search Niets gezocht No preserved previous search Geen opgeslagen vorige zoekresultaten Choose file to save Kies bestand om op te slaan Saved Queries (*.rclq) Bewaarde Zoekopdrachten (*.rclq) Write failed Schrijf fout Could not write to file Kan niet schrijven naar bestand Read failed Lees fout Could not open file: Kan bestand niet openen Load error Laad fout Could not load saved query Kon bewaarde zoekopdracht niet laden Index scheduling Index schema Sorry, not available under Windows for now, use the File menu entries to update the index Het spijt ons, dit is nog niet beschikbaar voor het windows platform, gebruik het bestands ingave menu om de index te updaten Disabled because the real time indexer was not compiled in. Uitgeschakeld omdat real-time indexering niet ingeschakeld is This configuration tool only works for the main index. Deze configuratie tool werkt alleen voor de hoofdindex Can't set synonyms file (parse error?) kan synomiemen bestand niet instellen ( parse error?) The document belongs to an external index which I can't update. Het document hoort bij een externe index die niet up te daten is Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session). Klik op annuleren om terug te keren naar de lijst. <br>Klik negeren om het voorbeeld toch te tonen( en te onthouden voor deze sessie) Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Openen van tijdelijke kopie.Alle bewerkingen zullen verloren gaan als u ze niet opslaat naar een permanente lokatie Do not show this warning next time (use GUI preferences to restore). Laat deze waarschuwing niet meer zien (gebruik GUI voorkeuren om te herstellen) Index locked index geblokkeerd Unknown indexer state. Can't access webcache file. De staat van de indexer is onbekend. Kan geen toegang krijgen tot het webcache bestand. Indexer is running. Can't access webcache file. De indexeerder is bezig. 
Geen toegang tot webcache RclMainBase Recoll Recoll &File &Bestand &Tools &Gereedschappen &Preferences &Voorkeuren &Help &Help E&xit V&erlaten Ctrl+Q Crtl+ Q Update &index Update &indexeerder &Erase document history &Wis bestands geschiedenis &About Recoll &Over Recoll &User manual &Gebruiks handleiding Document &History Document & Geschiedenis Document History Document geschiedenis &Advanced Search &Geavanceerd Zoeken Advanced/complex Search Uitgebreid/ Geavanceerd Zoeken &Sort parameters &Sorteer parameters Sort parameters Sorteer parameters Term &explorer Term &onderzoeker Term explorer tool Termen onderzoekers gereedschap Next page Volgende pagina Next page of results Volgende resultaten pagina First page Eerste pagina Go to first page of results Ga naar de eerste pagina van resultaten Previous page Vorige pagina Previous page of results Vorige pagina met resultaten External index dialog Extern index dialoog PgDown PgDown PgUp PgUp &Full Screen &Volledig Scherm F11 F11 Full Screen Volledig Scherm &Erase search history &Wis zoekgeschiedenis Sort by dates from oldest to newest Sorteer op datum van oud naar nieuw Sort by dates from newest to oldest Sorteer op datum van oud naar nieuw Show Query Details Toon zoek detials &Rebuild index &Vernieuw de gehele index Shift+PgUp Shift+PgUp E&xternal index dialog E&xternal index dialoog &Index configuration &Index configuratie &GUI configuration &GUI configuratie &Results &Resultaten Sort by date, oldest first Sorteer op datume, oudste eerst Sort by date, newest first Sorteer op datum, nieuwste eerst Show as table Toon als tabel Show results in a spreadsheet-like table Toon het resultaat in een spreadsheet achtig tabel Save as CSV (spreadsheet) file Bewaar als CVS ( spreadsheet) bestand Saves the result into a file which you can load in a spreadsheet Bewaar het resultaat naar een bestand die te laden is in een spreadsheet Next Page Volgende Pagina Previous Page Vorige Pagina First Page Eerste Pagina Query Fragments Zoek fragmenten With failed files retrying Opnieuw proberen met mislukte bestand Next update will retry previously failed files De volgende update zal de eerder mislukte bestanden opnieuw proberen Indexing &schedule Indexing &schema Enable synonyms Schakel synoniemen in Save last query Bewaar laatste zoekopdracht Load saved query Laad bewaarde zoekopdracht Special Indexing Speciale Indexering Indexing with special options Indexeren met speciale opties &View &Bekijken Missing &helpers Missend & Hulpprogrammas Indexed &MIME types Geindexeerd &MIME types Index &statistics Index & statistieken Webcache Editor Webcache Editor RclTrayIcon Restore Herstellen Quit Afsluiten RecollModel Abstract Uittreksel Author Auteur Document size Bestands grootte Document date Bestands datum File size Bestands grootte File name Bestands naam File date Bestands datum Keywords Sleutelwoorden Original character set Origineel karakter set Relevancy rating relevantiewaarde Title Titel URL URL Mtime Mtijd Date Datum Date and time Datum en tijd Ipath Ipad MIME type MIME type ResList Result list Resultaatslijst (show query) (toon zoekopdracht) Document history Document historie <p><b>No results found</b><br> <p><b>Geen resultaat gevonden</b><br> Previous Vorige Next Volgende Unavailable document Document niet beschikbaar Preview Bekijken Open Openen <p><i>Alternate spellings (accents suppressed): </i> <p><i>Alternatieve spellingen (accenten onderdrukken): </i> Documents Documenten out of at least van tenminste for voor <p><i>Alternate spellings: </i> 
<p><i>Alternatieve spelling: </i> Result count (est.) Resultaten telling (est.) Query details Zoekopdracht details Snippets Knipsel ResTable &Reset sort &Opnieuw sorteren &Delete column &Verwijder kolom Save table to CSV file Bewaar lijst als cvs bestand Can't open/create file: Kan bestand niet openen/ bewaren: &Save as CSV &Bewaar als CVS Add "%1" column Voeg "%1" kolom toe SSearch Any term Elke term All terms Alle termen File name Bestandsnaam Query language Zoek taal Bad query string Foute zoekterm Out of memory Geen geheugen meer Enter file name wildcard expression. Voer bestandsnaam wildcard uitdrukking in. Enter search terms here. Type ESC SPC for completions of current term. Voer zoekterm hier in. Type ESC SPC als aanvulling voor huidige term Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Zoekterm'taal expressie. Cheat sheet: <br> <i> term1 term2 </i>. 'Term1' en 'term2' op elk gebied <br> <i> veld: term1 </i>. 'Term1' in 'het veld' veld <br> Standaard veldnamen / synoniemen: <br> titel / onderwerp / titel, auteur / uit, ontvanger / to, filename, ext. <br> Pseudo-velden: dir, mime / format, het type / rclcat, datum, grootte <br>. Twee datuminterval Voorbeelden: 2009-03-01 / 2009-05-20 2009-03-01 / P2M <br>. <i> term1 term2 OR term3 </i>: term1 AND (term2 OR term3) <br>. U kunt haakjes gebruiken om dingen duidelijker te maken. <br> <i> "term1 term2" </i>: zin (moet precies gebeuren). Mogelijke modifiers: <br> <i> "term1 term2" p </i>. Ongeordende nabijheid zoeken met de standaard afstand <br> Gebruik <b> Toon Zoekterm </b> in geval van twijfel over de uitslag en zie handleiding (& lt; F1>) voor meer informatie. Stemming languages for stored query: Stam taal voor opgeslagen zoekopdrachten: differ from current preferences (kept) Afwijken van de uidig (bewaarde) voorkeuren Auto suffixes for stored query: Automatische aanvullingen voor opgeslagen zoeken External indexes for stored query: External indexen voor opgeslagen zoekopdrachten: Autophrase is set but it was unset for stored query Auto aanvullen is ingesteld, maar het was uitgeschakeld voor de opgeslagen zoekopdracht Autophrase is unset but it was set for stored query Automatisch aanvullen is uitgeschakeld maar was ingesteld voor opegeslagen zoekopdracht SSearchBase SSearchBase SZoekBasis Clear Wissen Ctrl+S Crtl+S Erase search entry Wis zoekopdracht Search Zoeken Start query Start zoekopdracht Enter search terms here. Type ESC SPC for completions of current term. Voer de zoekopdracht term hier in. Type ESC SPC om huidige termen aan te vullen Choose search type. Kies zoektype. 
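The SSearch tooltip above gives a cheat sheet for the Recoll query language (field prefixes, AND/OR grouping, quoted phrases, date intervals). A small made-up query combining those constructs could look like the following; the field values and terms are purely illustrative:

    author:smith ext:pdf "exact phrase" term2 OR term3 date:2009-03-01/P2M

Following the cheat sheet, this requires the author and extension clauses, the exact phrase, either term2 or term3, and a document date falling within the two-month interval starting 2009-03-01.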
SearchClauseW Select the type of query that will be performed with the words Selecteer het type zoekopdracht dat zal worden uitgevoerd met de woorden: Number of additional words that may be interspersed with the chosen ones Aantal extra woorden die kunnen worden ingevoegd met de gekozen woorden No field Geen veld Any Elke All Alle None Geen Phrase Frase Proximity Ongeveer File name Bestandsnaam Snippets Snippets Knipsels Find: Vindt: Next Volgende Prev Vorige SnippetsW Search Zoek <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> <P> Sorry, niet iets precies kunnen vinden. Waarschijnlijk is het document zeer groot en is de knipsels generator verdwaald in een doolhof ... </ p> SpecIdxW Special Indexing Speciale indexering Do not retry previously failed files. Probeerniet nog eens de vorig niet gelukte bestanden Else only modified or failed files will be processed. Anders zullen alleen de veranderende of gefaalde bestanden verwerkt worden Erase selected files data before indexing. Wis de geselecteerde bestandens data voor de indexering Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Map om recursief te indexeren. Dit moet binnen het reguliere geindexeerde gebied zijn<br>zoals ingesteld in het configuratiebestand (hoofdmappen) Browse Bladeren Start directory (else use regular topdirs): Begin Map (anders de normale hoofdmappen gebruiken) Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Laat dit leeg om alle bestanden te kunnen selecteren. U kunt meerdere spaties gescheiden shell-type patronen gebruiken. <br> Patronen met ingesloten ruimtes moeten aangeduid worden met dubbele aanhalingstekens. <br> Kan alleen worden gebruikt als het hoofddoel is ingesteld Selection patterns: Selecteer patronen Top indexed entity Hoofd index identiteit SpellBase Term Explorer Term onderzoeker &Expand &Uitvouwen Alt+E Alt+E &Close &Sluiten Alt+C Alt+C No db info. Geen db info. Match Gelijk Case Hoofdletter Accents Accenten SpellW Wildcards wildcards Regexp Regexp Stem expansion Stam expansie Spelling/Phonetic Spelling/Phonetisch error retrieving stemming languages Fout bij het ophalen van woordstam talen Aspell init failed. Aspell not installed? Aspell init faalt. Is Aspell niet geinstalleerd? Aspell expansion error. Aspell expansie fout. No expansion found Geen expansie gevonden Term Term Doc. / Tot. Doc./Tot. Index: %1 documents, average length %2 terms.%3 results Index: %1 documenten, wisselende lengte %2 termen.%3 resultaten %1 results %1 resultaten List was truncated alphabetically, some frequent De lijst is alfabetisch afgebroken, sommige frequenter terms may be missing. Try using a longer root. Er kunnen termen ontbreken. 
Probeer gebruik te maken van een langere root Show index statistics Toon indexeer statistieken Number of documents Aantal documenten Average terms per document Gemiddelde termen per document Database directory size Database map grootte MIME types: MIME types Item Item Value Waarde Smallest document length (terms) Kleinste document lengte (termen) Longest document length (terms) Langste document lengte (termen) Results from last indexing: resultaten van vorige indexering Documents created/updated Documenten gemaakt/bijgewerkt Files tested Bestanden getest Unindexed files Ongeindexeerde bestanden UIPrefsDialog error retrieving stemming languages fout bij het ophalen van de stam talen The selected directory does not appear to be a Xapian index De geselecteerde map schijnt geen Xapian index te zijn This is the main/local index! Dit is de hoofd/lokale index! The selected directory is already in the index list De geselecteerde map bestaat al in de index lijst Choose Kies Result list paragraph format (erase all to reset to default) Resultaten lijst paragrafen formaat (wist alles en reset naar standaard) Result list header (default is empty) Resultaten koppen lijst ( is standaard leeg) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Selecteer recoll config map of xapian index map (bijv.: /home/me/.recoll of /home/me/.recoll/xapian db) The selected directory looks like a Recoll configuration directory but the configuration could not be read De geselecteerde map ziet eruit als een Recoll configuratie map, maar de configuratie kon niet worden gelezen At most one index should be selected Tenminste moet er een index worden geselecteerd Cant add index with different case/diacritics stripping option Kan index met verschillende hoofdletters/ diakritisch tekens opties niet toevoegen Default QtWebkit font Standaard QtWebkit lettertype Any term Elke term All terms Alle termen File name Bestandsnaam Query language Zoek taal Value from previous program exit Waarde van vorige programma afsluiting ViewAction Command Opdracht MIME type MIME type Desktop Default Desktop Standaard Changing entries with different current values invoering van verschillende huidige waardes veranderd ViewActionBase Native Viewers Standaard Viewers Close Afsluiten Select one or several mime types then use the controls in the bottom frame to change how they are processed. Slecteer een of meerdere mime types gebruik vervolgens de instellingen onderin het venster om de verwerkingen aan te passen Use Desktop preferences by default Gebruik Desktop voorkeuren als standaard Select one or several file types, then use the controls in the frame below to change how they are processed Selecteer een of meerdere bestandstypes, gebruik vervolgens de instellingen onderin het venster hoe ze verwerkt worden Exception to Desktop preferences Uitzonderingen op Desktop voorkeuren Action (empty -> recoll default) Aktie (leeg -> recoll standaard) Apply to current selection Toepassen op huidige selectie Recoll action: Recoll acties current value huidige waarde Select same Selecteer dezelfde <b>New Values:</b> <b>Nieuwe Waardes:</b> Webcache Webcache editor Webcache bewerker Search regexp Zoek regexp WebcacheEdit Copy URL Kopieer URL Unknown indexer state. Can't edit webcache file. Status van indexer onbekend. Kan webcache bestand niet bewerken. Indexer is running. Can't edit webcache file. Indexer is aan het werken. Kan webcache bestand niet bewerken. 
Delete selection Verwijder selectie Webcache was modified, you will need to run the indexer after closing this window. Webcache is gewijzigd, u zult de indexer opnieuw moeten uitvoeren na het sluiten van dit venster WebcacheModel MIME MIME Url Url confgui::ConfBeaglePanelW Web page store directory name Web pagina map naam om op te slaan The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. De naam voor een map waarin de kopieen van de bezochte webpaginas opgeslagen zullen worden.<br>Een niet absoluut pad zal worden gekozen ten opzichte van de configuratie map Max. size for the web store (MB) Max. grootte voor het web opslaan (MB) Process the WEB history queue Verwerk de WEB geschiedenis wachtrij Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Zet het indexeren van firefox bezochte paginas aan. <br> (hiervoor zal ook de Firefox Recoll plugin moeten worden geinstalleerd door uzelf) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Invoeringen zullen worden gerecycled zodra de groote is bereikt. <br> Het verhogen van de groote heeft zin omdat het beperken van de waarde de bestaande waardes niet zal afkappen ( er is alleen afval ruimte aan het einde). confgui::ConfIndexW Can't write configuration file Kan configuratie bestand niet lezen confgui::ConfParamFNW Choose Kies confgui::ConfParamSLW + + - - confgui::ConfSearchPanelW Automatic diacritics sensitivity Automatische diakritische tekens gevoeligheid <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <P> Automatisch activeren diakritische tekens gevoeligheid als de zoekterm tekens zijn geaccentueerd (niet in unac_except_trans). Wat je nodig hebt om de zoek taal te gebruiken en de <i> D</i> modifier om diakritische tekens gevoeligheid te specificeren. Automatic character case sensitivity Automatische karakter hoofdletter gevoeligheid <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <P> Automatisch activeren hoofdletters gevoeligheid als de vermelding hoofdletters heeft in elke, behalve de eerste positie. Anders moet u zoek taal gebruiken en de <i>C</i> modifier karakter-hoofdlettergevoeligheid opgeven. Maximum term expansion count Maximale term uitbreidings telling <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p> Maximale uitbreidingstelling voor een enkele term (bijv.: bij het gebruik van wildcards) Een standaard van 10.000 is redelijk en zal zoekpodrachten die lijken te bevriezen terwijl de zoekmachine loopt door de termlijst vermijden. Maximum Xapian clauses count Maximaal Xapian clausules telling <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. 
The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p> Maximale aantal elementaire clausules die we kunnen toevoegen aan een enkele Xapian zoeken. In sommige gevallen kan het resultaatvan de term uitbreiding multiplicatief zijn, en we willen voorkomen dat er overmatig gebruik word gemaakt van het werkgeheugen. De standaard van 100.000 zou hoog genoeg moeten zijn in beidde gevallen en compatible zijn met moderne hardware configuraties. confgui::ConfSubPanelW Global Globaal Max. compressed file size (KB) Maximaal gecomprimeerd bestands formaat (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Deze waarde stelt een drempel waarboven gecomprimeerde bestanden niet zal worden verwerkt. Ingesteld op -1 voor geen limiet, op 0 voor geen decompressie ooit. Max. text file size (MB) Max. tekstbestand groote (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Deze waarde stelt een drempel waarboven tekstbestanden niet zal worden verwerkt. Ingesteld op -1 voor geen limiet. Dit is voor het uitsluiten van monster logbestanden uit de index. Text file page size (KB) Tekst bestand pagina grootte (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Als deze waarde is ingesteld (niet gelijk aan -1), zal tekstbestanden worden opgedeeld in blokken van deze grootte voor indexering. Dit zal helpen bij het zoeken naar zeer grote tekstbestanden (bijv: log-bestanden). Max. filter exec. time (S) Max. filter executie tijd (S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Externe filters die langer dan dit werken worden afgebroken. Dit is voor het zeldzame geval (bijv: postscript) wanneer een document een filterlus zou kunnen veroorzaken. Stel in op -1 voor geen limiet. Only mime types Alleen mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Een exclusieve lijst van geïndexeerde typen mime. <br> Niets anders zal worden geïndexeerd. Normaal gesproken leeg en inactief Exclude mime types Sluit mime types uit Mime types not to be indexed Mime types die niet geindexeerd zullen worden confgui::ConfTopPanelW Top directories Top mappen The list of directories where recursive indexing starts. Default: your home. Een lijst van mappen waar de recursive indexering gaat starten. Standaard is de thuismap. Skipped paths Paden overgeslagen These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Dit zijn de namen van de mappen die indexering niet zal doorzoeken. <br> Kan wildcards bevatten. Moet overeenkomen met de paden gezien door de indexer (bijv: als topmappen zoals '/ home/me en '/ home' is eigenlijk een link naar '/usr/home', een correcte overgeslagen pad vermelding zou zijn '/home/me/tmp * ', niet' /usr/home/me/tmp * ') Stemming languages Stam talen The languages for which stemming expansion<br>dictionaries will be built. 
De talen waarvoor de stam uitbreidings<br>wooordenboeken voor zullen worden gebouwd. Log file name Log bestandsnaam The file where the messages will be written.<br>Use 'stderr' for terminal output Het bestand waar de boodschappen geschreven zullen worden.<br>Gebruik 'stderr' voor terminal weergave Log verbosity level Log uitgebreidheids nivo This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Deze waarde bepaald het aantal boodschappen,<br>van alleen foutmeldingen tot een hoop debugging data. Index flush megabytes interval Index verversings megabyte interval This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Deze waarde past de hoeveelheid data die zal worden geindexeerd tussen de flushes naar de schijf.<br> Dit helpt bij het controleren van het gebruik van geheugen. Standaad 10MB Max disk occupation (%) maximale schijf gebruik This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Dit is het precentage van schijfgebruike waar indexering zal falen en stoppen (om te vermijden dat uw schijf volraakt.<br>0 betekend geen limit (dit is standaard). No aspell usage Gebruik aspell niet Aspell language Aspell taal Database directory name Database map naam Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Schakelt het gebruik van aspell uit om spellings gissingen in het term onderzoeker gereedschap te genereren. <br> Handig als aspell afwezig is of niet werkt. The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Taal instelling voor het aspell woordenboek. Dit zou er uit moeten zien als 'en'of 'nl'...<br> als deze waarde niet is ingesteld, zal de NLS omgeving gebruikt worden om het te berekenen, wat meestal werkt. Om een idee te krijgen wat er op uw systeem staat, type 'aspell config' en zoek naar .dat bestanden binnen de 'data-dir'map. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. De naam voor een map om de index in op te slaan<br> Een niet absoluut pad ten opzichte van het configuratie bestand is gekozen. Standaard is het 'xapian db'. Unac exceptions Unac uitzonderingen <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. Dit zijn uitzonderingen op het unac mechanisme dat, standaard, alle diakritische tekens verwijderd, en voert canonische ontbinding door. U kunt unaccenting voor sommige karakters veranderen, afhankelijk van uw taal, en extra decomposities specificeren, bijv. voor ligaturen. In iedere ruimte gescheiden ingave , waar het eerste teken is de bron is, en de rest de vertaling. 
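The confgui::ConfTopPanelW and ConfSubPanelW strings above describe indexing parameters that end up in the recoll.conf configuration file. A minimal sketch of such a file follows; the variable names are recalled from the Recoll documentation rather than taken from this archive, so treat them as assumptions and check them against recoll.conf(5):

    # Global section of ~/.recoll/recoll.conf (sketch, assumed variable names)
    topdirs = ~/Documents ~/mail
    skippedPaths = ~/Documents/tmp*
    indexstemminglanguages = english french
    logfilename = stderr
    loglevel = 3
    idxflushmb = 10
    maxfsoccuppc = 0
    noaspell = 0
    aspellLanguage = en
    dbdir = xapiandb
    unac_except_trans = ßss œoe æae

Per-directory overrides (the "customised subtrees" mentioned in the QObject strings) are expressed by repeating parameters under a section header naming the subdirectory, e.g. [~/Documents/archive].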
uiPrefsDialogBase User preferences Gebruikers voorkeuren User interface Gebruikers interface Number of entries in a result page Opgegeven aantal van weergaves per resultaten pagina If checked, results with the same content under different names will only be shown once. Indien aangevinkt, zullen de resultaten met dezelfde inhoud onder verschillende namen slecht eenmaal worden getoond. Hide duplicate results. Verberg duplicaat resultaten. Highlight color for query terms Highlight kleur voor zoektermen Result list font Resultaten lijst lettertype Opens a dialog to select the result list font Opent een dialoog om de resultaten lijst lettertype te selecteren Helvetica-10 Helvetica-10 Resets the result list font to the system default Reset het resultaten lijst lettertype naar systeem standaardwaarde Reset Herstel Texts over this size will not be highlighted in preview (too slow). Teksten groter dan dit zullen niet worden highlighted in previews (te langzaam). Maximum text size highlighted for preview (megabytes) Maximale tekst groote highlighted voor preview (megabytes) Choose editor applications Kies editor toepassingen Auto-start simple search on whitespace entry. Autostart eenvoudige zoekopdracht bij ingave in de witruimte. Start with advanced search dialog open. Start met geavanceerd zoek dialog open. Remember sort activation state. Onthoud sorteer activatie status Prefer Html to plain text for preview. Html voorkeur in plaats van gewoon tekst als preview Search parameters Zoek parameters Stemming language Stam taal A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Een zoekopdracht naar '[rollende stenen] (2 termen) wordt gewijzigd in [rollen of stenen of (rollende frase 2 stenen)]. Dit zou een hogere prioriteit moeten geven aan de resultaten, waar de zoektermen precies zoals ingevoerd moeten verschijnen. Automatically add phrase to simple searches Automatisch aanvullen van eenvoudige zoekopdrachten Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Moeten we proberen om abstracten voor resultatenlijst invoering op te bouwen met behulp van de context van de zoektermen? Kan traag zijn met grote documenten. Dynamically build abstracts Dynamisch abstracten bouwen Do we synthetize an abstract even if the document seemed to have one? Moeten we een abstract maken, zelfs als het document er al een blijkt te hebben? Replace abstracts from documents Vervang abstracten van documenten Synthetic abstract size (characters) Synthetische abstractie grootte (tekens) Synthetic abstract context words Synthetische abstract context woorden The words in the list will be automatically turned to ext:xxx clauses in the query language entry. De woorden in de lijst zal automatisch omgezet worden naar ext:xxx clausules in de zoektaal ingave. Query language magic file name suffixes. Zoek taal magic bestandsnaam achtervoegsel Enable Aanzetten External Indexes Externe indexen Toggle selected Toggle geselecteerde Activate All Alles Activeren Deactivate All Alles Deactiveren Remove from list. This has no effect on the disk index. Verwijder van de lijst. Dit heeft geen effect op de schijf index. 
Remove selected Geselecteerde verwijderen Add index Index toevoegen Apply changes Veranderingen doorvoeren &OK &OK Discard changes Veranderingen ongedaan maken &Cancel &Annuleren Abstract snippet separator Abstract knipsel scheiding Style sheet Style sheet Opens a dialog to select the style sheet file Opend een dialoog venster om style sheet te selecteren Choose Kies Resets the style sheet to default Reset de style sheet naar standaard Result List Resultaten lijst Edit result paragraph format string Bewerk resultaten paragraaf formaat string Edit result page html header insert Bewerk resultaat pagina html header invoeg Date format (strftime(3)) Datum notatie (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Frequentie percentage drempel waarover wij geen termen gebruiken binnen autofrase. Frequente termen zijn een belangrijk prestatie probleem met zinnen en frases. Overgeslagen termen vergroten de zins verslapping, en verminderen de autofrase doeltreffendheid. De standaardwaarde is 2 (procent). Autophrase term frequency threshold percentage Autofrase term frequentie drempelwaarde percentage Plain text to HTML line style Platte tekst naar HTML lijn stijl Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Lijnen in PRE tekst worden niet gevouwen. Met behulp van BR kan inspringen verwijderen. PRE + Wrap stijl zou wenselijk kunnen zijn. <BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE> + wrap Disable Qt autocompletion in search entry. Schakel Qt auto-aanvullen uit in zoek invoegveld Search as you type. Zoek terwijl u typed. Paths translations Paden vertalingen Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Klik hier om een andere index map toe te voegen aan de lijst. U kunt een Recoll configuratie map of een Xapian index te selecteren. Snippets window CSS file Knipsel venster CSS bestand Opens a dialog to select the Snippets window CSS style sheet file Opent een dailoog venster om het knipsel venster CSS stijl sheet bestand te selecteren Resets the Snippets window style Herstel de Knipsel venster stijl Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Bepaal of document mappen moeten worden weergegeven als keuzerondjes, gereedschap combinatiebox of menu. Document filter choice style: Document filter keuze stijl: Buttons Panel Knoppen Paneel Toolbar Combobox Gereedschaps-menu combinatiebox Menu Menu Show system tray icon. Toon pictogram in het systeemvak. Close to tray instead of exiting. Sluit naar systeemvak in plaats van sluiten. Start with simple search mode Start met een eenvoudige zoek modus User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Gebruiker stijl toe te passen op het knipsel-venster <br>. Let op: het resultaat pagina header invoegen is ook opgenomen in het'kop knipsel-venster . Synonyms file Synoniemen bestand Show warning when opening temporary file. Toon waarschuwing bij het openen van een temp bestand. 
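The uiPrefsDialogBase strings above mention a result-list date format expressed in strftime(3) syntax. As a small example, a format string such as the one below renders dates as an ISO-style timestamp (an assumed value for illustration, not necessarily the Recoll default):

    %Y-%m-%d %H:%M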
recoll-1.26.3/qtgui/i18n/recoll_da.qm0000644000175000017500000031262113566424763014224 00000000000000
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.  ConfIndexWDe sprog, hvor ordstamme-udvidelses<br>ordbger vil blive bygget.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWListen over mapper hvor rekursiv indeksering starter. Standard: din hjemme-mappe (home).LThe list of directories where recursive indexing starts. Default: your home. ConfIndexWNavnet p en mappe hvor du vil gemme kopier af besgte websider.<br>En relativ sti er taget i forhold til konfigurationsmappen.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexWNavnet p en mappe hvor du vil gemme indekset<br>En relativ sti er taget i forhold til konfigurationsmappen. Standard er "xapiandb.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. ConfIndexWbDenne vrdi justere mngden af data, der er indekseret mellem skrivning til disken.<br>Dette hjlper med at kontrollere indekseringsprogrammets brug af hukommelse. Standard 10MBThis value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWDenne vrdi justerer mngden af meddelelser,<br>fra kun fejl til en masse fejlretningsdata.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexWverste mapperTop directories ConfIndexW Unac-undtagelserUnac exceptions ConfIndexWDMappenavn for lageret til WebsiderWeb page store directory name ConfIndexWEn eksklusiv liste over indekserede MIME-typer.<br>Intet andet vil blive indekseret. Normalt tom og inaktiveAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactive ConfSubPanelW"Udeluk mime-typerExclude mime types ConfSubPanelWEksterne filtre der arbejder lngere end dette vil blive afbrudt. Dette er for det sjldne tilflde (dvs.: postscript) hvor et dokument kan forrsage, at et filter laver et loop. Indstil til -1 for ingen grnse.External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.  ConfSubPanelW GlobalGlobal ConfSubPanelWnHvis denne vrdi er angivet (ikke lig med -1), vil tekstfiler opdeles i bidder af denne strrelse for indeksering. Dette vil hjlpe sgning i meget store tekstfiler (dvs.: log-filer).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWFMaks. komprimeret filstrrelse (KB)Max. compressed file size (KB) ConfSubPanelW@Maks. strrelse p tekstfil (MB)Max. text file size (MB) ConfSubPanelWFMime-typer der ikke skal indekseresMime types not to be indexed ConfSubPanelWKun mime-typerOnly mime types ConfSubPanelW<Sidestrrelse p tekstfil (KB)Text file page size (KB) ConfSubPanelWFDenne vrdi angiver en grnse for, hvornr komprimerede filer ikke vil blive behandlet. Indstil til -1 for ingen grnse, til 0 for ingen dekomprimering nogensinde.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelWRDenne vrdi angiver en grnse for, hvornr tekstfiler ikke vil blive behandlet. Indstil til -1 for ingen grnse. Dette er for at udelukke monster logfiler fra indekset.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indekseringstidsplan (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Hvert felt kan indeholde et jokertegn (*), en enkelt numerisk vrdi, kommaseparerede lister (1,3,5) og intervaller (1-7). Mere generelt vil felterne blive brugt <span style=" font-style:italic;"> som de er</span> inde i crontabfilen, og den fulde crontab syntaks kan bruges, se crontab (5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For eksempel, indtastning af <span style=" font-family:'Courier New,courier';">*</span> i <span style=" font-style:italic;">Dage, </span><span style=" font-family:'Courier New,courier';">12,19</span> i <span style=" font-style:italic;">Timer</span> og <span style=" font-family:'Courier New,courier';">15</span> i <span style=" font-style:italic;">Minutter</span> ville starte recollindex hver dag kl. 
00:15 og 19:15 </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">En tidsplan med meget hyppige aktiveringer er formentlig mindre effektiv end realtid indeksering.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 AM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolWh<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Deaktiver</span> for at stoppe automatisk batch indeksering, <span style=" font-style:italic;">Aktiver</span> for at aktivere den, <span style=" font-style:italic;">Annuller</span> for ikke at ndre noget.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWCron vindue Cron Dialog CronToolWZUgens dage (* eller 0-7, 0 eller 7 er Sndag))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWDeaktiverDisable CronToolWAktiverEnable CronToolWFejl ved installation af cron-indgange. Forkert syntaks i felter?3Error installing cron entry. Bad syntax in fields ? CronToolW(Timer (* eller 0-23)Hours (* or 0-23) CronToolWDet ser ud til, at manuelt redigerede indgange findes for recollindeks, kan ikke redigere crontabPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinutter (0-59)Minutes (0-59) CronToolW VindueDialog EditDialog$Konfigureringsfejl Config error EditTransLokal sti Local path EditTransOriginal sti Original path EditTransKildesti Source path EditTrans TilfjAdd EditTransBaseAnnullerCancel EditTransBaseSletDelete EditTransBase*Oversttelse af stierPath Translations EditTransBaseGemSave EditTransBaseVlg en eller flere filtyper, brug derefter knapperne i rammen nedenfor for at ndre, hvordan de skal behandleskSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBaseRIndstilling af oversttelser af stier forSetting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Det fremgr, at indekset for denne konfiguration ikke eksisterer.</span><br /><br />Hvis du blot nsker at indeksere din hjemmemappe med et st fornuftige standardindstillinger, skal du trykke p <span style=" font-style:italic;">Start indeksering nu</span> knappen. Du vil vre i stand til at justere detaljerne senere. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Hvis du nsker mere kontrol, kan du bruge flgende link til at justere indekseringskonfiguration og tidsplan.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Disse vrktjer kan tilgs senere fra <span style=" font-style:italic;">Prference</span> menuen.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialog>Opstning af frste indekseringFirst indexing setupFirstIdxDialog8Konfiguration af indekseringIndexing configurationFirstIdxDialog0Tidsplan for indekseringIndexing scheduleFirstIdxDialog(Start indeksering nuStart indexing nowFirstIdxDialog Dette vil lade dig justere de mapper, du vil indeksere, og andre parametre som udelukkede filstier eller navne, standard tegnst etc.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialogDette vil lade dig vlge mellem batch og realtime indeksering, og oprette en automatisk tidsplan for batch indeksering (ved hjlp af cron).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog%1 ikke fundet. %1 not found.FragButs%1: %2%1: %2FragButs:Foresprgsel efter fragmenterQuery FragmentsFragButs <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indeksering kan kre permanent, indeksere filer nr de ndrer sig, eller kre med adskilte intervaller. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Lsning af manualen kan hjlpe dig med at vlge mellem disse tilgange (tryk F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Dette vrktj kan hjlpe dig med at oprette en tidsplan for at automatisere krsler af batch indeksering, eller starte realtid indeksering nr du logger ind (eller begge dele, hvilket sjldent giver mening). </p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedW Cron skeduleringCron scheduling IdxSchedWBeslut, om realtid indeksering skal startes nr du logger ind (kun for standard-indekset).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedW>Opstning af indeks skeduleringIndex scheduling setup IdxSchedW<Opstart af realtid indekseringReal time indexing start up IdxSchedWVrktjet vil lade dig afgre, p hvilket tidspunkt indeksering skal kre og det vil installere en crontab indgang._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedW VindueDialog ListDialogGruppeboksGroupBox ListDialog>Ingen dbmappe i konfigurationen No db directory in configurationMain &Nste&NextPreview&Forrige &PreviousPreview&Sger efter: &Search for:Preview`Kan ikke lave dok til intern reprsentation for 0Can't turn doc into internal representation for PreviewAnnullerCancelPreviewRydClearPreview6Laver forhndsvisningstekstCreating preview textPreviewVHenter forhndsvisningstekst for redigering Loading preview text into editorPreview(Store/sm &Bogstaver Match &CasePreview2Manglende hjlpeprogram: Missing helper program: PreviewbnOpenPreview KopierCopyPreviewTextEditOmbryd linjer Fold linesPreviewTextEdit Bevar indrykningPreserve indentationPreviewTextEditUdskrivPrintPreviewTextEdit*Udskriv denne VisningPrint Current PreviewPreviewTextEdit(Gem dokument til filSave document to filePreviewTextEditVlg alle Select AllPreviewTextEditVis felter Show fieldsPreviewTextEditVis billede Show imagePreviewTextEditVis hovedtekstShow main textPreviewTextEdit2<b>Tilpassede undermapperCustomised subtreesQObjectxTegnst, der bruges til at lse filer, hvor tegnsttet ikke kan identificeres ud fra indholdet, f.eks. rene tekstfiler.<br>Standardvrdien er tom, og vrdien fra NLS-omgivelserne anvendes.Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObject&Standard<br>tegnstDefault
character setQObject(Flg symbolske linksFollow symbolic linksQObjectFlg symbolske link under indeksering. Standarden er nej, for at undg dobbelt indekseringTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject&ignorerede endelserIgnored endingsQObject,Indekser alle filnavneIndex all file namesQObjectIndekser navnene p filer, hvor indholdet ikke kan identificeres eller behandles (ingen eller ikke-understttet mime-type). Standard er true}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObjectUdeladte navne Skipped namesQObjectListen over undermapper i det indekserede hierarki <br>hvor nogle parametre behver at blive omdefineret. Standard: tom.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectbDette er endelser p filnavne for filer, hvor kun navnet vil blive indekseret (ingen forsg p identifikation af MIME-type, ingen dekomprimering, ingen indeksering af indhold).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObjectDette er mnstre for fil- eller mappenavne, der ikke skal indekseres.LThese are patterns for file or directory names which should not be indexed.QObject&bn&OpenQWidget6&bn Forlderdokument/mappe&Open Parent document/folderQWidget &Forhndsvisning&PreviewQWidget&Skriv til fil&Write to FileQWidget8Kan ikke udtrkke dokument: Cannot extract document: QWidget(Vlg prcis en mappeChoose exactly one directoryQWidgetKopier &FilnavnCopy &File NameQWidgetKopier &URL Copy &URLQWidget.Kunne ikke lse mappe: Could not read directory: QWidgetJOpret eller vlg mappe til at gemme iCreate or choose save directoryQWidget2Find &lignende dokumenterFind &similar documentsQWidget8bn vindue til &tekststumperOpen &Snippets windowQWidgetbn med Open WithQWidgetFForhndsvis &Forlderdokument/mappePreview P&arent document/folderQWidgetKr skript Run ScriptQWidget0Gem det valgte til filerSave selection to filesQWidgetLVis underdokumenter / vedhftede filerShow subdocuments / attachmentsQWidgetRUventet kollision af filnavn, annullerer.+Unexpected file name collision, cancelling.QWidgetVis ikke igen.Do not show again.QxtConfirmationMessager<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> Indeksering kan sttes til at kre som en dmon, der opdatere indekset nr filer ndres, i realtid. Du fr et indeks, som altid er opdateret, men systemressourcer anvendes permanent..</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolWJStart ogs indekseringsdmon lige nu.%Also start indexing daemon right now.RTIToolWhAutostartfil er slettet. Stop ogs nuvrende proces?2Autostart file deleted. Kill current process too ?RTIToolW$Kan ikke oprette: Can't create: RTIToolW6Kunne ikke kre recollindexCould not execute recollindexRTIToolWSletter fil Deleting fileRTIToolWSletter:  Deleting: RTIToolWNAutomatisk start af realtid indeksering"Real time indexing automatic startRTIToolW"Fjerner autostartRemoving autostartRTIToolWErstatter filReplacing fileRTIToolWErstatter:  Replacing: RTIToolWjStart indekseringsdmonen med min skrivebordssession..Start indexing daemon with my desktop session.RTIToolWAdvarselWarningRTIToolW(alle sprog)(all languages)RclMain$(Ingen ordstammer) (no stemming)RclMainOm Recoll About RecollRclMainAlleAllRclMainForkert desktop app spec for %1: [%2] Tjek venligst desktopfilen?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMainUgyldige stier Bad pathsRclMainForkert kommandolinje for fremviser for %1: [%2] Kontroller venligst mimeview-filenCBad viewer command line for %1: [%2] Please check the mimeview fileRclMain(Kan ikke tilg fil: Can't access file: RclMainNKan ikke oprette forhndsvisningsvindueCan't create preview windowRclMain\Kan ikke aktivere synonymer-fil (analysefejl?)&Can't set synonyms file (parse error?)RclMain6Kan ikke dekomprimere fil: Can't uncompress file: RclMainVKan ikke opdatere indeks: indeksering krer#Can't update index: indexer runningRclMainpKan ikke udtrkke dokument eller oprette midlertidig fil0Cannot extract document or create temporary fileRclMain>Kan ikke finde forlderdokumentCannot find parent documentRclMainRKan ikke hente dokumentinfo fra databasen+Cannot retrieve document info from databaseRclMain2Vlg fil, der skal gemmesChoose file to saveRclMainAfslutterClosingRclMainJKunne ikke indlse gemte foresprgselCould not load saved queryRclMainKunne ikke bne ekstern indeks. DB er ikke ben. Tjek liste over eksterne indekser.HCould not open external index. Db not open. Check external indexes list.RclMain*Kunne ikke bne fil: Could not open file: RclMain2Kunne ikke skrive til filCould not write to fileRclMain~Deaktiveret fordi realtid indeksering ikke blev kompileret ind.;Disabled because the real time indexer was not compiled in.RclMainVis ikke denne advarsel nste gang (brug GUI prferencer for at gendanne).DDo not show this warning next time (use GUI preferences to restore).RclMainDokumentfilterDocument filterRclMain DokumenthistorikDocument historyRclMain FrdigDoneRclMain(Identiske dokumenterDuplicate documentsRclMainSletter indeks Erasing indexRclMainFejlErrorRclMainUdfrer: [ Executing: [RclMainEksterne programmer/kommandoer ndvendige for dine filtyper blev ikke fundet, som gemt af den sidste indeksering pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMainHistorik-data History dataRclMainIndeks lst Index lockedRclMain0Indeks foresprgselsfejlIndex query errorRclMain$Indeks skeduleringIndex schedulingRclMain,Indekserede MIME-typerIndexed MIME TypesRclMain\Indeksering krer. Kan ikke tilg webcachefil./Indexer is running. 
Can't access webcache file.RclMain.Indeksering mislykkedesIndexing failedRclMain(Indeksering i gang: Indexing in progress: RclMain&indeksering afbrudtIndexing interruptedRclMainIndlsningsfejl Load errorRclMain4Manglende hjlpeprogrammerMissing helper programsRclMainMonitorMonitorRclMainhIngen ekstern fremviser konfigureret for mime-type [-No external viewer configured for mime type [RclMain,Ingen hjlpere manglerNo helpers found missingRclMainDIngen tidligere sgning er bevaretNo preserved previous searchRclMain.Ingen resultater fundetNo results foundRclMainIngen sgning No searchRclMain IngenNoneRclMainbner en midlertidig kopi. ndringer vil g tabt, hvis du ikke gemmer<br/>dem til et permanent sted.`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMainRydder opPurgeRclMainForesprgsel er i gang<br>P grund af begrnsninger i indekseringsbiblioteket,<br>vil en annullering afslutte programmeteQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain4Resultater af foresprgsel Query resultsRclMain&Lsning mislykkedes Read failedRclMainBNulstil indekset og start forfra?(Reset the index and start from scratch ?RclMain8Optlling af resultat (est.)Result count (est.)RclMainGem fil Save fileRclMain8Gemte foresprgsler (*.rclq)Saved Queries (*.rclq)RclMainnMnstre for udvlgelse kan kun bruges med en startmappe:Selection patterns can only be used with a start directoryRclMainbMnstre for udvlgelse skal have en verste mappeSelection patterns need topdirRclMainBeklager, er endnu ikke tilgngelig for Windows, bruge Fil menuindgange for at opdatere indeksetYSorry, not available under Windows for now, use the File menu entries to update the indexRclMainstammedbStemdbRclMain"Stop &IndekseringStop &IndexingRclMainFUnderdokumenter og vedhftede filerSub-documents and attachmentsRclMain*Den nuvrende indekseringsproces blev ikke startet fra denne grnseflade. Klik p OK for at stoppe den alligevel, eller Annuller for at lade den kreyThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainDokumentet tilhrer et eksternt indeks, som jeg ikke kan opdatere.@The document belongs to an external index which I can't update. RclMainIndeksering krer, s ting burde vre bedre, nr den er frdig. @The indexer is running so things should improve when it's done. RclMainFremviseren angivet i mimeview for %1: %2 er ikke fundet. nsker du at bne indstillingsvinduet?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainbDisse webadresser ( | ipath) deler samme indhold:-These Urls ( | ipath) share the same content:RclMainrDette konfigurationsvrktj virker kun for hovedindekset.6This configuration tool only works for the main index.RclMainFDenne sgning er ikke lngere aktiv"This search is not active any moreRclMain UkendtUnknownRclMaintIndeksering i ukendt tilstand. Kan ikke tilg webcachefil.2Unknown indexer state. 
Can't access webcache file.RclMainOpdater &Indeks Update &IndexRclMainOpdatererUpdatingRclMainFremviser kommandolinje for %1 angiver bde fil og forlderfil vrdier: er ikke understttetQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainAdvarselWarningRclMain*Skrivning mislykkedes Write failedRclMain\fejl under hentning af ordstammer for sprogene#error retrieving stemming languagesRclMainfiltreretfilteredRclMain mediermediaRclMain beskedmessageRclMain andetotherRclMainprsentation presentationRclMainsorteretsortedRclMainregneark spreadsheetRclMain teksttextRclMainR Forsg igen med filer der mislykkedes With failed files retrying RclMainBase&Om Recoll &About Recoll RclMainBase$&Avanceret sgning&Advanced Search RclMainBase,&Slet dokumenthistorik&Erase document history RclMainBase$&Slet sgehistorik&Erase search history RclMainBase&Fil&File RclMainBase&Fuld skrm &Full Screen RclMainBase,&Konfiguration for GUI&GUI configuration RclMainBase &Hjlp&Help RclMainBase2&Konfiguration for Indeks&Index configuration RclMainBase&Prferencer &Preferences RclMainBase &Genopbyg indeks&Rebuild index RclMainBase&Resultater&Results RclMainBase*&Sorterings-parametre&Sort parameters RclMainBase&Vrktjer&Tools RclMainBase&Brugermanual &User manual RclMainBase&Vis&View RclMainBase4Avanceret/kompleks sgningAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase DokumenthistorikDocument History RclMainBase"Dokument&historikDocument &History RclMainBaseA&fslutE&xit RclMainBase$E&ksterne indekserE&xternal index dialog RclMainBase"Aktiver synonymerEnable synonyms RclMainBase"Eksterne indekserExternal index dialog RclMainBaseF11F11 RclMainBaseFrste side First Page RclMainBaseFrste side First page RclMainBaseFuld skrm Full Screen RclMainBaseBG til frste side med resultaterGo to first page of results RclMainBase Indeks&statistikIndex &statistics RclMainBase.Indekserede &MIME-typerIndexed &MIME types RclMainBase2Tid&splan for IndekseringIndexing &schedule RclMainBaseLIndeksering med srlige indstillinger Indexing with special options RclMainBase2Indls gemte foresprgselLoad saved query RclMainBase&Manglende &hjlpereMissing &helpers RclMainBaseNste side Next Page RclMainBaseNste side Next page RclMainBase2Nste side med resultaterNext page of results RclMainBaseNste opdatering vil igen forsge med filer, der tidligere mislykkedes.Next update will retry previously failed files RclMainBase PgDownPgDown RclMainBasePgUpPgUp RclMainBaseForrige side Previous Page RclMainBaseForrige side Previous page RclMainBase6Forrige side med resultaterPrevious page of results RclMainBase:Foresprgsel efter fragmenterQuery Fragments RclMainBase RecollRecoll RclMainBase4Gem som CSV (regneark) filSave as CSV (spreadsheet) file RclMainBase.Gem sidste foresprgselSave last query RclMainBasexGemmer resultatet i en fil, som du kan indlse i et regneark@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift+PgUp Shift+PgUp RclMainBase6Vis Detaljer i foresprgselShow Query Details RclMainBaseVis som tabel Show as table RclMainBaseVVis resultater i en regneark-lignende tabel(Show results in a spreadsheet-like table RclMainBase>Sorter efter dato, nyeste frstSort by date, newest first RclMainBase>Sorter efter dato, ldste frstSort by date, oldest first RclMainBaseNSorter efter dato fra nyeste til ldste#Sort by dates from newest to oldest RclMainBaseNSorter efter dato fra ldste til nyeste#Sort by dates from oldest to newest RclMainBase(Sorterings-parametreSort parameters RclMainBase$Srlig 
indekseringSpecial Indexing RclMainBase&Sg efter ordTerm &explorer RclMainBase:Vrktj for sgning efter ordTerm explorer tool RclMainBaseOpdater &Indeks Update &index RclMainBase Rediger webcacheWebcache Editor RclMainBase AfslutQuit RclTrayIcon GendanRestore RclTrayIconSammendragAbstract RecollModelForfatterAuthor RecollModelDatoDate RecollModelDato og tid Date and time RecollModel Dokumentets dato Document date RecollModel*Dokumentets strrelse Document size RecollModelFildato File date RecollModelFilnavn File name RecollModelFilstrrelse File size RecollModel IpathIpath RecollModelNgleordKeywords RecollModelMIME-type MIME type RecollModelMtidMtime RecollModel"Originale tegnstOriginal character set RecollModel&Relevans bedmmelseRelevancy rating RecollModel TitelTitle RecollModelURLURL RecollModel$(vis foresprgsel) (show query)ResListJ<p><b>Ingen resultater fundet</b><br>

No results found
ResListp<p><i>Alternative stavemder (accenter undertrykt): </i>4

Alternate spellings (accents suppressed): ResListD<p><i>Alternative stavemder: </i>

Alternate spellings: ResList DokumenthistorikDocument historyResListDokumenter DocumentsResList NsteNextResListbnOpenResListForhndsvisningPreviewResListForrigePreviousResList.Detaljer i Foresprgsel Query detailsResList8Optlling af resultat (est.)Result count (est.)ResListResultatliste Result listResListTekststumperSnippetsResList2Dokument ikke tilgngeligUnavailable documentResListforforResListud af mindstout of at leastResList&Slet kolonne&Delete columnResTable$&Nulstil sortering &Reset sortResTable&Gem som CSV &Save as CSVResTable&Tilfj "%1" kolonneAdd "%1" columnResTable6Kan ikke bne/oprette fil: Can't open/create file: ResTable*Gem tabel til CSV-filSave table to CSV fileResTablej adskiller sig fra de nuvrende prferencer (beholdt)' differ from current preferences (kept)SSearchAlle ord All termsSSearchVilkrlig ordAny termSSearchZAutomatiske suffikser for gemte foresprgsel: Auto suffixes for stored query: SSearchAutofrase er aktiveret, men var deaktiveret for gemte foresprgsel3Autophrase is set but it was unset for stored querySSearchAutofrase er deaktiveret, men var aktiveret for gemte foresprgsel3Autophrase is unset but it was set for stored querySSearch6Forkert foresprgselsstrengBad query stringSSearchBIndtast filnavn jokertegn udtryk.$Enter file name wildcard expression.SSearch$Indtast foresprgselssprogets udtryk. Snydeark:<br> <i>ord1 ord2</i> : 'ord1' og 'ord2' i et hvilken som helst felt.<br> <i>felt:ord1</i> : 'ord1' i feltet 'felt'.<br> Standard feltnavne/synonymer:<br> titel/emne/billedtekst, forfatter/fra, modtager/til, filnavn, ekst.<br> Pseudofelter: dir, mime/format, type/rclcat, dato, strrelse.<br> To datointerval-eksempler: 2009-03-01/2009-05-20 2009-03-01/P2M:<br>. <i>ord1 ord2 OR ord3</i>: ord1 AND (ord2 OR ord3).<br> Du kan bruge parenteser for at gre tingene klarere.<br> <i>"ord1 ord2"</i> : frase (skal forekomme njagtigt). Mulige modifikatorer:<br> <i>"ord1 ord2"p </i> : uordnet nrheds-sgning med standard afstand.<br> Brug <b>Vis Foresprgsel</b> link nr i tvivl om resultatet og se manual (&lt;F1>) for flere detaljer.Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchREksterne Indekser for gemte foresprgsel:#External indexes for stored query: SSearchFilnavn File nameSSearch(Ikke mere hukommelse Out of memorySSearch$ForesprgselssprogQuery languageSSearch^Ordstammer til sprogene for gemte foresprgsel:%Stemming languages for stored query: SSearchVlg sgetype.Choose search type. SSearchBaseRydClear SSearchBase Ctrl+SCtrl+S SSearchBase Slet sgeindgangErase search entry SSearchBaseSSgeBase SSearchBase SSearchBaseSgSearch SSearchBase$Start foresprgsel Start query SSearchBaseAlleAll SearchClauseWVilkrligAny SearchClauseWFilnavn File name SearchClauseWIntet feltNo field SearchClauseW IngenNone SearchClauseWtAntal yderligere ord, der kan vre blandet med de udvalgteHNumber of additional words that may be interspersed with the chosen ones SearchClauseW FrasePhrase SearchClauseW Nrhed Proximity SearchClauseWvVlg den type foresprgsel, der vil blive udfrt med ordene>Select the type of query that will be performed with the words SearchClauseW Find:Find:Snippets NsteNextSnippetsForrigePrevSnippetsTekststumperSnippetsSnippetsh<p>Desvrre blev der ikke, inden for rimelige grnser, fundet en njagtig match. Sandsynligvis fordi dokumentet er meget stort, s tekststump-generatoren for vild i mngden...</ p>

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsWSgSearch SnippetsWGennemseBrowseSpecIdxW$Mappe for rekursiv indeksering. Dette skal vre indenfor det regulre indekserede omrde<br> som defineret i konfigurationsfilen (verste mapper).Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxW|Ellers vil kun ndrede eller mislykkede filer blive behandlet.5Else only modified or failed files will be processed.SpecIdxWVSlet udvalgte filers data, fr indeksering.*Erase selected files data before indexing.SpecIdxWLad st tomt for at vlge alle filer. Du kan bruge adskillige mellemrums-adskilte shell-type mnstre.<br>Mnstre med indlejrede mellemrum skal citeres med dobbelte anfrselstegn.<br>Kan kun bruges, hvis startmlet er angivet.Leave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxW.Mnstre for udvlgelse:Selection patterns:SpecIdxW$Srlig indekseringSpecial IndexingSpecIdxW*Top indekserede enhedTop indexed entitySpecIdxW&Luk&Close SpellBase&Udvid &Expand  SpellBaseAccenterAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBase$Stor/Sm bogstaverCase SpellBaseSammenlignMatch SpellBaseIngen dbinfo. No db info. SpellBaseSg efter ord Term Explorer SpellBase> Dokumenter oprettet/opdateret Documents created/updatedSpellW Filer testet Files testedSpellW0 ikke-indekserede filer Unindexed filesSpellW%1 resultater %1 resultsSpellW<Gennemsnitlige ord pr dokumentAverage terms per documentSpellW6Mappestrrelse for databaseDatabase directory sizeSpellWDok. / Tot. Doc. / Tot.SpellWxIndex: %1 dokumenter, gennemsnitslngde %2 ord %3 resultater7Index: %1 documents, average length %2 terms.%3 resultsSpellWElementItemSpellWXListe blev afkortet alfabetisk, nogle ofte 1List was truncated alphabetically, some frequent SpellW8Lngste dokumentlngde (ord)Longest document length (terms)SpellWMIME-typer: MIME types:SpellW,Ingen udvidelse fundetNo expansion foundSpellW Antal dokumenterNumber of documentsSpellW RegexRegexpSpellWDResultater fra sidste indeksering:Results from last indexing:SpellW0Vis statistik for indeksShow index statisticsSpellW8Mindste dokumentlngde (ord) Smallest document length (terms)SpellW"Stavning/FonetiskSpelling/PhoneticSpellW&Udvidelse af stammeStem expansionSpellWOrdTermSpellW VrdiValueSpellWJokertegn WildcardsSpellW^fejl under hentning af ordstammer for sprogene #error retrieving stemming languagesSpellWbDer kan mangle ord. Prv at bruge en lngere rod..terms may be missing. Try using a longer root.SpellWAlle ord All terms UIPrefsDialogVilkrlig ordAny term UIPrefsDialog@Der burde vlges hjst et indeks$At most one index should be selected UIPrefsDialogKan ikke tilfje indeks med en anden indstilling for fjernelse af store-bogstaver/diakritiske tegn>Cant add index with different case/diacritics stripping option UIPrefsDialogVlgChoose UIPrefsDialog@Standard skrifttype for QtWebkitDefault QtWebkit font UIPrefsDialogFilnavn File name UIPrefsDialog$ForesprgselssprogQuery language UIPrefsDialog\Overskrift for resultatliste (standard er tom)%Result list header (default is empty) UIPrefsDialogAfsnitformat for resultatliste (slet alt for at nulstille til standard)Nye vrdier:</b>New Values:ViewActionBaseBHandling (tom -> recoll standard) Action (empty -> recoll default)ViewActionBase2Anvend p aktuelle udvalgApply to current selectionViewActionBaseLukCloseViewActionBasePUndtagelse til indstillinger for Desktop Exception to Desktop preferencesViewActionBase,Oprindelige fremvisereNative ViewersViewActionBase Recoll handling:Recoll action:ViewActionBaseVlg en eller flere filtyper, og brug derefter knapperne i rammen nedenfor for at ndre, hvordan de behandleskSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseVlg en eller flere Mime-typer og brug derefter knapperne i bundrammen til at ndre, hvordan de behandles.lSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBaseVlg det samme Select sameViewActionBaseVBrug indstillinger for Desktop som standard"Use Desktop preferences by defaultViewActionBaseaktuelle vrdi current valueViewActionBaseRegex sgning Search regexpWebcache Rediger webcacheWebcache editorWebcacheKopier URLCopy URL WebcacheEditSlet det valgteDelete selection WebcacheEditbIndeksering krer. 
Kan ikke redigere webcachefil.-Indexer is running. Can't edit webcache file. WebcacheEditzIndeksering i ukendt tilstand. Kan ikke redigere webcachefil.0Unknown indexer state. Can't edit webcache file. WebcacheEditWebCache blev ndret, du er nd til at kre indeksering efter lukning af dette vindue.RWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelUrlUrl WebcacheModelVlgChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW&Annuller&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
uiPrefsDialogBase <PRE>
uiPrefsDialogBase<PRE> + wrap
 + wrapuiPrefsDialogBaseEn sgning efter [Rullende Sten] (2 ord) vil blive ndret til [rullende eller sten eller (rullende frase 2 sten)].
Dette skulle give hjere forrang til resultaterne, hvor sgeordene vises njagtigt som angivet.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBaseVSeparator mellem sammendragets tekststumperAbstract snippet separatoruiPrefsDialogBaseAktiver alleActivate AlluiPrefsDialogBaseTilfj index	Add indexuiPrefsDialogBase Anvend ndringer
Apply changesuiPrefsDialogBaseXTilfj automatisk frase til simple sgninger+Automatically add phrase to simple searchesuiPrefsDialogBasebTrskelprocentsats for ordhyppighed ved autofrase.Autophrase term frequency threshold percentageuiPrefsDialogBase"Panel med knapper
Buttons PaneluiPrefsDialogBaseVlgChooseuiPrefsDialogBase4Vlg redigeringsprogrammerChoose editor applicationsuiPrefsDialogBaseKlik for at tilfje endnu en indeksmappe til listen. Du kan vlge enten en Recoll konfigurationsmappe eller et Xapianindeks.{Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.uiPrefsDialogBaseZLuk til systembakke i stedet for at afslutte.!Close to tray instead of exiting.uiPrefsDialogBase0Datoformat (strftime(3))Date format (strftime(3))uiPrefsDialogBaseDeaktiver alleDeactivate AlluiPrefsDialogBaseBestemmer om dokumentfiltre er vist som radioknapper, vrktjslinje kombinationsfelt eller menu.QDecide if document filters are shown as radio buttons, toolbar combobox, or menu.uiPrefsDialogBaseXDeaktiver Qt autofuldfrelse i sgeindgange.*Disable Qt autocompletion in search entry.uiPrefsDialogBase"Kassere ndringerDiscard changesuiPrefsDialogBaseSkal vi sammenfatte et sammendrag, selvom dokumentet synes at have et?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBase8Skal vi forsge at lave sammendrag af indgange til resultatliste ved at bruge sammenhnget med foresprgselsordene? 
Kan vre langsomt for store dokumenter.zDo we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.uiPrefsDialogBase<Valgmetode for dokumentfilter:Document filter choice style:uiPrefsDialogBase.Lav dynamisk sammendragDynamically build abstractsuiPrefsDialogBasepRediger kode for indstnig i html-hoved for resultatside#Edit result page html header insertuiPrefsDialogBaseNRediger formatstreng for resultatafsnit#Edit result paragraph format stringuiPrefsDialogBaseAktiverEnableuiPrefsDialogBase"Eksterne IndekserExternal IndexesuiPrefsDialogBaseHyppighedens procentvise trskel, hvorover vi ikke bruger ord inde i autofrase.
Hyppige ord er et stort problem for ydeevnen med fraser.
Udeladte ord forger frase stilstand, og reducere effektiviteten af autofrase.
Standardvrdien er 2 (procent).Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). uiPrefsDialogBaseHelvetica-10Helvetica-10uiPrefsDialogBase6Skjul identiske resultater.Hide duplicate results.uiPrefsDialogBaseAfkryds forrsager, at resultater med samme indhold under forskellige navne kun bliver rapporteret en gang.XIf checked, results with the same content under different names will only be shown once.uiPrefsDialogBaseLinjer i PRE tekst ombrydes ikke. Brug af BR mister en del indrykning. PRE + Wrap stil kunne vre, hvad du nsker.iLines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.uiPrefsDialogBaseMaksimal tekststrrelse der fremhves for forhndsvisning (megabyte)5Maximum text size highlighted for preview (megabytes)uiPrefsDialogBaseMenuMenuuiPrefsDialogBase@Antal indgange i en resultatside"Number of entries in a result pageuiPrefsDialogBasebner et vindue til at vlge CSS stilark-fil for vinduet til tekststumperAOpens a dialog to select the Snippets window CSS style sheet fileuiPrefsDialogBasenbner et vindue til at vlge resultatlistens skrifttype-Opens a dialog to select the result list fontuiPrefsDialogBasePbn et vindue for at vlge stilark-filen-Opens a dialog to select the style sheet fileuiPrefsDialogBase,Oversttelser af stierPaths translationsuiPrefsDialogBaseFAlmindelig tekst til HTML linjetypePlain text to HTML line styleuiPrefsDialogBasenForetrk Html til almindelig tekst for forhndsvisning.&Prefer Html to plain text for preview.uiPrefsDialogBase\Foresprgselssprogets magiske filnavnendelser.(Query language magic file name suffixes.uiPrefsDialogBaseLHusk sorteringens aktiveringstilstand.Remember sort activation state.uiPrefsDialogBase~Fjern fra listen. Dette har ingen virkning p indeks p disken.7Remove from list. This has no effect on the disk index.uiPrefsDialogBaseFjern valgteRemove selecteduiPrefsDialogBase@Erstat sammendrag fra dokumenter Replace abstracts from documentsuiPrefsDialogBaseNulstilResetuiPrefsDialogBaseVNulstil stilen for vinduet til tekststumper Resets the Snippets window styleuiPrefsDialogBasexNulstiller resultatlistens skrifttype til systemets standard1Resets the result list font to the system defaultuiPrefsDialogBase8Nulstil stilark til standard!Resets the style sheet to defaultuiPrefsDialogBaseResultatlisteResult ListuiPrefsDialogBase8Skrifttype for resultatlisteResult list fontuiPrefsDialogBaseSgeparametreSearch parametersuiPrefsDialogBaseVis statusikon.Show system tray icon.uiPrefsDialogBase^Vis advarsel, nr der bnes en midlertidig fil.)Show warning when opening temporary file.uiPrefsDialogBaseFCSS-fil for vindue til tekststumperSnippets window CSS fileuiPrefsDialogBaseJStart med bent avanceret sgevindue.'Start with advanced search dialog open.uiPrefsDialogBase8Start med enkel sgetilstandStart with simple search modeuiPrefsDialogBase(Ordstammer for sprogStemming languageuiPrefsDialogBaseStilarkStyle sheetuiPrefsDialogBaseSynonymer-fil
Synonyms fileuiPrefsDialogBasebSammenhngende ord for det genererede sammendrag  Synthetic abstract context wordsuiPrefsDialogBaseZStrrelse p det genererede sammendrag (tegn)$Synthetic abstract size (characters)uiPrefsDialogBaseTekster over denne strrelse vil ikke blive fremhvet i forhndsvisning (for langsom).CTexts over this size will not be highlighted in preview (too slow).uiPrefsDialogBaseOrdene p listen bliver automatisk vendt til ext: xxx stninger i foresprgselssprogets indgang.bThe words in the list will be automatically turned to ext:xxx clauses in the query language entry.uiPrefsDialogBase Skift det valgteToggle selecteduiPrefsDialogBase<vrktjslinje kombinationsfeltToolbar ComboboxuiPrefsDialogBase"brugergrnsefladeUser interfaceuiPrefsDialogBase(Brugerstil der skal anvendes p vinduet til tekststumper.<br>Bemrk: Det frdige sidehoved-indstik er ogs inkluderet i tekststumper-vinduets hoved.User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_tr.qm0000644000175000017500000005424713566424763014274 00000000000000W$KXMewss&v8$zvv&zϳAE 0 ́=]%9=E%0, >.c<XMHvCx cxC C L SI2  p#v0w 5.w 52w 54w 5;lU [ֳ967(f3(ͼuI;U!Ue Q&G.NV?d1 JUY#JUY/Y85PLDx A-L>!I1Inn6f;:f?fD~)3y#H_X| -Z-.")TM9BS5r?)/4r w,-O8<),4L6AStâ$ 1 :R7Ǣ~CzOD`," 4xUKMp hC3K %A71MXCCFW#4FNFJwϗ0fn/| fɆtQQ}}O04 '0c X H'G 9Zy3 K,8 Ee ü>K c +< 6" `P# ` cE+F yj I? C * * *i TH6 Z h Σ+ "A ۷, 'И) 97 P֙ 4 RV% T# V  \iCL `FU 9 B 7X 8@ F# y< 3E u3 u; P  P% 5dJF H £8H/.<P=(`G7J#Wb'zc1zc:;Pc.|˓ L; Q~KY~s([s-g3+ | |NT l&tLiR-Tm ifadeler All clauses AdvSearch00fadelerin herhangi biri Any clause AdvSearchortamlarmedia AdvSearch dierother AdvSearchhesap tablolar1 spreadsheets AdvSearchmetinlertexts AdvSearch<----- Tm <----- All AdvSearchBase<----- Se <----- Sel AdvSearchBase0fade ekle Add clause AdvSearchBaseGeli_mi_ aramaAdvanced search AdvSearchBaseTm ----> All ----> AdvSearchBase GzatBrowse AdvSearchBase"Kategorilere gre By categories AdvSearchBaseDosya tipleri zerinde filtreleme kullanmak iin bunu i_aretleyin,Check this to enable filtering on file types AdvSearchBaseDosya tipleri yerine ham mime tipleri zerinde filtreleme kullanmak iin bunu i_aretleyin;Check this to use file categories instead of raw mime types AdvSearchBase KapatClose AdvSearchBase0fadeyi sil Delete clause AdvSearchBase<Arama iin en st dizini girinEnter top directory for search AdvSearchBase0Yoksay1lan dosya tipleriIgnored file types AdvSearchBase6Dosya tiplerini s1n1rland1rRestrict file types AdvSearchBaselArama sonular1n1 bu dizin ve a_a1s1 ile s1n1rland1r:%Restrict results to files in subtree: AdvSearchBase.ntan1ml1 olarak kaydetSave as default AdvSearchBase4Uyan <br>belgeleri<br>ara:'Search for
documents
satisfying: AdvSearchBase(Aranan dosya tipleriSearched file types AdvSearchBaseSe -----> Sel -----> AdvSearchBaseAramay1 Ba_lat Start Search AdvSearchBaseAspell diliAspell language ConfIndexW>Yap1land1rma dosyas1 yaz1lamad1Can't write configuration file ConfIndexW0Veritaban1 dizininin ad1Database directory name ConfIndexW$Genel parametrelerGlobal parameters ConfIndexW20ndex dzeltme MB aral11Index flush megabytes interval ConfIndexW$Yerel parametrelerLocal parameters ConfIndexW*Gnlk dosyas1n1n ad1 Log file name ConfIndexW:Gnlk dosyas1 ayr1nt1 dzeyiLog verbosity level ConfIndexW(Aspell kullan1m1 yokNo aspell usage ConfIndexW&Arama parametreleriSearch parameters ConfIndexWAtlanan yollar Skipped paths ConfIndexWLSzck kkleri ayr1_t1r1labilir dillerStemming languages ConfIndexW0letilerin yaz1laca1 dosya.<br>Ubirim 1kt1s1 iin 'stderr' kullan1nPThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWKk ayr1_t1rma geni_lemesi iin szlkleri<br>in_a edilecek olan diller.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWzyinelemeli indesklemenin ba_layaca1 dizinlerin listesi. ntan1ml1: ev dizininiz.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexWBu deer diske gnderilecek indekslenmi_ veri miktar1n1 ayarlar.<br>Bu indeksleyicinin bellek kullan1m1n1 kontrol etmeye yarar. ntan1ml1 10MB This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWBu deer ileti boyutunu ayarlar,<br>sadece hatalardan hata ay1klama verilerine kadar.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexWst dizinlerTop directories ConfIndexW GenelGlobal ConfSubPanelW 0ptalCancel EditTransBaseZYap1land1rma ierisinde veritaban1 dizini yok No db directory in configurationMain&Sonraki&NextPreview&nceki &PreviousPreview A&ra: &Search for:PreviewD^unun iin i gsterim yap1lam1yor0Can't turn doc into internal representation for Preview 0ptalCancelPreviewTemizleClearPreview8nizleme metni olu_turuluyorCreating preview textPreviewNnizleme metni dzenleyiciye ykleniyor Loading preview text into editorPreviewE_le_me ^a&rt1 Match &CasePreview0Yard1mc1 program kay1p: Missing helper program: Preview<<b>zelle_tirilmi_ alt aalarCustomised subtreesQObject4Sembolik balant1lar1 izleFollow symbolic linksQObject0ndekslerken sembolik balant1lar1 izle. Ayn1 gelerin yeniden indekslenmesinden ka1nmak iin ntan1ml1 deer hay1rTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject:Tm dosya isimlerini indeksleIndex all file namesQObject0erii tan1nmayan ya da i_lenemeyen (ya da desteklenmeyen mime tipi) dosyalar1 indeksle. ntan1ml1 evet}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObjectAtlanan isimler Skipped namesQObject0ndekslenmi_ s1ralama ierisindeki alt dizinlerin listesi <br>ki burada baz1 parametrelerin yeniden tan1mlanmas1 gerekir. ntan1ml1: bo_.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectBu nitelikler insekslenmemesi gereken dosya ve dizinler iindir.LThese are patterns for file or directory names which should not be indexed.QObject&nizle&PreviewQWidget(&Dosya Ad1n1 KopyalaCopy &File NameQWidget&Adresi Kopyala Copy &URLQWidget*Benzer belgeleri &bulFind &similar documentsQWidget Uyar1WarningRTIToolW(tm diller)(all languages)RclMain2(kk ayr1_t1rma kullanma) (no stemming)RclMainRecoll Hakk1nda About RecollRclMainDnizleme penceresi olu_turulam1yorCan't create preview windowRclMainbBelge a1lamad1 ya da geici dosya olu_turulamad10Cannot extract document or create temporary fileRclMainPVeritaban1ndan belge bilgileri al1namad1+Cannot retrieve document info from databaseRclMainKapat1l1yorClosingRclMainBelge gemi_iDocument historyRclMain"al1_t1r1l1yor: [ Executing: [RclMainGemi_ verileri History dataRclMain20ndeksleme devam ediyor: Indexing in progress: RclMainTemizlePurgeRclMainArama Sonular1 Query resultsRclMain.KkAyr1_t1rmaVeritaban1StemdbRclMain4Bu arama atr1k etkin deil"This search is not active any moreRclMainBilinmeyenUnknownRclMain Uyar1WarningRclMainxszck kkleri ayr1_t1r1labilir diller al1n1rken hata olu_tu#error retrieving stemming languagesRclMainortamlarmediaRclMain dierotherRclMain &Recoll Hakk1nda &About Recoll RclMainBase&Geli_mi_ arama&Advanced Search RclMainBase0&Belge gemi_ini temizle&Erase document history RclMainBase &Dosya&File RclMainBase&Yard1m&Help RclMainBase&Tercihler &Preferences RclMainBase&&S1ralama ltleri&Sort parameters RclMainBase&Aralar&Tools RclMainBase(&Kullan1c1 El Kitab1 &User manual RclMainBase2Geli_mi_/karma_1k AramaAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBaseBelge Gemi_iDocument History RclMainBaseBelge &Gemi_iDocument &History RclMainBase&1kE&xit RclMainBase.D1_ indeksler penceresiExternal index dialog RclMainBase0lk sayfa First page RclMainBase8Sonular1n ilk sayfas1na gitGo to first page of results RclMainBaseSonraki sayfa Next page RclMainBase4Sonular1n sonraki sayfas1Next page of results RclMainBasenceki sayfa Previous page RclMainBase2Sonular1n nceki sayfas1Previous page of results RclMainBase RecollRecoll RclMainBase$S1ralama ltleriSort parameters RclMainBase 0fade g&stericiTerm &explorer RclMainBase(0fade gsterme arac1Term explorer tool RclMainBase"0ndeksi g&ncelle Update &index RclMainBase TarihDate RecollModelDosya ad1 File name RecollModel (sorguyu gster) (show query)ResList<<p><b>Sonu bulunamad1</b><br>

No results found
ResListBelge gemi_iDocument historyResListSonrakiNextResList nizlePreviewResList ncekiPreviousResListSorgu detaylar1 Query detailsResListSonu listesi Result listResList Eri_ilemez belgeUnavailable documentResListTm szckler All termsSSearch2Szcklerin herhangi biriAny termSSearch,Uygunsuz arama ifadesiBad query stringSSearchDosya ad1 File nameSSearchYetersiz bellek Out of memorySSearchArama diliQuery languageSSearchTemizleClear SSearchBase Ctrl+SCtrl+S SSearchBase.Arama girdisini temizleErase search entry SSearchBaseSSearchBase SSearchBase SSearchBaseAraSearch SSearchBaseSorguyu ba_lat Start query SSearchBaseDosya ad1 File name SearchClauseWSeilen szcklerin aras1nda yer alabilecek ek szcklerin say1s1HNumber of additional words that may be interspersed with the chosen ones SearchClauseWZSzckler ile kullan1lacak sorgu biimini se>Select the type of query that will be performed with the words SearchClauseWSonrakiNextSnippetsAraSearch SnippetsW GzatBrowseSpecIdxW &Kapat&Close SpellBase&Geni_let &Expand  SpellBase Alt+KAlt+C SpellBase Alt+GAlt+E SpellBase0fade Gsterici Term Explorer SpellBase0Hi geni_leme bulunamad1No expansion foundSpellWDzenli ifadeRegexpSpellW Heceleme/FonetikSpelling/PhoneticSpellW4Kk ayr1_t1rma geni_lemesiStem expansionSpellW 0fadeTermSpellW zel karakterler WildcardsSpellWxszck kkleri ayr1_t1r1labilir diller al1n1rken hata olu_tu#error retrieving stemming languagesSpellWTm szckler All terms UIPrefsDialog2Szcklerin herhangi biriAny term UIPrefsDialog GzatChoose UIPrefsDialogDosya ad1 File name UIPrefsDialogArama diliQuery language UIPrefsDialoglSeilen dizin bir Xapian indeks dizini gibi grnmyor;The selected directory does not appear to be a Xapian index UIPrefsDialogRSeilen dizin zaten indeks listesinde var3The selected directory is already in the index list UIPrefsDialog0Bu ana/yerel veritaban1!This is the main/local index! UIPrefsDialogxszck kkleri ayr1_t1r1labilir diller al1n1rken hata olu_tu#error retrieving stemming languages UIPrefsDialog KapatCloseViewActionBase$Doal GstericilerNative ViewersViewActionBase GzatChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLW &0ptal&CanceluiPrefsDialogBase &TAMAM&OKuiPrefsDialogBase[linux kernel] (2 szck) aramas1 [linux veya kernel veya (linux ifadesi 2 tane kernel)] olarak dei_tirilecektir. Bu, aranacak szcklerin tam olarak girildii gibi grntlendii sonulara yksek ncelik verilmesini salayacakt1r.A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered.uiPrefsDialogBase$Tmn Etkinle_tir Activate AlluiPrefsDialogBase0ndeks ekle Add indexuiPrefsDialogBase*Dei_iklikleri uygula Apply changesuiPrefsDialogBaseXBasit aramalara ifadeyi otomatik olarak ekle+Automatically add phrase to simple searchesuiPrefsDialogBase GzatChooseuiPrefsDialogBase$Tmn Pasifle_tirDeactivate AlluiPrefsDialogBase$Dei_iklikleri silDiscard changesuiPrefsDialogBasezBelgenin bir zeti varsa bile bir yapay zet olu_turulsun mu?EDo we synthetize an abstract even if the document seemed to have one?uiPrefsDialogBaseSorgu szckleri kullan1larak sonu listesi girdileri iin zet olu_turulsun mu ? Byk boyutlu belgelerde yava_ olabilir.zDo we try to build abstracts for result list entries by using the context of query terms ? 
recoll-1.26.3/qtgui/i18n/recoll_zh_CN.ts0000644000175000017500000052216513566424763014660 00000000000000 AdvSearch All clauses 全部条件 Any clause 任意条件 texts 文本 spreadsheets 电子表格 presentations 演示文稿 media 多媒体文件 messages 邮件 other 其它 Bad multiplier suffix in size filter 文件尺寸过滤器的后缀单位不正确 text 文本 spreadsheet 电子表格 presentation 演示文档 message 邮件 AdvSearchBase Advanced search 高端搜索 Search for <br>documents<br>satisfying: 搜索<br>满足以下条件<br>的文档: Delete clause 删除条件 Add clause 添加条件 Restrict file types 限定文件类型 Check this to enable filtering on file types 选中这个,以便针对文件类型进行过滤 By categories 按大类来过滤 Check this to use file categories instead of raw mime types 选中这个,以便使用较大的分类,而不使用具体的文件类型 Save as default 保存为默认值 Searched file types 将被搜索的文件类型 All ----> 移动全部→ Sel -----> 移动选中项→ <----- Sel ←移动选中项 <----- All ←移动全部 Ignored file types 要忽略的文件类型 Enter top directory for search 输入要搜索的最上层目录 Browse 浏览 Restrict results to files in subtree: 将结果中的文件限定在此子目录树中: Start Search 开始搜索 Close 关闭 All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. 右边的所有非空字段都会按照逻辑与(“全部条件”选项)或逻辑或(“任意条件”选项)来组合。<br>“任意”“全部”和“无”三种字段类型都接受输入简单词语和双引号引用的词组的组合。<br>空的输入框会被忽略。 Invert 反转过滤条件 Minimum size. You can use k/K,m/M,g/G as multipliers 最小尺寸。你可使用k/K、m/M、g/G作为单位 Min. Size 最小尺寸 Maximum size. You can use k/K,m/M,g/G as multipliers 最大尺寸。你可使用k/K、m/M、g/G作为单位 Max. Size 最大尺寸 Filter 过滤 From To Check this to enable filtering on dates 选中这个,以便针对日期进行过滤 Filter dates 过滤日期 Find 查找 Check this to enable filtering on sizes 选中这个,以便针对文件尺寸进行过滤 Filter sizes 过滤尺寸 ConfIndexW Can't write configuration file 无法写入配置文件 Global parameters 全局参数 Local parameters 局部参数 Search parameters 搜索参数 Top directories 顶级目录 The list of directories where recursive indexing starts. Default: your home.
索引从这个列表中的目录开始,递归地进行。默认:你的家目录。 Skipped paths 略过的路径 These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages 词根语言 The languages for which stemming expansion<br>dictionaries will be built. 将会针对这些语言<br>构造词根扩展词典。 Log file name 记录文件名 The file where the messages will be written.<br>Use 'stderr' for terminal output 程序输出的消息会被保存到这个文件。<br>使用'stderr'以表示将消息输出到终端 Log verbosity level 记录的话痨级别 This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. 这个值调整的是输出的消息的数量,<br>其级别从仅输出报错信息到输出一大堆调试信息。 Index flush megabytes interval 刷新索引的间隔,兆字节 This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 这个值调整的是,当积累咯多少索引数据时,才将数据刷新到硬盘上去。<br>用来控制索引进程的内存占用情况。默认为10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage 不使用aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 禁止在词语探索器中使用aspell来生成拼写相近的词语。<br>在没有安装aspell或者它工作不正常时使用这个选项。 Aspell language Aspell语言 The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. aspell词典的语言。表示方式是'en'或'fr'……<br>如果不设置这个值,则会使用系统环境中的自然语言设置信息,而那个通常是正确的。要想查看你的系统中安装咯哪些语言的话,就执行'aspell config',再在'data-dir'目录中找.dat文件。 Database directory name 数据库目录名 The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. 用来储存索引数据的目录的名字<br>如果使用相对路径,则路径会相对于配置目录进行计算。默认值是'xapiandb'。 Unac exceptions Unac例外 <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>这是针对unac机制的例外,默认情况下,该机制会删除所有的判断信息,并进行正规的分解。妳可以按照自己的语言的特点针对某个字符覆盖掉口音解除设置,以及指定额外的分解(例如,针对复数)。在每个由空格分隔的条目中,第一个字符是源字符,剩下的就是翻译。 Process the WEB history queue 处理网页历史队列 Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) 启用对火狐的已访问页面进行索引。<br>(妳还需要安装火狐的Recoll插件) Web page store directory name 网页储存目录名 The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. 用来储存复制过来的已访问网页的目录名。<br>如果使用相对路径,则会相对于配置目录的路径进行处理。 Max. size for the web store (MB) 网页存储的最大尺寸(MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity 自动判断大小写 <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). 
Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>如果搜索语句中包含带有口音特征(不在unac_except_trans中)的话,则自动触发大小写的判断。否则,妳需要使用查询语言和<i>D</i>修饰符来指定对大小写的判断。 Automatic character case sensitivity 自动调整字符的大小写敏感性 <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>如果搜索语句中除首字母之外包含有大写字母的话,则自动触发大小写的判断。否则,妳需要使用查询语言和<i>C</i>修饰符来指定对大小写的判断。 Maximum term expansion count 最大词根扩展数目 <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>针对单个单词的最大词根扩展数目(例如:此选项在使用通配符时会生效)。默认的10000是一个狠合理的值,能够避免当引擎遍历词根列表时引起查询界面假死。 Maximum Xapian clauses count 最大的Xapian子句数目 <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>我们向单个Xapian查询语句中加入的最大的子句数目。某些情况下,词根扩展的结果会是倍增的,而我们想要避免使用过多内存。默认的100000应当既能满足日常的大部分要求,又能与当前的典型硬件配置相兼容。 ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) 压缩文件最大尺寸(KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. 尺寸大于这个值的压缩文件不会被处理。设置成-1以表示不加任何限制,设置成0以表示根本不处理压缩文件。 Max. text file size (MB) 文本文件最大尺寸(MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. 尺寸大于这个值的文本文件不会被处理。设置成-1以表示不加限制。 其作用是从索引中排除巨型的记录文件。 Text file page size (KB) 文本文件单页尺寸(KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). 如果设置咯这个值(不等于-1),则文本文件会被分割成这么大的块,并且进行索引。 这是用来搜索大型文本文件的(例如记录文件)。 Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. 工作时间长于这个值的外部过滤器会被中断。这是针对某种特殊情况的,该情况下,一个文档可能引起过滤器无限循环下去(例如:postscript)。设置为-1则表示不设限制。 Global 全局 CronToolW Cron Dialog 计划任务对话框 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T19:47:37" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T19:56:53" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } .T3 { font-style:italic; } .T4 { font-family:Courier New,courier; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 批量索引计划任务(cron) </p><p class="P1">每个字段都可以包括一个通配符(*)、单个数字值、逗号分隔的列表(1,3,5)和范围(1-7)。更准确地说,这些字段会被<span class="T3">按原样</span>输出到crontab 文件中,因此这里可以使用crontab 的所有语法,参考crontab(5)。</p><p class="P1"><br/>例如,在<span class="T3">日期</span>中输入<span class="T4">*</span>,<span class="T3">小时</span>中输入<span class="T4">12,19</span>,<span class="T3">分钟</span>中输入<span class="T4">15 </span>的话,会在每天的12:15 AM 和7:15 PM启动recollindex。</p><p class="P1">一个频繁执行的计划任务,其性能可能比不上实时索引。</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) 星期日(*或0-7,0或7是指星期天) Hours (* or 0-23) 小时(*或0-23) Minutes (0-59) 分钟(0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:08:00" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:11:47" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> 
@page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1">点击<span class="T2">禁用</span>以停止进行自动化的批量索引,点击<span class="T2">启用</span>以启用此功能,点击<span class="T2">取消</span>则不改变任何东西。</p></body></html> Enable 启用 Disable 禁用 It seems that manually edited entries exist for recollindex, cannot edit crontab 看起来已经有手动编辑过的recollindex条目了,因此无法编辑crontab Error installing cron entry. Bad syntax in fields ? 插入cron条目时出错。请检查语法。 EditDialog Dialog 对话框 EditTrans Source path 源路径 Local path 本地路径 Config error 配置错误 Original path 原始路径 EditTransBase Path Translations 路径变换 Setting path translations for 针对右侧事务设置路径变换 Select one or several file types, then use the controls in the frame below to change how they are processed 选中一个或多个文件类型,然后使用下面框框中的控件来设置要如何处理它们 Add 添加 Delete 删除 Cancel 取消 Save 保存 FirstIdxDialog First indexing setup 第一次索引设置 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:14:44" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:23:13" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T2 { font-weight:bold; } .T4 { font-style:italic; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T2">未找到对应于此配置实例的索引数据。</span><br/><br/>如果你只想以一组合理的默认参数来索引你的家目录的话,就直接按<span class="T4">立即开始索引</span>按钮。以后还可以调整配置参数的。</p><p class="P1">如果你想调整某些东西的话,就使用下面的链接来调整其中的索引配置和定时计划吧。</p><p class="P1">这些工具可在以后通过<span class="T4">选项</span>菜单访问。</p></body></html> Indexing configuration 索引配置 This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. 在这里可以调整你想要对其进行索引的目录,以及其它参数,例如:要排除和路径或名字、默认字符集…… Indexing schedule 定时索引任务 This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). 在这里可以选择是要进行批量索引还是实时索引,还可以设置一个自动化的定时(使用cron)批量索引任务。 Start indexing now 立即开始索引 FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup 定时索引设置 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T20:27:11" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T20:30:49" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. { clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 索引程序可持续运行并且在文件发生变化时对其进行索引,也可以间隔一定时间运行一次。</p><p class="P1">你可以读一下手册,以便更好地做出抉择(按F1)。</p><p class="P1">这个工具可帮助你设置一个自动进行批量索引的定时任务,或者设置成当你登录时便启动实时索引(或者两者同时进行,当然那几乎没有意义)。</p></body></html> Cron scheduling 定时任务 The tool will let you decide at what time indexing should run and will install a crontab entry. 这个工具帮助你确定一个让索引运行的时间,它会插入一个crontab条目。 Real time indexing start up 实时索引设置 Decide if real time indexing will be started when you log in (only for the default index). 
作出决定,是否要在登录时便启动实时索引(只对默认索引有效)。 ListDialog Dialog 对话框 GroupBox 分组框 Main No db directory in configuration 配置实例中没有数据库目录 "history" file is damaged or un(read)writeable, please check or remove it: "history"文件被损坏,或者不可(读)写,请检查一下或者删除它: "history" file is damaged, please check or remove it: Preview Close Tab 关闭标签页 Cancel 取消 Missing helper program: 缺少辅助程序: Can't turn doc into internal representation for 无法为此文件将文档转换成内部表示方式: Creating preview text 正在创建预览文本 Loading preview text into editor 正在将预览文本载入到编辑器中 &Search for: 搜索(&S): &Next 下一个(&N) &Previous 上一个(&P) Clear 清空 Match &Case 匹配大小写(&C) Cannot create temporary directory: 无法创建临时目录: Error while loading file 文件载入出错 Form Tab 1 Open 打开 Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. Could not fetch stored text PreviewTextEdit Show fields 显示字段 Show main text 显示主文本 Print 打印 Print Current Preview 打印当前预览文本 Show image 显示图片 Select All 全选 Copy 复制 Save document to file 将文档保存到文件 Fold lines 自动换行 Preserve indentation 保留缩进符 Open document QObject Global parameters 全局参数 Local parameters 局部参数 <b>Customised subtrees <b>自定义的子目录树 The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. 这是已索引的目录树中的一些子目录组成的列表<br>,它们的某些参数需要重定义。默认:空白。 <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>以下的参数,当你在上面的列表中不选中任何条目或者选中一个空行时,<br>就是针对顶级目录起作用的,否则便是对选中的子目录起作用的。<br>你可以点击+/-按钮,以便添加或删除目录。 Skipped names 要略过的文件名 These are patterns for file or directory names which should not be indexed. 具有这些模式的文件或目录不会被索引。 Default character set 默认字符集 This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. 这是用来读取那些未标明自身的字符集的文件时所使用的字符集,例如纯文本文件。<br>默认值是空,会使用系统里的自然语言环境参数中的值。 Follow symbolic links 跟踪符号链接 Follow symbolic links while indexing. The default is no, to avoid duplicate indexing 在索引时跟踪符号链接。默认是不跟踪的,以避免重复索引 Index all file names 对所有文件名进行索引 Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true 对那些无法判断或处理其内容(未知类型或其类型不被支持)的文件的名字进行索引。默认为是 Beagle web history Beagle网页历史 Search parameters 搜索参数 Web history 网页历史 Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. 
Cannot extract document: &Preview 预览(&P) &Open 打开(&O) Open With Run Script Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Save selection to files 将选中内容保存到文件中 Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) Find &similar documents 查找类似的文档(&s) Open &Snippets window 打开片断窗口(&S) Show subdocuments / attachments 显示子文档/附件 QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start 实时索引自动启动 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-22T21:00:38" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-22T21:02:43" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .P1 { font-size:12pt; margin-bottom:0cm; margin-top:0cm; font-family:Nimbus Roman No9 L; writing-mode:page; margin-left:0cm; margin-right:0cm; text-indent:0cm; } .T1 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="P1"><span class="T1">Recoll</span> 索引程序可以以守护进程的方式运行,在文件发生变化时便实时更新索引。这样你的索引一直是与文件同步的,但是会占用一定的系统资源。</p></body></html> Start indexing daemon with my desktop session. 在我的桌面会话启动时便启动索引进程。 Also start indexing daemon right now. 同时此次也立即启动索引进程。 Replacing: 正在替换: Replacing file 正在替换文件 Can't create: 无法创建: Warning 警告 Could not execute recollindex 无法执行recollindex Deleting: 正在删除: Deleting file 正在删除文件 Removing autostart 正在删除自动启动项 Autostart file deleted. Kill current process too ? 自动启动文件已经删除。也要杀死当前进程吗? RclMain (no stemming) (不进行词根计算) (all languages) (对全部语言进行词根计算) error retrieving stemming languages 提取词根语言时出错 Indexing in progress: 正在索引: Purge 删除 Stemdb Stem数据库 Closing 正在关闭 Unknown 未知 Query results 查询结果 Cannot retrieve document info from database 无法从数据库获取文档信息 Warning 警告 Can't create preview window 无法创建预览窗口 This search is not active any more 这个查询已经不是活跃的了 Bad viewer command line for %1: [%2] Please check the mimeconf file 针对%1的查看命令[%2]配置出错 请检查mimeconf文件 Cannot extract document or create temporary file 无法提取文档或创建临时文件 Executing: [ 正在执行:[ About Recoll Recoll说明 History data 历史数据 Document history 文档历史 Update &Index 更新索引(&I) Stop &Indexing 停止索引(&I) All 全部 media 多媒体文件 message 邮件 other 其它 presentation 演示文档 spreadsheet 电子表格 text 文本文件 sorted 已排序 filtered 已过滤 External applications/commands needed and not found for indexing your file types: 需要用来辅助对你的文件进行索引,却又找不到的外部程序/命令: No helpers found missing 目前不缺少任何辅助程序 Missing helper programs 未找到的辅助程序 Document category filter 文档分类过滤器 No external viewer configured for mime type [ 针对此种文件类型没有配置外部查看器[ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? 没有找到mimeview中为%1: %2配置的查看器。 是否要打开选项对话框? Can't access file: 无法访问文件: Can't uncompress file: 无法解压缩此文件: Save file 保存文件 Result count (est.) 结果数(估计值) Query details 查询语句细节 Could not open external index. Db not open. Check external indexes list. 无法打开外部索引。数据库未打开。请检查外部索引列表。 No results found 未找到结果 None Updating 正在更新 Done 已完成 Monitor 监视器 Indexing failed 索引失败 The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone 当前索引进程不是由此界面启动的。点击确定以杀死它,或者点击取消以让它自由运行 Erasing index 正在删除索引 Reset the index and start from scratch ? 从头重新开始索引吗? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program 查询正在进行中。<br>由于索引库的某些限制,<br>取消的话会导致程序退出 Error 错误 Index not open 索引未打开 Index query error 索引查询出错 Content has been indexed for these mime types: 已经为这些文件类型索引其内容: Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. 
此文件的索引已过时。程序拒绝显示错误的条目。请点击确定以更新此文件的索引,等待索引完成之后再查询。或者,取消。 Can't update index: indexer running 无法更新索引:索引程序已在运行 Indexed MIME Types 已索引的文件类型 Bad viewer command line for %1: [%2] Please check the mimeview file 针对%1的查看程序命令不对:%2 请检查mimeview文件 Viewer command line for %1 specifies both file and parent file value: unsupported 针对%1的查看程序命令中同时指定了文件及亲代文件值:这是不支持的 Cannot find parent document 无法找到亲代文档 Indexing did not run yet 还未开始索引 External applications/commands needed for your file types and not found, as stored by the last indexing pass in 在上次的索引过程中发现,针对妳的文件类型,还缺少一些外部的程序/命令,它们储存在右侧文件中 Index not up to date for this file. Refusing to risk showing the wrong entry. 此文件的索引内容不是最新的。如果妳按拒绝,则需要自行承担显示错误条目的风险。 Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. 点击确定来更新此文件的索引,在索引完成之后重新执行此查询。否则,请按取消。 Indexer running so things should improve when it's done 索引器正在运行,所以,当它完毕之后世界将变得更美好 Sub-documents and attachments 子文档及附件 Document filter Indexing interrupted The indexer is running so things should improve when it's done. Duplicate documents 重复文档 These Urls ( | ipath) share the same content: 以下路径(|内部路径)之间共享着相同的内容: Bad desktop app spec for %1: [%2] Please check the desktop file Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
RclMainBase Recoll Recoll Search tools 搜索工具 Result list 结果列表 &File 文件(&F) &Tools 工具(&T) &Preferences 选项(&P) &Help 帮助(&H) E&xit 退出(&x) Ctrl+Q Ctrl+Q Update &index 更新索引(&i) &Erase document history 删除文档历史(&E) &About Recoll Recoll说明(&A) &User manual 用户手册(&U) Document &History 文档历史(&H) Document History 文档历史 &Advanced Search 高端搜索(&A) Advanced/complex Search 高端/复杂搜索 &Sort parameters 排序参数(&S) Sort parameters 排序参数 Term &explorer 词语探索器(&e) Term explorer tool 词语探索器 Next page 下一页 Next page of results 下一页结果 First page 第一页 Go to first page of results 跳转到结果的第一页 Previous page 上一页 Previous page of results 上一页结果 &Query configuration 查询配置(&Q) External index dialog 外部索引对话框 &Indexing configuration 索引配置(&I) All 全部 &Show missing helpers 显示缺少的辅助程序列表(&S) PgDown 向下翻页 PgUp 向上翻页 &Full Screen 全屏(&F) F11 F11 Full Screen 全屏 &Erase search history 删除搜索历史(&E) sortByDateAsc 按日期升序排列 Sort by dates from oldest to newest 按日期排列,最旧的在前面 sortByDateDesc 按日期降序排列 Sort by dates from newest to oldest 按日期排列,最新的在前面 Show Query Details 显示查询语句细节 Show results as table 以表格的形式显示结果 &Rebuild index 重新构造索引(&R) &Show indexed types 显示已索引的文件类型(&S) Shift+PgUp Shift+向上翻页 &Indexing schedule 定时索引(&I) E&xternal index dialog 外部索引对话框(&x) &Index configuration 索引设置(&I) &GUI configuration 界面设置(&G) &Results 结果(&R) Sort by date, oldest first 按日期排序,旧文档在前 Sort by date, newest first 按日期排序,新文档在前 Show as table 以表格形式显示 Show results in a spreadsheet-like table 以一个类似于电子表格的形式来显示结果 Save as CSV (spreadsheet) file 保存为CSV(电子表格)文件 Saves the result into a file which you can load in a spreadsheet 将结果保存到一个可用电子表格打开的文件中 Next Page 下一页 Previous Page 上一页 First Page 第一页 Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel Abstract 摘要 Author 作者 Document size 文档尺寸 Document date 文档日期 File size 文件尺寸 File name 文件名 File date 文件日期 Keywords 关键词 Original character set 原字符集 Relevancy rating 相关度 Title 标题 URL 路径 Mtime 修改时间 Date 日期 Date and time 日期及时间 Ipath 内部路径 MIME type 文件类型 Can't sort by inverse relevance ResList Result list 结果列表 (show query) (显示查询语句细节) &Preview 预览(&P) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) Find &similar documents 查找类似的文档(&s) Document history 文档历史 <p><b>No results found</b><br> <p><b>未找到结果</b><br> Previous 上一个 Next 下一个 Unavailable document 无法访问文档 Preview 预览 Open 打开 <p><i>Alternate spellings (accents suppressed): </i> <p><i>其它拼写形式(忽视口音):</i> &Write to File 写入文件(&W) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) &Open 打开(&O) Documents out of at least 个文档,最少共有 for 个文档,查询条件: <p><i>Alternate spellings: </i> <p><i>其它拼写形式:</i> Duplicate documents 重复文档 These Urls ( | ipath) share the same content: 以下路径(|内部路径)之间共享着相同的内容: Result count (est.) 
结果数(估计值) Query details 查询语句细节 Snippets 片断 ResTable &Reset sort 重置排序条件(&R) &Delete column 删除此列(&D) Save table to CSV file 将表格保存成CSV文件 Can't open/create file: 无法打开/创建文件: &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Find &similar documents 查找类似的文档(&s) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) &Save as CSV 保存为CSV(&S) Add "%1" column 添加"%1"列 ResTableDetailArea &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Find &similar documents 查找类似的文档(&s) Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) ResultPopup &Preview 预览(&P) &Open 打开(&O) Copy &File Name 复制文件名(&F) Copy &URL 复制路径(&U) &Write to File 写入文件(&W) Save selection to files 将选中内容保存到文件中 Preview P&arent document/folder 预览上一级文档/目录(&a) &Open Parent document/folder 打开上一级文档/目录(&O) Find &similar documents 查找类似的文档(&s) Open &Snippets window 打开片断窗口(&S) Show subdocuments / attachments 显示子文档/附件 SSearch Any term 任一词语 All terms 全部词语 File name 文件名 Query language 查询语言 Bad query string 查询语言格式不正确 Out of memory 内存不足 Too many completions 有太多与之相关的补全选项啦 Completions 补全选项 Select an item: 选择一个条目: Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. 
<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1 plus MathML 2.0//EN" "http://www.w3.org/Math/DTD/mathml2/xhtml-math11-f.dtd"> <html xmlns="http://www.w3.org/1999/xhtml"><!--This file was converted to xhtml by OpenOffice.org - see http://xml.openoffice.org/odf2xhtml for more info.--><head profile="http://dublincore.org/documents/dcmi-terms/"><meta http-equiv="Content-Type" content="application/xhtml+xml; charset=utf-8"/><title xmlns:ns_1="http://www.w3.org/XML/1998/namespace" ns_1:lang="en-US">- no title specified</title><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.title" content="" ns_1:lang="en-US"/><meta name="DCTERMS.language" content="en-US" scheme="DCTERMS.RFC4646"/><meta name="DCTERMS.source" content="http://xml.openoffice.org/odf2xhtml"/><meta name="DCTERMS.issued" content="2012-03-23T08:43:25" scheme="DCTERMS.W3CDTF"/><meta name="DCTERMS.modified" content="2012-03-23T09:07:39" scheme="DCTERMS.W3CDTF"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.provenance" content="" ns_1:lang="en-US"/><meta xmlns:ns_1="http://www.w3.org/XML/1998/namespace" name="DCTERMS.subject" content="," ns_1:lang="en-US"/><link rel="schema.DC" href="http://purl.org/dc/elements/1.1/" hreflang="en"/><link rel="schema.DCTERMS" href="http://purl.org/dc/terms/" hreflang="en"/><link rel="schema.DCTYPE" href="http://purl.org/dc/dcmitype/" hreflang="en"/><link rel="schema.DCAM" href="http://purl.org/dc/dcam/" hreflang="en"/><style type="text/css"> @page { } table { border-collapse:collapse; border-spacing:0; empty-cells:show } td, th { vertical-align:top; font-size:12pt;} h1, h2, h3, h4, h5, h6 { clear:both } ol, ul { margin:0; padding:0;} li { list-style: none; margin:0; padding:0;} <!-- "li span.odfLiEnd" - IE 7 issue--> li span. 
{ clear: both; line-height:0; width:0; height:0; margin:0; padding:0; } span.footnodeNumber { padding-right:1em; } span.annotation_style_by_filter { font-size:95%; font-family:Arial; background-color:#fff000; margin:0; border:0; padding:0; } * { margin:0;} .Standard { font-size:12pt; font-family:Nimbus Roman No9 L; writing-mode:page; } .T1 { font-style:italic; } .T2 { font-style:italic; } .T4 { font-weight:bold; } <!-- ODF styles with no properties representable as CSS --> { } </style></head><body dir="ltr" style="max-width:21.001cm;margin-top:2cm; margin-bottom:2cm; margin-left:2cm; margin-right:2cm; writing-mode:lr-tb; "><p class="Standard">输入查询语言表达式。简要说明:<br/><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2</span> : '词语1'和'词语2'同时出现在任意字段中。<br/><span class="T2">字段</span><span class="T1">:</span><span class="T2">词语</span><span class="T1">1</span> : '词语1'出现在字段'字段'中。<br/>标准字段名/同义名:<br/>title/subject/caption、author/from、recipient/to、filename、ext。<br/>伪字段名:dir、mime/format、type/rclcat、date。<br/>日期段的两个示例:2009-03-01/2009-05-20 2009-03-01/P2M。<br/><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2 OR </span><span class="T2">词语</span><span class="T1">3</span> : 词语1 <span class="T4">与</span> (词语2 <span class="T4">或</span> 词语3)。<br/>不允许用真正的括号来表示逻辑关系。<br/><span class="T1">"</span><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2"</span> : 词组(必须按原样出现)。可用的修饰词:<br/><span class="T1">"</span><span class="T2">词语</span><span class="T1">1 </span><span class="T2">词语</span><span class="T1">2"p</span> : 以默认距离进行的无序近似搜索。<br/>有疑问时可使用<span class="T4">显示查询语句细节</span>链接来查看查询语句的细节,另外请查看手册(&lt;F1&gt;)以了解更多内容。</p></body></html> Enter file name wildcard expression. 输入文件名通配符表达式。 Enter search terms here. Type ESC SPC for completions of current term. 在此输入要搜索的词语。按Esc 空格来查看针对当前词语的补全选项。 Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase SSearchBase Clear 清空 Ctrl+S Ctrl+S Erase search entry 删除搜索条目 Search 搜索 Start query 开始查询 Enter search terms here. Type ESC SPC for completions of current term. 在此输入要搜索的词语。按Esc 空格来查看针对当前词语的补全选项。 Choose search type. 
选择搜索类型。 Show query history SearchClauseW Select the type of query that will be performed with the words 选择要对右边的词语进行的查询类型 Number of additional words that may be interspersed with the chosen ones 允许在选中的词语之间出现的额外词语的个数 No field 不限字段 Any 任意 All 全部 None Phrase 词组 Proximity 近似 File name 文件名 Snippets Snippets 片断 Find: 查找: Next 下一个 Prev 上一个 SnippetsW Search 搜索 <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse 浏览 Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer 词语探索器 &Expand 展开(&E) Alt+E Alt+E &Close 关闭(&C) Alt+C Alt+C No db info. 未找到数据库信息。 Match 匹配 Case 大小写 Accents 口音 SpellW Wildcards 通配符 Regexp 正则表达式 Stem expansion 词根扩展 Spelling/Phonetic 拼写/发音检查 error retrieving stemming languages 提取词根语言时出错 Aspell init failed. Aspell not installed? Aspell初始化失败。是否未安装Aspell? Aspell expansion error. Aspell扩展出错。 No expansion found 未找到扩展 Term 词语 Doc. / Tot. 文档数/总数 Index: %1 documents, average length %2 terms 索引:%1个文档,平均长度为%2个词语 Index: %1 documents, average length %2 terms.%3 results 索引:%1个文档,平均长度为%2个单词。%3个结果 %1 results %1个结果 List was truncated alphabetically, some frequent 列表已按字母顺序截断,某个常见 terms may be missing. Try using a longer root. 的单词可能会缺失。请尝试使用一个更长的词根。 Show index statistics 显示索引统计信息 Number of documents 文档个数 Average terms per document 每个文档中的平均单词个数 Smallest document length 最小文档长度 Longest document length 最大文档长度 Database directory size 数据库目录尺寸 MIME types: 多媒体文档类型列表: Item 条目 Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages 提取词根语言时出错 The selected directory does not appear to be a Xapian index 选中的目录不是Xapian索引 This is the main/local index! 这是主要/本地索引! 
The selected directory is already in the index list 选中的目录已经在索引列表中 Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) 选择xapian索引目录(例如:/home/buddy/.recoll/xapiandb) Choose 选择 Result list paragraph format (erase all to reset to default) 结果列表的段落格式(删除全部内容即可重置为默认状态) Result list header (default is empty) 结果列表表头(默认为空) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) 选择recoll配置目录或xapian索引目录(例如:/home/me/.recoll 或 /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read 所选中的目录看起来像是一个Recoll配置目录,但是其中的配置内容无法读取 At most one index should be selected 最多应当选中一个索引 Cant add index with different case/diacritics stripping option 无法添加带有不同的大小写/诊断信息裁剪方式的索引 Default QtWebkit font Any term 任一词语 All terms 全部词语 File name 文件名 Query language 查询语言 Value from previous program exit ViewAction Changing actions with different current values 正在针对不同的当前值而改变动作 Command 命令 MIME type 文件类型 Desktop Default 桌面默认值 Changing entries with different current values 正在使用不同的当前值来修改条目 ViewActionBase Native Viewers 本地查看器 Select one or several file types, then click Change Action to modify the program used to open them 选中一个或多个文件类型,然后点击“修改动作”来修改用来打开这些文件的程序 Change Action 修改动作 Close 关闭 Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. 选中一个或多个文件类型祟点击“修改动作”<br>或者可以关闭这个对话框,而在主面板中选中“使用桌面默认设置”<br>那样就会无视这个列表而使用桌面的默认设置。 Select one or several mime types then use the controls in the bottom frame to change how they are processed. 选中一个或多个文件类型,然后使用下面框框中的控件来设置要如何处理它们。 Use Desktop preferences by default 默认使用桌面本身的设置 Select one or several file types, then use the controls in the frame below to change how they are processed 选中一个或多个文件类型,然后使用下面框框中的控件来设置要如何处理它们 Exception to Desktop preferences 针对桌面默认值的例外 Action (empty -> recoll default) 动作(空白则表示使用recoll的默认值) Apply to current selection 应用到当前选中项上 Recoll action: Recoll动作: current value 当前值 Select same 选中相同的项 <b>New Values:</b> <b>新的值:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfBeaglePanelW Steal Beagle indexing queue 窃取Beagle索引队列 Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) 不可运行Beagle。启用对beagle队列的处理,以索引火狐网页历史。<br>(你还需要安装火狐Beagle插件) Entries will be recycled once the size is reached 当尺寸达到设定值时,这些条目会被循环使用 Web page store directory name 网页储存目录名 The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. 用来储存复制过来的已访问网页的目录名。<br>如果使用相对路径,则会相对于配置目录的路径进行处理。 Max. 
size for the web store (MB) 网页存储的最大尺寸(MB) Process the WEB history queue 处理网页历史队列 Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) 启用对火狐的已访问页面进行索引。<br>(妳还需要安装火狐的Recoll插件) confgui::ConfIndexW Can't write configuration file 无法写入配置文件 confgui::ConfParamFNW Choose 选择 confgui::ConfParamSLW + + - - Add entry Delete selected entries ~ Edit selected entries confgui::ConfSearchPanelW Automatic diacritics sensitivity 自动判断大小写 <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>如果搜索语句中包含带有口音特征(不在unac_except_trans中)的话,则自动触发大小写的判断。否则,妳需要使用查询语言和<i>D</i>修饰符来指定对大小写的判断。 Automatic character case sensitivity 自动调整字符的大小写敏感性 <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>如果搜索语句中除首字母之外包含有大写字母的话,则自动触发大小写的判断。否则,妳需要使用查询语言和<i>C</i>修饰符来指定对大小写的判断。 Maximum term expansion count 最大词根扩展数目 <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>针对单个单词的最大词根扩展数目(例如:此选项在使用通配符时会生效)。默认的10000是一个狠合理的值,能够避免当引擎遍历词根列表时引起查询界面假死。 Maximum Xapian clauses count 最大的Xapian子句数目 <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>我们向单个Xapian查询语句中加入的最大的子句数目。某些情况下,词根扩展的结果会是倍增的,而我们想要避免使用过多内存。默认的100000应当既能满足日常的大部分要求,又能与当前的典型硬件配置相兼容。 confgui::ConfSubPanelW Global 全局 Max. compressed file size (KB) 压缩文件最大尺寸(KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. 尺寸大于这个值的压缩文件不会被处理。设置成-1以表示不加任何限制,设置成0以表示根本不处理压缩文件。 Max. text file size (MB) 文本文件最大尺寸(MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. 尺寸大于这个值的文本文件不会被处理。设置成-1以表示不加限制。 其作用是从索引中排除巨型的记录文件。 Text file page size (KB) 文本文件单页尺寸(KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). 如果设置咯这个值(不等于-1),则文本文件会被分割成这么大的块,并且进行索引。 这是用来搜索大型文本文件的(例如记录文件)。 Max. filter exec. time (S) 过滤器的最长执行时间(S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. 外部过滤器的执行时间如果超过这个值,则会被强行中断。在罕见的情况下,某些文档(例如postscript)会导致过滤器陷入死循环。设置成-1以表示不加限制。 External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. 工作时间长于这个值的外部过滤器会被中断。这是针对某种特殊情况的,该情况下,一个文档可能引起过滤器无限循环下去(例如:postscript)。设置为-1则表示不设限制。 confgui::ConfTopPanelW Top directories 顶级目录 The list of directories where recursive indexing starts. Default: your home. 索引从这个列表中的目录开始,递归地进行。默认:你的家目录。 Skipped paths 略过的路径 These are names of directories which indexing will not enter.<br> May contain wildcards. 
Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') 索引进程不会进入具有这些名字的目录。<br>可以包含通配符。必须匹配索引进程自身所见到的路径(例如:如果topdirs包含'/home/me',而实际上'/home'是到'/usr/home'的链接,则正确的skippedPath条目应当是'/home/me/tmp*',而不是'/usr/home/me/tmp*') Stemming languages 词根语言 The languages for which stemming expansion<br>dictionaries will be built. 将会针对这些语言<br>构造词根扩展词典。 Log file name 记录文件名 The file where the messages will be written.<br>Use 'stderr' for terminal output 程序输出的消息会被保存到这个文件。<br>使用'stderr'以表示将消息输出到终端 Log verbosity level 记录的话痨级别 This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. 这个值调整的是输出的消息的数量,<br>其级别从仅输出报错信息到输出一大堆调试信息。 Index flush megabytes interval 刷新索引的间隔,兆字节 This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 这个值调整的是,当积累咯多少索引数据时,才将数据刷新到硬盘上去。<br>用来控制索引进程的内存占用情况。默认为10MB Max disk occupation (%) 最大硬盘占用率(%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). 当硬盘的占用率达到这个数时,索引会失败并且停止(以避免塞满你的硬盘)。<br>设为0则表示不加限制(这是默认值)。 No aspell usage 不使用aspell Aspell language Aspell语言 The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. aspell词典的语言。表示方式是'en'或'fr'……<br>如果不设置这个值,则会使用系统环境中的自然语言设置信息,而那个通常是正确的。要想查看你的系统中安装咯哪些语言的话,就执行'aspell config',再在'data-dir'目录中找.dat文件。 Database directory name 数据库目录名 The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. 用来储存索引数据的目录的名字<br>如果使用相对路径,则路径会相对于配置目录进行计算。默认值是'xapiandb'。 Use system's 'file' command 使用系统里的'file'命令 Use the system's 'file' command if internal<br>mime type identification fails. 当内部的文件类型识别功能失效时<br>使用系统里的'file'命令。 Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 禁止在词语探索器中使用aspell来生成拼写相近的词语。<br>在没有安装aspell或者它工作不正常时使用这个选项。 The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. aspell词典的语言。表示方式是'en'或'fr'……<br>如果不设置这个值,则会使用系统环境中的自然语言设置信息,而那个通常是正确的。要想查看你的系统中安装咯哪些语言的话,就执行'aspell config',再在'data-dir'目录中找.dat文件。 The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. 用来储存索引数据的目录的名字<br>如果使用相对路径,则路径会相对于配置目录进行计算。默认值是'xapiandb'。 Unac exceptions Unac例外 <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. 
<p>这是针对unac机制的例外,默认情况下,该机制会删除所有的判断信息,并进行正规的分解。妳可以按照自己的语言的特点针对某个字符覆盖掉口音解除设置,以及指定额外的分解(例如,针对复数)。在每个由空格分隔的条目中,第一个字符是源字符,剩下的就是翻译。 uiPrefsDialogBase User preferences 用户选项 User interface 用户界面 Number of entries in a result page 一个结果页面中显示的结果条数 If checked, results with the same content under different names will only be shown once. 如果选中这个,则拥有相同文件内容的不同文件名只会显示一个。 Hide duplicate results. 隐藏重复结果。 Highlight color for query terms 查询词语的高亮颜色 Result list font 结果列表字体 Opens a dialog to select the result list font 打开一个对话框,以选择用于结果列表的字体 Helvetica-10 文泉驿微米黑-12 Resets the result list font to the system default 将结果列表中的字体重设为系统默认值 Reset 重置 Texts over this size will not be highlighted in preview (too slow). 超过这个长度的文本不会在预览窗口里高亮显示(太慢)。 Maximum text size highlighted for preview (megabytes) 在预览中对其进行高亮显示的最大文本尺寸(兆字节) Use desktop preferences to choose document editor. 使用桌面系统的设置来选择文档编辑器。 Choose editor applications 选择编辑器程序 Display category filter as toolbar instead of button panel (needs restart). 将文件类型过滤器显示成工具条,而不是按钮面板(需要重启程序)。 Auto-start simple search on whitespace entry. 输入空格时自动开始进行简单搜索。 Start with advanced search dialog open. 启动时打开高端搜索对话框。 Remember sort activation state. 记住排序状态。 Prefer Html to plain text for preview. 预览中优先使用Html。 Search parameters 搜索参数 Stemming language 词根语言 A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. 对[滚 石] (2个词语)的搜索会变成[滚 or 石 or (滚 2个词语 石)]。 对于那些搜索词语在其中按照原样出现的结果,其优先级会高一些。 Automatically add phrase to simple searches 自动将词组添加到简单搜索中 Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. 是否要使用查询词语周围的上下文来构造结果列表条目中的摘要? 对于大的文档可能会很慢。 Dynamically build abstracts 动态构造摘要 Do we synthetize an abstract even if the document seemed to have one? 即使文档本身拥有一个摘要,我们仍然自行合成摘要信息? Replace abstracts from documents 取代文档中自带的摘要 Synthetic abstract size (characters) 合成摘要长度(字符个数) Synthetic abstract context words 合成摘要上下文 The words in the list will be automatically turned to ext:xxx clauses in the query language entry. 这个列表中的词语会在查询语言输入框里自动变成ext:xxx语句。 Query language magic file name suffixes. 查询语言神奇文件名后缀。 Enable 启用 External Indexes 外部索引 Toggle selected 切换选中项 Activate All 全部激活 Deactivate All 全部禁用 Remove from list. This has no effect on the disk index. 从列表中删除。这不会对硬盘上的索引造成损害。 Remove selected 删除选中项 Click to add another index directory to the list 点击这里,以将另一个索引目录添加到列表中 Add index 添加索引 Apply changes 使改变生效 &OK 确定(&O) Discard changes 放弃这些改变 &Cancel 取消(&C) Abstract snippet separator 摘要中的片段的分隔符 Style sheet 样式单 Opens a dialog to select the style sheet file 打开一个对话框,以选择样式单文件 Choose 选择 Resets the style sheet to default 将样式单重置为默认值 Lines in PRE text are not folded. Using BR loses some indentation. PRE中的文字不会换行。使用BR的话会使一些缩进失效。 Use <PRE> tags instead of <BR>to display plain text as html in preview. 在将纯文本显示成html预览的时候,使用<PRE>标签,而不是<BR>标签。 Result List 结果列表 Edit result paragraph format string 编辑结果段落的格式字符串 Edit result page html header insert 编辑结果页面的html头部插入项 Date format (strftime(3)) 日期格式(strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). 
这是一个频率阈值，超过这个值的话，我们就不会把词语放到自动词组中。 高频词语是词组中性能问题的主要来源。 略过的词语会增加词组的空缺值，因此会降低自动词组功能的效率。 默认值是2(百分比)。 Autophrase term frequency threshold percentage 自动词组频率阈值百分比 Plain text to HTML line style 纯文本转换为HTML换行符的风格 Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. PRE文本中的那些行是不会被折叠的。使用BR会丢失一些缩进信息。PRE+换行风格可能才是妳想要的。 <BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE>+换行 Disable Qt autocompletion in search entry. 禁止在查询输入框中使用Qt的自动补全 Search as you type. 在输入的同时进行搜索。 Paths translations 路径变换 Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. 点击此处以向列表中加入另一个索引目录。妳可以选择一个Recoll配置目录或一个Xapian索引。 Snippets window CSS file 片断窗口的CSS文件 Opens a dialog to select the Snippets window CSS style sheet file 打开一个对话框，以选择片断窗口的CSS样式单文件 Resets the Snippets window style 重置片断窗口的样式 Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_cs.qm0000644000175000017500000032734513566424763014246 00000000000000
PО P 5d M s8 7 i p Ւv H p Q5/ £M qѾ({d%n.ʢc/.3Uck6Q8bK9w<{Q~vSBY~s[s\t<e3 g3 hx5 p~Tq Aeu(!lXcm#@c n|Bs'8 lLiVaechny vrazy All clauses AdvSearch Nkter z vrazo Any clause AdvSearchZ`patn pYpona nsobitele ve filtru velikosti$Bad multiplier suffix in size filter AdvSearchMultimediamedia AdvSearch Zprvamessage AdvSearchJinother AdvSearchPYedstaven presentation AdvSearchTabulky spreadsheet AdvSearchTabulky spreadsheets AdvSearchTexttext AdvSearch Textytexts AdvSearch<----- Vae <----- All AdvSearchBase<----- Vbr <----- Sel AdvSearchBase"PYidat nov vraz Add clause AdvSearchBase"Pokro il hlednAdvanced search AdvSearchBaseVae ----> All ----> AdvSearchBase:Vaechna pole napravo, kter nejsou przdn, budou spojovna spojenmi AND (volba "Vaechny vrazy") nebo OR (volba "Nkter z vrazo"). <br>Typy pol "Jakkoli" "Vae" a "}dn" mohou pYijmout sms jednoduchch slov, a vty uzavYen dvojitmi uvozovkami.<br>Pole bez dat jsou pYehl~ena.All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions.
"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.
Fields with no data are ignored. AdvSearchBaseProchzetBrowse AdvSearchBasePodle skupin By categories AdvSearchBaseVZaakrtnte pro zapnut filtrovn podle dat'Check this to enable filtering on dates AdvSearchBasepZaakrtnte pro zapnut filtrovn podle souborovch typo,Check this to enable filtering on file types AdvSearchBasebZaakrtnte pro zapnut filtrovn podle velikost'Check this to enable filtering on sizes AdvSearchBasenZaakrtnte pro pou~vn skupin souboro msto MIME typo;Check this to use file categories instead of raw mime types AdvSearchBase ZavYtClose AdvSearchBase*Smazat posledn vraz Delete clause AdvSearchBaseHZadejte zkladn adresY pro hlednEnter top directory for search AdvSearchBaseFiltrovatFilter AdvSearchBaseFiltrovat data Filter dates AdvSearchBase&Filtrovat velikosti Filter sizes AdvSearchBase NajtFind AdvSearchBaseOdFrom AdvSearchBase2PYehl~en souborov typyIgnored file types AdvSearchBaseObrtitInvert AdvSearchBase"Nejvta velikost Max. Size AdvSearchBasexNejvta velikost: Mo~ete pou~t k/K,m/M,g/G jako nsobitele4Maximum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase"Nejmena velikost Min. Size AdvSearchBasexNejmena velikost: Mo~ete pou~t k/K,m/M,g/G jako nsobitele4Minimum size. You can use k/K,m/M,g/G as multipliers AdvSearchBase.Omezit souborovch typoRestrict file types AdvSearchBaselOmezit vsledky na soubory v nsledujcm podadresYi:%Restrict results to files in subtree: AdvSearchBase&Ulo~it jako vchozSave as default AdvSearchBasezHledat <br>dokumenty<br>, kter splHuj nsledujc hlediska:'Search for
documents
satisfying: AdvSearchBase,Hledan souborov typySearched file types AdvSearchBaseVbr -----> Sel -----> AdvSearchBaseSpustit hledn Start Search AdvSearchBaseDoTo AdvSearchBaseh<p>Zapnout automaticky rozliaovn velkch a malch psmen, kdy~ zznam obsahuje velk psmena (mimo na prvnm mst). Jinak pro muste pou~t jazyk dotazu a modifiktor <i>C</i>.

Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the C modifier to specify character-case sensitivity. ConfIndexW|<p>Zapnout automaticky rozliaovn diakritickch znamnek, kdy~ hledan pojem obsahuje znaky a akcenty (ne v unac_except_trans). Jinak pro muste pou~t jazyk dotazu a modifiktor <i>D</i>.

Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity. ConfIndexW<p>Nejvta po et rozaYen pro jeden vraz (napY. pYi pou~it ~olko). Standardn vchoz hodnota 10 000 je rozumn a zabrn tomu, aby se hledan pojem jevil jako zamrzl, zatmco je prochzen seznam pojmo.

Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. ConfIndexW<p>Nejvta po et zkladnch vrazo, kter pYidme do jednoho dotazu Xapian. V nkterch pYpadech se mohou vsledky rozaYen vrazu vynsobit, a my se chceme vyvarovat nadbyte n spotYeb pamti. Standardn vchoz hodnota 100 000 by mla ve vtain pYpado naprosto posta ovat a hodit se k typickmu sou asnmu sestaven zaYzen (hardware).5

Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfIndexW<p>Toto jsou vjimky pro mechanismus unac, kter ve vchozm nastaven odstraHuje vaechny diakritick znaky a nahrazuje je kanonickmi obdobami. Toto odstraHovn akcento mo~ete (v zvislosti na vaa Ye i) pro nkter znaky potla it a zadat dodate n nahrazen, napY. pro ligatury. V ka~dm mezerou oddlenm zznamu je prvn znak zdrojovm (vchozm) a zbytek je nahrazen.l

These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. ConfIndexWJazyk aspelluAspell language ConfIndexWVAutomaticky rozliaovat velk a mal psmena$Automatic character case sensitivity ConfIndexWVAutomaticky rozliaovat diakritick znamnka Automatic diacritics sensitivity ConfIndexW@Nelze zapsat soubor s nastavenmCan't write configuration file ConfIndexW2Nzev adresYe s databzDatabase directory name ConfIndexW6Zak~e pou~vn aspellu pro vytvoYen pYibli~n podoby pravopisu v nstroji prozkumnka vrazo.<br> U~ite n, pokud aspell nen pYtomen anebo nepracuje. Disables use of aspell to generate spelling approximation in the term explorer tool.
Useful if aspell is absent or does not work.  ConfIndexWPovol rejstYkovn Firefoxem navatvench strnek.<br>(tak je potYeba, abyste nainstalovali pYdavn modul Recollu pro Firefox)\Enables indexing Firefox visited pages.
(you need also install the Firefox Recoll plugin) ConfIndexWZznamy budou po dosa~en velikosti vrceny do povodnho stavu.<br>Skute n dv smysl jen zvtaen velikosti, proto~e zmenaen hodnoty neosek stvajc soubor (na konci jen pltvn mstem).Entries will be recycled once the size is reached.
Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). ConfIndexW"Celkov parametryGlobal parameters ConfIndexWXInterval v megabytech pro vymazn rejstYkuIndex flush megabytes interval ConfIndexW Mstn parametryLocal parameters ConfIndexW6Nzev pro soubor se zpisem Log file name ConfIndexW2roveH podrobnosti zpisuLog verbosity level ConfIndexWrNejvta velikost pro ukldn internetovch strnek (MB) Max. size for the web store (MB) ConfIndexW8Nejvta po et vrazo XapianMaximum Xapian clauses count ConfIndexW>Nejvta po et rozaYen vrazuMaximum term expansion count ConfIndexW"Nepou~vat aspellNo aspell usage ConfIndexW8Zpracovat Yadu historie WEBuProcess the WEB history queue ConfIndexW PYesko en cesty Skipped paths ConfIndexW&Jazyky s kmeny slovStemming languages ConfIndexWSoubor, do kterho se zapae vstupn zprva.<br>Pro vstupy na terminl pou~ijte 'stderr'PThe file where the messages will be written.
Use 'stderr' for terminal output ConfIndexWJazyk pro adresY aspellu. Mlo by to vypadat jako 'en' nebo 'fr' nebo 'cs'...<br>Pokud nen tato hodnota nastavena, pou~ije se pro jej vypo tn prostYed NLS, co~ obvykle pracuje. Pro zskn pYedstavy o tom, co je ve vaaem systmu nainstalovno, napiate 'aspell config' a hledejte soubory .dat v adresYi 'data-dir'. 3The language for the aspell dictionary. This should look like 'en' or 'fr' ...
If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory.  ConfIndexW~Jazyky, pro kter se vytvoY <br>adresYe rozaYen kmeno slov.IThe languages for which stemming expansion
dictionaries will be built. ConfIndexWSeznam adresYo, ve kterch za n rejstYkovn v etn podslo~ek. Vchoz: adresY Home.LThe list of directories where recursive indexing starts. Default: your home. ConfIndexW"Nzev pro adresY, kam se maj ukldat kopie navatvench internetovch strnek.<br>Neabsolutn cesta je vzata relativn k adresYi s nastavenm.The name for a directory where to store the copies of visited web pages.
A non-absolute path is taken relative to the configuration directory. ConfIndexWNzev pro adresY, v nm~ se m ukldat rejstYk.<br>Neabsolutn cesta je vzata relativn k adresYi s nastavenm. Vchoz je 'xapiandb'.The name for a directory where to store the index
A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. ConfIndexWNzvy nzvy cest adresYo, kter se nebudou rejstYkovat.<br>Mo~e obsahovat zstupn symboly (~olky). Mus odpovdat cestm, kter rejstYkova vid (napY. pokud v po te nch adresYch stoj '/home/me' a '/home' je ve skute nosti odkazem na '/usr/home', potom by byl sprvnm zpisem skippedPath '/home/me/tmp*' a ne '/usr/home/me/tmp*')BThese are pathnames of directories which indexing will not enter.
Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') ConfIndexWToto je procentn podl vyu~vn disku - celkov vyu~it disku, ne velikost rejstYku , kdy rejstYkovn sel~e a zastav se (kvoli vyhnut se zaplnn vaaeho disku).<br>Vchoz hodnota 0 odstran vaechna omezen, znamen ~dn omezen.This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.
The default value of 0 removes any limit. ConfIndexW4Tato hodnota upravuje mno~stv dat, kter jsou rejstYkovna mezi splchnutmi na disk.<br>Pomh to Ydit pou~it pamti rejstYkova e. Vchoz je 10 MB This value adjust the amount of data which is indexed between flushes to disk.
This helps control the indexer memory usage. Default 10MB  ConfIndexWTato hodnota upravuje mno~stv zprv,<br>od pouze chyb a~ po velk mno~stv dat zajmavch pro ladn.ZThis value adjusts the amount of messages,
from only errors to a lot of debugging data. ConfIndexW$Po te n adresYeTop directories ConfIndexWVjimky unacUnac exceptions ConfIndexW^Nzev adresYe pro ukldn internetov strnkyWeb page store directory name ConfIndexWVybran seznam rejstYkovanch typo MIME.<br>Nic jinho se nebude rejstYkovat. Oby ejn je seznam przdn a ne inneAn exclusive list of indexed mime types.
Nothing else will be indexed. Normally empty and inactive ConfSubPanelW&Vylou en typy MIMEExclude mime types ConfSubPanelWVnja filtry pracujc dle ne~ po tak dlouhou dobu budou pYeruaeny. Je to pro ten zYdkav pYpad (napY. postscript), kdy by dokument mohl zapY init vejit filtru do smy ky. Nastavte na -1 pro ~dn omezen. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.  ConfSubPanelWCelkovGlobal ConfSubPanelWPokud je nastavena tato hodnota (nerovn se -1), textov soubory budou pro rejstYkovn rozdleny na kousky o tto velikosti. To pomo~e pYi prohledvn velmi velkch textovch souboro (napY. souboro se zpisem).If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). ConfSubPanelWRNejvta velikost zabalenho souboru (KB)Max. compressed file size (KB) ConfSubPanelWPNejvta velikost textovho souboru (KB)Max. text file size (MB) ConfSubPanelWNTypy MIME, kter se nemaj rejstYkovatMime types not to be indexed ConfSubPanelWPouze typy MIMEOnly mime types ConfSubPanelWNVelikost strnky textovho souboru (KB)Text file page size (KB) ConfSubPanelW"Tato hodnota nastavuje prh, za kterm nebudou zabalen soubory zpracovvny. Nastavte na -1 pro ~dn omezen, na 0 pro vobec ~dn rozbalovn.This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. ConfSubPanelW^Tato hodnota nastavuje prh, za kterm nebudou textov soubory zpracovvny. Nastavte na -1 pro ~dn omezen. Je to kvoli vylou en obYch souboro se zpisem z rejstYkovn.This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. ConfSubPanelW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> dvkov rejstYkovac rozvrh (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ka~d pole mo~e obsahovat zstupn symbol (*), jednoduchou  selnou hodnotu,  rkou oddlen seznamy (1,3,5) a rozsahy (1-7). 
Obecnji, pole se budou pou~vat <span style=" font-style:italic;">jak je</span> uvnitY souboru crontab, a lze pou~t plnou stavbu crontab, podvejte se na crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />NapYklad, zadn <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Dny, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> a <span style=" font-family:'Courier New,courier';">15</span> v <span style=" font-style:italic;">Minuty</span> spust rejstYkovn (recollindex) ka~d den v 12:15 dopoledne a 7:15 odpoledne</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Rozvrh s velmi  astm spuatnm je pravdpodobn mn  inn ne~ je rejstYkovn ve skute nm  ase.</p></body></html>

Recoll batch indexing schedule (cron)

Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used as is inside the crontab file, and the full crontab syntax can be used, see crontab(5).


For example, entering * in Days, 12,19 in Hours and 15 in Minutes would start recollindex every day at 12:15 AM and 7:15 PM

A schedule with very frequent activations is probably less efficient than real time indexing.

 CronToolW<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Klepnte na <span style=" font-style:italic;">Zakzat</span> pro zastaven automatickho dvkovho rejstYkovn, <span style=" font-style:italic;">Povolit</span> pro jeho zapnut, <span style=" font-style:italic;">Zruait</span>, aby vae zostalo beze zmny.</p></body></html>

Click Disable to stop automatic batch indexing, Enable to activate it, Cancel to change nothing.

 CronToolWDialog Cron Cron Dialog CronToolWXDny v tdnu (* nebo 0-7, 0 nebo 7 je nedle))Days of week (* or 0-7, 0 or 7 is Sunday) CronToolWZakzatDisable CronToolWPovolitEnable CronToolWtChyba pYi instalaci zznamu cron. `patn skladba v polch?3Error installing cron entry. Bad syntax in fields ? CronToolW(Hodiny (* nebo 0-23)Hours (* or 0-23) CronToolWZd se, ~e pro recollindex existuj ru n upraven zznamy, nelze upravit crontabPIt seems that manually edited entries exist for recollindex, cannot edit crontab CronToolWMinuty (0-59)Minutes (0-59) CronToolW DialogDialog EditDialog"Chyba v nastaven Config error EditTransMstn cesta Local path EditTransPovodn cesta Original path EditTransCesta ke zdroji Source path EditTrans PYidatAdd EditTransBase ZruaitCancel EditTransBase SmazatDelete EditTransBasePYeklady cestPath Translations EditTransBase Ulo~itSave EditTransBaseVyberte jeden nebo vce datovch typo a pou~ijte ovldac prvky v rme ku n~e pro zmnu zposobu, jakm jsou zpracovnykSelect one or several file types, then use the controls in the frame below to change how they are processed EditTransBase8Nastaven pYeklado cest pro Setting path translations for  EditTransBase <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Zd se, ~e rejstYk pro toto nastaven neexistuje.</span><br /><br />Pokud chcete pouze zrejstYkovat svoj domovsk adresY sadou rozumnch vchozch nastaven, stisknte tla tko <span style=" font-style:italic;">Spustit rejstYkovn nyn</span>. Podrobnosti budete moci upravit pozdji. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Pokud chcete mt vta dohled, pou~ijte nsledujc odkazy pro upraven nastaven rejstYkovn a rozvrhu.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">K tmto nstrojom lze pYistupovat pozdji v nabdce <span style=" font-style:italic;">Nastaven</span>.</p></body></html>

It appears that the index for this configuration does not exist.

If you just want to index your home directory with a set of reasonable defaults, press the Start indexing now button. You will be able to adjust the details later.

If you want more control, use the following links to adjust the indexing configuration and schedule.

These tools can be accessed later from the Preferences menu.

FirstIdxDialog:Prvn nastaven rejstYkovnFirst indexing setupFirstIdxDialog.Nastaven rejstYkovnIndexing configurationFirstIdxDialog(Rozvrh rejstYkovnIndexing scheduleFirstIdxDialog4Spustit rejstYkovn nynStart indexing nowFirstIdxDialog"Toto vm umo~n nastavit adresYe, kter chcete rejstYkovat, a dala parametry, jako jsou cesty pro vylou en soubory, vchoz znakov sady atd.This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.FirstIdxDialogFToto vm umo~n zvolit mezi dvkovm rejstYkovnm a rejstYkovnm ve skute nm  ase, a nastavit automatick rozvrh pro dvkov rejstYkovn (za pou~it cronu).This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).FirstIdxDialog%1 nenalezen. %1 not found.FragButs%1: %2%1: %2FragButsKousky hlednQuery FragmentsFragButs $<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> rejstYkovn mo~e b~et nepYetr~it, soubory se rejstYkuj pYi jejich zmn, nebo b~et v samostatnch intervalech. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"> etba pYru ky vm mo~e pomoci pYi rozhodovn se mezi tmito pYstupy (stisknte F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Tento nstroj vm mo~e pomoci s nastavenm rozvrhu pro automatizaci bho dvkovho rejstYkovn, nebo spustit rejstYkovn ve skute nm  ase, kdy~ se pYihlste (nebo oboj, co~ zYdkakdy dv smysl). </p></body></html>

Recoll indexing can run permanently, indexing files as they change, or run at discrete intervals.

Reading the manual may help you to decide between these approaches (press F1).

This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense).

 IdxSchedWRozvrh cronCron scheduling IdxSchedWRozhodnte, zda se rejstYkovn ve skute nm  ase spust, kdy~ se pYihlste (pouze pro vchoz rejstYk).ZDecide if real time indexing will be started when you log in (only for the default index). IdxSchedW>Nastaven rozvrhu rejstYkovnIndex scheduling setup IdxSchedWPSpuatn rejstYkovn ve skute nm  aseReal time indexing start up IdxSchedWNstroj vm umo~n rozhodnout se, kdy m rejstYkovn b~et, a nainstaluje zznam crontab._The tool will let you decide at what time indexing should run and will install a crontab entry. IdxSchedW DialogDialog ListDialog$Seskupovac oknkoGroupBox ListDialogSoubor "history" je poakozen. ProvYte jej, prosm, anebo jej odstraHte: 6"history" file is damaged, please check or remove it: MainFNenastaven ~dn databzov adresY No db directory in configurationMain &Dala&NextPreview&PYedchoz &PreviousPreview&Hledat: &Search for:PreviewDChyba pYi rejstYkovn dokumentu 0Can't turn doc into internal representation for Preview ZruaitCancelPreviewVyprzdnitClearPreview2VytvY se nhledov textCreating preview textPreviewHNhledov text se nahrv do editoru Loading preview text into editorPreviewLDbt na &psan velkch a malch psmen Match &CasePreview4Chyb program s npovdou:Missing helper program: PreviewOtevYtOpenPreviewKoprovatCopyPreviewTextEditZalomit Ydky Fold linesPreviewTextEdit"Zachovat odsazenPreserve indentationPreviewTextEditTiskPrintPreviewTextEdit2Vytisknout nynja nhledPrint Current PreviewPreviewTextEdit4Ulo~it dokument do souboruSave document to filePreviewTextEditVybrat vae Select AllPreviewTextEditUkzat pole Show fieldsPreviewTextEditUkzat obrzek Show imagePreviewTextEdit$Ukzat hlavn textShow main textPreviewTextEdit(<b>Vlastn podstromyCustomised subtreesQObject~Toto je znakov sada, kter se pou~v pro  ten souboro, kter svou znakovou sadu vnitYn neur uj, napY.. soubory s textem.<br>Vchoz hodnota je przdn a pou~v se hodnota prostYed NLS.Character set used for reading files which do not identify the character set internally, for example pure text files.
The default value is empty, and the value from the NLS environnement is used.QObject.Vchoz<br>znakov sadaDefault
character setQObject4Sledovat symbolick odkazyFollow symbolic linksQObjectBhem rejstYkovn sledovat symbolick odkazy. Vchoz nastaven je ne kvoli vyvarovan se dvojitho rejstYkovnTFollow symbolic links while indexing. The default is no, to avoid duplicate indexingQObject(PYehl~en zakon enIgnored endingsQObjectHRejstYkovat vaechny souborov nzvyIndex all file namesQObjectRejstYkovat vaechny nzvy souboro, jejich~ obsah nelze ur it nebo zpracovat (~dn nebo nepodporovan MIME typ). Vchoz hodnota je ano}Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default trueQObject PYesko en nzvy Skipped namesQObjectSeznam podadresYo v rejstYkovan hierarchii <br>kde nkter parametry je potYeba nov vymezit. Vchoz: przdn.sThe list of subdirectories in the indexed hierarchy
where some parameters need to be redefined. Default: empty.QObjectTToto jsou zakon en souboro pro soubory, kter se budou rejstYkovat vhradn podle svho nzvu (~dn ur ovn typu MIME, ~dn rozbalovn, ~dn rejstYkovn obsahu).These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing).QObjectToto jsou vzory pro nzvy souboro nebo adresYo, kter se nemaj rejstYkovat.LThese are patterns for file or directory names which should not be indexed.QObject&OtevYt&OpenQWidgetF&OtevYt rodi ovsk dokument/slo~ku&Open Parent document/folderQWidget&Nhled&PreviewQWidget$&Zapsat do souboru&Write to FileQWidget4Nelze vythnout dokument: Cannot extract document: QWidget6Vybrat pYesn jeden adresYChoose exactly one directoryQWidget0Koprovat nzev &souboruCopy &File NameQWidget.Koprovat adresu (&URL) Copy &URLQWidget>NepodaYilo se  st z adresYe: Could not read directory: QWidgetJVytvoYit nebo vybrat ukldac adresYCreate or choose save directoryQWidget0Najt &podobn dokumentyFind &similar documentsQWidget,OtevYt okno s r&yvkyOpen &Snippets windowQWidgetOtevYt s Open WithQWidgetJNhled na &rodi ovsk dokument/slo~kuPreview P&arent document/folderQWidgetSpustit skript Run ScriptQWidget.Ulo~it vbr do souboroSave selection to filesQWidgetDUkzat podYzen dokumenty/pYlohyShow subdocuments / attachmentsQWidget\Neo ekvan stYet v souborovm nzvu. Rua se.+Unexpected file name collision, cancelling.QWidget"Neukazovat znovu.Do not show again.QxtConfirmationMessage<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> rejstYkovn mo~e bt nastaveno tak, aby b~elo jako dmon. Soubory jsou aktualizovny pYi jejich zmn, ve skute nm  ase. Zskte tak v~dy nejnovja rejstYk, ale prostYedky systmu se pYi tom pou~vaj nepYetr~it.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>.

Recoll indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.

RTIToolWJSpustit rejstYkovacho dmona ihned.%Also start indexing daemon right now.RTIToolW|Soubor automatickho spuatn smazn. Zabt i sou asn proces?2Autostart file deleted. Kill current process too ?RTIToolW Nelze vytvoYit: Can't create: RTIToolWBNepodaYilo se spustit recollindexCould not execute recollindexRTIToolWSmazn souboru Deleting fileRTIToolWMazn: Deleting: RTIToolWhAutomatick spuatn rejstYkovn ve skute nm  ase"Real time indexing automatic startRTIToolWBOdstrann automatickho spuatnRemoving autostartRTIToolW"Nahrazen souboruReplacing fileRTIToolWNahrazen: Replacing: RTIToolWzSpustit rejstYkovacho dmona s mm sezenm pracovn plochy..Start indexing daemon with my desktop session.RTIToolWVarovnWarningRTIToolW. s dodate nou zprvou:  with additional message: RclMainVaechny jazyky(all languages)RclMain6ydn rozaYen kmene slova (no stemming)RclMain"O programu Recoll About RecollRclMainVaeAllRclMainChybn specifikace aplikace pro %1: [%2] ProvYte soubor pracovn plochy?Bad desktop app spec for %1: [%2] Please check the desktop fileRclMain`patn cesty Bad pathsRclMainChybov pYkaz pro prohl~e pro %1: [%2] ProvYte soubor mimeconfCBad viewer command line for %1: [%2] Please check the mimeview fileRclMain8Nelze pYistoupit k souboru: Can't access file: RclMain:Nelze vytvoYit nhledov oknoCan't create preview windowRclMainNelze nastavit soubor se slovy majcmi stejn vznam (synonyma). Chyba pYi zpracovn?&Can't set synonyms file (parse error?)RclMain.Nelze rozbalit soubor: Can't uncompress file: RclMainRNelze obnovit rejstYk: b~ rejstYkova #Can't update index: indexer runningRclMainjNelze vythnout dokument nebo vytvoYit do asn soubor0Cannot extract document or create temporary fileRclMain>Nelze najt rodi ovsk dokumentCannot find parent documentRclMainL}dn informace o dokumentu v databzi+Cannot retrieve document info from databaseRclMain.Vybrat soubor k ulo~enChoose file to saveRclMainZavYenClosingRclMainHNepodaYilo se nahrt ulo~en hlednCould not load saved queryRclMainNepodaYilo se otevYt vnja rejstYk. Databze neotevYena. ProvYte seznam vnjach rejstYko.HCould not open external index. Db not open. Check external indexes list.RclMain<NepodaYilo se otevYt soubor: Could not open file: RclMain>NepodaYilo se zapsat do souboruCould not write to fileRclMain~Zakzno proto~e rejstYkova ve skute nm  ase nebyl sestaven.;Disabled because the real time indexer was not compiled in.RclMainNeukazovat toto varovn pYat (pou~t nastaven u~ivatelskho rozhran pro obnoven).DDo not show this warning next time (use GUI preferences to restore).RclMainFiltr dokumentuDocument filterRclMain$Historie dokumentuDocument historyRclMain HotovoDoneRclMain$Zdvojen dokumentyDuplicate documentsRclMain"Smazn rejstYku Erasing indexRclMain ChybaErrorRclMainProvd se: [ Executing: [RclMainPro vaae souborov typy jsou potYeba vnja programy/pYkazy, kter ale nebyly nalezeny, jak byly ulo~eny pYi poslednm rejstYkovn v pExternal applications/commands needed for your file types and not found, as stored by the last indexing pass in RclMainHistorick data History dataRclMain"RejstYk uzamknut Index lockedRclMain:Chyba pYi hledn v rejstYkuIndex query errorRclMain.Rozvr~en rejstYkovnIndex schedulingRclMain.RejstYkovan MIME typyIndexed MIME TypesRclMainRejstYkova b~. Nelze pYistupovat k souboru s internetovou vyrovnvac pamt./Indexer is running. 
Can't access webcache file.RclMain4RejstYkovn se nezdaYiloIndexing failedRclMain RejstYkuje se: Indexing in progress: RclMain.RejstYkovn pYeruaenoIndexing interruptedRclMain&Chyba pYi nahrvn Load errorRclMain,Chyb pomocn programyMissing helper programsRclMain DohledMonitorRclMainh}dn vnja prohl~e nebyl nastaven pro MIME typ [-No external viewer configured for mime type [RclMainBNenalezeny ~dn pomocn programyNo helpers found missingRclMainB}dn zachovan pYedchoz hlednNo preserved previous searchRclMain2Nenalezeny ~dn vsledkyNo results foundRclMain}dn hledn No searchRclMainFNekritick zprva o rejstYkovn: Non-fatal indexing message: RclMain }dnNoneRclMainOtevr se do asn kopie. pravy budou ztraceny, pokud je neulo~te<br/>do trvalho umstn.`Opening a temporary copy. Edits will be lost if you don't save
them to a permanent location.RclMainVy istitPurgeRclMainHledn b~.<br>Kvoli omezenm rejstYkovac knihovny<br>zruaen ukon programeQuery in progress.
Due to limitations of the indexing library,
cancelling will exit the programRclMain Vsledky hledn Query resultsRclMain*NepodaYilo se pYe st Read failedRclMainPNastavit rejstYk znovu a za t od nuly?(Reset the index and start from scratch ?RclMain,Po et vsledko (odhad)Result count (est.)RclMainVsledkyResultsRclMainUlo~it soubor Save fileRclMain.Ulo~en dotazy (*.rclq)Saved Queries (*.rclq)RclMainhVbrov vzory lze pou~t jen s po te nm adresYem:Selection patterns can only be used with a start directoryRclMainTVbrov vzory potYebuj po te n adresYSelection patterns need topdirRclMainPromiHte. Nen nyn dostupn pod OS Windows. Pou~ijte polo~ek v nabdce Soubor k aktualizaci rejstYkuYSorry, not available under Windows for now, use the File menu entries to update the indexRclMainKmeny slovStemdbRclMain.Zastavit &rejstYkovnStop &IndexingRclMain:PodYzen dokumenty a pYlohySub-documents and attachmentsRclMainNynja rejstYkovac proces nebyl spuatn z tohoto rozhran. Klepnte na OK pro jeho zabit, nebo na Zruait, aby byl ponechn smyThe current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it aloneRclMainDokument je sou st vnjaho rejstYku, kter nelze aktualizovat. @The document belongs to an external index which I can't update. RclMainRejstYkova b~, tak~e vci by se po dokon en rejstYkovn mly zlepait. @The indexer is running so things should improve when it's done. RclMainProhl~e stanoven v MIME zobrazen pro %1: %2 nenalezen. Chcete spustit dialog s nastavenm?hThe viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ?RclMainZTyto adresy ( | ipath) sdlej toto~n obsah:-These Urls ( | ipath) share the same content:RclMaintTento nastavovac nstroj pracuje jen pro hlavn rejstYk.6This configuration tool only works for the main index.RclMain4Toto hledn u~ nen  inn"This search is not active any moreRclMainNstrojeToolsRclMain~Pae seznam przdn: Mo~n po kat na pokra ovn rejstYkovn?6Types list empty: maybe wait for indexing to progress?RclMainNeznmUnknownRclMainNeznm stav rejstYkova e. Nelze pYistupovat k souboru s internetovou vyrovnvac pamt.2Unknown indexer state. 
Can't access webcache file.RclMain"Obnovit &rejstYk Update &IndexRclMain ObnovaUpdatingRclMainPYkaz pro prohl~e pro %1 stanovuje jak hodnotu souboru tak hodnotu rodi ovskho souboru: nepodporovnoQViewer command line for %1 specifies both file and parent file value: unsupportedRclMainPYkaz pro prohl~e pro %1 stanovuje rodi ovsk soubor, ale adresa (URL) je http[s]: nepodporovnoPViewer command line for %1 specifies parent file but URL is http[s]: unsupportedRclMainVarovnWarningRclMain(NepodaYilo se zapsat Write failedRclMainNChyba pYi vyhledn jazyko s kmeny slov#error retrieving stemming languagesRclMainFiltrovnofilteredRclMainMultimediamediaRclMain ZprvamessageRclMainJinotherRclMainPYedstaven presentationRclMainTYdnosortedRclMainTabulky spreadsheetRclMainTexttextRclMain^S novm pokusem o zpracovn selhavach souboro With failed files retrying RclMainBase$&O programu Recoll &About Recoll RclMainBase$&Pokro il hledn&Advanced Search RclMainBase6&Vymazat historii dokumentu&Erase document history RclMainBase2&Vymazat historii hledn&Erase search history RclMainBase&Soubor&File RclMainBase&Cel obrazovka &Full Screen RclMainBaseBNastaven u~ivatelskho roz&hran&GUI configuration RclMainBase&Npovda&Help RclMainBase(Nastaven &rejstYku&Index configuration RclMainBase&Nastaven &Preferences RclMainBase0&Sestavit rejstYk znovu&Rebuild index RclMainBase&Vsledky&Results RclMainBase$Parametry &tYdn&Sort parameters RclMainBase&Nstroje&Tools RclMainBase*&U~ivatelsk pYru ka &User manual RclMainBase&Pohled&View RclMainBase2Pokro il/Slo~en hlednAdvanced/complex Search RclMainBase Ctrl+QCtrl+Q RclMainBase$Historie dokumentuDocument History RclMainBase&&Historie dokumentuDocument &History RclMainBase&Ukon itE&xit RclMainBase@Dialog pro &vnja rejstYkovnE&xternal index dialog RclMainBaseDPovolit slova majc stejn vznamEnable synonyms RclMainBase>Dialog pro vnja rejstYkovnExternal index dialog RclMainBaseF11F11 RclMainBasePrvn strana First Page RclMainBasePrvn strana First page RclMainBase$Na celou obrazovku Full Screen RclMainBase<Jt na prvn stranu s vsledkyGo to first page of results RclMainBase*&Statistika rejstYkuIndex &statistics RclMainBase0RejstYkovan &MIME typyIndexed &MIME types RclMainBase*&Rozvrh rejstYkovnIndexing &schedule RclMainBaseFRejstYkovn se zvlatnmi volbamiIndexing with special options RclMainBase,Nahrt ulo~en hlednLoad saved query RclMainBase6Chybjc &pomocn programyMissing &helpers RclMainBaseDala strana Next Page RclMainBaseDala strana Next page RclMainBase.Dala strana s vsledkyNext page of results RclMainBaseNov aktualizace rejstYku se pokus znovu zpracovat nyn nezpracovan soubory.Next update will retry previously failed files RclMainBase,O stranu dolo (PgDown)PgDown RclMainBase,O stranu nahoru (PgUp)PgUp RclMainBase PYedchoz strana Previous Page RclMainBase PYedchoz strana Previous page RclMainBase6PYedchoz strana s vsledkyPrevious page of results RclMainBaseKousky hlednQuery Fragments RclMainBase RecollRecoll RclMainBaseVUlo~it jako soubor CSV (tabulkov dokument)Save as CSV (spreadsheet) file RclMainBase.Ulo~it posledn hlednSave last query RclMainBaseUlo~it vsledek do souboru, jej~ mo~ete nahrt jako seait s listy v tabulkovm kalkultoru@Saves the result into a file which you can load in a spreadsheet RclMainBaseShift+PgUp Shift+PgUp RclMainBase4Ukzat podrobnosti hlednShow Query Details RclMainBase&Ukzat jako tabulku Show as table RclMainBaseUkzat vsledky v tabulce na zposob seaitu s listy v tabulkovm kalkultoru(Show results in a spreadsheet-like table RclMainBasePRoztYdit podle data, nejprve 
nejnovjaSort by date, newest first RclMainBaseNRoztYdit podle data, nejprve nejstaraSort by date, oldest first RclMainBasebRoztYdit podle data od nejnovjaho po nejstara#Sort by dates from newest to oldest RclMainBasebRoztYdit podle data od nejstaraho po nejnovja#Sort by dates from oldest to newest RclMainBase"Parametry tYdnSort parameters RclMainBase,Zvlatn rejstYkovnSpecial Indexing RclMainBase$Prozkumnk &vrazoTerm &explorer RclMainBase4Nstroj prozkumnka vrazoTerm explorer tool RclMainBase<Spustit pYrostkov prochzenTrigger incremental pass RclMainBase"Obnovit &rejstYk Update &index RclMainBaseJEditor internetov vyrovnvac pamtiWebcache Editor RclMainBaseUkon itQuit RclTrayIconObnovitRestore RclTrayIcon VtahAbstract RecollModel AutorAuthor RecollModel DatumDate RecollModelDatum a  as Date and time RecollModelDatum dokumentu Document date RecollModel$Velikost dokumentu Document size RecollModelDatum souboru File date RecollModelNzev souboru File name RecollModel Velikost souboru File size RecollModel IpathIpath RecollModelKl ov slovaKeywords RecollModelTyp MIME MIME type RecollModel MtimeMtime RecollModel(Povodn znakov sadaOriginal character set RecollModel(Hodnocen zva~nostiRelevancy rating RecollModel NzevTitle RecollModelAdresa (URL)URL RecollModel (ukzat hledn) (show query)ResListX<p><b>Nebyly nalezeny ~dn vsledky</b><br>

No results found
ResListd<p><i>Nhradn pravopis (pYzvuky potla eny): </i>4

Alternate spellings (accents suppressed): ResList:<p><i>Nhradn pravopis: </i>

Alternate spellings: ResList$Historie dokumentuDocument historyResListDokumenty DocumentsResList DalaNextResListOtevYtOpenResList NhledPreviewResListPYedchozPreviousResList*Podrobnosti o hledn Query detailsResList,Po et vsledko (odhad)Result count (est.)ResListVsledky Result listResList ryvkySnippetsResList&Nedostupn dokumentUnavailable documentResListproforResListmimo alespoHout of at leastResList&Smazat sloupec&Delete columnResTable.Nastavit tYdn &znovu &Reset sortResTable &Ulo~it jako CSV &Save as CSVResTable&PYidat sloupec "%1"Add "%1" columnResTable>Nelze otevYt/vytvoYit soubor: Can't open/create file: ResTable<Ulo~it tabulku jako soubor CSVSave table to CSV fileResTableV lia se od nynjaho nastaven (ponechno)' differ from current preferences (kept)SSearchVaechny vrazy All termsSSearchJakkoli vrazAny termSSearchRAutomatick pYpony pro ulo~en hledn:  Auto suffixes for stored query: SSearchAutomatick tvoYen slovnch obrato je nastaveno, ale bylo zruaeno pro ulo~en hledn3Autophrase is set but it was unset for stored querySSearchAutomatick tvoYen slovnch obrato je zruaeno, ale bylo nastaveno pro ulo~en hledn3Autophrase is unset but it was set for stored querySSearch,`patn Yetzec hlednBad query stringSSearchvZadejte ~olkov vraz (zstupn symbol) pro nzev souboru.$Enter file name wildcard expression.SSearch^Zadejte vraz jazyka hledn. Seznam:<br> <i>term1 term2</i> : 'term1' a 'term2' do kterhokoli pole.<br> <i>field:term1</i> : 'term1' do pole 'field'.<br> Obvykl nzvy pol/synonyma:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudopole: dir, mime/format, type/rclcat, date.<br> PYklady intervalo dvou dat: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> Mo~ete pou~t kulat zvorky, aby byly vci zYetelnja.<br> <i>"term1 term2"</i> : vtn sek (mus se objevit pYesn). Mo~n modifiktory:<br> <i>"term1 term2"p</i> : neuspoYdan hledn podle blzkosti s vchoz vzdlenost.<br> Pou~ijte odkaz <b>Ukzat hledn</b>, kdy~ mte o vsledku pochybnost, a podvejte se do pYru ky (&lt;F1>) na dala podrobnosti. Enter query language expression. Cheat sheet:
term1 term2 : 'term1' and 'term2' in any field.
field:term1 : 'term1' in field 'field'.
Standard field names/synonyms:
title/subject/caption, author/from, recipient/to, filename, ext.
Pseudo-fields: dir, mime/format, type/rclcat, date, size.
Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.
term1 term2 OR term3 : term1 AND (term2 OR term3).
You can use parentheses to make things clearer.
"term1 term2" : phrase (must occur exactly). Possible modifiers:
"term1 term2"p : unordered proximity search with default distance.
Use Show Query link when in doubt about result and see manual (<F1>) for more detail. SSearchLVnja rejstYky pro ulo~en hledn: #External indexes for stored query: SSearchNzev souboru File nameSSearch>Nen dostupn ~dn dala pame Out of memorySSearchJazyk hlednQuery languageSSearchRJazyky s kmeny slov pro ulo~en hledn: %Stemming languages for stored query: SSearch(Vyberte typ hledn.Choose search type. SSearchBase SmazatClear SSearchBase Ctrl+SCtrl+S SSearchBase*Smazat hledan zznamErase search entry SSearchBaseSSearchBase SSearchBase SSearchBase HledatSearch SSearchBaseSpustit hledn Start query SSearchBaseVaeAll SearchClauseWJakkolivAny SearchClauseWNzev souboru File name SearchClauseW}dn poleNo field SearchClauseW }dnNone SearchClauseWdPo et slov, kter se smj nachzet mezi hledanmiHNumber of additional words that may be interspersed with the chosen ones SearchClauseWTato slovaPhrase SearchClauseWPodobn vrazy Proximity SearchClauseWjVyberte druh hledn, se kterm se slova budou hledat>Select the type of query that will be performed with the words SearchClauseWHledat:Find:Snippets DalaNextSnippetsPYedchozPrevSnippets ryvkySnippetsSnippetsP<p>V rmci omezen hledn nebyla bohu~el nalezena ~dn shoda. Pravdpodobn je dokument velice velk a vyvje ryvko se v nm ztratil (nebo skon il ve akarp)...</p>

Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...

 SnippetsW HledatSearch SnippetsWProchzetBrowseSpecIdxW(AdresY k rejstYkovn v etn podadresYo. Mus bt uvnitY rejstYkovan oblasti<br>, jak je stanovena v souboru s nastavenm (po te n adresYe).Directory to recursively index. This must be inside the regular indexed area
as defined in the configuration file (topdirs).SpecIdxWrJinak jen zmnn nebo selhava soubory budou zpracovny.5Else only modified or failed files will be processed.SpecIdxWfVymazat pYed rejstYkovnm data vybranch souboro.*Erase selected files data before indexing.SpecIdxWPonechat przdn pro vybrn vaech souboro. Mo~ete pou~t vce vzoro oddlench mezerami.<br>Vzory s vlo~enmi mezerami musej bt uzavYeny ve dvojitch uvozovkch.<br>Lze pou~t, jen kdy~ je nastaven za te n cl.Leave empty to select all files. You can use multiple space-separated shell-type patterns.
Patterns with embedded spaces should be quoted with double quotes.
Can only be used if the start target is set.SpecIdxWVbrov vzory:Selection patterns:SpecIdxW,Zvlatn rejstYkovnSpecial IndexingSpecIdxWBPYedmt rejstYkovan od spuatnTop indexed entitySpecIdxW&ZavYt&Close SpellBase&Rozbalit &Expand  SpellBasePYzvukyAccents SpellBase Alt+CAlt+C SpellBase Alt+EAlt+E SpellBaseFRozliaovn velkch a malch psmenCase SpellBase ShodaMatch SpellBase6}dn informace o databzi. No db info. SpellBase"Prozkumnk vrazo Term Explorer SpellBaseLDokumenty vytvoYen nebo aktualizovan Documents created/updatedSpellW$Vyzkouaen soubory Files testedSpellW0NezrejstYkovan soubory Unindexed filesSpellW%1 vsledky(o) %1 resultsSpellWBPromrn po et vrazo na dokumentAverage terms per documentSpellW8Velikost adresYe s databzDatabase directory sizeSpellWDok. / Tot. Doc. / Tot.SpellWRejstYk: %1 dokumento, promrn dlka %2 vrazy(o). %3 vsledky(o)7Index: %1 documents, average length %2 terms.%3 resultsSpellWPolo~kaItemSpellWtVypsat soubory, kter se nepodaYilo zrejstYkovat (pomal),List files which could not be indexed (slow)SpellWVSeznam byl zkrcen abecedn, nkter  etn 1List was truncated alphabetically, some frequent SpellW<Nejvta dlka dokumentu (mez)Longest document length (terms)SpellWTypy MIME: MIME types:SpellW4Nenalezeno ~dn rozaYenNo expansion foundSpellWPo et dokumentoNumber of documentsSpellWRegulrn vrazRegexpSpellWDVsledky poslednho rejstYkovn:Results from last indexing:SpellW6Ukzat statistiku rejstYkuShow index statisticsSpellW<Nejmena dlka dokumentu (mez) Smallest document length (terms)SpellW>Chyba v pravopisnch nvrzch. Spell expansion error. SpellW(Pravopis/HlskoslovSpelling/PhoneticSpellW*RozaYen kmene slovaStem expansionSpellW VrazTermSpellWHodnotaValueSpellW Zstupn symboly WildcardsSpellWNChyba pYi vyhledn jazyka s kmeny slov#error retrieving stemming languagesSpellW\pojmy mohou chybt. Zkuste pou~t dela koYen..terms may be missing. Try using a longer root.SpellWVaechny vrazy All terms UIPrefsDialogJakkoli vrazAny term UIPrefsDialogPJe potYeba vybrat alespoH jeden rejstYk$At most one index should be selected UIPrefsDialogNelze pYidat rejstYk s odlianou volbou pro velikost psma/diakritiku>Cant add index with different case/diacritics stripping option UIPrefsDialog VybratChoose UIPrefsDialog,Vchoz psmo QtWebkitDefault QtWebkit font UIPrefsDialogNzev souboru File name UIPrefsDialogJazyk hlednQuery language UIPrefsDialog^Zhlav seznamu s vsledky (vchoz je przdn)%Result list header (default is empty) UIPrefsDialogFormt odstavce seznamu s vsledky (vymazat vaechny pro znovunastaven na vchoz)Toto je hlavn/mstn rejstYk!This is the main/local index! 
UIPrefsDialog^Hodnota obdr~en z poslednho ukon en programu Value from previous program exit UIPrefsDialogNChyba pYi vyhledn jazyka s kmeny slov#error retrieving stemming languages UIPrefsDialog^Mn se zznamy s odlianmi nynjami hodnotami.Changing entries with different current values ViewAction PYkazCommand ViewActionVchoz plochaDesktop Default ViewActionTyp MIME MIME type ViewAction(<b>Nov hodnoty:</b>New Values:ViewActionBaseN innost (przdn -> vchoz pro Recoll) Action (empty -> recoll default)ViewActionBase.Pou~t na nynja vbrApply to current selectionViewActionBase ZavYtCloseViewActionBaseJVjimka pro nastaven pracovn plochy Exception to Desktop preferencesViewActionBaseProhl~e eNative ViewersViewActionBase AktionRecoll action:ViewActionBaseVyberte jeden nebo vce datovch typo a pou~ijte ovldac prvky v rme ku n~e pro zmnu zposobu, jakm jsou zpracovnykSelect one or several file types, then use the controls in the frame below to change how they are processedViewActionBaseVyberte jeden nebo vce MIME typo a pou~ijte ovldac prvky v rme ku s tla tky pro zmnu zposobu, jakm jsou zpracovny.lSelect one or several mime types then use the controls in the bottom frame to change how they are processed.ViewActionBaseVybrat stejn Select sameViewActionBaseZPou~t nastaven pracovn plochy jako vchoz"Use Desktop preferences by defaultViewActionBasecurrent value current valueViewActionBase,Hledat regulrn vraz Search regexpWebcacheJEditor internetov vyrovnvac pamtiWebcache editorWebcache,Koprovat adresu (URL)Copy URL WebcacheEditSmazat vbrDelete selection WebcacheEditRejstYkova b~. Nelze upravovat soubor s internetovou vyrovnvac pamt.-Indexer is running. Can't edit webcache file. WebcacheEditNeznm stav rejstYkova e. Nelze upravovat soubor s internetovou vyrovnvac pamt.0Unknown indexer state. Can't edit webcache file. WebcacheEditInternetov vyrovnvac pame byla zmnna. Po zavYen tohoto okna budete muset spustit rejstYkova .RWebcache was modified, you will need to run the indexer after closing this window. WebcacheEditMIMEMIME WebcacheModelURLUrl WebcacheModel VybratChooseconfgui::ConfParamFNW++confgui::ConfParamSLW--confgui::ConfParamSLWZ&ruait&CanceluiPrefsDialogBase&OK&OKuiPrefsDialogBase<BR>
uiPrefsDialogBase <PRE>
uiPrefsDialogBase <PRE> + zalomení + wrap uiPrefsDialogBase
Hledání [Ježíš Kristus] se změní na [Ježíš OR Kristus OR (Ježíš PHRASE 2 Kristus)].
Tímto by měly být silněji zváženy výsledky, které obsahují přesné shody s hledaným slovem. A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)].
This should give higher precedence to the results where the search terms appear exactly as entered. uiPrefsDialogBase
Oddělovač úryvků Abstract snippet separator uiPrefsDialogBase
Zapnout vše Activate All uiPrefsDialogBase
Zapnout odkazy v náhledu. Activate links in preview. uiPrefsDialogBase
Přidat rejstřík Add index uiPrefsDialogBase
Použít změny Apply changes uiPrefsDialogBase
Automatické přidání vět do jednoduchého hledání Automatically add phrase to simple searches uiPrefsDialogBase
Četnost výskytu výrazu (procento) pro automatické tvoření slovních obratů. Autophrase term frequency threshold percentage uiPrefsDialogBase
Panel s tlačítky Buttons Panel uiPrefsDialogBase
Vybrat Choose uiPrefsDialogBase
Vybrat programy editorů Choose editor applications uiPrefsDialogBase
Klepnout pro přidání dalšího rejstříkového adresáře do seznamu. Můžete vybrat buď adresář s nastavením pro Recoll nebo rejstřík Xapian. Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. uiPrefsDialogBase
Zavřít do oznamovací oblasti panelu namísto ukončení. Close to tray instead of exiting. uiPrefsDialogBase
Formát data (strftime(3)) Date format (strftime(3)) uiPrefsDialogBase
Vypnout vše Deactivate All uiPrefsDialogBase
Rozhodnout, zda se dokumentové filtry ukazují jako kulatá tlačítka, rozbalovací seznamy v nástrojovém pruhu, nebo jako nabídka. Decide if document filters are shown as radio buttons, toolbar combobox, or menu. uiPrefsDialogBase
Zakázat automatické doplňování Qt při zadávání v poli pro hledání. Disable Qt autocompletion in search entry. uiPrefsDialogBase
Zahodit změny Discard changes uiPrefsDialogBase
Stanovení, zda se má vytvořit přehled i tehdy, když dokument již nějaký přehled obsahuje. Do we synthetize an abstract even if the document seemed to have one? uiPrefsDialogBase
Stanovení, zda se má vytvořit přehled pro výsledky v souvislosti s parametrem hledání. U velkých dokumentů může být pomalé. Do we try to build abstracts for result list entries by using the context of query terms ?
May be slow for big documents. uiPrefsDialogBase
Styl výběru filtrů dokumentů: Document filter choice style: uiPrefsDialogBase
Vytvářet přehledy dynamicky Dynamically build abstracts uiPrefsDialogBase
Upravit záhlaví html na straně s výsledky Edit result page html header insert uiPrefsDialogBase
Upravit řetězec formátu pro výsledky Edit result paragraph format string uiPrefsDialogBase
Povolit Enable uiPrefsDialogBase
Vnější rejstříky External Indexes uiPrefsDialogBase
Práh četnosti (procento), od kterého se výrazy nepoužívají. Slovní obraty obsahující příliš četné výrazy způsobují výkonnostní potíže. Přeskočené výrazy zvětšují vzdálenost slovního obratu a zmenšují účinnost funkce automatického hledání slovního obratu. Výchozí hodnota je 2 (procenta). Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). uiPrefsDialogBase
Helvetica-10 Helvetica-10 uiPrefsDialogBase
Skrýt zdvojené výsledky. Hide duplicate results. uiPrefsDialogBase
Zvýraznit styl CSS výrazů hledání Highlight CSS style for query terms uiPrefsDialogBase
Je-li zaškrtnuto, budou výsledky se stejným obsahem pod jinými názvy ukázány jen jednou. If checked, results with the same content under different names will only be shown once. uiPrefsDialogBase
Řádky v PRE textu nejsou zalomeny. Při použití BR dojde ke ztrátě některých zalomení. Možná je to, co chcete styl PRE + zalomení. Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. uiPrefsDialogBase
Udělat odkazy uvnitř náhledového okna klepnutelnými a spustit vnější prohlížeč, když je na ně klepnuto. Make links inside the preview window clickable, and start an external browser when they are clicked. uiPrefsDialogBase
Největší velikost textu zvýrazněného pro náhled (megabyty) Maximum text size highlighted for preview (megabytes) uiPrefsDialogBase
Nabídka Menu uiPrefsDialogBase
Počet výsledků na stranu Number of entries in a result page uiPrefsDialogBase
Otevře dialog pro výběr souboru CSS se stylovým listem okna s úryvky Opens a dialog to select the Snippets window CSS style sheet file uiPrefsDialogBase
Otevře dialog pro výběr písma seznamu výsledků Opens a dialog to select the result list font uiPrefsDialogBase
Otevře dialog pro výběr souboru se stylovým listem Opens a dialog to select the style sheet file uiPrefsDialogBase
Překlady cest Paths translations uiPrefsDialogBase
Prostý text do stylu řádku HTML Plain text to HTML line style uiPrefsDialogBase
Upřednostňovat pro náhled HTML před prostým textem Prefer Html to plain text for preview. uiPrefsDialogBase
Kouzelné přípony souborového názvu jazyka hledání Query language magic file name suffixes. uiPrefsDialogBase
Recoll - Uživatelské nastavení Recoll - User Preferences uiPrefsDialogBase
Zapamatovat si stav zapnutí hledání Remember sort activation state. uiPrefsDialogBase
Odstranit ze seznamu. Nemá to žádný účinek na uložený rejstřík. Remove from list. This has no effect on the disk index. uiPrefsDialogBase
Odstranit vybrané Remove selected uiPrefsDialogBase
Nahradit přehledy v dokumentech Replace abstracts from documents uiPrefsDialogBase
Nastavit znovu Reset uiPrefsDialogBase
Nastaví znovu styl okna s úryvky Resets the Snippets window style uiPrefsDialogBase
Nastaví písmo pro seznam s výsledky znovu na výchozí hodnotu Resets the result list font to the system default uiPrefsDialogBase
Nastaví stylový list znovu na výchozí Resets the style sheet to default uiPrefsDialogBase
Seznam s výsledky Result List uiPrefsDialogBase
Písmo pro seznam s výsledky Result list font uiPrefsDialogBase
Parametry hledání Search parameters uiPrefsDialogBase
Nastavit překlady cest pro vybraný rejstřík nebo pro hlavní, pokud žádný vybrán není. Set path translations for the selected index or for the main one if no selection exists. uiPrefsDialogBase
Ukázat ikonu v oznamovací oblasti panelu. Show system tray icon. uiPrefsDialogBase
Ukázat varování při otevírání dočasného souboru. Show warning when opening temporary file. uiPrefsDialogBase
Soubor CSS okna s úryvky Snippets window CSS file uiPrefsDialogBase
Po spuštění automaticky otevřít dialog pro rozšířené hledání Start with advanced search dialog open. uiPrefsDialogBase
Spustit v jednoduchém vyhledávacím režimu Start with simple search mode uiPrefsDialogBase
Jazyk s kmeny slov Stemming language uiPrefsDialogBase
Stylový list Style sheet uiPrefsDialogBase
Soubor se slovy majícími stejný význam Synonyms file uiPrefsDialogBase
Počet souvisejících slov v přehledu Synthetic abstract context words uiPrefsDialogBase
Délka vytvořeného přehledu (počet znaků) Synthetic abstract size (characters) uiPrefsDialogBase
Texty nad tuto velikost nebudou v náhledu zvýrazňovány (příliš pomalé). Texts over this size will not be highlighted in preview (too slow). uiPrefsDialogBase
Slova v seznamu budou v záznamu jazyka hledání automaticky obrácena na věty ext:xxx. The words in the list will be automatically turned to ext:xxx clauses in the query language entry. uiPrefsDialogBase
Přepnout vybrané Toggle selected uiPrefsDialogBase
Rozbalovací seznam v nástrojovém panelu Toolbar Combobox uiPrefsDialogBase
Uživatelské rozhraní User interface uiPrefsDialogBase
Uživatelský styl k použití v okně s úryvky.<br>Poznámka: Záhlaví stránky s výsledky je zahrnuto i v záhlaví okna s úryvky. User style to apply to the snippets window.
Note: the result page header insert is also included in the snippets window header.uiPrefsDialogBaserecoll-1.26.3/qtgui/i18n/recoll_fr.ts0000644000175000017500000060306713566716270014264 00000000000000 AdvSearch All clauses Toutes les clauses Any clause Une des clauses texts textes spreadsheets feuilles de calcul presentations présentations media multimédia messages messages other autres Bad multiplier suffix in size filter Suffixe multiplicateur incorrect dans un filtre de taille (k/m/g/t) text texte spreadsheet feuille de calcul presentation présentation message message AdvSearchBase Advanced search Recherche avancée Restrict file types Restreindre les types de fichier Save as default Sauver comme valeur initiale Searched file types Types de fichier recherchés All ----> Tout ----> Sel -----> Sel -----> <----- Sel <----- Sel <----- All <----- Tout Ignored file types Types de fichiers ignorés Enter top directory for search Entrer le répertoire où démarre la recherche Browse Parcourir Restrict results to files in subtree: Restreindre les résultats aux fichiers de l'arborescence : Start Search Lancer la recherche Search for <br>documents<br>satisfying: Rechercher les <br>documents<br>vérifiant : Delete clause Enlever une clause Add clause Ajouter une clause Check this to enable filtering on file types Cocher pour permettre le filtrage des types de fichiers By categories Par catégories Check this to use file categories instead of raw mime types Cocher pour utiliser les catégories de fichiers au lieu des types mimes Close Fermer All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Tous les champs de droite non vides seront combinés par une conjonction ET (choix "Toutes les clauses") ou OU (choix "Une des clauses"). <br> Les champs de type "Un de ces mots", "Tous ces mots" et "Aucun de ces mots" acceptent un mélange de mots et de phrases contenues dans des apostrophes "une phrase".<br>Les champs non renseignés sont ignorés. Invert Inverser Minimum size. You can use k/K,m/M,g/G as multipliers Taille minimum. Vous pouvez utiliser un suffixe multiplicateur : k/K, m/M, g/G Min. Size Taille Min Maximum size. You can use k/K,m/M,g/G as multipliers Taille Maximum. Vous pouvez utiliser un suffixe multiplicateur : k/K, m/M, g/G Max. Size Taille Max Select Sélectionner Filter Filtrer From À partir de To Jusqu'à Check this to enable filtering on dates Cocher pour activer le filtrage sur les dates Filter dates Filtrer sur les dates Find Trouver Check this to enable filtering on sizes Cocher pour activer le fitrage sur taille fichier Filter sizes Filtrer les tailles ConfIndexW Can't write configuration file Impossible d'écrire le fichier de configuration Global parameters Paramètres globaux Local parameters Paramètres locaux Search parameters Paramètres pour la recherche Top directories Répertoires de départ The list of directories where recursive indexing starts. Default: your home. La liste des répertoires où l'indexation récursive démarre. Défault: votre répertoire par défaut. Skipped paths Chemins ignorés These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. 
The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Ce sont les chemins des répertoires où l'indexation n'ira pas.<br>Les éléments peuvent contenir des caractères joker. Les entrés doivent correspondre aux chemins vus par l'indexeur (ex.: si topdirs comprend '/home/me' et que '/home' est en fait un lien vers '/usr/home', un élément correct pour skippedPaths serait '/home/me/tmp*', et non '/usr/home/me/tmp*') Stemming languages Langue pour l'expansion des termes The languages for which stemming expansion<br>dictionaries will be built. Les langages pour lesquels les dictionnaires d'expansion<br>des termes seront construits. Log file name Nom du fichier journal The file where the messages will be written.<br>Use 'stderr' for terminal output Le nom du fichier ou les messages seront ecrits.<br>Utiliser 'stderr' pour le terminal Log verbosity level Niveau de verbosité This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Cette valeur ajuste la quantite de messages emis,<br>depuis uniquement les erreurs jusqu'a beaucoup de donnees de debug. Index flush megabytes interval Intervalle d'écriture de l'index en mégaoctets This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Ajuste la quantité de données lues entre les écritures sur disque.<br>Contrôle l'utilisation de la mémoire. Défaut 10 Mo Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. C'est le pourcentage d'utilisation disque - utilisation totale, et non taille de l'index - où l'indexation s'arrêtera en erreur.<br>La valeur par défaut de 0 désactive ce test. No aspell usage Pas d'utilisation d'aspell Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Désactiver l'utilisation d'aspell pour générer les approximations orthographiques.<br> Utile si aspell n'est pas installé ou ne fonctionne pas. Aspell language Langue pour aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Langue pour le dictionnaire aspell. La valeur devrait ressembler à 'en' ou 'fr'... <br>Si cette valeur n'est pas positionnée, l'environnement national sera utilisé pour la calculer, ce qui marche bien habituellement. Pour avoir une liste des valeurs possibles sur votre système, entrer 'aspell config' sur une ligne de commande et regarder les fichiers '.dat' dans le répertoire 'data-dir'. Database directory name Répertoire de stockage de l'index The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Le nom d'un répertoire pour stocker l'index<br>Un chemin relatif sera interprété par rapport au répertoire de configuration. La valeur par défaut est 'xapiandb'. 
Unac exceptions Exceptions Unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Ce sont les exceptions au mécanisme de suppression des accents, qui, par défaut et en fonction de la configuration de l'index, supprime tous les accents et effectue une décomposition canonique Unicode. Vous pouvez inhiber la suppression des accents pour certains caractères, en fonction de votre langue, et préciser d'autres décompositions, par exemple pour des ligatures. Dans la liste séparée par des espaces, le premier caractères d'un élément est la source, le reste est la traduction. Process the WEB history queue Traiter la file des pages WEB Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Permet d'indexer les pages Web visitées avec Firefox <br>(il vous faut également installer l'extension Recoll pour Firefox) Web page store directory name Répertoire de stockage des pages WEB The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Le nom d'un répertoire où stocker les copies des pages visitées.<br>Un chemin relatif se réfère au répertoire de configuration. Max. size for the web store (MB) Taille maximale pour le cache Web (Mo) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Les entrées seront recyclées quand la taille sera atteinte.<br>Seule l'augmentation de la taille a un sens parce que réduire la valeur ne tronquera pas un fichier existant (mais gachera de l'espace à la fin). Automatic diacritics sensitivity Sensibilité automatique aux accents <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Activer automatiquement la sensibilité aux accents si le terme recherché contient des accents (saufs pour ceux de unac_except_trans). Sans cette option, il vous faut utiliser le langage de recherche et le drapeau <i>D</i> pour activer la sensibilité aux accents. Automatic character case sensitivity Sensibilité automatique aux majuscules <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Activer automatiquement la sensibilité aux majuscules si le terme de recherche contient des majuscules (sauf en première lettre). Sans cette option, vous devez utiliser le langage de recherche et le drapeau <i>C</i> pour activer la sensibilité aux majuscules. Maximum term expansion count Taille maximum de l'expansion d'un terme <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Nombre maximum de termes de recherche résultant d'un terme entré (par exemple expansion par caractères jokers). 
La valeur par défaut de 10000 est raisonnable et évitera les requêtes qui paraissent bloquées pendant que le moteur parcourt l'ensemble de la liste des termes. Maximum Xapian clauses count Compte maximum de clauses Xapian <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Nombre maximum de clauses Xapian élémentaires générées pour une requête. Dans certains cas, le résultat de l'expansion des termes peut ere multiplicatif, et utiliserait trop de mémoire. La valeur par défaut de 100000 devrait être à la fois suffisante et compatible avec les configurations matérielles typiques. ConfSubPanelW Only mime types Seulement ces types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Une liste exclusive des types MIME à indexer.<br>Rien d'autre ne sera indexé. Normalement vide et inactif Exclude mime types Types exclus Mime types not to be indexed Types MIME à ne pas indexer Max. compressed file size (KB) Taille maximale pour les fichiers à décomprimer (ko) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Cette valeur définit un seuil au delà duquel les fichiers comprimés ne seront pas traités. Utiliser -1 pour désactiver la limitation, 0 pour ne traiter aucun fichier comprimé. Max. text file size (MB) Taille maximale d'un fichier texte (Mo) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Cette valeur est un seuil au delà duquel les fichiers de texte pur ne seront pas indexés. Spécifier -1 pour supprimer la limite. Utilisé pour éviter d'indexer des fichiers monstres. Text file page size (KB) Taille de page pour les fichiers de texte pur (ko) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Si cette valeur est spécifiée et positive, les fichiers de texte pur seront découpés en tranches de cette taille pour l'indexation. Ceci diminue les ressources consommées par l'indexation et aide le chargement pour prévisualisation. Max. filter exec. time (s) Temps d'exécution maximum pour un filtre (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Un filtre externe qui prend plus de temps sera arrêté. Traite le cas rare (possible avec postscript par exemple) où un document pourrait amener un filtre à boucler sans fin. Mettre -1 pour complètement supprimer la limite (déconseillé). 
Global Global CronToolW Cron Dialog Dialogue Cron <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span>: planification de l'indexation périodique (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Chaque champ peut contenir un joker (*), une simple valeur numérique , des listes ponctuées par des virgules (1,3,5) et des intervalles (1-7). 
Plus généralement, les champs seront utilisés <span style=" font-style:italic;">tels quels</span> dans le fichier crontab, et la syntaxe générale crontab peut être utilisée, voir la page de manuel crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Par exemple, en entrant <span style=" font-family:'Courier New,courier';">*</span> dans <span style=" font-style:italic;">Jours, </span><span style=" font-family:'Courier New,courier';">12,19</span> dans <span style=" font-style:italic;">Heures</span> et <span style=" font-family:'Courier New,courier';">15</span> dans <span style=" font-style:italic;">Minutes</span>, recollindex démarrerait chaque jour à 12:15 et 19:15</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Un planning avec des activations très fréquentes est probablement moins efficace que l'indexation au fil de l'eau.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Jours de la semaine (* ou 0-7, 0 ou 7 signifie Dimanche) Hours (* or 0-23) Heures (* ou 0-23) Minutes (0-59) Minutes (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cliquer <span style=" font-style:italic;">Désactiver</span> pour arrêter l'indexation automatique périodique, <span style=" font-style:italic;">Activer</span> pour la démarrer, <span style=" font-style:italic;">Annuler</span> pour ne rien changer.</p></body></html> Enable Activer Disable Désactiver It seems that manually edited entries exist for recollindex, cannot edit crontab Il semble que des entrées créées manuellement existent pour recollindex. Impossible d´éditer le fichier Cron Error installing cron entry. Bad syntax in fields ? Erreur durant l'installation de l'entrée cron. Mauvaise syntaxe des champs ? 
EditDialog Dialog Dialogue EditTrans Source path Chemin source Local path Chemin local Config error Erreur config Original path Chemin Originel EditTransBase Path Translations Traductions de chemins Setting path translations for Ajustement des traductions de chemins pour Select one or several file types, then use the controls in the frame below to change how they are processed Sélectionner un ou plusieurs types de fichiers, puis utiliser les contrôles dans le cadre ci-dessous pour changer leur traitement Add Ajouter Delete Supprimer Cancel Annuler Save Sauvegarder FirstIdxDialog First indexing setup Paramétrage de la première indexation <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Il semble que l'index pour cette configuration n'existe pas encore.</span><br /><br />Si vous voulez simplement indexer votre répertoire avec un jeu raisonnable de valeurs par défaut, cliquer le bouton <span style=" font-style:italic;">Démarrer l'indexation maintenant</span>. Vous pourrez ajuster les détails plus tard. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Si vous voulez plus de contrôle, utilisez les liens qui suivent pour ajuster la configuration et le planning d'indexation.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ces outils peuvent être accédés plus tard à partir du menu <span style=" font-style:italic;">Preferences</span>.</p></body></html> Indexing configuration Indexation This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Vous pourrez ajuster les répertoires que vous voulez indexer, et d'autres paramètres comme les schémas de noms ou chemins de fichiers exclus, les jeux de caractères par défaut, etc. Indexing schedule Planning de l'indexation This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Vous pourrez choisir entre l'indexation à intervalles fixes ou au fil de l'eau, et définir un planning pour la première (basé sur l'utilitaire cron). Start indexing now Démarrer l'indexation maintenant FragButs %1 not found. %1 non trouvé %1: %2 %1 : %2 Fragment Buttons Fragments de recherche Query Fragments Fragments de recherche IdxSchedW Index scheduling setup Paramétrage du planning d'indexation <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). 
</p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">L'indexation <span style=" font-weight:600;">Recoll</span> peut fonctionner en permanence, traitant les fichiers dès qu'ils sont modifiés, ou être exécutée à des moments prédéterminés. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Une lecture du manuel peut vous aider à choisir entre ces approches (presser F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Cet outil peut vous aider à planifier l'indexation périodique, ou configurer un démarrage automatique de l'indexation au fil de l'eau quand vous vous connectez (ou les deux, ce qui est rarement pertinent). </p></body></html> Cron scheduling Planning Cron The tool will let you decide at what time indexing should run and will install a crontab entry. Le dialogue vous permettra de déterminer à quelle heure l'indexation devra démarrer et installera une entrée crontab. Real time indexing start up Démarrage de l'indexation au fil de l'eau Decide if real time indexing will be started when you log in (only for the default index). Déterminer si l'indexation au fil de l'eau démarre quand vous vous connectez (pour l'index par défaut). ListDialog Dialog Dialogue GroupBox GroupBox Main No db directory in configuration Répertoire de la base de données non défini dans la configuration Could not open database in Impossible d'ouvrir la base dans . Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed. Cliquer Annuler pour pouvoir éditer le fichier de configuration avant que l'indexation ne démarre, ou Ok pour continuer. Configuration problem (dynconf Problème de configuration (dynconf) "history" file is damaged or un(read)writeable, please check or remove it: Le fichier d'historique est illisible, le verifier ou l'effacer : "history" file is damaged, please check or remove it: Le fichier "history" est corrompu. Le detruire : Preview &Search for: &Rechercher : &Next &Suivant &Previous &Précédent Match &Case Respecter la &casse Clear Effacer Creating preview text Création du texte pour la prévisualisation Loading preview text into editor Chargement du texte de la prévisualisation Cannot create temporary directory Impossible de créer le répertoire temporaire Cancel Annuler Close Tab Fermer l'onglet Missing helper program: Programmes filtres externes manquants : Can't turn doc into internal representation for Impossible de traduire le document en représentation interne pour Cannot create temporary directory: Impossible de créer un répertoire temporaire Error while loading file Erreur de chargement du fichier Form Ecran Tab 1 Tab 1 Open Ouvrir Canceled Annulé Error loading the document: file missing. Erreur de chargement : fichier manquant. Error loading the document: no permission. Erreur de chargement : accès refusé. Error loading: backend not configured. 
Erreur de chargement : gestionnaire de stockage non configuré. Error loading the document: other handler error<br>Maybe the application is locking the file ? Erreur de chargement : erreur indéterminée<br>Fichier verrouillé par l'application ? Error loading the document: other handler error. Erreur de chargement : erreur indéterminée. <br>Attempting to display from stored text. <br>Essai d'affichage à partir du texte stocké. Could not fetch stored text Impossible de récupérer le texte stocké PreviewTextEdit Show fields Afficher les valeurs des champs Show main text Afficher le corps du texte Print Imprimer Print Current Preview Imprimer la fenêtre de prévisualisation Show image Afficher l'image Select All Tout sélectionner Copy Copier Save document to file Sauvegarder le document Fold lines Replier les lignes Preserve indentation Préserver l'indentation Open document Ouvrir le document QObject Global parameters Paramètres globaux Local parameters Paramètres locaux <b>Customised subtrees <b>Répertoires avec paramètres spécifiques The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. La liste des sous-répertoires de la zone indexée<br>où certains paramètres sont redéfinis. Défaut : vide. <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons. <i>Les paramètres qui suivent sont définis soit globalement, si la sélection dans la liste ci-dessus est vide ou réduite à la ligne vide, soit pour le répertoire sélectionné. Vous pouvez ajouter et enlever des répertoires en cliquant les boutons +/-. Skipped names Noms ignorés These are patterns for file or directory names which should not be indexed. Canevas définissant les fichiers ou répertoires qui ne doivent pas etre indexés. Default character set Jeu de caractères par défaut This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Jeu de caractères utilisé pour lire les fichiers qui ne l'identifient pas de manière interne, par exemple les fichiers de texte pur. <br>La valeur par défaut est vide, et le programme utilise l'environnement. Follow symbolic links Suivre les liens symboliques Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Indexer les fichiers et répertoires pointés par les liens symboliques. Pas fait par défaut pour éviter les indexations multiples Index all file names Indexer tous les noms de fichiers Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Indexer les noms des fichiers dont le contenu n'est pas identifié ou traité (pas de type mime, ou type non supporté). Vrai par défaut Beagle web history Queue Web Search parameters Paramètres pour la recherche Web history Historique Web Default<br>character set Jeu de caractères<br>par défaut Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. 
Jeu de caractères utilisé pour lire les fichiers qui n'identifient pas de manière interne leur encodage, par exemple les fichiers texte purs.<br>La valeur par défaut est vide, et la valeur obtenue à partir de l'environnement est utilisée dans ce cas. Ignored endings Suffixes ignorés These are file name endings for files which will be indexed by content only (no MIME type identification attempt, no decompression, no content indexing. Suffixes correspondants `a des fichiers qui seront indexe's par nom seulement (pas d'identification de type MIME, pas d'ecompression, pas d'indexation du contenu). These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). Suffixes sélectionnant des fichiers qui seront indexés uniquement sur leur nom (pas d'identification de type MIME, pas de décompression, pas d'indexation du contenu). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. <i>Les paramètres qui suivent sont définis soit globalement, si la sélection dans la liste ci-dessus est vide ou réduite à la ligne vide, soit pour le répertoire sélectionné. Vous pouvez ajouter et enlever des répertoires en cliquant les boutons +/-. QWidget Create or choose save directory Créer ou choisir un répertoire d'écriture Choose exactly one directory Choisir exactement un répertoire Could not read directory: Impossible de lire le répertoire : Unexpected file name collision, cancelling. Collision de noms inattendue, abandon. Cannot extract document: Impossible d'extraire le document : &Preview &Voir contenu &Open &Ouvrir Open With Ouvrir Avec Run Script Exécuter le Script Copy &File Name Copier le nom de &Fichier Copy &URL Copier l'&Url &Write to File &Sauver sous Save selection to files Sauvegarder la sélection courante dans des fichiers Preview P&arent document/folder Prévisualiser le document p&arent &Open Parent document/folder &Ouvrir le document/dossier parent Find &similar documents Chercher des documents &similaires Open &Snippets window Ouvrir la fenêtre des e&xtraits Show subdocuments / attachments Afficher les sous-documents et attachements QxtConfirmationMessage Do not show again. Ne plus afficher. RTIToolW Real time indexing automatic start Démarrage automatique de l'indexation au fil de l'eau <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. 
You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">L'indexation <span style=" font-weight:600;">Recoll</span> peut être configurer pour s'exécuter en arrière plan, mettant à jour l'index au fur et à mesure que des documents sont modifiés. Vous y gagnez un index toujours à jour, mais des ressources systême (mémoire et processeur) sont consommées en permanence.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Démarrer le démon d'indexation quand je me connecte. Also start indexing daemon right now. Également démarrer le démon maintenant. Replacing: Remplacement de : Replacing file Remplacement du fichier Can't create: Impossible de créer : Warning Attention Could not execute recollindex Impossible d'exécuter recollindex Deleting: Effacement : Deleting file Effacement du fichier Removing autostart Enlèvement de l'autostart Autostart file deleted. Kill current process too ? Fichier autostart détruit. Arrêter le process en cours ? RclMain About Recoll À propos de Recoll Executing: [ Exécution de : [ Cannot retrieve document info from database Impossible d'accéder au document dans la base Warning Attention Can't create preview window Impossible de créer la fenêtre de visualisation Query results Résultats de la recherche Document history Historique des documents consultés History data Données d'historique Indexing in progress: Indexation en cours : Files Fichiers Purge Nettoyage Stemdb Base radicaux Closing Fermeture Unknown Inconnue This search is not active any more Cette recherche n'est plus active Can't start query: Impossible de démarrer la recherche : Bad viewer command line for %1: [%2] Please check the mimeconf file Mauvaise commande pour %1 : [%2] Vérifier le fichier mimeconf Cannot extract document or create temporary file Impossible d'extraire le document ou de créer le fichier temporaire (no stemming) (pas d'expansion) (all languages) (tous les langages) error retrieving stemming languages impossible de trouver la liste des langages d'expansion Update &Index Mettre à jour l'&index Indexing interrupted Indexation interrompue Stop &Indexing Arrêter l'&Indexation All Tout media multimédia message message other autres presentation présentation spreadsheet feuille de calcul text texte sorted trié filtered filtré External applications/commands needed and not found for indexing your file types: Applications externes non trouvees pour indexer vos types de fichiers : No helpers found missing Pas d'applications manquantes Missing helper programs Applications manquantes Save file dialog Dialogue de sauvegarde de fichier Choose a file name to save under Choisir un nom pour le nouveau fichier Document category filter Filtre de catégories de documents No external viewer configured for mime type [ Pas de visualiseur 
configuré pour le type MIME [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? Le visualiseur spécifié dans mimeview pour %1 : %2 est introuvable. Voulez vous démarrer le dialogue de préférences ? Can't access file: Impossible d'accéder au fichier : Can't uncompress file: Impossible de décomprimer le fichier : Save file Sauvegarder le fichier Result count (est.) Nombre de résultats (estimation) Query details Détails de la recherche Could not open external index. Db not open. Check external indexes list. Impossible d'ouvrir un index externe. Base non ouverte. Verifier la liste des index externes. No results found Aucun résultat trouvé None Rien Updating Mise à jour Done Fini Monitor Moniteur Indexing failed L'indexation a échoué The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Le processus d'indexation en cours n'a pas été démarré depuis cette interface. Cliquer OK pour le tuer quand même, ou Annuler pour le laisser tranquille. Erasing index Effacement de l'index Reset the index and start from scratch ? Effacer l'index et redémarrer de zéro ? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Requête en cours.<br>En raison de restrictions internes, <br>annuler terminera l'exécution du programme Error Erreur Index not open Index pas ouvert Index query error Erreur de la recherche sur l'index Indexed Mime Types Types MIME indexés Content has been indexed for these MIME types: Du contenu a été indexé pour ces types MIME : Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Index pas à jour pour ce fichier. Risque d'afficher une entrée incorrecte. Cliquer OK pour mettre à jour l'index pour ce fichier, puis redémarrer la requête quand l'indexation est terminée. Sinon, Annuler. Can't update index: indexer running Impossible de mettre à jour l'index : un indexeur est déjà actif Indexed MIME Types Types MIME indexés Bad viewer command line for %1: [%2] Please check the mimeview file Ligne de commande incorrecte pour %1 : [%2]. Vérifier le fichier mimeview. Viewer command line for %1 specifies both file and parent file value: unsupported La ligne de commande pour %1 spécifie à la fois le fichier et son parent : non supporté Cannot find parent document Impossible de trouver le document parent Indexing did not run yet L'indexation n'a pas encore eu lieu External applications/commands needed for your file types and not found, as stored by the last indexing pass in Applications et commandes externes nécessaires pour vos types de documents, et non trouvées, telles qu'enregistrées par la dernière séquence d'indexation dans. Index not up to date for this file. Refusing to risk showing the wrong entry. L'index n'est pas à jour pour ce fichier. Il y aurait un risque d'afficher une entrée incorrecte Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel. Cliquer OK pour mettre à jour l'index pour ce fichier, puis relancer la recherche quand l'indexation est terminée. Sinon cliquer Annuler Indexer running so things should improve when it's done L'indexeur est en cours d'exécution, le fichier devrait être mis à jour Sub-documents and attachments Sous-documents et attachements Document filter Filtre de documents Index not up to date for this file. 
Refusing to risk showing the wrong entry. Index pas à jour pour ce fichier. Je ne veux pas risquer d'afficher la mauvaise entrée. Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. Cliquer OK pour mettre à jour l'index pour ce fichier, puis attendez la fin de l'indexation pour relancer la recherche. The indexer is running so things should improve when it's done. L'indexeur est actif, les choses devraient aller mieux quand il aura fini. The document belongs to an external indexwhich I can't update. Le document appartient à un index externe que je ne peux pas mettre à jour. Click Cancel to return to the list. Click Ignore to show the preview anyway. Cliquer Annuler pour retourner à la liste. Cliquer Ignorer pour afficher tout de même. Duplicate documents Documents identiques These Urls ( | ipath) share the same content: Ces URLs(| ipath) partagent le même contenu : Bad desktop app spec for %1: [%2] Please check the desktop file Mauvaise spécification d'application pour %1 : [%2] Merci de vérifier le fichier desktop Bad paths Chemins inexistants Bad paths in configuration file: Chemins inexistants définis dans le fichier de configuration : Selection patterns need topdir Les schémas de sélection nécessitent un répertoire de départ Selection patterns can only be used with a start directory Les schémas de sélection ne peuvent être utilisés qu'avec un répertoire de départ No search Pas de recherche No preserved previous search Pas de recherche sauvegardée Choose file to save Choisir un fichier pour sauvegarder Saved Queries (*.rclq) Recherches Sauvegardées (*.rclq) Write failed Échec d'écriture Could not write to file Impossible d'écrire dans le fichier Read failed Erreur de lecture Could not open file: Impossible d'ouvrir le fichier : Load error Erreur de chargement Could not load saved query Le chargement de la recherche sauvegardée a échoué Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Ouverture d'un fichier temporaire. Les modification seront perdues<br/>si vous ne les sauvez pas dans un emplacement permanent. Do not show this warning next time (use GUI preferences to restore). Ne plus afficher ce message (utiliser le dialogue de préférences pour rétablir). Disabled because the real time indexer was not compiled in. Désactivé parce que l'indexeur au fil de l'eau n'est pas disponible dans cet exécutable. This configuration tool only works for the main index. Cet outil de configuration ne travaille que sur l'index principal. The current indexing process was not started from this interface, can't kill it Le processus d'indexation en cours n'a pas été démarré depuis cette interface, impossible de l'arrêter The document belongs to an external index which I can't update. Le document appartient à un index externe que je ne peux pas mettre à jour. Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session). Cliquer Annulation pour retourner à la liste.<br>Cliquer Ignorer pour afficher la prévisualisation de toutes facons (mémoriser l'option pour la session). Index scheduling Programmation de l'indexation Sorry, not available under Windows for now, use the File menu entries to update the index Désolé, pas disponible pour Windows pour le moment, utiliser les entrées du menu fichier pour mettre à jour l'index Can't set synonyms file (parse error?) Impossible d'ouvrir le fichier des synonymes (erreur dans le fichier?) 
Index locked L'index est verrouillé Unknown indexer state. Can't access webcache file. État de l'indexeur inconnu. Impossible d'accéder au fichier webcache. Indexer is running. Can't access webcache file. L'indexeur est actif. Impossible d'accéder au fichier webcache. with additional message: avec le message complémentaire : Non-fatal indexing message: Erreur d'indexation non fatale : Types list empty: maybe wait for indexing to progress? Liste vide : attendre que l'indexation progresse ? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported La ligne de commande pour %1 specifie l'utilisation du fichier parent, mais l'URL est http[s] : ne peut pas marcher Tools Outils Results Résultats (%d documents/%d files/%d errors/%d total files) (%d documents/%d fichiers/%d erreurs/%d fichiers en tout) (%d documents/%d files/%d errors) (%d documents/%d fichiers/%d erreurs) Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Chemins vides ou non existants dans le fichier de configuration. Cliquer sur Ok pour démarrer l'indexation (les données absentes ne seront pas éliminées de l'index) : Indexing done Indexation terminée Can't update index: internal error Impossible de mettre à jour l'index : erreur interne Index not up to date for this file.<br> L'index n'est pas à jour pour ce fichier.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> <em>Par ailleurs, il semble que la dernière mise à jour pour ce fichier a échoué.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Cliquer Ok pour essayer de mettre à jour l'index. Vous devrez lancer la recherche à nouveau quand l'indexation sera terminée.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> Cliquer Annuler pour retourner à la liste.<br>Cliquer Ignorer pour afficher la prévisualisation (et enregister l'option pour cette session). Il y a un risque d'afficher le mauvais document.<br/> documents documents document document files fichiers file fichier errors erreurs error erreur total files) fichiers totaux) No information: initial indexing not yet performed. Pas de données : l'indexation initiale n'est pas faite. 
RclMainBase Previous page Page précédente Next page Page suivante &File &Fichier E&xit &Quitter &Tools &Outils &Help &Aide &Preferences &Préférences Search tools Outils de recherche Result list Liste de résultats &About Recoll &A propos de Recoll Document &History &Historique des documents Document History Historique des documents &Advanced Search Recherche &Avancée Advanced/complex Search Recherche Avancée &Sort parameters Paramètres pour le &tri Sort parameters Paramètres pour le tri Next page of results Page suivante Previous page of results Page précédente &Query configuration &Recherche &User manual &Manuel Recoll Recoll Ctrl+Q Ctrl+Q Update &index &Indexer Term &explorer &Exploration de l'index Term explorer tool Outil d'exploration de l'index External index dialog Index externes &Erase document history &Effacer l'historique des documents First page Première page Go to first page of results Aller à la première page de résultats &Indexing configuration Configuration d'&Indexation All Tout &Show missing helpers Afficher les application&s manquantes PgDown PgDown Shift+Home, Ctrl+S, Ctrl+Q, Ctrl+S Shift+Home, Ctrl+S, Ctrl+Q, Ctrl+S PgUp PgUp &Full Screen &Plein écran F11 F11 Full Screen Plein écran &Erase search history &Effacer l'historique des recherches sortByDateAsc sortByDateAsc Sort by dates from oldest to newest Trier par date des plus anciennes aux plus récentes sortByDateDesc sortByDateDesc Sort by dates from newest to oldest Trier par date des plus récentes aux plus anciennes Show Query Details Afficher la requête en détails Show results as table Afficher les résultats en tableau &Rebuild index &Reconstruire l'index &Show indexed types &Afficher les types indexés Shift+PgUp Shift+PgUp &Indexing schedule &Planning d'indexation E&xternal index dialog Index e&xternes &Index configuration &Index &GUI configuration Interface utilisateur &Results &Résultats Sort by date, oldest first Trier par date, le plus ancien en premier Sort by date, newest first Trier par date, le plus récent en premier Show as table Afficher comme un tableau Show results in a spreadsheet-like table Montrer les résultats dans un tableau Save as CSV (spreadsheet) file Sauver en format CSV (fichier tableur) Saves the result into a file which you can load in a spreadsheet Sauvegarde les résultats dans un fichier qu'il sera possible de charger dans un tableur Next Page Page suivante Previous Page Page précédente First Page Première page Query Fragments Fragments de recherche With failed files retrying Avec re-traitement des fichiers en échec Next update will retry previously failed files La prochaine mise à jour de l'index essaiera de traiter les fichiers actuellement en échec Save last query Sauvegarder la dernière recherche Load saved query Charger une recherche sauvegardée Special Indexing Indexation spéciale Indexing with special options Indexation avec des options spéciales Indexing &schedule Programme d'indexation Enable synonyms Activer les synonymes &View &Voir Missing &helpers &Traducteurs manquants Indexed &MIME types Types &MIME indexés Index &statistics &Statistiques de l'index Webcache Editor Editeur &Webcache Trigger incremental pass Déclencher une indexation incrémentale RclTrayIcon Restore Restaurer Quit Quitter RecollModel Abstract Extrait Author Auteur Document size Taille document Document date Date document File size Taille fichier File name Nom de fichier File date Date fichier Ipath Ipath Keywords Mots clef Mime type Type Mime Original character set Jeu de caractères d'origine Relevancy 
rating Pertinence Title Titre URL URL Mtime Mtime Date Date Date and time Date et heure Ipath Ipath MIME type Type MIME Can't sort by inverse relevance Impossible de trier par pertinence inverse ResList Result list Liste de résultats Unavailable document Document inaccessible Previous Précédent Next Suivant <p><b>No results found</b><br> <p><b>Aucun résultat</b><br> &Preview &Voir contenu Copy &URL Copier l'&Url Find &similar documents Chercher des documents &similaires Query details Détail de la recherche (show query) (requête) Copy &File Name Copier le nom de &Fichier filtered filtré sorted trié Document history Historique des documents consultés Preview Prévisualisation Open Ouvrir <p><i>Alternate spellings (accents suppressed): </i> <p><i>Orthographes proposés (sans accents) : </i> &Write to File &Sauver sous Preview P&arent document/folder Prévisualiser le document p&arent &Open Parent document/folder &Ouvrir le document parent &Open &Ouvrir Documents Documents out of at least parmi au moins for pour <p><i>Alternate spellings: </i> <p><i>Orthographes proposés : </i> Open &Snippets window Ouvrir la fenêtre des e&xtraits Duplicate documents Documents identiques These Urls ( | ipath) share the same content: Ces URLs(| ipath) partagent le même contenu Result count (est.) Nombre de résultats (est.) Snippets Extraits ResTable &Reset sort &Revenir au tri par pertinence &Delete column &Enlever la colonne Add " Ajouter " " column " colonne Save table to CSV file Sauvegarder dans un fichier CSV Can't open/create file: Impossible d'ouvrir ou créer le fichier : &Preview &Voir contenu &Open &Ouvrir Copy &File Name Copier le nom de &Fichier Copy &URL Copier l'&Url &Write to File &Sauver sous Find &similar documents Chercher des documents &similaires Preview P&arent document/folder Prévisualiser le document p&arent &Open Parent document/folder &Ouvrir le document parent &Save as CSV &Sauvegarder en CSV Add "%1" column Ajouter une colonne "%1" ResTableDetailArea &Preview &Voir contenu &Open &Ouvrir Copy &File Name Copier le nom de &Fichier Copy &URL Copier l'&Url &Write to File &Sauver sous Find &similar documents Chercher des documents &similaires Preview P&arent document/folder Prévisualiser le document p&arent &Open Parent document/folder &Ouvrir le document parent ResultPopup &Preview &Voir contenu &Open &Ouvrir Copy &File Name Copier le nom de &Fichier Copy &URL Copier l'&Url &Write to File &Sauver sous Save selection to files Sauvegarder la sélection courante dans des fichiers Preview P&arent document/folder Prévisualiser le document p&arent &Open Parent document/folder &Ouvrir le document parent Find &similar documents Chercher des documents &similaires Open &Snippets window Ouvrir la fenêtre des e&xtraits Show subdocuments / attachments Afficher les sous-documents et attachements Open With Ouvrir Avec Run Script Exécuter le Script SSearch Any term Certains termes All terms Tous les termes File name Nom de fichier Completions Complétions Select an item: Sélectionner un élément : Too many completions Trop de complétions possibles Query language Language d'interrogation Bad query string Requête non reconnue Out of memory Plus de mémoire disponible Enter query language expression. 
Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> No actual parentheses allowed.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Entrer une expression du langage de recherche. Antisèche :<br> <i>term1 term2</i> : 'term1' ET 'term2' champ non spécifié.<br> <i>field:term1</i> : 'term1' recherche dans le champ 'field'.<br> Noms de champs standards (utiliser les mots anglais)/alias:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-champs: dir, mime/format, type/rclcat, date.<br> Examples d'intervalles de dates: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> NE PAS mettre les parenthèses.<br> <i>"term1 term2"</i> : phrase exacte. Options::<br> <i>"term1 term2"p</i> : proximité (pas d'ordre).<br> Utiliser le lien <b>Afficher la requête en détail</b> en cas de doute sur les résultats et consulter le manuel (en anglais) (&lt;F1>) pour plus de détails. Enter file name wildcard expression. Entrer un nom de fichier (caractères jokers possibles) Enter search terms here. Type ESC SPC for completions of current term. Entrer les termes recherchés ici. Taper ESC SPC pour afficher les mots commençant par l'entrée en cours. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Entrer une expression du langage de recherche. Antisèche :<br> <i>term1 term2</i> : 'term1' ET 'term2' champ non spécifié.<br> <i>field:term1</i> : 'term1' recherche dans le champ 'field'.<br> Noms de champs standards (utiliser les mots anglais)/alias:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-champs: dir, mime/format, type/rclcat, date.<br> Examples d'intervalles de dates: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> NE PAS mettre les parenthèses.<br> <i>"term1 term2"</i> : phrase exacte. Options::<br> <i>"term1 term2"p</i> : proximité (pas d'ordre).<br> Utiliser le lien <b>Afficher la requête en détail</b> en cas de doute sur les résultats et consulter le manuel (en anglais) (&lt;F1>) pour plus de détails. 
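For illustration, here is one query line that combines only constructs named in the cheat sheet above; the field values (smith, pdf, the dates) are arbitrary placeholders, and it assumes OR combines a quoted phrase with a plain term the same way it combines two plain terms:

    author:smith ext:pdf date:2009-03-01/P2M "term1 term2"p OR term3

Read with the precedence rule given above, this matches documents whose author field contains smith, with extension pdf, dated within two months of 2009-03-01, and containing either the unordered phrase "term1 term2" or the term term3.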
Stemming languages for stored query: Les langages d'expansion pour la recherche sauvegardée : differ from current preferences (kept) diffèrent des préférences en cours (conservées) Auto suffixes for stored query: L'option de suffixe automatique pour la recherche sauvegardée : External indexes for stored query: Les index externes pour la recherche sauvegardée : Autophrase is set but it was unset for stored query L'option autophrase est positionnée, mais ne l'était pas pour la recherche sauvegardée Autophrase is unset but it was set for stored query L'option autophrase est désactivée mais était active pour la recherche sauvegardée Enter search terms here. Entrer les termes recherchés ici. SSearchBase SSearchBase SSearchBase Clear Effacer Ctrl+S Ctrl+S Erase search entry Effacer l'entrée Search Rechercher Start query Démarrer la recherche Enter search terms here. Type ESC SPC for completions of current term. Entrer les termes recherchés ici. Taper ESC SPC pour afficher les mots commençant par l'entrée en cours. Choose search type. Choisir le type de recherche. Show query history Afficher l'historique des recherches Enter search terms here. Entrer les termes recherchés ici. SearchClauseW SearchClauseW SearchClauseW Any of these Un de ces mots All of these Tous ces mots None of these Aucun de ces mots This phrase Cette phrase Terms in proximity Termes en proximité File name matching Modèle de nom de fichier Select the type of query that will be performed with the words Sélectionner le type de requête à effectuer avec les mots Number of additional words that may be interspersed with the chosen ones Nombre de mots additionnels qui peuvent se trouver entre les termes recherchés In field Sans champ No field Sans champ Any Certains All Tout None Rien Phrase Phrase Proximity Proximité File name Nom de fichier Snippets Snippets Extraits X X Find: Trouver : Next Suivant Prev Précédent SnippetsW Search Rechercher <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p> <p>Désolé, aucun résultat trouvé dans les limites de recherche. Peut-être que le document est très gros et que le générateur d'extraits s'est perdu...<p> Sort By Relevance Trier Par Pertinence Sort By Page Trier Par Page SortForm Date Date Mime type Type Mime SortFormBase Sort Criteria Critères de tri Sort the Trier les most relevant results by: résultats les plus pertinents en fonction de : Descending Décroissant Close Fermer Apply Appliquer SpecIdxW Special Indexing Indexation spéciale Do not retry previously failed files. Ne pas réessayer les fichiers en erreur. Else only modified or failed files will be processed. Sinon, seulement les fichiers modifiés ou en erreur seront traités. Erase selected files data before indexing. Effacer les données pour les fichiers sélectionnés avant de réindexer. Directory to recursively index Répertoire à indexer récursivement Browse Parcourir Start directory (else use regular topdirs): Répertoire de départ (sinon utiliser la variable normale topdirs) : Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Laisser vide pour sélectionner tous les fichiers. Vous pouvez utiliser plusieurs schémas séparés par des espaces.<br>Les schémas contenant des espaces doivent ere enclos dans des apostrophes doubles.<br>Ne peut être utilisé que si le répertoire de départ est positionné. 
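As a concrete illustration of the selection-pattern syntax described just above (the values are invented), an entry such as

    *.txt "project report*.odt"

would restrict the special indexing run to .txt files plus .odt files whose names start with "project report", the second pattern being double-quoted because it contains an embedded space.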
Selection patterns: Schémas de sélection : Top indexed entity Objet indexé de démarrage Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Répertoire à indexer récursivement. Il doit être à l'intérieur de la zone normale<br>définie par la variable topdirs. Retry previously failed files. Réessayer les fichiers précédemment en erreur. Start directory. Must be part of the indexed tree. We use topdirs if empty. Répertoire de départ. Doit faire partie de la zone indexée. topdirs est utilisé si non renseigné. Start directory. Must be part of the indexed tree. Use full indexed area if empty. Répertoire de départ. Doit faire partie de la zone indexée. Traite toute la zone si non renseigné. SpellBase Term Explorer Explorateur d'index &Expand &Dérivés Alt+E Alt+D &Close &Fermer Alt+C Alt+F Term Terme No db info. Pas d'information sur la base. Doc. / Tot. Doc. / Tot. Match Faire correspondre Case Majuscules/Minuscules Accents Accents SpellW Wildcards Wildcards Regexp Expression régulière Spelling/Phonetic Orthographe/Phonétique Aspell init failed. Aspell not installed? Erreur d'initialisation aspell. Il n'est peut-être pas installé? Aspell expansion error. Erreur aspell. Stem expansion Expansion grammaticale error retrieving stemming languages Impossible de former la liste des langages d'expansion No expansion found Pas de résultats Term Terme Doc. / Tot. Doc. / Tot. Index: %1 documents, average length %2 terms Index : %1 documents, taille moyenne %2 termes Index: %1 documents, average length %2 terms.%3 results Index : %1 documents, longueur moyenne %2 termes. %3 résultats %1 results %1 résultats List was truncated alphabetically, some frequent La liste a été tronquée par ordre alphabétique. Certains termes fréquents terms may be missing. Try using a longer root. pourraient être absents. Essayer d'utiliser une racine plus longue. Show index statistics Afficher les statistiques de l'index Number of documents Nombre de documents Average terms per document Nombre moyen de termes par document Smallest document length Longueur du plus petit document Longest document length Longueur du plus grand document Database directory size Taille occupée par l'index MIME types: Types MIME : Item Élément Value Valeur Smallest document length (terms) Taille minimale document (termes) Longest document length (terms) Taille maximale document (termes) Results from last indexing: Résultats de la dernière indexation : Documents created/updated Documents créés ou mis à jour Files tested Fichiers testés Unindexed files Fichiers non indexés List files which could not be indexed (slow) Lister les fichiers qui n'ont pas pu être traités (lent) Spell expansion error. Erreur dans les suggestions orthographiques. UIPrefsDialog The selected directory does not appear to be a Xapian index Le répertoire sélectionné ne semble pas être un index Xapian This is the main/local index! C'est l'index principal ! 
The selected directory is already in the index list Le répertoire sélectionné est déjà dans la liste Select xapian index directory (ie: /home/buddy/.recoll/xapiandb) Sélectionner un répertoire contenant un index Xapian (ex: /home/monAmi/.recoll/xapiandb) error retrieving stemming languages Impossible de former la liste des langues pour l'expansion grammaticale Choose Choisir Result list paragraph format (erase all to reset to default) Format de paragraphe de la liste de résultats (tout effacer pour revenir à la valeur par défaut) Result list header (default is empty) En-tête HTML (la valeur par défaut est vide) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) Sélection un répertoire de configuration Recoll ou un répertoire d'index Xapian (Ex : /home/moi/.recoll ou /home/moi/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read Le repertoire selectionne ressemble a un repertoire de configuration Recoll mais la configuration n'a pas pu etre chargee At most one index should be selected Selectionner au plus un index Cant add index with different case/diacritics stripping option Impossible d'ajouter un index avec une option differente de sensibilite a la casse et aux accents Default QtWebkit font Fonte par défaut de QtWebkit Any term Certains termes All terms Tous les termes File name Nom de fichier Query language Language d'interrogation Value from previous program exit Valeur obtenue de la dernière exécution UIPrefsDialogBase User interface Interface utilisateur Number of entries in a result page Nombre de résultats par page Result list font Fonte pour la liste de résultats Helvetica-10 Helvetica-10 Opens a dialog to select the result list font Ouvre une fenetre permettant de changer la fonte Reset Réinitialiser Resets the result list font to the system default Réinitialiser la fonte à la valeur par défaut Auto-start simple search on whitespace entry. Démarrer automatiquement une recherche simple sur entrée d'un espace. Start with advanced search dialog open. Panneau de recherche avancée ouvert au démarrage Start with sort dialog open. Panneau de tri ouvert au démarrage. Search parameters Paramètres pour la recherche Stemming language Langue pour l'expansion des termes Dynamically build abstracts Construire dynamiquement les résumés Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Décide si des résumés seront construits à partir du contexte des termes de recherche. Peut ralentir l'affichage si les documents sont gros. Replace abstracts from documents Remplacer les résumés existant dans les documents Do we synthetize an abstract even if the document seemed to have one? Est-ce qu'un résumé doit etre synthétisé meme dans le cas ou le document original en avait un? 
Synthetic abstract size (characters) Taille du résumé synthétique (caractères) Synthetic abstract context words Nombre de mots de contexte par occurrence de terme dans le résumé External Indexes Index externes Add index Ajouter un index Select the xapiandb directory for the index you want to add, then click Add Index Sélectionner le répertoire xapian de l'index à ajouter, puis cliquer Ajouter Index Browse Parcourir &OK &OK Apply changes Appliquer les modifications &Cancel &Annuler Discard changes Suivant Result paragraph<br>format string Chaîne de formatage<br>des paragraphes résultats Automatically add phrase to simple searches Ajouter automatiquement une phrase aux recherches simples. A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Une recherche pour [vin rouge] (2 mots) sera complétée comme [vin OU rouge OU (vin PHRASE 2 rouge)].<br> Ceci devrait donner une meilleure pertinence aux résultats où les termes recherchés apparaissent exactement et dans l'ordre. User preferences Préférences utilisateur Use desktop preferences to choose document editor. Utiliser les réglages du bureau pour choisir l'application de visualisation. External indexes Index externes Toggle selected Changer l'état pour les entrées sélectionnées Activate All Tout activer Deactivate All Tout désactiver Remove selected Effacer la sélection Remove from list. This has no effect on the disk index. Oter de la liste. Sans effet sur les données stockées. Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Definit le format des paragraphes de la liste de resultats. Utilise le format html qt et des directives de substitution de type printf:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Remember sort activation state. Mémoriser l'état d'activation du tri Maximum text size highlighted for preview (megabytes) Taille maximum des textes surlignes avant previsualisation (MO) Texts over this size will not be highlighted in preview (too slow). Les texte plus gros ne seront pas surlignes dans la previsualisation (trop lent). Highlight color for query terms Couleur de mise en relief des termes recherchés Prefer Html to plain text for preview. Utiliser le format Html pour la prévisualisation If checked, results with the same content under different names will only be shown once. N'afficher qu'une entrée pour les résultats de contenu identique. Hide duplicate results. Cacher les doublons Choose editor applications Choisir les éditeurs pour les différents types de fichiers Display category filter as toolbar instead of button panel (needs restart). Afficher le filtre de catégorie comme une barre d'outils plutot que comme un panneau de boutons (après le redémarrage). The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Les mots de la liste seront automatiquement changés en clauses ext:xxx dans les requêtes en langage d'interrogation. Query language magic file name suffixes. 
Suffixes automatiques pour le langage d'interrogation. Enable Activer ViewAction Changing actions with different current values Changement d'actions avec des valeurs actuelles differentes Mime type Type Mime Command Commande MIME type Type MIME Desktop Default Défaut du bureau Changing entries with different current values Nous changeons des éléments avec des valeurs actuelles différentes ViewActionBase File type Type de fichier Action Action Select one or several file types, then click Change Action to modify the program used to open them Sélectionner une ou plusieurs types de fichiers, puis cliquer Changer pour modifier le programme utilisé pour les ouvrir Change Action Changer Close Fermer Native Viewers Applications de visualisation Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults. Sélectionner un ou plusieurs types MIME puis cliquer "Modifier l'Action"<br>Vous pouvez aussi fermer ce dialogue et choisir "Utiliser les préférences du bureau"<br> dans le dialogue principal pour ignorer cette liste. Select one or several mime types then use the controls in the bottom frame to change how they are processed. Sélectionner un ou plusieurs types MIME, puis utiliser les contrôles dans le cadre du bas pour changer leur traitement Use Desktop preferences by default Utiliser les préférences du bureau Select one or several file types, then use the controls in the frame below to change how they are processed Sélectionner un ou plusieurs types de fichiers, puis utiliser les contrôles dans le cadre du bas pour changer leur traitement Exception to Desktop preferences Exception aux préférences du bureau Action (empty -> recoll default) Action (vide -> utiliser le defaut recoll) Apply to current selection Appliquer à la sélection courante Recoll action: Action current value valeur actuelle Select same S&eacute;lectionner par valeur <b>New Values:</b> <b>Nouveaux param&egrave;tres</b> Webcache Webcache editor Editeur Webcache Search regexp Recherche (regexp) WebcacheEdit Copy URL Copier l'URL Unknown indexer state. Can't edit webcache file. État indexeur inconnu. Impossible d'éditer le fichier webcache. Indexer is running. Can't edit webcache file. L'indexeur est actif. Impossible d'accéder au fichier webcache. Delete selection Détruire les entrées sélectionnées Webcache was modified, you will need to run the indexer after closing this window. Le fichier webcache a été modifié, il faudra redémarrer l'indexation après avoir fermé cette fenêtre. WebcacheModel MIME MIME Url Url confgui::ConfBeaglePanelW Steal Beagle indexing queue Voler la queue d'indexation de Beagle Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin) Beagle NE DOIT PAS être actif. Permet de traiter la queue de Beagle pour indexer l'historique des pages visitées de Firefox.<br> (Il faut aussi installer le plugin Beagle pour Firefox) Web cache directory name Nom du répertoire cache de pages WEB The name for a directory where to store the cache for visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Le nom d'un répertoire où stocker les copies des pages WEB visitées.<br>Le chemin peut être relatif au répertoire de configuration. Max. 
size for the web cache (MB) Taille maximum pour le cache (Mo) Entries will be recycled once the size is reached Les pages seront écrasées quand la taille spécifiée est atteinte Web page store directory name Répertoire de stockage des pages WEB The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Le nom d'un répertoire où stocker les copies des pages visitées.<br>Un chemin relatif se réfère au répertoire de configuration. Max. size for the web store (MB) Taille maximale pour le cache Web (Mo) Process the WEB history queue Traiter la file des pages WEB Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Permet d'indexer les pages Web visitées avec Firefox <br>(il vous faut également installer l'extension Recoll pour Firefox) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Les entrées seront recyclées quand la taille sera atteinte.<br>Seule l'augmentation de la taille a un sens parce que réduire la valeur ne tronquera pas un fichier existant (mais gachera de l'espace à la fin). confgui::ConfIndexW Can't write configuration file Impossible d'ecrire le fichier de configuration Recoll - Index Settings: Recoll - Paramètres de l'index : confgui::ConfParamFNW Browse Parcourir Choose Choisir confgui::ConfParamSLW + + - - Add entry Ajouter une entrée Delete selected entries Détruire les entrées sélectionnées ~ ~ Edit selected entries Modifier les entrées sélectionnées confgui::ConfSearchPanelW Automatic diacritics sensitivity Sensibilité automatique aux accents <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. <p>Activer automatiquement la sensibilité aux accents si le terme recherché contient des accents (saufs pour ceux de unac_except_trans). Sans cette option, il vous faut utiliser le langage de recherche et le drapeau <i>D</i> pour activer la sensibilité aux accents. Automatic character case sensitivity Sensibilité automatique aux majuscules <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. <p>Activer automatiquement la sensibilité aux majuscules si le terme de recherche contient des majuscules (sauf en première lettre). Sans cette option, vous devez utiliser le langage de recherche et le drapeau <i>C</i> pour activer la sensibilité aux majuscules. Maximum term expansion count Taille maximum de l'expansion d'un terme <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. <p>Nombre maximum de termes de recherche résultant d'un terme entré (par exemple expansion par caractères jokers). La valeur par défaut de 10000 est raisonnable et évitera les requêtes qui paraissent bloquées pendant que le moteur parcourt l'ensemble de la liste des termes. Maximum Xapian clauses count Compte maximum de clauses Xapian <p>Maximum number of elementary clauses we add to a single Xapian query. 
In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. <p>Nombre maximum de clauses Xapian élémentaires générées pour une requête. Dans certains cas, le résultat de l'expansion des termes peut ere multiplicatif, et utiliserait trop de mémoire. La valeur par défaut de 100000 devrait être à la fois suffisante et compatible avec les configurations matérielles typiques. confgui::ConfSubPanelW Global Global Max. compressed file size (KB) Taille maximale pour les fichiers à décomprimer (ko) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Cette valeur définit un seuil au delà duquel les fichiers comprimés ne seront pas traités. Utiliser -1 pour désactiver la limitation, 0 pour ne traiter aucun fichier comprimé. Max. text file size (MB) Taille maximale d'un fichier texte (Mo) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Cette valeur est un seuil au delà duquel les fichiers de texte pur ne seront pas indexés. Spécifier -1 pour supprimer la limite. Utilisé pour éviter d'indexer des fichiers monstres. Text file page size (KB) Taille de page pour les fichiers de texte pur (ko) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Si cette valeur est spécifiée et positive, les fichiers de texte pur seront découpés en tranches de cette taille pour l'indexation. Ceci diminue les ressources consommées par l'indexation et aide le chargement pour prévisualisation. Max. filter exec. time (S) Temps d'exécution maximum pour un filtre (S) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit. Un filtre externe sera interrompu si l'attente dépasse ce temps. Utile dans le cas rare (programme postscript par exemple) où un document pourrait forcer un filtre à boucler indéfiniment. Positionner à -1 pour supprimer la limite. External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Un filtre externe qui prend plus de temps sera arrêté. Traite le cas rare (possible avec postscript par exemple) où un document pourrait amener un filtre à boucler sans fin. Mettre -1 pour complètement supprimer la limite (déconseillé). Only mime types Seulement ces types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Une liste exclusive des types MIME à indexer.<br>Rien d'autre ne sera indexé. Normalement vide et inactif Exclude mime types Types exclus Mime types not to be indexed Types MIME à ne pas indexer Max. filter exec. time (s) Temps d'exécution maximum pour un filtre (s) confgui::ConfTopPanelW Top directories Répertoires de départ The list of directories where recursive indexing starts. Default: your home. La liste des répertoires où l'indexation récursive démarre. Défault: votre répertoire par défaut. Skipped paths Chemins ignorés These are names of directories which indexing will not enter.<br> May contain wildcards. 
Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Ce sont des noms de répertoires où l'indexation n'entrera pas.<br>Ils peuvent contenir des caractères jokers. Les chemins doivent correspondre à ceux vus par l'indexeur (par exemple: si un des répertoires de départ est '/home/me' et que '/home' est un lien sur '/usr/home', une entrée correcte ici serait '/home/me/tmp*' , pas '/usr/home/me/tmp*') Stemming languages Langue pour l'expansion des termes The languages for which stemming expansion<br>dictionaries will be built. Les langages pour lesquels les dictionnaires d'expansion<br>des termes seront construits. Log file name Nom du fichier journal The file where the messages will be written.<br>Use 'stderr' for terminal output Le nom du fichier ou les messages seront ecrits.<br>Utiliser 'stderr' pour le terminal Log verbosity level Niveau de verbosité This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Cette valeur ajuste la quantite de messages emis,<br>depuis uniquement les erreurs jusqu'a beaucoup de donnees de debug. Index flush megabytes interval Intervalle d'écriture de l'index en mégaoctets This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Ajuste la quantité de données lues entre les écritures sur disque.<br>Contrôle l'utilisation de la mémoire. Défaut 10 Mo Max disk occupation (%) Occupation disque maximum (%) This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default). Niveau d'occupation du disque ou l'indexation s'arrête (pour eviter un remplissage excessif).<br>0 signifie pas de limite (defaut). No aspell usage Pas d'utilisation d'aspell Aspell language Langue pour aspell The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Langue du dictionnaire aspell. Une valeur correcte ressemble à 'en' ou 'fr'... <br>Si cette valeur n'est pas positionnée, l'environnement est utilisé pour la calculer, ce qui marche bien, habituellement. Utiliser 'aspell config' et regarder les fichiers .dat dans le répertoire 'data-dir' pour connaitre les langues aspell installées sur votre système. Database directory name Répertoire de stockage de l'index The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Le nom d'un répertoire où l'index sera stocké<br>Un chemin relatif démarrera au répertoire de configuration. Le défaut est 'xapiandb'. Use system's 'file' command Utiliser la commande 'file' du systeme Use the system's 'file' command if internal<br>mime type identification fails. Utiliser la commande 'file' si la determination<br>interne du type mime n'obtient pas de resultat. Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Désactiver l'utilisation d'aspell pour générer les approximations orthographiques.<br> Utile si aspell n'est pas installé ou ne fonctionne pas. The language for the aspell dictionary. 
This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Langue pour le dictionnaire aspell. La valeur devrait ressembler à 'en' ou 'fr'... <br>Si cette valeur n'est pas positionnée, l'environnement national sera utilisé pour la calculer, ce qui marche bien habituellement. Pour avoir une liste des valeurs possibles sur votre système, entrer 'aspell config' sur une ligne de commande et regarder les fichiers '.dat' dans le répertoire 'data-dir'. The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Le nom d'un répertoire pour stocker l'index<br>Un chemin relatif sera interprété par rapport au répertoire de configuration. La valeur par défaut est 'xapiandb'. Unac exceptions Exceptions Unac <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. <p>Ce sont les exceptions au mécanisme de suppression des accents, qui, par défaut et en fonction de la configuration de l'index, supprime tous les accents et effectue une décomposition canonique Unicode. Vous pouvez inhiber la suppression des accents pour certains caractères, en fonction de votre langue, et préciser d'autres décompositions, par exemple pour des ligatures. Dans la liste séparée par des espaces, le premier caractères d'un élément est la source, le reste est la traduction. These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Ce sont les chemins des répertoires où l'indexation n'ira pas.<br>Les éléments peuvent contenir des caractères joker. Les entrés doivent correspondre aux chemins vus par l'indexeur (ex.: si topdirs comprend '/home/me' et que '/home' est en fait un lien vers '/usr/home', un élément correct pour skippedPaths serait '/home/me/tmp*', et non '/usr/home/me/tmp*') Max disk occupation (%, 0 means no limit) Utilisation disque maximale (%, 0 signifie pas de limite) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. C'est le pourcentage d'utilisation disque - utilisation totale, et non taille de l'index - où l'indexation s'arrêtera en erreur.<br>La valeur par défaut de 0 désactive ce test. uiPrefsDialogBase User preferences Préférences utilisateur User interface Interface utilisateur Number of entries in a result page Nombre de résultats par page If checked, results with the same content under different names will only be shown once. N'afficher qu'une entrée pour les résultats de contenu identique. Hide duplicate results. 
Cacher les doublons Highlight color for query terms Couleur de mise en relief des termes recherchés Result list font Fonte pour la liste de résultats Opens a dialog to select the result list font Ouvre une fenêtre permettant de changer la fonte Helvetica-10 Helvetica-10 Resets the result list font to the system default Réinitialiser la fonte à la valeur par défaut Reset Réinitialiser Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Definit le format des paragraphes de la liste de resultats. Utilise le format html qt et des directives de substitution de type printf:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br> Result paragraph<br>format string Chaîne de formatage<br>des paragraphes résultats Texts over this size will not be highlighted in preview (too slow). Les textes plus gros ne seront pas surlignés dans la prévisualisation (trop lent). Maximum text size highlighted for preview (megabytes) Taille maximum des textes surlignés avant prévisualisation (Mo) Use desktop preferences to choose document editor. Utiliser les réglages du bureau pour choisir l'application de visualisation. Choose editor applications Choisir les éditeurs pour les différents types de fichiers Display category filter as toolbar instead of button panel (needs restart). Afficher le filtre de catégorie comme une barre d'outils plutot que comme un panneau de boutons (après le redémarrage). Auto-start simple search on whitespace entry. Démarrer automatiquement une recherche simple sur entrée d'un espace. Start with advanced search dialog open. Panneau de recherche avancée ouvert au démarrage. Start with sort dialog open. Panneau de tri ouvert au démarrage. Remember sort activation state. Mémoriser l'état d'activation du tri Prefer Html to plain text for preview. Utiliser le format Html pour la previsualisation. Search parameters Paramètres pour la recherche Stemming language Langue pour l'expansion des termes A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Une recherche pour [vin rouge] (2 mots) sera complétée comme [vin OU rouge OU (vin PHRASE 2 rouge)].<br> Ceci devrait donner une meilleure pertinence aux résultats où les termes recherchés apparaissent exactement et dans l'ordre. Automatically add phrase to simple searches Ajouter automatiquement une phrase aux recherches simples Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Décide si des résumés seront construits à partir du contexte des termes de recherche. Peut ralentir l'affichage si les documents sont gros. Dynamically build abstracts Construire dynamiquement les résumés Do we synthetize an abstract even if the document seemed to have one? Est-ce qu'un résumé doit etre synthétisé meme dans le cas ou le document original en avait un? 
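As an example of the result paragraph format mechanism described above, a minimal hand-written format string using only the listed %-substitutions might look like this (an illustrative sketch, not the shipped default):

    <p><img src="%I" align="left">%R <b>%T</b> %L<br>%M %D %S<br>%A<br><i>%U</i></p>

Each directive is replaced per the legend above (%T title, %R relevance percentage, %L preview and edit links, and so on) before the paragraph is rendered as Qt HTML.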
Replace abstracts from documents Remplacer les résumés existant dans les documents Synthetic abstract size (characters) Taille du résumé synthétique (caractères) Synthetic abstract context words Nombre de mots de contexte par occurrence de terme dans le résumé The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Les mots de la liste seront automatiquement changés en clauses ext:xxx dans les requêtes en langage d'interrogation. Query language magic file name suffixes. Suffixes automatiques pour le langage d'interrogation Enable Activer External Indexes Index externes Toggle selected Changer l'état pour les entrées sélectionnées Activate All Tout activer Deactivate All Tout désactiver Remove from list. This has no effect on the disk index. Oter de la liste. Sans effet sur les données stockées. Remove selected Effacer la sélection Click to add another index directory to the list Cliquer pour ajouter un autre index à la liste Add index Ajouter un index Apply changes Appliquer les modifications &OK &OK Discard changes Abandonner les modifications &Cancel &Annuler Abstract snippet separator Séparateur d'extrait Use <PRE> tags instead of <BR>to display plain text as html. Utilise des balises <PRE> au lieu de <BR> pour afficher du texte brut en html. Lines in PRE text are not folded. Using BR loses indentation. Les lignes de text brut <PRE> ne sont pas replies (scroll horizontal). L'utilisation de balises <BR> ne preserve pas l'indentation. Style sheet Feuille de style Opens a dialog to select the style sheet file Ouvre un dialogue pour choisir un fichier feuille de style Choose Choisir Resets the style sheet to default Restore la valeur par défaut pour la feuille de style Lines in PRE text are not folded. Using BR loses some indentation. Les lignes dans des sections PRE ne sont pas justifiées. Utiliser BR perd une partie de l'indentation. Use <PRE> tags instead of <BR>to display plain text as html in preview. Utiliser des tags <PRE> au lieu de <BR> pour afficher du texte non formaté en html dans la prévisualisation Result List Liste de résultats Edit result paragraph format string Editer le format du paragraphe de résultat Edit result page html header insert Editer le fragment à insérer dans l'en-tête HTML Date format (strftime(3)) Format de date (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Seuil de fréquence (pourcentage) au delà duquel les termes ne seront pas utilisés. Les phrases contenant des termes trop fréquents posent des problèmes de performance. Les termes ignorés augmentent la distance de phrase, et réduisent l'efficacité de la fonction de recherche de phrase automatique. La valeur par défaut est 2% Autophrase term frequency threshold percentage Seuil de fréquence de terme (pourcentage) pour la génération automatique de phrases Plain text to HTML line style Style de traduction texte ordinaire vers HTML Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. Les lignes dans une balise PRE ne sont pas repliées. Utiliser BR conduit à perdre une partie des tabulations. Le style PRE + WRAP peut être le meilleurs compromis mais son bon fonctionnement dépend des versions Qt. 
<BR> <BR> <PRE> <PRE> <PRE> + wrap <PRE> + repliement Exceptions Exceptions Mime types that should not be passed to xdg-open even when "Use desktop preferences" is set.<br> Useful to pass page number and search string options to, e.g. evince. Types MIME qui ne doivent pas être passés à xdg-open même quand "Utiliser les préférences du bureau" est coché. <br> Utile pour pouvoir passer les paramètres de numéro de page et de terme de recherche à evince, par exemple. Disable Qt autocompletion in search entry. Désactiver l'autocomplétion Qt dans l'entrée de recherche Search as you type. Lancer la recherche a chaque caractere entre Paths translations Traductions de chemins Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Cliquer pour ajouter un autre index a la liste. Vous pouvez sélectionner soit un répertoire de configuration Recoll soit un index Xapian Snippets window CSS file Feuille de style CSS pour le popup de fragments Opens a dialog to select the Snippets window CSS style sheet file Ouvre un dialogue permettant de sélectionner la feuille de style CSS pour le popup des fragments Resets the Snippets window style Réinitialise le style de la fenêtre des fragments Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Décide si les filtres de documents sont affichés comme des radio-boutons, un menu déroulant dans la barre d'outils, ou un menu. Document filter choice style: Style de choix des filtres de documents : Buttons Panel Panneau de boutons Toolbar Combobox Menu déroulant dans le panneau d'outils Menu Menu Show system tray icon. Afficher l'icone dans la barre d'état système Close to tray instead of exiting. Réduire dans la barre d'état au lieu de quitter Start with simple search mode Démarrer en mode recherche simple Show warning when opening temporary file. Afficher un avertissement quand on édite une copie temporaire User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Style utilisateur à appliquer à la fenêtre "snippets".<br>Note : l'en tête de page de résultat est aussi inclus dans la fenêtre "snippets". Synonyms file Fichier de synonymes Highlight CSS style for query terms Style CSS de mise en avant pour les termes de la recherche Recoll - User Preferences Recoll - Préférences utilisateur Set path translations for the selected index or for the main one if no selection exists. Créer les traductions de chemins d'accès pour l'index selectionné, ou pour l'index principal si rien n'est sélectionné. Activate links in preview. Activer les liens dans la prévisualisation Make links inside the preview window clickable, and start an external browser when they are clicked. Rendre clicquables les liens dans la fenêtre de prévisualisation et démarrer un navigateur extérieur quand ils sont activés. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Mise en évidence des termes de recherche. <br>Si le bleu utilisé par défaut est trop discret, essayer peut-être : "color:red;background:yellow"... Start search on completer popup activation. Démarrer la recherche quand un choix est fait dans les suggestions Maximum number of snippets displayed in the snippets window Nombre maximum d'extraits affichés dans la fenêtre des extraits Sort snippets by page number (default: by weigth). 
Trier les extraits par numéro de page (défaut: par pertinence). Suppress all beeps. Mode silencieux. recoll-1.26.3/qtgui/i18n/recoll_xx.ts0000644000175000017500000031336213566424763014313 00000000000000 AdvSearch All clauses Any clause media other Bad multiplier suffix in size filter text spreadsheet presentation message texts spreadsheets AdvSearchBase Advanced search Search for <br>documents<br>satisfying: Delete clause Add clause Restrict file types Check this to enable filtering on file types By categories Check this to use file categories instead of raw mime types Save as default Searched file types All ----> Sel -----> <----- Sel <----- All Ignored file types Enter top directory for search Browse Restrict results to files in subtree: Start Search Close All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored. Invert Minimum size. You can use k/K,m/M,g/G as multipliers Min. Size Maximum size. You can use k/K,m/M,g/G as multipliers Max. Size Filter From To Check this to enable filtering on dates Filter dates Find Check this to enable filtering on sizes Filter sizes ConfIndexW Can't write configuration file Global parameters Local parameters Search parameters Top directories The list of directories where recursive indexing starts. Default: your home. Skipped paths These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*') Stemming languages The languages for which stemming expansion<br>dictionaries will be built. Log file name The file where the messages will be written.<br>Use 'stderr' for terminal output Log verbosity level This value adjusts the amount of messages,<br>from only errors to a lot of debugging data. Index flush megabytes interval This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit) This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit. No aspell usage Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. Aspell language The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. Database directory name The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'. Unac exceptions <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation. 
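To make the preceding parameter descriptions concrete, here is a rough sketch of how the three settings named explicitly in the text (topdirs, skippedPath(s), unac_except_trans) might appear in a configuration file; the values and the name = value layout are illustrative assumptions, not taken from a shipped file:

    topdirs = ~/Documents ~/mail
    skippedPaths = /home/me/tmp*
    unac_except_trans = ää öö ßss

Following the unac description above, each space-separated entry starts with the source character and continues with its replacement, so "ßss" maps ß to ss while "ää" keeps ä accented.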
Process the WEB history queue Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin) Web page store directory name The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory. Max. size for the web store (MB) Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end). Automatic diacritics sensitivity <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity. Automatic character case sensitivity <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity. Maximum term expansion count <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. Maximum Xapian clauses count <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. ConfSubPanelW Only mime types An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive Exclude mime types Mime types not to be indexed Max. compressed file size (KB) This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever. Max. text file size (MB) This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. This is for excluding monster log files from the index. Text file page size (KB) If this value is set (not equal to -1), text files will be split in chunks of this size for indexing. This will help searching very big text files (ie: log files). Max. filter exec. time (s) External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit. Global CronToolW Cron Dialog <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). 
More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html> Days of week (* or 0-7, 0 or 7 is Sunday) Hours (* or 0-23) Minutes (0-59) <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html> Enable Disable It seems that manually edited entries exist for recollindex, cannot edit crontab Error installing cron entry. Bad syntax in fields ? EditDialog Dialog EditTrans Source path Local path Config error Original path EditTransBase Path Translations Setting path translations for Select one or several file types, then use the controls in the frame below to change how they are processed Add Delete Cancel Save FirstIdxDialog First indexing setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. 
</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html> Indexing configuration This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc. Indexing schedule This will let you chose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron). Start indexing now FragButs %1 not found. %1: %2 Query Fragments IdxSchedW Index scheduling setup <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> Cron scheduling The tool will let you decide at what time indexing should run and will install a crontab entry. Real time indexing start up Decide if real time indexing will be started when you log in (only for the default index). ListDialog Dialog GroupBox Main No db directory in configuration "history" file is damaged, please check or remove it: Preview Cancel Missing helper program: Can't turn doc into internal representation for Creating preview text Loading preview text into editor &Search for: &Next &Previous Clear Match &Case Form Tab 1 Open Canceled Error loading the document: file missing. Error loading the document: no permission. Error loading: backend not configured. Error loading the document: other handler error<br>Maybe the application is locking the file ? Error loading the document: other handler error. <br>Attempting to display from stored text. 
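The CronToolW dialog strings above map three fields (Days of week, Hours, Minutes) onto a standard crontab(5) entry for the recollindex command. As a sketch only (the exact command line the GUI installs, and any options it adds, may differ), the example quoted in the dialog text — * for Days, 12,19 for Hours and 15 for Minutes — corresponds to a crontab line like:

    15 12,19 * * * recollindex

crontab(5) fields are minute, hour, day-of-month, month and day-of-week, so this runs recollindex every day at 12:15 and 19:15.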
Could not fetch stored text PreviewTextEdit Show fields Show main text Print Print Current Preview Show image Select All Copy Save document to file Fold lines Preserve indentation Open document QObject <b>Customised subtrees The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty. Skipped names These are patterns for file or directory names which should not be indexed. Follow symbolic links Follow symbolic links while indexing. The default is no, to avoid duplicate indexing Index all file names Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true Default<br>character set Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used. Ignored endings These are file name endings for files which will be indexed by name only (no MIME type identification attempt, no decompression, no content indexing). <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons. QWidget Create or choose save directory Choose exactly one directory Could not read directory: Unexpected file name collision, cancelling. Cannot extract document: &Preview &Open Open With Run Script Copy &File Name Copy &URL &Write to File Save selection to files Preview P&arent document/folder &Open Parent document/folder Find &similar documents Open &Snippets window Show subdocuments / attachments QxtConfirmationMessage Do not show again. RTIToolW Real time indexing automatic start <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html> Start indexing daemon with my desktop session. Also start indexing daemon right now. Replacing: Replacing file Can't create: Warning Could not execute recollindex Deleting: Deleting file Removing autostart Autostart file deleted. Kill current process too ? RclMain (no stemming) (all languages) error retrieving stemming languages Indexing in progress: Purge Stemdb Closing Unknown Query results Cannot retrieve document info from database Warning Can't create preview window This search is not active any more Cannot extract document or create temporary file Executing: [ About Recoll History data Document history Update &Index Stop &Indexing All media message other presentation spreadsheet text sorted filtered No helpers found missing Missing helper programs No external viewer configured for mime type [ The viewer specified in mimeview for %1: %2 is not found. Do you want to start the preferences dialog ? 
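The QObject strings above describe the per-directory indexing parameters ("customised subtrees", skipped names, symbolic links, file-name-only indexing, default character set). These correspond to entries in the recoll.conf file, where a bracketed directory section overrides the global values for one subtree. A minimal sketch, assuming the parameter names documented in recoll.conf(5) (topdirs, skippedNames, followLinks, indexallfilenames, defaultcharset — check the exact spelling against your Recoll version; the paths and values are invented):

    # Global section
    topdirs = ~/Documents ~/mail
    skippedNames = *.o .git core
    followLinks = 0
    indexallfilenames = 1

    # "Customised subtree": values redefined for one directory only
    [~/Documents/japanese]
    defaultcharset = euc-jp

The +/- buttons mentioned in the text add or remove entries for such subtrees.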
Can't access file: Can't uncompress file: Save file Result count (est.) Could not open external index. Db not open. Check external indexes list. No results found None Updating Done Monitor Indexing failed The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone Erasing index Reset the index and start from scratch ? Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program Error Index query error Can't update index: indexer running Indexed MIME Types Bad viewer command line for %1: [%2] Please check the mimeview file Viewer command line for %1 specifies both file and parent file value: unsupported Cannot find parent document External applications/commands needed for your file types and not found, as stored by the last indexing pass in Sub-documents and attachments Document filter The indexer is running so things should improve when it's done. Duplicate documents These Urls ( | ipath) share the same content: Bad desktop app spec for %1: [%2] Please check the desktop file Indexing interrupted Bad paths Selection patterns need topdir Selection patterns can only be used with a start directory No search No preserved previous search Choose file to save Saved Queries (*.rclq) Write failed Could not write to file Read failed Could not open file: Load error Could not load saved query Index scheduling Sorry, not available under Windows for now, use the File menu entries to update the index Disabled because the real time indexer was not compiled in. This configuration tool only works for the main index. Can't set synonyms file (parse error?) The document belongs to an external index which I can't update. Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location. Do not show this warning next time (use GUI preferences to restore). Index locked Unknown indexer state. Can't access webcache file. Indexer is running. Can't access webcache file. with additional message: Non-fatal indexing message: Types list empty: maybe wait for indexing to progress? Viewer command line for %1 specifies parent file but URL is http[s]: unsupported Tools Results Content has been indexed for these MIME types: Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index): Indexing done Can't update index: internal error Index not up to date for this file.<br> <em>Also, it seems that the last index update for the file failed.</em><br/> Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br> Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/> documents document files file errors error total files) No information: initial indexing not yet performed. 
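Several RclMain messages above deal with starting, resetting or monitoring the indexer ("Can't update index: indexer running", "Reset the index and start from scratch ?"). The GUI drives the same recollindex program that can be run by hand; a few typical invocations, as a sketch to be checked against recollindex(1):

    recollindex                      # incremental update of the default index
    recollindex -z                   # reset the index and reindex from scratch
    recollindex -c ~/.recoll-work    # operate on an alternate configuration directory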
RclMainBase Recoll &File &Tools &Preferences &Help E&xit Ctrl+Q Update &index &Erase document history &About Recoll &User manual Document &History Document History &Advanced Search Advanced/complex Search &Sort parameters Sort parameters Term &explorer Term explorer tool Next page Next page of results First page Go to first page of results Previous page Previous page of results External index dialog PgDown PgUp &Full Screen F11 Full Screen &Erase search history Sort by dates from oldest to newest Sort by dates from newest to oldest Show Query Details &Rebuild index Shift+PgUp E&xternal index dialog &Index configuration &GUI configuration &Results Sort by date, oldest first Sort by date, newest first Show as table Show results in a spreadsheet-like table Save as CSV (spreadsheet) file Saves the result into a file which you can load in a spreadsheet Next Page Previous Page First Page Query Fragments With failed files retrying Next update will retry previously failed files Indexing &schedule Enable synonyms Save last query Load saved query Special Indexing Indexing with special options &View Missing &helpers Indexed &MIME types Index &statistics Webcache Editor Trigger incremental pass RclTrayIcon Restore Quit RecollModel Abstract Author Document size Document date File size File name File date Keywords Original character set Relevancy rating Title URL Mtime Date Date and time Ipath MIME type Can't sort by inverse relevance ResList Result list (show query) Document history <p><b>No results found</b><br> Previous Next Unavailable document Preview Open <p><i>Alternate spellings (accents suppressed): </i> Documents out of at least for <p><i>Alternate spellings: </i> Result count (est.) Query details Snippets ResTable &Reset sort &Delete column Save table to CSV file Can't open/create file: &Save as CSV Add "%1" column SSearch Any term All terms File name Query language Bad query string Out of memory Enter file name wildcard expression. Enter query language expression. Cheat sheet:<br> <i>term1 term2</i> : 'term1' and 'term2' in any field.<br> <i>field:term1</i> : 'term1' in field 'field'.<br> Standard field names/synonyms:<br> title/subject/caption, author/from, recipient/to, filename, ext.<br> Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br> Two date interval exemples: 2009-03-01/2009-05-20 2009-03-01/P2M.<br> <i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br> You can use parentheses to make things clearer.<br> <i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br> <i>"term1 term2"p</i> : unordered proximity search with default distance.<br> Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail. Stemming languages for stored query: differ from current preferences (kept) Auto suffixes for stored query: External indexes for stored query: Autophrase is set but it was unset for stored query Autophrase is unset but it was set for stored query Enter search terms here. SSearchBase SSearchBase Clear Ctrl+S Erase search entry Search Start query Choose search type. Show query history SearchClauseW Select the type of query that will be performed with the words Number of additional words that may be interspersed with the chosen ones No field Any All None Phrase Proximity File name Snippets Snippets Find: Next Prev SnippetsW Search <p>Sorry, no exact match was found within limits. 
Probably the document is very big and the snippets generator got lost in a maze...</p> Sort By Relevance Sort By Page SpecIdxW Special Indexing Else only modified or failed files will be processed. Erase selected files data before indexing. Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Top indexed entity Retry previously failed files. Start directory. Must be part of the indexed tree. Use full indexed area if empty. SpellBase Term Explorer &Expand Alt+E &Close Alt+C No db info. Match Case Accents SpellW Wildcards Regexp Stem expansion Spelling/Phonetic error retrieving stemming languages No expansion found Term Doc. / Tot. Index: %1 documents, average length %2 terms.%3 results %1 results List was truncated alphabetically, some frequent terms may be missing. Try using a longer root. Show index statistics Number of documents Average terms per document Database directory size MIME types: Item Value Smallest document length (terms) Longest document length (terms) Results from last indexing: Documents created/updated Files tested Unindexed files List files which could not be indexed (slow) Spell expansion error. UIPrefsDialog error retrieving stemming languages The selected directory does not appear to be a Xapian index This is the main/local index! The selected directory is already in the index list Choose Result list paragraph format (erase all to reset to default) Result list header (default is empty) Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb) The selected directory looks like a Recoll configuration directory but the configuration could not be read At most one index should be selected Cant add index with different case/diacritics stripping option Default QtWebkit font Any term All terms File name Query language Value from previous program exit ViewAction Command MIME type Desktop Default Changing entries with different current values ViewActionBase Native Viewers Close Select one or several mime types then use the controls in the bottom frame to change how they are processed. Use Desktop preferences by default Select one or several file types, then use the controls in the frame below to change how they are processed Exception to Desktop preferences Action (empty -> recoll default) Apply to current selection Recoll action: current value Select same <b>New Values:</b> Webcache Webcache editor Search regexp WebcacheEdit Copy URL Unknown indexer state. Can't edit webcache file. Indexer is running. Can't edit webcache file. Delete selection Webcache was modified, you will need to run the indexer after closing this window. WebcacheModel MIME Url confgui::ConfParamFNW Choose confgui::ConfParamSLW + - Add entry Delete selected entries ~ Edit selected entries uiPrefsDialogBase User interface Number of entries in a result page If checked, results with the same content under different names will only be shown once. Hide duplicate results. Result list font Opens a dialog to select the result list font Helvetica-10 Resets the result list font to the system default Reset Texts over this size will not be highlighted in preview (too slow). 
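The SSearch cheat sheet above lists the query-language building blocks (field:term, pseudo-fields such as dir, mime and date, OR, double-quoted phrases and the "..."p proximity modifier). A few example queries assembled from exactly those constructs, for illustration only (field values are invented):

    author:smith mime:application/pdf date:2009-03-01/P2M
    title:report OR title:summary dir:~/Documents
    "rolling stones" ext:html
    "index configuration"p

Terms and clauses are implicitly ANDed, as in the cheat sheet's term1 term2 OR term3 example.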
Maximum text size highlighted for preview (megabytes) Choose editor applications Start with advanced search dialog open. Remember sort activation state. Prefer Html to plain text for preview. Search parameters Stemming language A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. This should give higher precedence to the results where the search terms appear exactly as entered. Automatically add phrase to simple searches Do we try to build abstracts for result list entries by using the context of query terms ? May be slow for big documents. Dynamically build abstracts Do we synthetize an abstract even if the document seemed to have one? Replace abstracts from documents Synthetic abstract size (characters) Synthetic abstract context words The words in the list will be automatically turned to ext:xxx clauses in the query language entry. Query language magic file name suffixes. Enable External Indexes Toggle selected Activate All Deactivate All Remove from list. This has no effect on the disk index. Remove selected Add index Apply changes &OK Discard changes &Cancel Abstract snippet separator Style sheet Opens a dialog to select the style sheet file Choose Resets the style sheet to default Result List Edit result paragraph format string Edit result page html header insert Date format (strftime(3)) Frequency percentage threshold over which we do not use terms inside autophrase. Frequent terms are a major performance issue with phrases. Skipped terms augment the phrase slack, and reduce the autophrase efficiency. The default value is 2 (percent). Autophrase term frequency threshold percentage Plain text to HTML line style Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want. <BR> <PRE> <PRE> + wrap Disable Qt autocompletion in search entry. Paths translations Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index. Snippets window CSS file Opens a dialog to select the Snippets window CSS style sheet file Resets the Snippets window style Decide if document filters are shown as radio buttons, toolbar combobox, or menu. Document filter choice style: Buttons Panel Toolbar Combobox Menu Show system tray icon. Close to tray instead of exiting. Start with simple search mode User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header. Synonyms file Show warning when opening temporary file. Highlight CSS style for query terms Recoll - User Preferences Set path translations for the selected index or for the main one if no selection exists. Activate links in preview. Make links inside the preview window clickable, and start an external browser when they are clicked. Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue... Start search on completer popup activation. Maximum number of snippets displayed in the snippets window Sort snippets by page number (default: by weigth). Suppress all beeps. recoll-1.26.3/qtgui/i18n/recoll_el.qm0000644000175000017500000030554313566424763014245 00000000000000.caDo2NXMfRXs`^hult%nw wAxvOW,hIvcxC-YͺC LSI"[(؅. 
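recoll_el.qm, which begins here, is a compiled Qt message catalog (the Greek GUI translation) built from a .ts source like the Czech one further down. How Recoll's own startup code loads such a catalog is not part of this listing; the usual Qt pattern looks roughly like the following sketch (not Recoll's actual code; the translations directory is invented):

    #include <QApplication>
    #include <QLocale>
    #include <QTranslator>

    int main(int argc, char **argv)
    {
        QApplication app(argc, argv);

        // Try to load e.g. recoll_el.qm for an el_GR locale; QTranslator::load()
        // falls back from "recoll_el_GR" to "recoll_el" automatically.
        QTranslator translator;
        if (translator.load(QString("recoll_") + QLocale::system().name(),
                            "/usr/share/recoll/translations")) { // hypothetical path
            app.installTranslator(&translator);
        }

        // ... create and show the main window here ...
        return app.exec();
    }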
[recoll_el.qm — compiled Qt message catalog (Greek GUI translation). The binary payload is not representable as text; the translatable strings it carries are those of the Recoll GUI contexts (AdvSearch, AdvSearchBase, ConfIndexW, ConfSubPanelW, CronToolW, RclMain, uiPrefsDialogBase, ...), as listed in the recoll_*.ts sources.]
recoll-1.26.3/qtgui/i18n/recoll_cs.ts0000644000175000017500000056063513566424763014260 00000000000000



    AdvSearch
    
        All clauses
        Všechny výrazy
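recoll_cs.ts is a Qt Linguist translation source. Each indented pair above and below is an English source string followed by its Czech translation, grouped under a context name (here AdvSearch). In the TS file itself each pair sits in XML markup along these lines (a sketch of the first entry only; the version and language attribute values are illustrative):

    <TS version="2.0" language="cs_CZ">
      <context>
        <name>AdvSearch</name>
        <message>
          <source>All clauses</source>
          <translation>Všechny výrazy</translation>
        </message>
        <!-- further <message> elements follow -->
      </context>
    </TS>

Running Qt's lrelease over such a .ts file produces the corresponding binary .qm catalog (here recoll_cs.qm) that the GUI loads at run time.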
    
    
        Any clause
        Některý z výrazů
    
    
        texts
        Texty
    
    
        spreadsheets
        Tabulky
    
    
        presentations
        Představení
    
    
        media
        Multimedia
    
    
        messages
        Zprávy
    
    
        other
        Jiné
    
    
        Bad multiplier suffix in size filter
        Špatná přípona násobitele ve filtru velikosti
    
    
        text
        Text
    
    
        spreadsheet
        Tabulky
    
    
        presentation
        Představení
    
    
        message
        Zpráva
    


    AdvSearchBase
    
        Advanced search
        Pokročilé hledání
    
    
        Restrict file types
        Omezit souborových typů
    
    
        Save as default
        Uložit jako výchozí
    
    
        Searched file types
        Hledané souborové typy
    
    
        All ---->
        Vše ---->
    
    
        Sel ----->
        Výběr ----->
    
    
        <----- Sel
        <----- Výběr
    
    
        <----- All
        <----- Vše
    
    
        Ignored file types
        Přehlížené souborové typy
    
    
        Enter top directory for search
        Zadejte základní adresář pro hledání
    
    
        Browse
        Procházet
    
    
        Restrict results to files in subtree:
        Omezit výsledky na soubory v následujícím podadresáři:
    
    
        Start Search
        Spustit hledání
    
    
        Search for <br>documents<br>satisfying:
        Hledat <br>dokumenty<br>, které splňují následující hlediska:
    
    
        Delete clause
        Smazat poslední výraz
    
    
        Add clause
        Přidat nový výraz
    
    
        Check this to enable filtering on file types
        Zaškrtněte pro zapnutí filtrování podle souborových typů
    
    
        By categories
        Podle skupin
    
    
        Check this to use file categories instead of raw mime types
        Zaškrtněte pro používání skupin souborů místo MIME typů
    
    
        Close
        Zavřít
    
    
        All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored.
        Všechna pole napravo, která nejsou prázdná, budou spojována spojeními AND (volba "Všechny výrazy") nebo OR (volba "Některý z výrazů"). <br>Typy polí "Jakékoli" "Vše" a "Žádné" mohou přijmout směs jednoduchých slov, a věty uzavřené dvojitými uvozovkami.<br>Pole bez dat jsou přehlížena.
    
    
        Invert
        Obrátit
    
    
        Minimum size. You can use k/K,m/M,g/G as multipliers
        Nejmenší velikost: Můžete použít k/K,m/M,g/G jako násobitele
    
    
        Min. Size
        Nejmenší velikost
    
    
        Maximum size. You can use k/K,m/M,g/G as multipliers
        Největší velikost: Můžete použít k/K,m/M,g/G jako násobitele
    
    
        Max. Size
        Největší velikost
    
    
        Select
        Vybrat
    
    
        Filter
        Filtrovat
    
    
        From
        Od
    
    
        To
        Do
    
    
        Check this to enable filtering on dates
        Zaškrtněte pro zapnutí filtrování podle dat
    
    
        Filter dates
        Filtrovat data
    
    
        Find
        Najít
    
    
        Check this to enable filtering on sizes
        Zaškrtněte pro zapnutí filtrování podle velikostí
    
    
        Filter sizes
        Filtrovat velikosti
    


    ConfIndexW
    
        Can't write configuration file
        Nelze zapsat soubor s nastavením
    
    
        Global parameters
        Celkové parametry
    
    
        Local parameters
        Místní parametry
    
    
        Search parameters
        
    
    
        Top directories
        Počáteční adresáře
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Seznam adresářů, ve kterých začíná rejstříkování včetně podsložek. Výchozí: adresář Home.
    
    
        Skipped paths
        Přeskočené cesty
    
    
        These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Názvy cest adresářů, do kterých rejstříkování nevstoupí.<br>Části cest mohou obsahovat zástupné symboly (žolíky). Záznamy musí odpovídat cestám, které rejstříkovač vidí (např. pokud v počátečních adresářích stojí '/home/me' a '/home' je ve skutečnosti odkazem na '/usr/home', potom by byl správným zápisem skippedPath '/home/me/tmp*' a ne '/usr/home/me/tmp*')
    
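The skippedPaths tooltip above maps to the topdirs/skippedPaths settings of the index configuration (recoll.conf). A minimal sketch, with purely hypothetical paths, could look like this:

    # hypothetical recoll.conf fragment: start indexing under these trees
    topdirs = ~/Documents /data/projects
    # never enter these subtrees; wildcards are matched against the paths the indexer sees
    skippedPaths = ~/Documents/tmp* /data/projects/*/.cache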
    
        Stemming languages
        Jazyky s kmeny slov
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Jazyky, pro které se vytvoří <br>slovníky pro rozšíření kmenů slov.
    
    
        Log file name
        Název pro soubor se zápisem
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        Soubor, do kterého se zapíše výstupní zpráva.<br>Pro výstupy na terminál použijte 'stderr'
    
    
        Log verbosity level
        Úroveň podrobnosti zápisu
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Tato hodnota upravuje množství zpráv,<br>od pouze chyb až po velké množství dat zajímavých pro ladění.
    
    
        Index flush megabytes interval
        Interval zápisu rejstříku na disk (v megabytech)
    
    
        This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Tato hodnota upravuje množství dat, která jsou rejstříkována mezi zápisy na disk.<br>Pomáhá to řídit použití paměti rejstříkovače. Výchozí je 10 MB
    
    
        Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit)
        
    
    
        This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit.
        Toto je procentní podíl využití disku - celkové využití disku, ne velikost rejstříku - při kterém rejstříkování selže a zastaví se.<br>Výchozí hodnota 0 odstraňuje jakékoli omezení.
    
    
        No aspell usage
        Nepoužívat aspell
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        Zakáže používání aspellu pro vytvoření přibližné podoby pravopisu v nástroji průzkumníka výrazů.<br> Užitečné, pokud aspell není přítomen anebo nepracuje. 
    
    
        Aspell language
        Jazyk aspellu
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Jazyk pro slovník aspellu. Mělo by to vypadat jako 'en' nebo 'fr' nebo 'cs'...<br>Pokud není tato hodnota nastavena, použije se pro její vypočítání prostředí NLS, což obvykle funguje. Pro získání představy o tom, co je ve vašem systému nainstalováno, napište 'aspell config' a hledejte soubory .dat v adresáři 'data-dir'.
    
    
        Database directory name
        Název adresáře s databází
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Název pro adresář, v němž se má ukládat rejstřík.<br>Neabsolutní cesta je vzata relativně k adresáři s nastavením. Výchozí je 'xapiandb'.
    
    
        Unac exceptions
        Výjimky unac
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        <p>Toto jsou výjimky pro mechanismus unac, který ve výchozím nastavení odstraňuje všechny diakritické znaky a nahrazuje je kanonickými obdobami. Toto odstraňování akcentů můžete (v závislosti na vaší řeči) pro některé znaky potlačit a zadat dodatečná nahrazení, např. pro ligatury. V každém mezerou odděleném záznamu je první znak zdrojovým (výchozím) a zbytek je nahrazení.
    
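As described above, each space-separated unac exception entry starts with the source character followed by its replacement. A sketch of such a setting in recoll.conf (the sample entries are illustrative, not the shipped defaults):

    # hypothetical example: expand the German sharp s and the oe ligature instead of just stripping accents
    unac_except_trans = ßss œoe Œoe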
    
        Process the WEB history queue
        Zpracovat řadu historie WEBu
    
    
        Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin)
        Povolí rejstříkování Firefoxem navštívených stránek.<br>(také je potřeba, abyste nainstalovali přídavný modul Recollu pro Firefox)
    
    
        Web page store directory name
        Název adresáře pro ukládání internetové stránky
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        Název pro adresář, kam se mají ukládat kopie navštívených internetových stránek.<br>Neabsolutní cesta je vzata relativně k adresáři s nastavením.
    
    
        Max. size for the web store (MB)
        Největší velikost pro ukládání internetových stránek (MB)
    
    
        Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).
        Záznamy budou po dosažení této velikosti znovu využívány (recyklovány).<br>Skutečně dává smysl jen zvětšení velikosti, protože zmenšení hodnoty neoseká stávající soubor (na konci jen plýtvání místem).
    
    
        Automatic diacritics sensitivity
        Automaticky rozlišovat diakritická znaménka
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        <p>Zapnout automaticky rozlišování diakritických znamének, když hledaný pojem obsahuje znaky s akcenty (ne v unac_except_trans). Jinak musíte pro určení citlivosti na diakritiku použít jazyk dotazu a modifikátor <i>D</i>.
    
    
        Automatic character case sensitivity
        Automaticky rozlišovat velká a malá písmena
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        <p>Zapnout automaticky rozlišování velkých a malých písmen, když záznam obsahuje velká písmena kdekoli mimo první místo. Jinak musíte pro určení citlivosti na velikost písmen použít jazyk dotazu a modifikátor <i>C</i>.
    
    
        Maximum term expansion count
        Největší počet rozšíření výrazu
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        <p>Největší počet rozšíření pro jeden výraz (např. při použití žolíků). Standardní výchozí hodnota 10 000 je rozumná a zabrání tomu, aby se hledaný pojem jevil jako zamrzlý, zatímco je procházen seznam pojmů.
    
    
        Maximum Xapian clauses count
        Největší počet výrazů Xapian
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        <p>Největší počet základních výrazů, které přidáme do jednoho dotazu Xapian. V některých případech se mohou výsledky rozšíření výrazu vynásobit, a my se chceme vyvarovat nadbytečné spotřeby paměti. Standardní výchozí hodnota 100 000 by měla být ve většině případů dostatečně vysoká a zároveň slučitelná s běžným současným vybavením počítače (hardware).
    
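The two limits described above correspond to index configuration values; a hedged sketch follows (the parameter names maxTermExpand and maxXapianClauses are assumed from memory and should be checked against the Recoll manual):

    # hypothetical recoll.conf fragment restating the documented defaults
    maxTermExpand = 10000
    maxXapianClauses = 100000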


    ConfSubPanelW
    
        Only mime types
        Pouze typy MIME
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        Vybraný seznam rejstříkovaných typů MIME.<br>Nic jiného se nebude rejstříkovat. Obyčejně je seznam prázdný a nečinný
    
    
        Exclude mime types
        Vyloučené typy MIME
    
    
        Mime types not to be indexed
        Typy MIME, které se nemají rejstříkovat
    
    
        Max. compressed file size (KB)
        Největší velikost zabaleného souboru (KB)
    
    
        This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        Tato hodnota nastavuje práh, za kterým nebudou zabalené soubory zpracovávány. Nastavte na -1 pro žádné omezení, na 0 pro vůbec žádné rozbalování.
    
    
        Max. text file size (MB)
        Největší velikost textového souboru (MB)
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        Tato hodnota nastavuje práh, za kterým nebudou textové soubory zpracovávány. Nastavte na -1 pro žádné omezení.
Je to kvůli vyloučení obřích souborů se zápisem z rejstříkování.
    
    
        Text file page size (KB)
        Velikost stránky textového souboru (KB)
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text  files (ie: log files).
        Pokud je nastavena tato hodnota (nerovná se -1), textové soubory budou pro rejstříkování rozděleny na kousky o této velikosti.
To pomůže při prohledávání velmi velkých textových souborů (např. souborů se zápisem).
    
    
        Max. filter exec. time (s)
        
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        Vnější filtry pracující déle než po tak dlouhou dobu budou přerušeny. Je to pro ten zřídkavý případ (např. postscript), kdy by dokument mohl zapříčinit vejití filtru do smyčky. Nastavte na -1 pro žádné omezení.

    
    
        Global
        Celkové
    


    CronToolW
    
        Cron Dialog
        Dialog Cron
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> dávkový rejstříkovací rozvrh (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Každé pole může obsahovat zástupný symbol (*), jednoduchou číselnou hodnotu, čárkou oddělené seznamy (1,3,5) a rozsahy (1-7). Obecněji, pole se budou používat <span style=" font-style:italic;">jak je</span> uvnitř souboru crontab, a lze použít úplnou stavbu crontab, podívejte se na crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Například, zadání <span style=" font-family:'Courier New,courier';">*</span> v <span style=" font-style:italic;">Dny, </span><span style=" font-family:'Courier New,courier';">12,19</span> v <span style=" font-style:italic;">Hodiny</span> a <span style=" font-family:'Courier New,courier';">15</span> v <span style=" font-style:italic;">Minuty</span> spustí rejstříkování (recollindex) každý den ve 12:15 dopoledne a 7:15 odpoledne</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Rozvrh s velmi častým spuštěním je pravděpodobně méně účinný než je rejstříkování ve skutečném čase.</p></body></html>
    
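The worked example in the dialog text above (every day of the week, hours 12,19, minutes 15) would end up as a crontab line roughly like the following; the exact recollindex command installed by the dialog is an assumption here:

    # hypothetical crontab entry: minute hour day-of-month month day-of-week command
    15 12,19 * * * recollindex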
    
        Days of week (* or 0-7, 0 or 7 is Sunday)
        Dny v týdnu (* nebo 0-7, 0 nebo 7 je neděle)
    
    
        Hours (* or 0-23)
        Hodiny (* nebo 0-23)
    
    
        Minutes (0-59)
        Minuty (0-59)
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Klepněte na <span style=" font-style:italic;">Zakázat</span> pro zastavení automatického dávkového rejstříkování, <span style=" font-style:italic;">Povolit</span> pro jeho zapnutí, <span style=" font-style:italic;">Zrušit</span>, aby vše zůstalo beze změny.</p></body></html>
    
    
        Enable
        Povolit
    
    
        Disable
        Zakázat
    
    
        It seems that manually edited entries exist for recollindex, cannot edit crontab
        Zdá se, že pro recollindex existují ručně upravené záznamy, nelze upravit crontab
    
    
        Error installing cron entry. Bad syntax in fields ?
        Chyba při instalaci záznamu cron. Špatná skladba v polích?
    


    EditDialog
    
        Dialog
        Dialog
    


    EditTrans
    
        Source path
        Cesta ke zdroji
    
    
        Local path
        Místní cesta
    
    
        Config error
        Chyba v nastavení
    
    
        Original path
        Původní cesta
    


    EditTransBase
    
        Path Translations
        Překlady cest
    
    
        Setting path translations for 
        Nastavení překladů cest pro 
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Vyberte jeden nebo více datových typů a použijte ovládací prvky v rámečku níže pro změnu způsobu, jakým jsou zpracovány
    
    
        Add
        Přidat
    
    
        Delete
        Smazat
    
    
        Cancel
        Zrušit
    
    
        Save
        Uložit
    


    FirstIdxDialog
    
        First indexing setup
        První nastavení rejstříkování
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Zdá se, že rejstřík pro toto nastavení neexistuje.</span><br /><br />Pokud chcete pouze zrejstříkovat svůj domovský adresář sadou rozumných výchozích nastavení, stiskněte tlačítko <span style=" font-style:italic;">Spustit rejstříkování nyní</span>. Podrobnosti budete moci upravit později. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Pokud chcete mít větší dohled, použijte následující odkazy pro upravení nastavení rejstříkování a rozvrhu.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">K těmto nástrojům lze přistupovat později v nabídce <span style=" font-style:italic;">Nastavení</span>.</p></body></html>
    
    
        Indexing configuration
        Nastavení rejstříkování
    
    
        This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.
        Toto vám umožní nastavit adresáře, které chcete rejstříkovat, a další parametry, jako jsou cesty pro vyloučené soubory, výchozí znakové sady atd.
    
    
        Indexing schedule
        Rozvrh rejstříkování
    
    
        This will let you chose between batch and real-time indexing, and set up an automatic  schedule for batch indexing (using cron).
        Toto vám umožní zvolit mezi dávkovým rejstříkováním a rejstříkováním ve skutečném čase, a nastavit automatický rozvrh pro dávkové rejstříkování (za použití cronu).
    
    
        Start indexing now
        Spustit rejstříkování nyní
    


    FragButs
    
        %1 not found.
        %1 nenalezen.
    
    
        %1:
 %2
        %1:
 %2
    
    
        Query Fragments
        Kousky hledání
    


    IdxSchedW
    
        Index scheduling setup
        Nastavení rozvrhu rejstříkování
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> rejstříkování může běžet nepřetržitě, soubory se rejstříkují při jejich změně, nebo běžet v samostatných intervalech. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Četba příručky vám může pomoci při rozhodování se mezi těmito přístupy (stiskněte F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Tento nástroj vám může pomoci s nastavením rozvrhu pro automatizaci běhů dávkového rejstříkování, nebo spustit rejstříkování ve skutečném čase, když se přihlásíte (nebo obojí, což zřídkakdy dává smysl). </p></body></html>
    
    
        Cron scheduling
        Rozvrh cron
    
    
        The tool will let you decide at what time indexing should run and will install a crontab entry.
        Nástroj vám umožní rozhodnout se, kdy má rejstříkování běžet, a nainstaluje záznam crontab.
    
    
        Real time indexing start up
        Spuštění rejstříkování ve skutečném čase
    
    
        Decide if real time indexing will be started when you log in (only for the default index).
        Rozhodněte, zda se rejstříkování ve skutečném čase spustí, když se přihlásíte (pouze pro výchozí rejstřík).
    


    ListDialog
    
        Dialog
        Dialog
    
    
        GroupBox
        Seskupovací okénko
    


    Main
    
        No db directory in configuration
        Nenastaven žádný databázový adresář
    
    
        Could not open database in 
        Nepodařilo se otevřít databázi v
    
    
        .
Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed.
        .
Klepněte na tlačítko Zrušit pro úpravu souboru s nastavením, předtím než se začne s rejstříkováním nebo na OK pro započetí s rejstříkováním.
    
    
        Configuration problem (dynconf
        Problém s nastavením (dynconf)
    
    
        "history" file is damaged or un(read)writeable, please check or remove it: 
        Soubor "history" je poškozen nebo nezapisovatelný/nečitelný. Prověřte jej, prosím, anebo jej odstraňte: 
    
    
        "history" file is damaged, please check or remove it: 
        Soubor "history" je poškozen. Prověřte jej, prosím, anebo jej odstraňte: 
    


    Preview
    
        &Search for:
        &Hledat:
    
    
        &Next
        &Další
    
    
        &Previous
        &Předchozí
    
    
        Match &Case
        Dbát na &psaní velkých a malých písmen
    
    
        Clear
        Vyprázdnit
    
    
        Creating preview text
        Vytváří se náhledový text
    
    
        Loading preview text into editor
        Náhledový text se nahrává do editoru
    
    
        Cannot create temporary directory
        Nelze vytvořit dočasný adresář
    
    
        Cancel
        Zrušit
    
    
        Close Tab
        Zavřít kartu
    
    
        Missing helper program: 
        Chybí pomocný program: 
    
    
        Can't turn doc into internal representation for 
        Nelze převést dokument do vnitřní podoby pro 
    
    
        Cannot create temporary directory: 
        Nelze vytvořit dočasný adresář: 
    
    
        Error while loading file
        Chyba při nahrávání souboru
    
    
        Form
        
    
    
        Tab 1
        
    
    
        Open
        Otevřít
    
    
        Canceled
        
    
    
        Error loading the document: file missing.
        
    
    
        Error loading the document: no permission.
        
    
    
        Error loading: backend not configured.
        
    
    
        Error loading the document: other handler error<br>Maybe the application is locking the file ?
        
    
    
        Error loading the document: other handler error.
        
    
    
        <br>Attempting to display from stored text.
        
    
    
        Could not fetch stored text
        
    


    PreviewTextEdit
    
        Show fields
        Ukázat pole
    
    
        Show main text
        Ukázat hlavní text
    
    
        Print
        Tisk
    
    
        Print Current Preview
        Vytisknout nynější náhled
    
    
        Show image
        Ukázat obrázek
    
    
        Select All
        Vybrat vše
    
    
        Copy
        Kopírovat
    
    
        Save document to file
        Uložit dokument do souboru
    
    
        Fold lines
        Zalomit řádky
    
    
        Preserve indentation
        Zachovat odsazení
    
    
        Open document
        
    


    QObject
    
        Global parameters
        Celkové parametry
    
    
        Local parameters
        Místní parametry
    
    
        <b>Customised subtrees
        <b>Vlastní podstromy
    
    
        The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty.
        Seznam podadresářů v rejstříkované hierarchii <br>kde některé parametry je potřeba nově vymezit. Výchozí: prázdný.
    
    
        <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons.
        <i>Parametry, které následují, jsou nastaveny buď na nejvyšší úrovni, pokud není v seznamu výše vybráno nic<br>nebo je vybrán prázdný řádek, nebo pro vybraný podadresář.<br>Adresáře můžete přidat anebo odebrat klepnutím na tlačítka +/-.
    
    
        Skipped names
        Přeskočené názvy
    
    
        These are patterns for file or directory  names which should not be indexed.
        Toto jsou vzory pro názvy souborů nebo adresářů, které se nemají rejstříkovat.
    
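A sketch of the corresponding recoll.conf setting, with hypothetical patterns (the skippedNames parameter name is assumed from the label above):

    # hypothetical example: skip editor backups, object files and VCS metadata by name
    skippedNames = *~ *.o .git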
    
        Default character set
        Výchozí znaková sada
    
    
        This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used.
        Toto je znaková sada, která se používá pro čtení souborů, které svou znakovou sadu vnitřně neurčují, např. čistě textové soubory.<br>Výchozí hodnota je prázdná a používá se hodnota prostředí NLS.
    
    
        Follow symbolic links
        Sledovat symbolické odkazy
    
    
        Follow symbolic links while indexing. The default is no, to avoid duplicate indexing
        Během rejstříkování sledovat symbolické odkazy. Výchozí nastavení je ne, kvůli vyvarování se dvojitého rejstříkování
    
    
        Index all file names
        Rejstříkovat všechny souborové názvy
    
    
        Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true
        Rejstříkovat všechny názvy souborů, jejichž obsah nelze určit nebo zpracovat (žádný nebo nepodporovaný MIME typ). Výchozí hodnota je ano
    
    
        Beagle web history
        Internetová historie Beagle
    
    
        Search parameters
        Parametry hledání
    
    
        Web history
        Historie webu
    
    
        Default<br>character set
        Výchozí<br>znaková sada
    
    
        Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environnement is used.
        Toto je znaková sada, která se používá pro čtení souborů, které svou znakovou sadu vnitřně neurčují, např. čistě textové soubory.<br>Výchozí hodnota je prázdná a používá se hodnota prostředí NLS.
    
    
        Ignored endings
        Přehlížená zakončení
    
    
        These are file name endings for files which will be indexed by name only 
(no MIME type identification attempt, no decompression, no content indexing).
        Toto jsou zakončení souborů pro soubory, které se budou rejstříkovat výhradně podle svého názvu
(žádné určování typu MIME, žádné rozbalování, žádné rejstříkování obsahu).
    
    
        <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons.
        
    


    QWidget
    
        Create or choose save directory
        Vytvořit nebo vybrat ukládací adresář
    
    
        Choose exactly one directory
        Vybrat přesně jeden adresář
    
    
        Could not read directory: 
        Nepodařilo se číst z adresáře: 
    
    
        Unexpected file name collision, cancelling.
        Neočekávaný střet v souborovém názvu. Ruší se.
    
    
        Cannot extract document: 
        Nelze vytáhnout dokument: 
    
    
        &Preview
        &Náhled
    
    
        &Open
        &Otevřít
    
    
        Open With
        Otevřít s
    
    
        Run Script
        Spustit skript
    
    
        Copy &File Name
        Kopírovat název &souboru
    
    
        Copy &URL
        Kopírovat adresu (&URL)
    
    
        &Write to File
        &Zapsat do souboru
    
    
        Save selection to files
        Uložit výběr do souborů
    
    
        Preview P&arent document/folder
        Náhled na &rodičovský dokument/složku
    
    
        &Open Parent document/folder
        &Otevřít rodičovský dokument/složku
    
    
        Find &similar documents
        Najít &podobné dokumenty
    
    
        Open &Snippets window
        Otevřít okno s úr&yvky
    
    
        Show subdocuments / attachments
        Ukázat podřízené dokumenty/přílohy
    


    QxtConfirmationMessage
    
        Do not show again.
        Neukazovat znovu.
    


    RTIToolW
    
        Real time indexing automatic start
        Automatické spuštění rejstříkování ve skutečném čase
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> rejstříkování může být nastaveno tak, aby běželo jako démon a aktualizovalo rejstřík při změně souborů, ve skutečném čase. Získáte tak vždy nejnovější rejstřík, ale prostředky systému se při tom používají nepřetržitě.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
    
    
        Start indexing daemon with my desktop session.
        Spustit rejstříkovacího démona s mým sezením pracovní plochy.
    
    
        Also start indexing daemon right now.
        Spustit rejstříkovacího démona ihned.
    
    
        Replacing: 
        Nahrazení:
    
    
        Replacing file
        Nahrazení souboru
    
    
        Can't create: 
        Nelze vytvořit: 
    
    
        Warning
        Varování
    
    
        Could not execute recollindex
        Nepodařilo se spustit recollindex
    
    
        Deleting: 
        Mazání:
    
    
        Deleting file
        Smazání souboru
    
    
        Removing autostart
        Odstranění automatického spuštění
    
    
        Autostart file deleted. Kill current process too ?
        Soubor automatického spuštění smazán. Zabít i současný proces?
    


    RclMain
    
        About Recoll
        O programu Recoll
    
    
        Executing: [
        Provádí se: [
    
    
        Cannot retrieve document info from database
        Nelze získat informace o dokumentu z databáze
    
    
        Warning
        Varování
    
    
        Can't create preview window
        Nelze vytvořit náhledové okno
    
    
        Query results
        Výsledky hledání
    
    
        Document history
        Historie dokumentu
    
    
        History data
        Historická data
    
    
        Indexing in progress: 
        Rejstříkuje se: 
    
    
        Files
        Soubory
    
    
        Purge
        Vyčistit
    
    
        Stemdb
        Kmeny slov
    
    
        Closing
        Zavření
    
    
        Unknown
        Neznámý
    
    
        This search is not active any more
        Toto hledání už není činné
    
    
        Can't start query: 
        Nelze spustit hledání:
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeconf file
        Chybový příkaz pro prohlížeč pro %1: [%2]
Prověřte soubor mimeconf
    
    
        Cannot extract document or create temporary file
        Nelze vytáhnout dokument nebo vytvořit dočasný soubor
    
    
        (no stemming)
        Žádné rozšíření kmene slova
    
    
        (all languages)
        Všechny jazyky
    
    
        error retrieving stemming languages
        Chyba při vyhledání jazyků s kmeny slov
    
    
        Update &Index
        Obnovit &rejstřík
    
    
        Indexing interrupted
        Rejstříkování přerušeno
    
    
        Stop &Indexing
        Zastavit &rejstříkování
    
    
        All
        Vše
    
    
        media
        Multimedia
    
    
        message
        Zpráva
    
    
        other
        Jiné
    
    
        presentation
        Prezentace
    
    
        spreadsheet
        Tabulky
    
    
        text
        Text
    
    
        sorted
        Tříděno
    
    
        filtered
        Filtrováno
    
    
        External applications/commands needed and not found for indexing your file types:


        Pro rejstříkování vašich souborových typů jsou potřeba vnější programy/příkazy, které ale nebyly nalezeny:


    
    
        No helpers found missing
        Nebyly nalezeny žádné chybějící pomocné programy
    
    
        Missing helper programs
        Chybí pomocné programy
    
    
        Document category filter
        Filtr pro skupinu dokumentu
    
    
        No external viewer configured for mime type [
        Žádný vnější prohlížeč nebyl nastaven pro MIME typ [
    
    
        The viewer specified in mimeview for %1: %2 is not found.
Do you want to start the  preferences dialog ?
        Prohlížeč zadaný v souboru mimeview pro %1: %2 nebyl nalezen.
Chcete spustit dialog s nastavením?
    
    
        Can't access file: 
        Nelze přistoupit k souboru: 
    
    
        Can't uncompress file: 
        Nelze rozbalit soubor: 
    
    
        Save file
        Uložit soubor
    
    
        Result count (est.)
        Počet výsledků (odhad)
    
    
        Query details
        Podrobnosti o hledání
    
    
        Could not open external index. Db not open. Check external indexes list.
        Nepodařilo se otevřít vnější rejstřík. Databáze neotevřena. Prověřte seznam vnějších rejstříků.
    
    
        No results found
        Nenalezeny žádné výsledky
    
    
        None
        Žádný
    
    
        Updating
        Obnova
    
    
        Done
        Hotovo
    
    
        Monitor
        Dohled
    
    
        Indexing failed
        Rejstříkování se nezdařilo
    
    
        The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone
        Nynější rejstříkovací proces nebyl spuštěn z tohoto rozhraní. Klepněte na OK pro jeho zabití i přesto, nebo na Zrušit, abyste jej nechali běžet
    
    
        Erasing index
        Smazání rejstříku
    
    
        Reset the index and start from scratch ?
        Nastavit rejstřík znovu a začít od nuly?
    
    
        Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program
        Hledání běží.<br>Kvůli omezením rejstříkovací knihovny<br>zrušení ukončí program
    
    
        Error
        Chyba
    
    
        Index not open
        Rejstřík neotevřen
    
    
        Index query error
        Chyba při hledání v rejstříku
    
    
        Indexed Mime Types
        Rejstříkované mime typy
    
    
        Content has been indexed for these mime types:
        Obsah byl rejstříkován pro tyto MIME typy:
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel.
        Rejstřík není pro tento soubor nejnovější. Odmítá se riziko, že by byl ukázán špatný záznam. Klepněte na OK pro obnovení rejstříku pro tento soubor, pak, až bude rejstříkování hotovo, spusťte dotaz znovu. Jinak klepněte na Zrušit.
    
    
        Can't update index: indexer running
        Nelze obnovit rejstřík: běží rejstříkovač
    
    
        Indexed MIME Types
        Rejstříkované MIME typy
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeview file
        Chybový příkaz pro prohlížeč pro %1: [%2]
Prověřte soubor mimeview
    
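The mimeview file referred to in this message maps MIME types to viewer command lines. A minimal sketch, where the viewer application, the [view] section name and the use of the %f (file name) placeholder are assumptions to be checked against the mimeview file shipped with Recoll:

    # hypothetical mimeview entry
    [view]
    application/pdf = evince %f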
    
        Viewer command line for %1 specifies both file and parent file value: unsupported
        Příkaz pro prohlížeč pro %1 stanovuje jak hodnotu souboru tak hodnotu rodičovského souboru: nepodporováno
    
    
        Cannot find parent document
        Nelze najít rodičovský dokument
    
    
        Indexing did not run yet
        Rejstříkování ještě neběželo
    
    
        External applications/commands needed for your file types and not found, as stored by the last indexing pass in 
        Pro vaše souborové typy jsou potřeba vnější programy/příkazy, které ale nebyly nalezeny, jak byly uloženy při posledním rejstříkování v 
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry.
        Rejstřík není pro tento soubor nejnovější. Ukázání nesprávného záznamu bylo zamítnuto.
    
    
        Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel.
        Klepněte na tlačítko pro aktualizaci rejstříku pro tento soubor, potom dotaz, až bude rejstříkování hotovo, spusťte znovu. Jinak klepněte na Zrušit.
    
    
        Indexer running so things should improve when it's done
        Rejstříkovač běží, takže věci by se po dokončení rejstříkování měly zlepšit
    
    
        Sub-documents and attachments
        Podřízené dokumenty a přílohy
    
    
        Document filter
        Filtr dokumentu
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry. 
        Rejstřík není pro tento soubor nejnovější. Ukázání nesprávného záznamu bylo zamítnuto. 
    
    
        Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. 
        Klepněte na tlačítko pro aktualizaci rejstříku pro tento soubor, potom hledání, až bude rejstříkování hotovo, spusťte znovu. 
    
    
        The indexer is running so things should improve when it's done. 
        Rejstříkovač běží, takže věci by se po dokončení rejstříkování měly zlepšit. 
    
    
        The document belongs to an external indexwhich I can't update. 
        Dokument je součástí vnějšího rejstříku, který nelze aktualizovat. 
    
    
        Click Cancel to return to the list. Click Ignore to show the preview anyway. 
        Klepněte na tlačítko Zrušit pro návrat do seznamu. Klepněte na tlačítko Přehlížet, aby byl přesto ukázán náhled. 
    
    
        Duplicate documents
        Zdvojené dokumenty
    
    
        These Urls ( | ipath) share the same content:
        Tyto adresy ( | ipath) sdílejí totožný obsah:
    
    
        Bad desktop app spec for %1: [%2]
Please check the desktop file
        Chybná specifikace aplikace pro %1: [%2]
Prověřte soubor pracovní plochy
    
    
        Index locked
        Rejstřík uzamknut
    
    
        The current indexing process was not started from this interface, can't kill it
        Nynější rejstříkovací proces nebyl spuštěn z tohoto rozhraní. Nelze jej ukončit
    
    
        Bad paths
        Špatné cesty
    
    
        Bad paths in configuration file:

        Špatné cesty v souboru s nastavením:
    
    
        Selection patterns need topdir
        Výběrové vzory potřebují počáteční adresář
    
    
        Selection patterns can only be used with a start directory
        Výběrové vzory lze použít jen s počátečním adresářem
    
    
        The document belongs to an external index which I can't update. 
        Dokument je součástí vnějšího rejstříku, který nelze aktualizovat. 
    
    
        Click Cancel to return to the list. <br>Click Ignore to show the preview anyway (and remember for this session).
        Klepněte na tlačítko Zrušit pro návrat do seznamu. <br>Klepněte na tlačítko Přehlížet pro ukázání náhledu tak jako tak (zapamatovat si pro toto sezení).
    
    
        No search
        Žádné hledání
    
    
        No preserved previous search
        Žádné zachované předchozí hledání
    
    
        Choose file to save
        Vybrat soubor k uložení
    
    
        Saved Queries (*.rclq)
        Uložené dotazy (*.rclq)
    
    
        Write failed
        Nepodařilo se zapsat
    
    
        Could not write to file
        Nepodařilo se zapsat do souboru
    
    
        Read failed
        Nepodařilo se přečíst
    
    
        Could not open file: 
        Nepodařilo se otevřít soubor: 
    
    
        Load error
        Chyba při nahrávání
    
    
        Could not load saved query
        Nepodařilo se nahrát uložené hledání
    
    
        Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location.
        Otevírá se dočasná kopie. Úpravy budou ztraceny, pokud je neuložíte<br/>do trvalého umístění.
    
    
        Do not show this warning next time (use GUI preferences to restore).
        Neukazovat toto varování příště (použít nastavení uživatelského rozhraní pro obnovení).
    
    
        Unknown indexer state. Can't access webcache file.
        Neznámý stav rejstříkovače. Nelze přistupovat k souboru s internetovou vyrovnávací pamětí.
    
    
        Indexer is running. Can't access webcache file.
        Rejstříkovač běží. Nelze přistupovat k souboru s internetovou vyrovnávací pamětí.
    
    
        Index scheduling
        Rozvržení rejstříkování
    
    
        Sorry, not available under Windows for now, use the File menu entries to update the index
        Promiňte, toto nyní není v systému Windows dostupné. K aktualizaci rejstříku použijte položky v nabídce Soubor
    
    
        Disabled because the real time indexer was not compiled in.
        Zakázáno, protože podpora rejstříkování ve skutečném čase nebyla zakompilována.
    
    
        This configuration tool only works for the main index.
        Tento nastavovací nástroj pracuje jen pro hlavní rejstřík.
    
    
        Can't set synonyms file (parse error?)
        Nelze nastavit soubor se slovy majícími stejný význam (synonyma). Chyba při zpracování?
    
    
         with additional message: 
         s dodatečnou zprávou: 
    
    
        Non-fatal indexing message: 
        Nekritická zpráva o rejstříkování: 
    
    
        Types list empty: maybe wait for indexing to progress?
        Seznam typů je prázdný: možná počkat na pokračování rejstříkování?
    
    
        Viewer command line for %1 specifies parent file but URL is http[s]: unsupported
        Příkaz pro prohlížeč pro %1 stanovuje rodičovský soubor, ale adresa (URL) je http[s]: nepodporováno
    
    
        Tools
        Nástroje
    
    
        Results
        Výsledky
    
    
        Content has been indexed for these MIME types:
        
    
    
        Empty or non-existant paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index):

        
    
    
        Indexing done
        
    
    
        Can't update index: internal error
        
    
    
        Index not up to date for this file.<br>
        
    
    
        <em>Also, it seems that the last index update for the file failed.</em><br/>
        
    
    
        Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br>
        
    
    
        Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/>
        
    
    
        documents
        
    
    
        document
        
    
    
        files
        
    
    
        file
        
    
    
        errors
        
    
    
        error
        
    
    
        total files)
        
    
    
        No information: initial indexing not yet performed.
        
    


    RclMainBase
    
        Previous page
        Předchozí strana
    
    
        Next page
        Další strana
    
    
        &File
        &Soubor
    
    
        E&xit
        &Ukončit
    
    
        &Tools
        &Nástroje
    
    
        &Help
        &Nápověda
    
    
        &Preferences
        &Nastavení
    
    
        Search tools
        Nástroje pro hledání
    
    
        Result list
        Seznam s výsledky
    
    
        &About Recoll
        &O programu Recoll
    
    
        Document &History
        &Historie dokumentu
    
    
        Document  History
        Historie dokumentu
    
    
        &Advanced Search
        &Pokročilé hledání
    
    
        Advanced/complex  Search
        Pokročilé/Složené hledání
    
    
        &Sort parameters
        Parametry &třídění
    
    
        Sort parameters
        Parametry třídění
    
    
        Next page of results
        Další strana s výsledky
    
    
        Previous page of results
        Předchozí strana s výsledky
    
    
        &Query configuration
        Nastavení &hledání
    
    
        &User manual
        &Uživatelská příručka
    
    
        Recoll
        Recoll
    
    
        Ctrl+Q
        Ctrl+Q
    
    
        Update &index
        Obnovit &rejstřík
    
    
        Term &explorer
        Průzkumník &výrazů
    
    
        Term explorer tool
        Nástroj průzkumníka výrazů
    
    
        External index dialog
        Dialog pro vnější rejstříkování
    
    
        &Erase document history
        &Vymazat historii dokumentu
    
    
        First page
        První strana
    
    
        Go to first page of results
        Jít na první stranu s výsledky
    
    
        &Indexing configuration
        Nastavení &rejstříkování
    
    
        All
        Vše
    
    
        &Show missing helpers
        &Ukázat chybějící pomocné programy
    
    
        PgDown
        PgDown
    
    
        PgUp
        PgUp
    
    
        &Full Screen
        &Celá obrazovka
    
    
        F11
        F11
    
    
        Shift+Home
        Shift+Home
    
    
        Full Screen
        Na celou obrazovku
    
    
        &Erase search history
        &Vymazat historii hledání
    
    
        sortByDateAsc
        Třídit podle data vzestupně
    
    
        Sort by dates from oldest to newest
        Roztřídit podle data od nejstaršího po nejnovější
    
    
        sortByDateDesc
        Třídit podle data sestupně
    
    
        Sort by dates from newest to oldest
        Roztřídit podle data od nejnovějšího po nejstarší
    
    
        Show Query Details
        Ukázat podrobnosti hledání
    
    
        Show results as table
        Ukázat výsledky jako tabulku
    
    
        &Rebuild index
        &Sestavit rejstřík znovu
    
    
        &Show indexed types
        &Ukázat rejstříkované typy
    
    
        Shift+PgUp
        Shift+PgUp
    
    
        &Indexing schedule
        Rozvrh &rejstříkování
    
    
        E&xternal index dialog
        Dialog pro &vnější rejstříkování
    
    
        &Index configuration
        Nastavení &rejstříku
    
    
        &GUI configuration
        Nastavení uživatelského roz&hraní
    
    
        &Results
        &Výsledky
    
    
        Sort by date, oldest first
        Roztřídit podle data, nejprve nejstarší
    
    
        Sort by date, newest first
        Roztřídit podle data, nejprve nejnovější
    
    
        Show as table
        Ukázat jako tabulku
    
    
        Show results in a spreadsheet-like table
        Ukázat výsledky v tabulce na způsob sešitu s listy v tabulkovém kalkulátoru
    
    
        Save as CSV (spreadsheet) file
        Uložit jako soubor CSV (tabulkový dokument)
    
    
        Saves the result into a file which you can load in a spreadsheet
        Uložit výsledek do souboru, jejž můžete nahrát jako sešit s listy v tabulkovém kalkulátoru
    
    
        Next Page
        Další strana
    
    
        Previous Page
        Předchozí strana
    
    
        First Page
        První strana
    
    
        Query Fragments
        Kousky hledání
    
    
            With failed files retrying
        S novým pokusem o zpracování selhavších souborů
    
    
        Next update will retry previously failed files
        Příští aktualizace rejstříku se znovu pokusí zpracovat dříve selhavší soubory
    
    
        &View
        &Pohled
    
    
        Missing &helpers
        Chybějící &pomocné programy
    
    
        Indexed &MIME types
        Rejstříkované &MIME typy
    
    
        Indexing &schedule
        &Rozvrh rejstříkování
    
    
        Enable synonyms
        Povolit slova mající stejný význam
    
    
        Save last query
        Uložit poslední hledání
    
    
        Load saved query
        Nahrát uložené hledání
    
    
        Special Indexing
        Zvláštní rejstříkování
    
    
        Indexing with special options
        Rejstříkování se zvláštními volbami
    
    
        Index &statistics
        &Statistika rejstříku
    
    
        Webcache Editor
        Editor internetové vyrovnávací paměti
    
    
        Trigger incremental pass
        Spustit přírůstkové procházení
    


    RclTrayIcon
    
        Restore
        Obnovit
    
    
        Quit
        Ukončit
    


    RecollModel
    
        File name
        Název souboru
    
    
        Mime type
        Mime typ
    
    
        Date
        Datum
    
    
        Abstract
        Výtah
    
    
        Author
        Autor
    
    
        Document size
        Velikost dokumentu
    
    
        Document date
        Datum dokumentu
    
    
        File size
        Velikost souboru
    
    
        File date
        Datum souboru
    
    
         Ipath
        Ipath
    
    
        Keywords
        Klíčová slova
    
    
        Original character set
        Původní znaková sada
    
    
        Relevancy rating
        Hodnocení závažnosti
    
    
        Title
        Název
    
    
        URL
        Adresa (URL)
    
    
        Mtime
        Mtime
    
    
        Date and time
        Datum a čas
    
    
        Ipath
        Ipath
    
    
        MIME type
        Typ MIME
    
    
        Can't sort by inverse relevance
        
    


    ResList
    
        Result list
        Výsledky
    
    
        Unavailable document
        Nedostupný dokument
    
    
        Previous
        Předchozí
    
    
        Next
        Další
    
    
        <p><b>No results found</b><br>
        <p><b>Nebyly nalezeny žádné výsledky</b><br>
    
    
        &Preview
        &Náhled
    
    
        Copy &URL
        Kopírovat adresu (&URL)
    
    
        Find &similar documents
        Najít &podobné dokumenty
    
    
        Query details
        Podrobnosti o hledání
    
    
        (show query)
        (ukázat hledání)
    
    
        Copy &File Name
        Kopírovat název &souboru
    
    
        Document history
        Historie dokumentu
    
    
        Preview
        Náhled
    
    
        Open
        Otevřít
    
    
        <p><i>Alternate spellings (accents suppressed): </i>
        <p><i>Náhradní pravopis (přízvuky potlačeny): </i>
    
    
        &Write to File
        &Zapsat do souboru
    
    
        Preview P&arent document/folder
        Náhled na &rodičovský dokument/složku
    
    
        &Open Parent document/folder
        &Otevřít rodičovský dokument/složku
    
    
        &Open
        &Otevřít
    
    
        Documents
        Dokumenty
    
    
        out of at least
        z nejméně
    
    
        for
        pro
    
    
        <p><i>Alternate spellings: </i>
        <p><i>Náhradní pravopis: </i>
    
    
        Open &Snippets window
        Otevřít okno s úr&yvky
    
    
        Duplicate documents
        Zdvojené dokumenty
    
    
        These Urls ( | ipath) share the same content:
        Tyto adresy ( | ipath) sdílejí totožný obsah:
    
    
        Result count (est.)
        Počet výsledků (odhad)
    
    
        Snippets
        Úryvky
    


    ResTable
    
        &Reset sort
        Nastavit třídění &znovu
    
    
        &Delete column
        &Smazat sloupec
    
    
        Add "
        Přidat "
    
    
        " column
        " sloupec
    
    
        Save table to CSV file
        Uložit tabulku jako soubor CSV
    
    
        Can't open/create file: 
        Nelze otevřít/vytvořit soubor: 
    
    
        &Preview
        &Náhled
    
    
        &Open
        &Otevřít
    
    
        Copy &File Name
        Kopírovat název &souboru
    
    
        Copy &URL
        Kopírovat adresu (&URL)
    
    
        &Write to File
        &Zapsat do souboru
    
    
        Find &similar documents
        Najít &podobné dokumenty
    
    
        Preview P&arent document/folder
        Náhled na &rodičovský dokument/složku
    
    
        &Open Parent document/folder
        &Otevřít rodičovský dokument/složku
    
    
        &Save as CSV
        &Uložit jako CSV
    
    
        Add "%1" column
        Přidat sloupec "%1"
    


    ResTableDetailArea
    
        &Preview
        &Náhled
    
    
        &Open
        &Otevřít
    
    
        Copy &File Name
        Kopírovat název &souboru
    
    
        Copy &URL
        Kopírovat adresu (&URL)
    
    
        &Write to File
        &Zapsat do souboru
    
    
        Find &similar documents
        Najít &podobné dokumenty
    
    
        Preview P&arent document/folder
        Náhled na &rodičovský dokument/složku
    
    
        &Open Parent document/folder
        &Otevřít rodičovský dokument/složku
    


    ResultPopup
    
        &Preview
        &Náhled
    
    
        &Open
        &Otevřít
    
    
        Copy &File Name
        Kopírovat název &souboru
    
    
        Copy &URL
        Kopírovat adresu (&URL)
    
    
        &Write to File
        &Zapsat do souboru
    
    
        Save selection to files
        Uložit výběr do souborů
    
    
        Preview P&arent document/folder
        Náhled na &rodičovský dokument/složku
    
    
        &Open Parent document/folder
        &Otevřít rodičovský dokument/složku
    
    
        Find &similar documents
        Najít &podobné dokumenty
    
    
        Open &Snippets window
        Otevřít okno s úr&yvky
    
    
        Show subdocuments / attachments
        Ukázat podřízené dokumenty/přílohy
    


    SSearch
    
        Any term
        Jakýkoli výraz
    
    
        All terms
        Všechny výrazy
    
    
        File name
        Název souboru
    
    
        Completions
        Doplnění
    
    
        Select an item:
        Vyberte položku:
    
    
        Too many completions
        Příliš mnoho doplnění
    
    
        Query language
        Jazyk hledání
    
    
        Bad query string
        Špatný řetězec hledání
    
    
        Out of memory
        Není dostupná žádná další paměť
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date.<br>
 Two date interval exemples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  No actual parentheses allowed.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        Zadejte výraz jazyka hledání. Seznam:<br>
<i>term1 term2</i> : 'term1' a 'term2' do kteréhokoli pole.<br>
<i>field:term1</i> : 'term1' do pole 'field'.<br>
 Obvyklé názvy polí/synonyma:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudopole: dir, mime/format, type/rclcat, date.<br>
 Příklady intervalů dvou dat: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  Nepovoleny žádné závorky.<br>
<i>"term1 term2"</i> : větný úsek (musí se objevit přesně). Možné modifikátory:<br>
<i>"term1 term2"p</i> : neuspořádané hledání podle blízkosti s výchozí vzdáleností.<br>
Použijte odkaz <b>Ukázat hledání</b>, když máte o výsledku pochybnost, a podívejte se do příručky (&lt;F1>) na další podrobnosti.

    
    
        Enter file name wildcard expression.
        Zadejte žolíkový výraz (zástupný symbol) pro název souboru.
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Zde zadejte hledané výrazy. Stiskněte ESC SPC pro doplnění současného výrazu.
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br>
 Two date interval exemples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  You can use parentheses to make things clearer.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        Zadejte výraz jazyka hledání. Seznam:<br>
<i>term1 term2</i> : 'term1' a 'term2' do kteréhokoli pole.<br>
<i>field:term1</i> : 'term1' do pole 'field'.<br>
 Obvyklé názvy polí/synonyma:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudopole: dir, mime/format, type/rclcat, date, size.<br>
 Příklady intervalů dvou dat: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  Můžete použít kulaté závorky, aby byly věci zřetelnější.<br>
<i>"term1 term2"</i> : větný úsek (musí se objevit přesně). Možné modifikátory:<br>
<i>"term1 term2"p</i> : neuspořádané hledání podle blízkosti s výchozí vzdáleností.<br>
Použijte odkaz <b>Ukázat hledání</b>, když máte o výsledku pochybnost, a podívejte se do příručky (&lt;F1>) na další podrobnosti.

    
    
        Stemming languages for stored query: 
        Jazyky s kmeny slov pro uložená hledání: 
    
    
         differ from current preferences (kept)
         liší se od nynějšího nastavení (ponecháno)
    
    
        Auto suffixes for stored query: 
        Automatické přípony pro uložená hledání: 
    
    
        External indexes for stored query: 
        Vnější rejstříky pro uložená hledání: 
    
    
        Autophrase is set but it was unset for stored query
        Automatické tvoření slovních obratů je nastaveno, ale bylo zrušeno pro uložené hledání
    
    
        Autophrase is unset but it was set for stored query
        Automatické tvoření slovních obratů je zrušeno, ale bylo nastaveno pro uložené hledání
    
    
        Enter search terms here.
        
    


    SSearchBase
    
        SSearchBase
        SSearchBase
    
    
        Clear
        Smazat
    
    
        Ctrl+S
        Ctrl+S
    
    
        Erase search entry
        Smazat hledaný záznam
    
    
        Search
        Hledat
    
    
        Start query
        Spustit hledání
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Zde zadejte hledané výrazy. Stiskněte ESC SPC pro doplnění současného výrazu.
    
    
        Choose search type.
        Vyberte typ hledání.
    
    
        Show query history
        
    


    SearchClauseW
    
        SearchClauseW
        SearchClauseW
    
    
        Any of these
        jakýkoli z těchto
    
    
        All of these
        Všechny tyto
    
    
        None of these
        Žádný z těchto
    
    
        This phrase
        Tato slova
    
    
        Terms in proximity
        Výrazy v blízkosti
    
    
        File name matching
        Odpovídající názvy souborů
    
    
        Select the type of query that will be performed with the words
        Vyberte druh hledání, se kterým se slova budou hledat
    
    
        Number of additional words that may be interspersed with the chosen ones
        Počet slov, která se smějí nacházet mezi hledanými
    
    
        No field
        Žádné pole
    
    
        Any
        Jakýkoliv
    
    
        All
        Vše
    
    
        None
        Žádný
    
    
        Phrase
        Tato slova
    
    
        Proximity
        Blízkost
    
    
        File name
        Název souboru
    


    Snippets
    
        Snippets
        Úryvky
    
    
        about:blank
        about:blank
    
    
        Find:
        Hledat:
    
    
        Next
        Další
    
    
        Prev
        Předchozí
    
    
        X
        X
    


    SnippetsW
    
        Search
        Hledat
    
    
        <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p>
        <p>V rámci omezení hledání nebyla bohužel nalezena žádná shoda. Pravděpodobně je dokument velice velký a vyvíječ úryvků se v něm ztratil (nebo skončil ve škarpě)...</p>
    
    
        Sort By Relevance
        
    
    
        Sort By Page
        
    


    SortForm
    
        Date
        Datum
    
    
        Mime type
        Mime Type
    


    SortFormBase
    
        Sort Criteria
        Kritéria třídění
    
    
        Sort the
        Třídit
    
    
        most relevant results by:
        nejzávažnějších výsledků podle:
    
    
        Descending
        Sestupně
    
    
        Close
        Zavřít
    
    
        Apply
        Použít
    


    SpecIdxW
    
        Special Indexing
        Zvláštní rejstříkování
    
    
        Do not retry previously failed files.
        Nezkoušet znovu soubory, které předtím selhaly.
    
    
        Else only modified or failed files will be processed.
        Jinak jen změněné nebo selhavší soubory budou zpracovány.
    
    
        Erase selected files data before indexing.
        Vymazat před rejstříkováním data vybraných souborů.
    
    
        Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs).
        Adresář k rejstříkování včetně podadresářů. Musí být uvnitř rejstříkované oblasti<br>, jak je stanovena v souboru s nastavením (počáteční adresáře).
    
    
        Browse
        Procházet
    
    
        Start directory (else use regular topdirs):
        Začáteční adresář (jinak použít počáteční adresáře):
    
    
        Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set.
        Ponechat prázdné pro vybrání všech souborů. Můžete použít více vzorů oddělených mezerami.<br>Vzory s vloženými mezerami musejí být uzavřeny ve dvojitých uvozovkách.<br>Lze použít, jen když je nastaven začáteční cíl.
    
    
        Selection patterns:
        Výběrové vzory:
    
    
        Top indexed entity
        Nejvyšší rejstříkovaná položka
    
    
        Retry previously failed files.
        
    
    
        Start directory. Must be part of the indexed tree. Use full indexed area if empty.
        
    


    SpellBase
    
        Term Explorer
        Průzkumník výrazů
    
    
        &Expand 
        &Rozbalit 
    
    
        Alt+E
        Alt+E
    
    
        &Close
        &Zavřít
    
    
        Alt+C
        Alt+C
    
    
        Term
        Výraz
    
    
        No db info.
        Žádné informace o databázi.
    
    
        Match
        Shoda
    
    
        Case
        Rozlišování velkých a malých písmen
    
    
        Accents
        Přízvuky
    


    SpellW
    
        Wildcards
        Zástupné symboly
    
    
        Regexp
        Regulární výraz
    
    
        Spelling/Phonetic
        Pravopis/Hláskosloví
    
    
        Aspell init failed. Aspell not installed?
        Chyba při spuštění Aspellu. Aspell není nainstalován?
    
    
        Aspell expansion error. 
        Chyba rozšíření Aspell. 
    
    
        Stem expansion
        Rozšíření kmene slova
    
    
        error retrieving stemming languages
        Chyba při vyhledání jazyka s kmeny slov
    
    
        No expansion found
        Nenalezeno žádné rozšíření
    
    
        Term
        Výraz
    
    
        Doc. / Tot.
        Dok. / Tot.
    
    
        Index: %1 documents, average length %2 terms
        Rejstřík: %1 dokumentů, průměrná délka %2 výrazy(ů)
    
    
        Index: %1 documents, average length %2 terms.%3 results
        Rejstřík: %1 dokumentů, průměrná délka %2 výrazy(ů). %3 výsledky(ů)
    
    
        %1 results
        %1 výsledky(ů)
    
    
        List was truncated alphabetically, some frequent 
        Seznam byl zkrácen abecedně, některé četné 
    
    
        terms may be missing. Try using a longer root.
        pojmy mohou chybět. Zkuste použít delší kořen.
    
    
        Show index statistics
        Ukázat statistiku rejstříku
    
    
        Number of documents
        Počet dokumentů
    
    
        Average terms per document
        Průměrný počet výrazů na dokument
    
    
        Smallest document length
        Délka nejmenšího dokumentu
    
    
        Longest document length
        Délka nejdelšího dokumentu
    
    
        Database directory size
        Velikost adresáře s databází
    
    
        MIME types:
        Typy MIME:
    
    
        Item
        Položka
    
    
        Value
        Hodnota
    
    
        Smallest document length (terms)
        Nejmenší délka dokumentu (ve výrazech)
    
    
        Longest document length (terms)
        Největší délka dokumentu (ve výrazech)
    
    
        Results from last indexing:
        Výsledky posledního rejstříkování:
    
    
          Documents created/updated
        Dokumenty vytvořené nebo aktualizované
    
    
          Files tested
        Vyzkoušené soubory
    
    
          Unindexed files
        Nezrejstříkované soubory
    
    
        List files which could not be indexed (slow)
        Vypsat soubory, které se nepodařilo zrejstříkovat (pomalé)
    
    
        Spell expansion error. 
        Chyba v pravopisných návrzích. 
    


    UIPrefsDialog
    
        The selected directory does not appear to be a Xapian index
        Zdá se, že vybraný adresář není rejstříkem Xapian
    
    
        This is the main/local index!
        Toto je hlavní/místní rejstřík!
    
    
        The selected directory is already in the index list
        Vybraný adresář je již částí rejstříkového seznamu
    
    
        Select xapian index directory (ie: /home/buddy/.recoll/xapiandb)
        Vyberte adresář s rejstříkem Xapian (např.: /home/buddy/.recoll/xapiandb)
    
    
        error retrieving stemming languages
        Chyba při vyhledání jazyka s kmeny slov
    
    
        Choose
        Vybrat
    
    
        Result list paragraph format (erase all to reset to default)
        Formát odstavce seznamu s výsledky (vymazat všechny pro znovunastavení na výchozí)
    
    
        Result list header (default is empty)
        Záhlaví seznamu s výsledky (výchozí je prázdné)
    
    
        Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb)
        Vyberte adresář s nastavením pro Recoll nebo rejstříkový adresář Xapianu (např.: /home/me/.recoll nebo /home/me/.recoll/xapiandb)
    
    
        The selected directory looks like a Recoll configuration directory but the configuration could not be read
        Vybraný adresář vypadá jako adresář s nastavením pro Recoll, ale nastavení se nepodařilo přečíst
    
    
        At most one index should be selected
        Měl by být vybrán nejvýše jeden rejstřík
    
    
        Cant add index with different case/diacritics stripping option
        Nelze přidat rejstřík s odlišnou volbou pro velikost písmen/diakritiku
    
    
        Default QtWebkit font
        Výchozí písmo QtWebkit
    
    
        Any term
        Jakýkoli výraz
    
    
        All terms
        Všechny výrazy
    
    
        File name
        Název souboru
    
    
        Query language
        Jazyk hledání
    
    
        Value from previous program exit
        Hodnota obdržená z posledního ukončení programu
    


    UIPrefsDialogBase
    
        User interface
        Uživatelské rozhraní
    
    
        Number of entries in a result page
        Počet výsledků na stranu
    
    
        Result list font
        Písmo pro seznam s výsledky
    
    
        Helvetica-10
        Helvetica-10
    
    
        Opens a dialog to select the result list font
        Otevře dialog pro výběr písma seznamu výsledků
    
    
        Reset
        Nastavit znovu
    
    
        Resets the result list font to the system default
        Nastaví písmo pro seznam s výsledky znovu na výchozí hodnotu
    
    
        Auto-start simple search on whitespace entry.
        Automaticky spustit jednoduché hledání, když je do zadávacího pole pro hledání zadáno prázdné místo (mezera).
    
    
        Start with advanced search dialog open.
        Po spuštění automaticky otevřít dialog pro rozšířené hledání.
    
    
        Start with sort dialog open.
        Po spuštění automaticky otevřít dialog třídění.
    
    
        Search parameters
        Parametry hledání
    
    
        Stemming language
        Jazyk s kmeny slov
    
    
        Dynamically build abstracts
        Vytvářet přehledy dynamicky
    
    
        Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.
        Stanovení, zda se má vytvořit přehled pro výsledky v souvislosti s parametrem hledání. U velkých dokumentů může být pomalé.
    
    
        Replace abstracts from documents
        Nahradit přehledy v dokumentech
    
    
        Do we synthetize an abstract even if the document seemed to have one?
        Stanovení, zda se má vytvořit přehled i tehdy, když dokument již nějaký přehled obsahuje.
    
    
        Synthetic abstract size (characters)
        Délka vytvořeného přehledu (počet znaků)
    
    
        Synthetic abstract context words
        Počet souvisejících slov v přehledu
    
    
        External Indexes
        Vnější rejstříky
    
    
        Add index
        Přidat rejstřík
    
    
        Select the xapiandb directory for the index you want to add, then click Add Index
        Vyberte adresář xapiandb rejstříku, který chcete přidat, a potom klepněte na Přidat rejstřík
    
    
        Browse
        Procházet
    
    
        &OK
        &OK
    
    
        Apply changes
        Použít změny
    
    
        &Cancel
        Z&rušit
    
    
        Discard changes
        Zahodit změny
    
    
        Result paragraph<br>format string
        Řetězec formátu<br>pro výsledky
    
    
        Automatically add phrase to simple searches
        Automatické přidání vět do jednoduchého hledání
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        Hledání [Ježíš Kristus] se změní na [Ježíš OR Kristus OR (Ježíš PHRASE 2 Kristus)].
Tímto by měly být silněji zváženy výsledky, které obsahují přesné shody s hledaným slovem.
    
    
        User preferences
        Uživatelská nastavení
    
    
        Use desktop preferences to choose document editor.
        Použít nastavení pracovní plochy pro výběr editoru pro dokumenty.
    
    
        External indexes
        Vnější rejstříky
    
    
        Toggle selected
        Přepnout vybrané
    
    
        Activate All
        Zapnout vše
    
    
        Deactivate All
        Vypnout vše
    
    
        Remove selected
        Odstranit vybrané
    
    
        Remove from list. This has no effect on the disk index.
        Odstranit ze seznamu. Nemá to žádný účinek na uložený rejstřík.
    
    
        Remember sort activation state.
        Zapamatovat si stav zapnutí třídění.
    


    ViewAction
    
        Changing actions with different current values
        Mění se činnosti s odlišnými nynějšími hodnotami
    
    
        Mime type
        Mime typ
    
    
        Command
        Příkaz
    
    
        MIME type
        Typ MIME
    
    
        Desktop Default
        Výchozí plocha
    
    
        Changing entries with different current values
        Mění se záznamy s odlišnými nynějšími hodnotami
    


    ViewActionBase
    
        File type
        Typ souboru
    
    
        Action
        Činnost
    
    
        Select one or several file types, then click Change Action to modify the program used to open them
        Vyberte jeden nebo více datových typů a klepněte na "Změnit činnost" pro změnu programu přiřazeného k jejich otevření
    
    
        Change Action
        Změnit činnost
    
    
        Close
        Zavřít
    
    
        Native Viewers
        Prohlížeče
    
    
        Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults.
        Vyberte jeden nebo několik MIME typů a potom klepněte na "Změnit činnost"<br>Taktéž můžete tento dialog zavřít a zaškrtnout "Použít nastavení pracovní plochy"<br>v hlavním panelu, aby se tento seznam přehlížel a pracovní plocha se použila jako výchozí.
    
    
        Select one or several mime types then use the controls in the bottom frame to change how they are processed.
        Vyberte jeden nebo více MIME typů a použijte ovládací prvky v rámečku s tlačítky pro změnu způsobu, jakým jsou zpracovány.
    
    
        Use Desktop preferences by default
        Použít nastavení pracovní plochy jako výchozí
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Vyberte jeden nebo více datových typů a použijte ovládací prvky v rámečku níže pro změnu způsobu, jakým jsou zpracovány
    
    
        Exception to Desktop preferences
        Výjimka pro nastavení pracovní plochy
    
    
        Action (empty -> recoll default)
        Činnost (prázdné -> výchozí pro Recoll)
    
    
        Apply to current selection
        Použít na nynější výběr
    
    
        Recoll action:
        Činnost Recollu:
    
    
        current value
        nynější hodnota
    
    
        Select same
        Vybrat stejný
    
    
        <b>New Values:</b>
        <b>Nové hodnoty:</b>
    


    Webcache
    
        Webcache editor
        Editor internetové vyrovnávací paměti
    
    
        Search regexp
        Hledat regulární výraz
    


    WebcacheEdit
    
        Copy URL
        Kopírovat adresu (URL)
    
    
        Unknown indexer state. Can't edit webcache file.
        Neznámý stav rejstříkovače. Nelze upravovat soubor s internetovou vyrovnávací pamětí.
    
    
        Indexer is running. Can't edit webcache file.
        Rejstříkovač běží. Nelze upravovat soubor s internetovou vyrovnávací pamětí.
    
    
        Delete selection
        Smazat výběr
    
    
        Webcache was modified, you will need to run the indexer after closing this window.
        Internetová vyrovnávací paměť byla změněna. Po zavření tohoto okna budete muset spustit rejstříkovač.
    


    WebcacheModel
    
        MIME
        MIME
    
    
        Url
        URL
    


    confgui::ConfBeaglePanelW
    
        Steal Beagle indexing queue
        Ukrást rejstříkovací řadu Beagle
    
    
        Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin)
        Beagle NESMÍ běžet. Povolí zpracování řady Beagle pro rejstříkování internetové historie Firefoxu.<br>(také byste měl nainstalovat přídavný modul Beagle pro Firefox)
    
    
        Entries will be recycled once the size is reached
        Záznamy budou opětně použity, jakmile bude velikost dosažena
    
    
        Web page store directory name
        Název adresáře pro ukládání internetové stránky
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        Název pro adresář, kam se mají ukládat kopie navštívených internetových stránek.<br>Neabsolutní cesta je vzata relativně k adresáři s nastavením.
    
    
        Max. size for the web store (MB)
        Největší velikost pro ukládání internetových stránek (MB)
    
    
        Process the WEB history queue
        Zpracovat řadu historie WEBu
    
    
        Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin)
        Povolí rejstříkování Firefoxem navštívených stránek.<br>(také je potřeba, abyste nainstalovali přídavný modul Recollu pro Firefox)
    
    
        Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).
        Záznamy budou po dosažení velikosti vráceny do původního stavu.<br>Skutečně dává smysl jen zvětšení velikosti, protože zmenšení hodnoty neoseká stávající soubor (na konci jen plýtvání místem).
    


    confgui::ConfIndexW
    
        Can't write configuration file
        Nelze zapsat soubor s nastavením
    


    confgui::ConfParamFNW
    
        Choose
        Vybrat
    


    confgui::ConfParamSLW
    
        +
        +
    
    
        -
        -
    
    
        Add entry
        
    
    
        Delete selected entries
        
    
    
        ~
        
    
    
        Edit selected entries
        
    


    confgui::ConfSearchPanelW
    
        Automatic diacritics sensitivity
        Automaticky rozlišovat diakritická znaménka
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        <p>Zapnout automaticky rozlišování diakritických znamének, když hledaný výraz obsahuje znaky s diakritikou (ne v unac_except_trans). Jinak musíte pro určení citlivosti na diakritiku použít jazyk dotazu a modifikátor <i>D</i>.
    
    
        Automatic character case sensitivity
        Automaticky rozlišovat velká a malá písmena
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        <p>Zapnout automaticky rozlišování velkých a malých písmen, když záznam obsahuje velká písmena (mimo na prvním místě). Jinak musíte pro určení citlivosti na velikost písmen použít jazyk dotazu a modifikátor <i>C</i>.
    
    
        Maximum term expansion count
        Největší počet rozšíření výrazu
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        <p>Největší počet rozšíření pro jeden výraz (např. při použití žolíků). Standardní výchozí hodnota 10 000 je rozumná a zabrání tomu, aby se hledaný pojem jevil jako zamrzlý, zatímco je procházen seznam pojmů.
    
    
        Maximum Xapian clauses count
        Největší počet výrazů Xapian
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        <p>Největší počet základních výrazů, které přidáme do jednoho dotazu Xapian. V některých případech se mohou výsledky rozšíření výrazu vynásobit, a my se chceme vyvarovat nadbytečné spotřebě paměti. Standardní výchozí hodnota 100 000 by měla ve většině případů naprosto postačovat a hodit se k typickému současnému sestavení zařízení (hardware).
    


    confgui::ConfSubPanelW
    
        Global
        Celkové
    
    
        Max. compressed file size (KB)
        Největší velikost zabaleného souboru (KB)
    
    
        This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        Tato hodnota nastavuje práh, za kterým nebudou zabalené soubory zpracovávány. Nastavte na -1 pro žádné omezení, na 0 pro vůbec žádné rozbalování.
    
    
        Max. text file size (MB)
        Největší velikost textového souboru (MB)
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        Tato hodnota nastavuje práh, za kterým nebudou textové soubory zpracovávány. Nastavte na -1 pro žádné omezení.
Je to kvůli vyloučení obřích souborů se zápisem z rejstříkování.
    
    
        Text file page size (KB)
        Velikost stránky textového souboru (KB)
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text  files (ie: log files).
        Pokud je nastavena tato hodnota (nerovná se -1), textové soubory budou pro rejstříkování rozděleny na kousky o této velikosti.
To pomůže při prohledávání velmi velkých textových souborů (např. souborů se zápisem).
    
    
        Max. filter exec. time (S)
        Největší čas na provedení filtru (s)
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loopSet to -1 for no limit.

        Vnější filtry pracující déle než po tak dlouho budou přerušeny. Je to pro ten zřídkavý případ (např. postscript), kdy by dokument mohl zapříčinit filtr loopSet na -1 pro žádné omezení.

    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        Vnější filtry pracující déle než po tak dlouhou dobu budou přerušeny. Je to pro ten zřídkavý případ (např. postscript), kdy by dokument mohl zapříčinit vejití filtru do smyčky. Nastavte na -1 pro žádné omezení.

    
    
        Only mime types
        Pouze typy MIME
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        Vybraný seznam rejstříkovaných typů MIME.<br>Nic jiného se nebude rejstříkovat. Obyčejně je seznam prázdný a nečinný
    
    
        Exclude mime types
        Vyloučené typy MIME
    
    
        Mime types not to be indexed
        Typy MIME, které se nemají rejstříkovat
    


    confgui::ConfTopPanelW
    
        Top directories
        Počáteční adresáře
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Seznam adresářů, ve kterých začíná rejstříkování včetně podsložek. Výchozí: adresář Home.
    
    
        Skipped paths
        Přeskočené cesty
    
    
        These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Názvy adresářů, které se nebudou rejstříkovat.<br>Může obsahovat zástupné symboly (žolíky). Musí odpovídat cestám, které rejstříkovač vidí (např. pokud v počátečních adresářích stojí '/home/me' a '/home' je ve skutečnosti odkazem na '/usr/home', potom by byl správným zápisem skippedPath '/home/me/tmp*' a ne '/usr/home/me/tmp*')
    
    
        Stemming languages
        Jazyky s kmeny slov
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Jazyky, pro které se vytvoří<br>slovníky pro rozšíření kmenů slov.
    
    
        Log file name
        Název pro soubor se zápisem
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        Soubor, do kterého se zapíše výstupní zpráva.<br>Pro výstupy na terminál použijte 'stderr'
    
    
        Log verbosity level
        Úroveň podrobnosti zápisu
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Tato hodnota upravuje množství zpráv,<br>od pouze chyb až po velké množství dat zajímavých pro ladění.
    
    
        Index flush megabytes interval
        Interval v megabytech pro vymazání rejstříku
    
    
        This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Tato hodnota upravuje množství dat, která jsou rejstříkována mezi spláchnutími na disk.<br>Pomáhá to řídit použití paměti rejstříkovače. Výchozí je 10 MB 
    
    
        Max disk occupation (%)
        Největší obsazení disku (%)
    
    
        This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default).
        Jde o procentní podíl využívání disku, kdy rejstříkování selže a zastaví se (kvůli vyhnutí se zaplnění vašeho disku).<br>0 znamená žádné omezení (tato hodnota je nastavena jako výchozí).
    
    
        No aspell usage
        Nepoužívat aspell
    
    
        Aspell language
        Jazyk aspellu
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works.To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Jazyk pro slovník aspellu. Mělo by to vypadat jako 'en' nebo 'fr' nebo 'cs'...<br>Pokud není tato hodnota nastavena, použije se pro její vypočítání prostředí NLS, což obvykle funguje. Pro získání představy o tom, co je ve vašem systému nainstalováno, napište 'aspell config' a hledejte soubory .dat v adresáři 'data-dir'. 
    
    
        Database directory name
        Název adresáře s databází
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the  configuration directory. The default is 'xapiandb'.
        Název pro adresář, v němž se má ukládat rejstřík.<br>Neabsolutní cesta je vzata relativně k adresáři s nastavením. Výchozí je 'xapiandb'.
    
    
        Use system's 'file' command
        Použít příkaz 'file'
    
    
        Use the system's 'file' command if internal<br>mime type identification fails.
        Použít příkaz 'file', když vnitřní<br>rozpoznání MIME typu selže.
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        Zakáže používání aspellu pro vytvoření přibližné podoby pravopisu v nástroji průzkumníka výrazů.<br> Užitečné, pokud aspell není přítomen anebo nepracuje. 
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Jazyk pro slovník aspellu. Mělo by to vypadat jako 'en' nebo 'fr' nebo 'cs'...<br>Pokud není tato hodnota nastavena, použije se pro její vypočítání prostředí NLS, což obvykle funguje. Pro získání představy o tom, co je ve vašem systému nainstalováno, napište 'aspell config' a hledejte soubory .dat v adresáři 'data-dir'. 
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Název pro adresář, v němž se má ukládat rejstřík.<br>Neabsolutní cesta je vzata relativně k adresáři s nastavením. Výchozí je 'xapiandb'.
    
    
        Unac exceptions
        Výjimky unac
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        <p>Toto jsou výjimky pro mechanismus unac, který ve výchozím nastavení odstraňuje všechny diakritické znaky a nahrazuje je kanonickými obdobami. Toto odstraňování akcentů můžete (v závislosti na vaší řeči) pro některé znaky potlačit a zadat dodatečná nahrazení, např. pro ligatury. V každém mezerou odděleném záznamu je první znak zdrojovým (výchozím) a zbytek je nahrazení.
    
    
        These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Názvy cest adresářů, které se nebudou rejstříkovat.<br>Části cest mohou obsahovat zástupné symboly (žolíky). Záznamy musí odpovídat cestám, které rejstříkovač vidí (např. pokud v počátečních adresářích stojí '/home/me' a '/home' je ve skutečnosti odkazem na '/usr/home', potom by byl správným zápisem skippedPath '/home/me/tmp*' a ne '/usr/home/me/tmp*')
    
    
        Max disk occupation (%, 0 means no limit)
        Největší obsazení disku (%, 0 znamená bez omezení)
    
    
        This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit.
        Toto je procentní podíl využití disku (celkové využití disku, ne velikost rejstříku), při kterém rejstříkování selže a zastaví se.<br>Výchozí hodnota 0 znamená žádné omezení.
    


    uiPrefsDialogBase
    
        User preferences
        Uživatelská nastavení
    
    
        User interface
        Uživatelské rozhraní
    
    
        Number of entries in a result page
        Počet výsledků na stranu
    
    
        If checked, results with the same content under different names will only be shown once.
        Je-li zaškrtnuto, budou výsledky se stejným obsahem pod jinými názvy ukázány jen jednou.
    
    
        Hide duplicate results.
        Skrýt zdvojené výsledky.
    
    
        Highlight color for query terms
        Zvýraznit barvu výrazů hledání
    
    
        Result list font
        Písmo pro seznam s výsledky
    
    
        Opens a dialog to select the result list font
        Otevře dialog pro výběr písma seznamu výsledků
    
    
        Helvetica-10
        Helvetica-10
    
    
        Resets the result list font to the system default
        Nastaví písmo pro seznam s výsledky znovu na výchozí hodnotu
    
    
        Reset
        Nastavit znovu
    
    
        Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br>
        Stanovuje formát pro každý odstavec seznamu s výsledky. Použijte qt nahrazení formátu html a printf:<br>%A přehled<br> %D datum<br> %I název obrázku ikony<br> %K klíčová slova (jsou-li)<br> %L odkazy na náhled a úpravy<br> %M mime typ<br> %N počet výsledků<br> %R procento významnosti<br> %S informace o velikosti<br> %T název<br> %U adresa (URL)<br>
    
    
        Result paragraph<br>format string
        Řetězec formátu<br>pro výsledky
    
    
        Texts over this size will not be highlighted in preview (too slow).
        Texty nad tuto velikost nebudou v náhledu zvýrazňovány (příliš pomalé).
    
    
        Maximum text size highlighted for preview (megabytes)
        Největší velikost textu zvýrazněného pro náhled (megabyty)
    
    
        Use desktop preferences to choose document editor.
        Použít nastavení pracovní plochy pro výběr editoru pro dokumenty.
    
    
        Choose editor applications
        Vybrat programy editorů
    
    
        Display category filter as toolbar instead of button panel (needs restart).
        Zobrazit skupinový filtr jako nástrojový pruh místo tlačítkového panelu (potřebuje spustit program znovu).
    
    
        Auto-start simple search on whitespace entry.
        Automaticky spustit jednoduché hledání, když je do zadávacího pole pro hledání zadáno prázdné místo (mezera).
    
    
        Start with advanced search dialog open.
        Po spuštění automaticky otevřít dialog pro rozšířené hledání
    
    
        Start with sort dialog open.
        Po spuštění automaticky otevřít dialog třídění.
    
    
        Remember sort activation state.
        Zapamatovat si stav zapnutí třídění.
    
    
        Prefer Html to plain text for preview.
        Upřednostňovat pro náhled HTML před prostým textem
    
    
        Search parameters
        Parametry hledání
    
    
        Stemming language
        Jazyk s kmeny slov
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        Hledání [Ježíš Kristus] se změní na [Ježíš OR Kristus OR (Ježíš PHRASE 2 Kristus)].
Tímto by měly být silněji zváženy výsledky, které obsahují přesné shody s hledaným slovem.
    
    
        Automatically add phrase to simple searches
        Automatické přidání vět do jednoduchého hledání
    
    
        Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.
        Stanovení, zda se má vytvořit přehled pro výsledky v souvislosti s parametrem hledání.
U velkých dokumentů může být pomalé.
    
    
        Dynamically build abstracts
        Vytvářet přehledy dynamicky
    
    
        Do we synthetize an abstract even if the document seemed to have one?
        Stanovení, zda se má vytvořit přehled i tehdy, když dokument již nějaký přehled obsahuje.
    
    
        Replace abstracts from documents
        Nahradit přehledy v dokumentech
    
    
        Synthetic abstract size (characters)
        Délka vytvořeného přehledu (počet znaků)
    
    
        Synthetic abstract context words
        Počet souvisejících slov v přehledu
    
    
        The words in the list will be automatically turned to ext:xxx clauses in the query language entry.
        Slova v seznamu budou v záznamu jazyka hledání automaticky obrácena na věty ext:xxx.
    
    
        Query language magic file name suffixes.
        Kouzelné přípony souborového názvu jazyka hledání
    
    
        Enable
        Povolit
    
    
        External Indexes
        Vnější rejstříky
    
    
        Toggle selected
        Přepnout vybrané
    
    
        Activate All
        Zapnout vše
    
    
        Deactivate All
        Vypnout vše
    
    
        Remove from list. This has no effect on the disk index.
        Odstranit ze seznamu. Nemá to žádný účinek na uložený rejstřík.
    
    
        Remove selected
        Odstranit vybrané
    
    
        Click to add another index directory to the list
        Klepnout pro přidání dalšího rejstříkového adresáře do seznamu
    
    
        Add index
        Přidat rejstřík
    
    
        Apply changes
        Použít změny
    
    
        &OK
        &OK
    
    
        Discard changes
        Zahodit změny
    
    
        &Cancel
        Z&rušit
    
    
        Abstract snippet separator
        Oddělovač úryvků
    
    
        Use <PRE> tags instead of <BR>to display plain text as html.
        Použít značky <PRE> namísto <BR> pro zobrazení prostého textu jako HTML.
    
    
        Lines in PRE text are not folded. Using BR loses indentation.
        Řádky v textu PRE nejsou složeny. Použití BR povede ke ztrátě odsazení.
    
    
        Style sheet
        Stylový list
    
    
        Opens a dialog to select the style sheet file
        Otevře dialog pro výběr souboru se stylovým listem
    
    
        Choose
        Vybrat
    
    
        Resets the style sheet to default
        Nastaví stylový list znovu na výchozí
    
    
        Lines in PRE text are not folded. Using BR loses some indentation.
        Řádky v textu PRE nejsou složeny. Použití BR povede ke ztrátě odsazení.
    
    
        Use <PRE> tags instead of <BR>to display plain text as html in preview.
        Použít značky <PRE> namísto <BR> pro zobrazení prostého textu formátovaného v náhledu jako HTML.
    
    
        Result List
        Seznam s výsledky
    
    
        Edit result paragraph format string
        Upravit řetězec formátu pro výsledky
    
    
        Edit result page html header insert
        Upravit záhlaví html na straně s výsledky
    
    
        Date format (strftime(3))
        Formát data (strftime(3))
    
    
        Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). 
        Práh četnosti (procento), od kterého se výrazy nepoužívají.
Slovní obraty obsahující příliš četné výrazy způsobují výkonnostní potíže.
Přeskočené výrazy zvětšují vzdálenost slovního obratu a zmenšují účinnost funkce automatického hledání slovního obratu.
Výchozí hodnota je 2 (procenta).
    
    
        Autophrase term frequency threshold percentage
        Četnost výskytu výrazu (procento) pro automatické tvoření slovních obratů
    
    
        Plain text to HTML line style
        Prostý text do stylu řádku HTML
    
    
        Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.
        Řádky v PRE textu nejsou zalomeny. Při použití BR dojde ke ztrátě některých zalomení. Možná je to, co chcete styl PRE + zalomení.
    
    
        <BR>
        <BR>
    
    
        <PRE>
        <PRE>
    
    
        <PRE> + wrap
        <PRE> + zalomení
    
    
        Exceptions
        Výjimky
    
    
        Mime types that should not be passed to xdg-open even when "Use desktop preferences" is set.<br> Useful to pass page number and search string options to, e.g. evince.
        MIME typy, jež nemají být předány xdg-open, dokonce i když je nastaveno "Použít nastavení plochy".<br>Užitečné pro předání čísla strany a hledaného řetězce, např. Evince.
    
    
        Disable Qt autocompletion in search entry.
        Zakázat automatické doplňování Qt při zadávání v poli pro hledání.
    
    
        Search as you type.
        Hledat při psaní.
    
    
        Paths translations
        Překlady cest
    
    
        Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.
        Klepnout pro přidání dalšího rejstříkového adresáře do seznamu. Můžete vybrat buď adresář s nastavením pro Recoll nebo rejstřík Xapian.
    
    
        Snippets window CSS file
        Soubor CSS okna s úryvky
    
    
        Opens a dialog to select the Snippets window CSS style sheet file
        Otevře dialog pro výběr souboru CSS se stylovým listem okna s úryvky
    
    
        Resets the Snippets window style
        Nastaví znovu styl okna s úryvky
    
    
        Decide if document filters are shown as radio buttons, toolbar combobox, or menu.
        Rozhodnout, zda se dokumentové filtry ukazují jako kulatá tlačítka, rozbalovací seznamy v nástrojovém pruhu, nebo jako nabídka.
    
    
        Document filter choice style:
        Styl výběru filtrů dokumentů:
    
    
        Buttons Panel
        Panel s tlačítky
    
    
        Toolbar Combobox
        Rozbalovací seznam v nástrojovém panelu
    
    
        Menu
        Nabídka
    
    
        Show system tray icon.
        Ukázat ikonu v oznamovací oblasti panelu.
    
    
        Close to tray instead of exiting.
        Zavřít do oznamovací oblasti panelu namísto ukončení.
    
    
        Start with simple search mode
        Spustit v jednoduchém vyhledávacím režimu
    
    
        Show warning when opening temporary file.
        Ukázat varování při otevírání dočasného souboru.
    
    
        User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header.
        Uživatelský styl k použití v okně s úryvky.<br>Poznámka: Záhlaví stránky s výsledky je zahrnuto i v záhlaví okna s úryvky.
    
    
        Synonyms file
        Soubor se slovy majícími stejný význam
    
    
        Highlight CSS style for query terms
        Styl CSS pro zvýraznění výrazů hledání
    
    
        Recoll - User Preferences
        Recoll - Uživatelská nastavení
    
    
        Set path translations for the selected index or for the main one if no selection exists.
        Nastavit překlady cest pro vybraný rejstřík nebo pro hlavní, pokud žádný vybrán není.
    
    
        Activate links in preview.
        Zapnout odkazy v náhledu.
    
    
        Make links inside the preview window clickable, and start an external browser when they are clicked.
        Udělat odkazy uvnitř náhledového okna klepnutelnými a spustit vnější prohlížeč, když je na ně klepnuto.
    
    
        Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue...
        
    
    
        Start search on completer popup activation.
        
    
    
        Maximum number of snippets displayed in the snippets window
        
    
    
        Sort snippets by page number (default: by weigth).
        
    
    
        Suppress all beeps.
        
    

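Each English/Czech pair above is a single Qt Linguist message entry, grouped under the context name (the GUI class, e.g. ResList, SSearch, uiPrefsDialogBase) that precedes the group. As a rough sketch of how one such entry is laid out in the .ts XML source (the root <TS> element, DOCTYPE, and any per-message <location> elements are omitted here), the Snippets entry shown above corresponds to:

<context>
    <name>Snippets</name>
    <message>
        <source>Snippets</source>
        <translation>Úryvky</translation>
    </message>
</context>

Qt's lrelease tool compiles a .ts file of this form into a binary .qm message catalog, which is the format of the recoll_de.qm archive member that follows.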

recoll-1.26.3/qtgui/i18n/recoll_de.qm0000644000175000017500000025404313566424763014233 00000000000000GkH6H6J+gJ+nJ6BJ6GLbMz9PhSST5WTBWX?Z0:&[%\	'egw{s7s`v8еvvzϳ)mj!I\0+ýJafSLٝ	nEgt_::zwEn.'0,+;;s>.cDo2RNҴXM6XXl/`^hnltn&ww݌v#*Dc%xC~*ͺ?S
LSI\'؅L
p#vv<w5w5%w5Kw5
wU%}.]6ֳ	O6*f3ߛͼu7gg׸~F;U>mUe3H!D@&,.N-=dA?dG$JUYJUY=Y[u_nE2u.ʷÓʗfʗڍ9i^2L(gx*mJg-A7%cA B <.7d;[<L$NBCe2%vh>‹(Pb"3ZIohI3nf
mff%],oWmWy%k y%.R΄5:p#.X|AWj	̔i\i
-Z-)T@B9(BSOkor?)r|k,<"BlASâ,

R%Ǣ'I^~CȜ>kC`Mq$$"	UKAp
h#>u9yv2T#K4n	%En\#7^[\XnDw)=C#!v#+DI7I^N<~
FW#FN*H:"_guap,rfo|N8wϗnj/zlÓtÓtaȍ#ɆtH]]8nq 0R#:u;JnpQY}};E'D05f?E)c+@rO>1c
䴥?J-	H	-(n	9Zy	;3˻	D%	KT	]#	cC	k	lM	G		qD		G	)	9D4	ü>V
G.~L
`Pl
`
aE
cE
d8*
yg
I
A
VT,
C


ԅR	yejTH=,x>=!pKjnh}քOΣYr ŧٷ۷[%?4{	Vdu'И#+bC/97E9ɝ>L*P֙6RVםT#V.z\iC>b]+`F{h^v{l;!Yѐ!YW+Li&f"~37Nb
mD
'R#
-
8
F
OE
]pg
]
u0
y>
y~>
3	
ȩ'
u
u

P
Pl
5d8
·
jL
7
&
Ւn
H
Q5-
£gq2v%n/.8bv9><ErnQ~=
Y~sI[sY\kXe3g3p~" :(!"lc[mc	|.0|BV'b
l۾LiI
[Binary compiled Qt translation catalog (apparently a .qm file): its string table interleaves German and English UI messages for contexts such as AdvSearch, AdvSearchBase, ConfIndexW, ConfSubPanelW, CronToolW, FirstIdxDialog, IdxSchedW, RTIToolW, RclMain, RclMainBase, RecollModel, ResList, ResTable, SSearch, SearchClauseW, SpellW, UIPrefsDialog, ViewActionBase and uiPrefsDialogBase; the binary content is not reproducible as text.]
recoll-1.26.3/qtgui/i18n/recoll_el.ts0000644000175000017500000060213313566424763014251 00000000000000



    AdvSearch
    
        All clauses
        Όλες οι ρήτρες
    
    
        Any clause
        Οποιαδήποτε ρήτρα
    
    
        texts
        κείμενα
    
    
        spreadsheets
        φύλλα εργασίας
    
    
        presentations
        παρουσιάσεις
    
    
        media
        πολυμέσα
    
    
        messages
        Μηνύματα
    
    
        other
        άλλα
    
    
        Bad multiplier suffix in size filter
        Κακό επίθεμα πολλαπλασιαστή στο φίλτρο μεγέθους
    
    
        text
        κείμενο
    
    
        spreadsheet
        λογιστικό φύλλο
    
    
        presentation
        παρουσίαση
    
    
        message
        μήνυμα
    


    AdvSearchBase
    
        Advanced search
        Προχωρημένη αναζήτηση
    
    
        Find
        Αναζήτηση
    
    
        All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored.
        Όλα τα μη κενά πεδία στα δεξιά θα συνδυαστούν με ένα συνδυασμό ΚΑΙ (επιλογή «Όλες οι ρήτρες») ή Ή (επιλογή «Μια από τις ρήτρες»). <br> Τα πεδία του τύπου «Μια από αυτές τις λέξεις», «Όλες οι λέξεις» και «Καμιά από αυτές τις λέξεις» δέχονται ένα ανακάτεμα λέξεων και φράσεων σε εισαγωγικά. <br>Τα κενά πεδία αγνοούνται.
    
    
        Search for <br>documents<br>satisfying:
        Αναζήτηση <br>εγγράφων<br>που ικανοποιούν:
    
    
        Delete clause
        Διαγραφή ρήτρας
    
    
        Add clause
        Προσθήκη ρήτρας
    
    
        Filter
        Φίλτρο
    
    
        Check this to enable filtering on dates
        Επιλέξτε αυτό για να ενεργοποιήσετε το φίλτρο στις ημερομηνίες
    
    
        Filter dates
        Φίλτρο ημερομηνίας
    
    
        From
        Από
    
    
        To
        Έως
    
    
        Check this to enable filtering on sizes
        Επιλέξτε αυτό για να ενεργοποιήσετε το φιλτράρισμα στο μέγεθος αρχείων
    
    
        Filter sizes
        Φίλτρο μεγέθους
    
    
        Minimum size. You can use k/K,m/M,g/G as multipliers
        Ελάχιστο μέγεθος: Μπορείτε να χρησιμοποιήσετε τα k/K,m/M,g/G ως πολλαπλασιαστές
    
    
        Min. Size
        Ελαχ. μέγεθος
    
    
        Maximum size. You can use k/K,m/M,g/G as multipliers
        Μέγιστο μέγεθος: Μπορείτε να χρησιμοποιήσετε τα k/K,m/M,g/G ως πολλαπλασιαστές
    
    
        Max. Size
        Μέγ. μέγεθος
    
    
        Check this to enable filtering on file types
        Ενεργοποιήστε αυτή την επιλογή για να χρησιμοποιηθεί το φιλτράρισμα στους τύπους αρχείων
    
    
        Restrict file types
        Περιορισμός του τύπου αρχείων
    
    
        Check this to use file categories instead of raw mime types
        Επιλέξτε το για να χρησιμοποιήσετε τις κατηγορίες αρχείων αντί των τύπων mime
    
    
        By categories
        Ανά κατηγορία
    
    
        Save as default
        Αποθήκευση ως προεπιλογή
    
    
        Searched file types
        Αναζητούμενοι τύποι αρχείων
    
    
        All ---->
        Όλα ---->
    
    
        Sel ----->
        Επιλ ---->
    
    
        <----- Sel
        <----- Επιλ
    
    
        <----- All
        <----- Όλα
    
    
        Ignored file types
        Τύποι αρχείων που θα αγνοηθούν
    
    
        Enter top directory for search
        Εισάγετε τον κατάλογο εκκίνησης της αναζήτησης
    
    
        Browse
        Περιήγηση
    
    
        Restrict results to files in subtree:
        Περιορισμός των αποτελεσμάτων στα αρχεία του δέντρου:
    
    
        Invert
        Αντιστροφή
    
    
        Start Search
        Εκκίνηση αναζήτησης
    
    
        Close
        Κλείσιμο
    


    ConfIndexW
    
        Can't write configuration file
        Αδύνατη η εγγραφή του αρχείου διαμόρφωσης
    
    
        Global parameters
        Καθολικές ρυθμίσεις
    
    
        Local parameters
        Τοπικές ρυθμίσεις
    
    
        Search parameters
        Ρυθμίσεις αναζήτησης
    
    
        Top directories
        Κατάλογοι εκκίνησης
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Η λίστα των καταλόγων για την έναρξη της αναδρομικής ευρετηρίασης. Προεπιλογή: ο προσωπικός σας κατάλογος.
    
    
        Skipped paths
        Παραλειπόμενες διαδρομές
    
    
        These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        
    
    
        Stemming languages
        Γλώσσα για την επέκταση των όρων
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Οι γλώσσες για τις οποίες θα δημιουργηθούν τα λεξικά επεκτάσεων<br>των όρων.
    
    
        Log file name
        Όνομα του αρχείου καταγραφών
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        Το αρχείο που θα εγγραφούν τα μηνύματα.<br>Χρησιμοποιήστε 'stderr' για την έξοδο τερματικού
    
    
        Log verbosity level
        Επίπεδο ανάλυσης των καταγραφών
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Αυτή η τιμή ρυθμίζει την ποσότητα των απεσταλμένων μηνυμάτων,<br>από μόνο τα σφάλματα μέχρι πολλά δεδομένα αποσφαλμάτωσης.
    
    
        Index flush megabytes interval
        Καθυστέρηση εγγραφής του ευρετηρίου σε megabyte
    
    
        This value adjust the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Αυτή η τιμή ρυθμίζει την ποσότητα των δεδομένων που δεικτοδοτούνται μεταξύ των εγγραφών στο δίσκο.<br>Βοηθά στον έλεγχο χρήσης της μνήμης. Προεπιλογή: 10MB 
    
    
        Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit)
        
    
    
        This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit.
        
    
    
        No aspell usage
        Χωρίς χρήση του aspell
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        Απενεργοποιεί τη χρήση του aspell για τη δημιουργία των ορθογραφικών προσεγγίσεων.<br>Χρήσιμο αν το aspell δεν είναι εγκατεστημένο ή δεν λειτουργεί. 
    
    
        Aspell language
        Γλώσσα του aspell
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Η γλώσσα για το λεξικό aspell. Αυτό θα πρέπει να είναι του τύπου «en» ή «el» ...<br> Αν αυτή η τιμή δεν οριστεί, χρησιμοποιείται το εθνικό περιβάλλον NLS για να την υπολογίσει, που συνήθως δουλεύει. Για να πάρετε μια ιδέα του τι είναι εγκατεστημένο στο σύστημά σας, πληκτρολογήστε «aspell config» και παρατηρήστε τα αρχεία .dat στον κατάλογο «data-dir». 
    
    
        Database directory name
        Κατάλογος αποθήκευσης του ευρετηρίου
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Το όνομα του καταλόγου αποθήκευσης του ευρετηρίου<br>Μια σχετική διαδρομή αναφερόμενη στη διαδρομή διαμόρφωσης. Η εξ' ορισμού είναι «xapiandb». 
    
    
        Unac exceptions
        Εξαιρέσεις unac
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        <p>Αυτές είναι εξαιρέσεις για τον μηχανισμό unac, ο οποίος εξ' ορισμού, αφαιρεί όλους τους τονισμούς, και πραγματοποιεί κανονική αποσύνθεση. Μπορείτε να αναιρέσετε την αφαίρεση των τονισμών  για ορισμένους χαρακτήρες, ανάλογα με τη γλώσσα σας, και διευκρινίστε άλλους αποσυνθέσεις, για παράδειγμα συμπλεγμένους χαρακτήρες. Στη λίστα διαχωρισμένη με κενά, ο πρώτος χαρακτήρας ενός αντικειμένου είναι η πηγή, το υπόλοιπο είναι η μετάφραση.
    
    
        Process the WEB history queue
        Επεξεργασία της ουράς ιστορικού του Ιστού
    
    
        Enables indexing Firefox visited pages.<br>(you need also install the Firefox Recoll plugin)
        Ενεργοποιεί τη δεικτοδότηση των επισκεπτόμενων σελίδων στον Firefox.<br>(θα πρέπει να εγκαταστήσετε και το πρόσθετο Firefox Recoll)
    
    
        Web page store directory name
        Όνομα καταλόγου αποθήκευσης ιστοσελίδων
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        Το όνομα του καταλόγου αποθήκευσης αντιγράφων των επισκεφθέντων ιστοσελίδων.<br>Μια σχετική διαδρομή αναφερόμενη στη διαδρομή διαμόρφωσης.
    
    
        Max. size for the web store (MB)
        Μέγ. μέγεθος της λανθάνουσας μνήμης ιστού (MB)
    
    
        Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).
        
    
    
        Automatic diacritics sensitivity
        Αυτόματη ευαισθησία στους τόνους
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        <p>Αυτόματη εναλλαγή ευαισθησίας τονισμού αν ο όρος αναζήτησης διαθέτει τονισμένους χαρακτήρες (εκτός αυτών του unac_except_trans). Διαφορετικά θα πρέπει να χρησιμοποιήσετε τη γλώσσα της αναζήτησης και τον τροποποιητή <i>D</i> για τον καθορισμό της ευαισθησίας τονισμών.
    
    
        Automatic character case sensitivity
        Αυτόματη ευαισθησία πεζών/κεφαλαίων
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        <p>Αυτόματη εναλλαγή ευαισθησίας διάκρισης πεζών/κεφαλαίων αν η ο όρος αναζήτησης διαθέτει κεφαλαία γράμματα (εκτός του πρώτου γράμματος). Διαφορετικά θα πρέπει να χρησιμοποιήσετε τη γλώσσα της αναζήτησης και τον τροποποιητή <i>C</i> για τον καθορισμό της ευαισθησίας διάκρισης πεζών / κεφαλαίων.
    
    
        Maximum term expansion count
        Μέγιστο μέγεθος επέκτασης ενός όρου
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        <p>Μέγιστος αριθμός επέκτασης για έναν όρο (π.χ.: κατά τη χρήση χαρακτήρων υποκατάστασης). Η προκαθορισμένη τιμή 10000 είναι λογική και θα αποφύγει ερωτήματα που εμφανίζονται σαν παγωμένα την ίδια στιγμή που η μηχανή διαπερνά τη λίστα όρων.
    
    
        Maximum Xapian clauses count
        Μέγιστος αριθμός ρητρών Xapian 
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        <p>Μέγιστος αριθμός στοιχειωδών ρητρών που προσθέτουμε σε ένα απλό ερώτημα Xapian. Σε μερικές περιπτώσεις, το αποτέλεσμα της επέκτασης των όρων μπορεί να είναι πολλαπλασιαστικό, και θα χρησιμοποιούσε υπερβολική μνήμη. Η προκαθορισμένη τιμή 100000 θα πρέπει να είναι επαρκής και συμβατή με μια τυπική διαμόρφωση υλικού.
    


    ConfSubPanelW
    
        Only mime types
        Μόνο οι τύποι MIME
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        Μια αποκλειστική λίστα δεικτοδοτημένων τύπων mime.<br>Δεν θα δεικτοδοτηθεί τίποτα άλλο. Φυσιολογικά κενό και αδρανές
    
    
        Exclude mime types
        Αποκλεισμός τύπων αρχείων
    
    
        Mime types not to be indexed
        Οι τύποι Mime που δεν θα δεικτοδοτηθούν
    
    
        Max. compressed file size (KB)
        Μεγ.μέγεθος για τα συμπιεσμένα αρχεία (KB)
    
    
        This value sets a threshold beyond which compressedfiles will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        Αυτή η τιμή καθορίζει ένα όριο πέραν του οποίου τα συμπιεσμένα αρχεία δεν θα επεξεργάζονται. Χρησιμοποιήστε -1 για κανένα όριο, 0 για να μην επεξεργάζονται τα συμπιεσμένα αρχεία.
    
    
        Max. text file size (MB)
        Μεγ. μέγεθος αρχείων κειμένου (MB)
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        Αυτή η τιμή ορίζει ένα όριο πέραν του οποίου δεν θα γίνεται ευρετηρίαση για τα αρχεία κειμένου. Ορίστε -1 για κανένα όριο.
Αυτό χρησιμεύει για τον αποκλεισμό από την ευρετηρίαση τεράστιων αρχείων καταγραφών.
    
    
        Text file page size (KB)
        Μέγεθος κοπής για τα αρχεία κειμένου (KB)
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text  files (ie: log files).
        Αν αυτή η τιμή έχει οριστεί και είναι θετική, τα αρχεία κειμένου θα κοπούν σε κομμάτια αυτού του μεγέθους για την ευρετηρίαση.
Αυτό βοηθά στη μείωση των καταναλωμένων πόρων από την ευρετηρίαση και βοηθά τη φόρτωση για την προεπισκόπηση.
    
    
        Max. filter exec. time (s)
        
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        Τα εξωτερικά φίλτρα σε λειτουργία μεγαλύτερη από αυτό θα διακόπτονται. Χρήσιμο για τη σπάνια περίπτωση (π.χ. postscript) όπου ένα έγγραφο μπορεί να προκαλέσει ένα βρόγχο στο φίλτρο. Ορίστε το σε -1 για να αφαιρέσετε το όριο.
    
    
        Global
        Γενικά
    


    CronToolW
    
        Cron Dialog
        Διάλογος Cron
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 AM and 7:15 PM</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> προγραμματισμός της περιοδικής ευρετηρίασης (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Κάθε πεδίο μπορεί να περιέχει ένα χαρακτήρα υποκατάστασης (*), μια απλή αριθμητική τιμή, λίστες διαχωρισμένες με κόμα (1,3,5) και εύρη (1-7). Γενικότερα, τα πεδία θα χρησιμοποιηθούν <span style=" font-style:italic;">ως έχουν</span> στο αρχείο crontab, και η γενική σύνταξη crontab μπορεί να χρησιμοποιηθεί, δείτε στη σελίδα του εγχειριδίου crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />Για παράδειγμα, εισάγοντας <span style=" font-family:'Courier New,courier';">*</span> στις <span style=" font-style:italic;">Ημέρες, </span><span style=" font-family:'Courier New,courier';">12,19</span> στις <span style=" font-style:italic;">Ώρες</span> και <span style=" font-family:'Courier New,courier';">15</span> στα <span style=" font-style:italic;">Λεπτά</span>, το recollindex θα ξεκινά κάθε μέρα στις 12:15 AM και 7:15 PM</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Ο προγραμματισμός με πολύ συχνές ενεργοποιήσεις είναι πιθανώς λιγότερο αποτελεσματικός από την ευρετηρίαση σε πραγματικό χρόνο.</p></body></html>
    
    
        Days of week (* or 0-7, 0 or 7 is Sunday)
        Ημέρες της εβδομάδας (* ή 0-7, 0 ή 7 σημαίνει Κυριακή)
    
    
        Hours (* or 0-23)
        Ώρες (* ή 0-23)
    
    
        Minutes (0-59)
        Λεπτά (0-59)
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Κάντε κλικ στο <span style=" font-style:italic;">Απενεργοποίηση</span> για να διακόψετε την περιοδική αυτόματη ευρετηρίαση, στο <span style=" font-style:italic;">Ενεργοποίηση</span> για να την ενεργοποιήσετε, και <span style=" font-style:italic;">Ακύρωση</span> για να μην αλλάξει τίποτα.</p></body></html>
    
    
        Enable
        Ενεργοποίηση
    
    
        Disable
        Απενεργοποίηση
    
    
        It seems that manually edited entries exist for recollindex, cannot edit crontab
        Φαίνεται ότι υπάρχουν καταχωρήσεις δημιουργημένες χειροκίνητα για το recollindex. Η επεξεργασία του αρχείου Cron δεν είναι δυνατή
    
    
        Error installing cron entry. Bad syntax in fields ?
        Σφάλμα κατά την εγκατάσταση της καταχώρησης cron. Κακή σύνταξη των πεδίων;
    


    EditDialog
    
        Dialog
        Διάλογος
    


    EditTrans
    
        Source path
        Διαδρομή πηγής
    
    
        Local path
        Τοπική διαδρομή
    
    
        Config error
        Σφάλμα διαμόρφωσης
    
    
        Original path
        Αρχική διαδρομή
    


    EditTransBase
    
        Path Translations
        Μεταφράσεις διαδρομών
    
    
        Setting path translations for 
        Ορισμός μεταφράσεων διαδρομών για 
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Επιλέξτε έναν ή περισσότερους τύπους αρχείων, και στη συνέχεια χρησιμοποιήστε τα κουμπιά ελέγχου στο παρακάτω πλαίσιο για να αλλάξετε τον τρόπο επεξεργασίας
    
    
        Add
        Προσθήκη
    
    
        Delete
        Διαγραφή
    
    
        Cancel
        Ακύρωση
    
    
        Save
        Αποθήκευση
    


    FirstIdxDialog
    
        First indexing setup
        Διαμόρφωση της πρώτης δεικτοδότησης
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Φαίνεται ότι το ευρετήριο για αυτήν τη διαμόρφωση δεν υπάρχει ακόμα..</span><br /><br />Αν θέλετε απλά να δεικτοδοτήσετε τον προσωπικό σας κατάλογο με ένα ικανοποιητικό σύνολοy προεπιλογών, πατήστε το κουμπί <span style=" font-style:italic;">«Έναρξη της ευρετηρίασης τώρα»</span>. Μπορείτε να ρυθμίσετε τις λεπτομέρειες αργότερα. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Αν επιθυμείτε περισσότερο έλεγχο, χρησιμοποιήστε τους παρακάτω συνδέσμους για να ρυθμίσετε τη διαμόρφωση της ευρετηρίασης και του προγραμματισμού.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Μπορείτε να έχετε πρόσβαση στα εργαλεία αυτά αργότερα από το μενού <span style=" font-style:italic;">Προτιμήσεις</span>.</p></body></html>
    
    
        Indexing configuration
        Διαμόρφωση ευρετηρίασης
    
    
        This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.
        Σας επιτρέπει τη ρύθμιση των καταλόγων που επιθυμείτε να δεικτοδοτήσετε, και άλλων παραμέτρων όπως οι εξαιρούμενες διαδρομές αρχείων ή ονομάτων, των προκαθορισμένων συνόλων χαρακτήρων, κλπ.
    
    
        Indexing schedule
        Προγραμματισμός ευρετηρίασης
    
    
        This will let you choose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).
        Σας επιτρέπει την επιλογή μεταξύ της προγραμματισμένης ευρετηρίασης και αυτής σε πραγματικό χρόνο, και τον καθορισμό του προγραμματισμού για την πρώτη (βασισμένη στο εργαλείο cron).
    
    
        Start indexing now
        Έναρξη της ευρετηρίασης τώρα
    


    FragButs
    
        %1 not found.
        Δεν βρέθηκε το %1.
    
    
        %1:
 %2
        %1:
 %2
    
    
        Query Fragments
        Θραύσματα ερωτήματος
    


    IdxSchedW
    
        Index scheduling setup
        Διαμόρφωση του προγραμματισμού ευρετηρίασης
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Η ευρετηρίαση του <span style=" font-weight:600;">Recoll</span> μπορεί να βρίσκεται μόνιμα σε λειτουργία, επεξεργάζοντας τα αρχεία αμέσως μετά αφού τροποποιηθούν, ή να εκτελείται σε προκαθορισμένες στιγμές. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Μια ανάγνωση του εγχειριδίου μπορεί να σας βοηθήσει να επιλέξετε μεταξύ αυτών των προσεγγίσεων (πατήστε F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Αυτό το εργαλείο μπορεί να σας βοηθήσει να διαμορφώσετε την προγραμματισμένη ευρετηρίαση ή να ορίσετε μια αυτόματη έναρξη της ευρετηρίασης σε πραγματικό χρόνο κατά τη σύνδεσή σας (ή και τα δύο, κάτι που σπάνια χρειάζεται). </p></body></html>
    
    
        Cron scheduling
        Προγραμματισμός Cron
    
    
        The tool will let you decide at what time indexing should run and will install a crontab entry.
        Ο διάλογος σας επιτρέπει να προσδιορίσετε την ώρα έναρξης της ευρετηρίασης και θα εισάγει μια καταχώρηση crontab.
    
    
        Real time indexing start up
        Έναρξη της ευρετηρίασης σε πραγματικό χρόνο
    
    
        Decide if real time indexing will be started when you log in (only for the default index).
        Προσδιορίστε αν η ευρετηρίαση σε πραγματικό χρόνο θα ξεκινά με τη σύνδεσή σας (μόνο για το προκαθορισμένο ευρετήριο).
    


    ListDialog
    
        Dialog
        Διάλογος
    
    
        GroupBox
        GroupBox
    


    Main
    
        "history" file is damaged or un(read)writeable, please check or remove it: 
        Το αρχείο ιστορικού είτε είναι κατεστραμμένο είτε δεν είναι αναγνώσιμο/εγγράψιμο, παρακαλώ ελέγξτε το ή διαγράψτε το:
    
    
        No db directory in configuration
        Δεν έχει προσδιοριστεί ο κατάλογος της βάσης δεδομένων στη διαμόρφωση
    
    
        "history" file is damaged, please check or remove it: 
        
    


    Preview
    
        &Search for:
        &Αναζήτηση για:
    
    
        &Next
        &Επόμενο
    
    
        &Previous
        &Προηγούμενο
    
    
        Clear
        Καθαρισμός
    
    
        Match &Case
        Διάκριση &πεζών/κεφαλαίων
    
    
        Close Tab
        Κλείσιμο της καρτέλας
    
    
        Cancel
        Ακύρωση
    
    
        Cannot create temporary directory: 
        Αδυναμία δημιουργίας του προσωρινού καταλόγου: 
    
    
        Missing helper program: 
        Ελλείποντα εξωτερικά προγράμματα φίλτρου:
    
    
        Can't turn doc into internal representation for 
        Αδύνατη η μετατροπή του εγγράφου σε εσωτερική αναπαράσταση για 
    
    
        Error while loading file
        Σφάλμα κατά τη φόρτωση του αρχείου
    
    
        Creating preview text
        Δημιουργία του κειμένου προεπισκόπησης
    
    
        Loading preview text into editor
        Φόρτωση του κειμένου προεπισκόπησης στον επεξεργαστή
    
    
        Form
        
    
    
        Tab 1
        
    
    
        Open
        Άνοιγμα
    
    
        Canceled
        
    
    
        Error loading the document: file missing.
        
    
    
        Error loading the document: no permission.
        
    
    
        Error loading: backend not configured.
        
    
    
        Error loading the document: other handler error<br>Maybe the application is locking the file ?
        
    
    
        Error loading the document: other handler error.
        
    
    
        <br>Attempting to display from stored text.
        
    
    
        Could not fetch stored text
        
    


    PreviewTextEdit
    
        Show fields
        Εμφάνιση των πεδίων
    
    
        Show image
        Εμφάνιση της εικόνας
    
    
        Show main text
        Εμφάνιση του σώματος του κειμένου
    
    
        Select All
        Επιλογή όλων
    
    
        Copy
        Αντιγραφή
    
    
        Print
        Εκτύπωση
    
    
        Fold lines
        Αναδίπλωση των γραμμών
    
    
        Preserve indentation
        Διατήρηση της εσοχής
    
    
        Save document to file
        Αποθήκευση του εγγράφου
    
    
        Print Current Preview
        Εκτύπωση του παραθύρου προεπισκόπησης
    
    
        Open document
        
    


    QObject
    
        Global parameters
        Καθολικές ρυθμίσεις
    
    
        Local parameters
        Τοπικές ρυθμίσεις
    
    
        Beagle web history
        Ιστορικό ιστού Beagle
    
    
        <b>Customised subtrees
        <b>Κατάλογοι με προσαρμοσμένες ρυθμίσεις
    
    
        The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty.
        Η λίστα των υποκαταλόγων της ζώνης με ευρετήριο<br>όπου ορισμένες παράμετροι πρέπει να επανακαθοριστούν. Προεπιλογή: κενό.
    
    
        <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons.
        <i>Οι παράμετροι που ακολουθούν έχουν καθοριστεί είτε καθολικά, αν η επιλογή στην παραπάνω λίστα<br>είναι κενή ή μια κενή γραμμή, είτε για τον επιλεγμένο κατάλογο.<br>Μπορείτε να προσθέσετε και να αφαιρέσετε καταλόγους κάνοντας κλικ στα κουμπιά +/-.
    
    
        Skipped names
        Αγνοημένα ονόματα
    
    
        These are patterns for file or directory names which should not be indexed.
        Μοτίβα που καθορίζουν τα αρχεία ή καταλόγους που δεν θα πρέπει να έχουν ευρετήριο.
    
    
        Default character set
        Σύνολο χαρακτήρων<br>εξ ορισμού
    
    
        This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environment is used.
        Το σύνολο των χαρακτήρων που χρησιμοποιείται για την ανάγνωση των αρχείων στα οποία δεν μπορεί να αναγνωριστεί το σύνολο χαρακτήρων με εσωτερικό τρόπο, για παράδειγμα τα αρχεία απλού κειμένου.<br>Η προκαθορισμένη τιμή είναι κενή, και το πρόγραμμα χρησιμοποιεί αυτή του περιβάλλοντος.
    
    
        Follow symbolic links
        Ακολούθηση των συμβολικών δεσμών
    
    
        Follow symbolic links while indexing. The default is no, to avoid duplicate indexing
        Να δημιουργηθεί ευρετήριο για αρχεία και καταλόγους που υποδεικνύονται από συμβολικούς δεσμούς. Η προκαθορισμένη τιμή είναι όχι, για την αποφυγή διπλότυπης ευρετηρίασης
    
    
        Index all file names
        Ευρετήριο για όλα τα ονόματα αρχείων
    
    
        Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true
        Ευρετήριο για τα ονόματα των αρχείων των οποίων το περιεχόμενο δεν έχει αναγνωριστεί ή επεξεργαστεί (χωρίς τύπο mime, ή μη υποστηριζόμενος τύπος). Η προκαθορισμένη τιμή είναι αληθές
    
    
        Search parameters
        Ρυθμίσεις αναζήτησης
    
    
        Web history
        Ιστορικό ιστού
    
    
        Default<br>character set
        Σύνολο χαρακτήρων<br>εξ ορισμού
    
    
        Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environment is used.
        Το σύνολο των χαρακτήρων που χρησιμοποιείται για την ανάγνωση των αρχείων που δεν έχουν εσωτερικό αναγνωριστικό των χαρακτήρων, για παράδειγμα αρχεία απλού κειμένου: <br>Η τιμή εξ ορισμού είναι κενή, και χρησιμοποιείται η τιμή του περιβάλλοντος NLS.
    
    
        Ignored endings
        Αγνοημένες καταλήξεις
    
    
        These are file name endings for files which will be indexed by name only 
(no MIME type identification attempt, no decompression, no content indexing).
        Αυτές είναι καταλήξεις αρχείων στα οποία η ευρετηρίαση θα γίνει μόνο βάσει του ονόματος (χωρίς προσπάθεια αναγνώρισης του τύπου MIME, χωρίς αποσυμπίεση, χωρίς δεικτοδότηση του περιεχομένου).
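
        Taken together, the tooltips above describe a handful of indexing settings. A hedged sketch of how they might look in recoll.conf, assuming the parameter names skippedNames, followLinks, indexallfilenames and noContentSuffixes (patterns and values are illustrative only):

            # ~/.recoll/recoll.conf -- hypothetical excerpt
            skippedNames = #* bin CVS Cache tmp .thumbnails
            followLinks = 0                  # do not follow symbolic links
            indexallfilenames = 1            # index names of files whose content is not handled
            noContentSuffixes = .md5 .map    # endings indexed by name only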
    
    
        <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons.
        
    


    QWidget
    
        Create or choose save directory
        Δημιουργία ή επιλογή του καταλόγου αποθήκευσης
    
    
        Choose exactly one directory
        Επιλέξτε μόνο έναν κατάλογο
    
    
        Could not read directory: 
        Αδύνατη η ανάγνωση του καταλόγου: 
    
    
        Unexpected file name collision, cancelling.
        Απροσδόκητη σύγκρουση ονομάτων αρχείων, ακύρωση.
    
    
        Cannot extract document: 
        Αδύνατη η εξαγωγή του εγγράφου: 
    
    
        &Preview
        &Προεπισκόπηση
    
    
        &Open
        Ά&νοιγμα
    
    
        Open With
        Άνοιγμα με
    
    
        Run Script
        Εκτέλεση σεναρίου
    
    
        Copy &File Name
        Αντιγραφή του ονόματος του α&ρχείου
    
    
        Copy &URL
        Αντιγραφή του &URL
    
    
        &Write to File
        &Εγγραφή σε αρχείο
    
    
        Save selection to files
        Αποθήκευση της επιλογής σε αρχεία
    
    
        Preview P&arent document/folder
        Προεπισκόπηση του &γονικού εγγράφου/καταλόγου
    
    
        &Open Parent document/folder
        &Άνοιγμα του γονικού εγγράφου/καταλόγου
    
    
        Find &similar documents
        Αναζήτηση παρό&μοιων εγγράφων
    
    
        Open &Snippets window
        Άνοιγμα του παραθύρου απο&σπασμάτων
    
    
        Show subdocuments / attachments
        Εμφάνιση των υπο-εγγράφων / συνημμένων
    


    QxtConfirmationMessage
    
        Do not show again.
        
    


    RTIToolW
    
        Real time indexing automatic start
        Αυτόματη έναρξη ευρετηρίασης σε πραγμ. χρόνο
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Η ευρετηρίαση του <span style=" font-weight:600;">Recoll</span> μπορεί να έχει ρυθμιστεί να εκτελείται στο παρασκήνιο, ενημερώνωντας το ευρετήριο σταδιακά κατά την τροποποίηση του αρχείου. Επωφελείστε από ένα ευρετήριο πάντα ενημερωμένο, αλλά καταναλόνωνται συνέχεια πόροι του συστήματος (μνήμη και επεξεργαστής).</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
    
    
        Start indexing daemon with my desktop session.
        Εκκίνηση του δαίμονα ευρετηρίασης κατά τη σύνδεσή μου.
    
    
        Also start indexing daemon right now.
        Επίσης να γίνει έναρξη της ευρετηρίασης τώρα.
    
    
        Replacing: 
        Αντικατάσταση του:
    
    
        Replacing file
        Αντικατάσταση του αρχείου
    
    
        Can't create: 
        Αδυναμία δημιουργίας:
    
    
        Warning
        Προσοχή
    
    
        Could not execute recollindex
        Αδυναμία εκτέλεσης του recollindex
    
    
        Deleting: 
        Διαγραφή:
    
    
        Deleting file
        Διαγραφή του αρχείου
    
    
        Removing autostart
        Αφαίρεση του autostart
    
    
        Autostart file deleted. Kill current process too ?
        Το αρχείο autostart διαγράφτηκε. Τερματισμός της διεργασίας σε εξέλιξη;
    


    RclMain
    
        All
        Όλα
    
    
        media
        πολυμέσα
    
    
        message
        μήνυμα
    
    
        other
        άλλα
    
    
        presentation
        παρουσίαση
    
    
        spreadsheet
        λογιστικό φύλλο
    
    
        text
        κείμενο
    
    
        sorted
        ταξινομημένο
    
    
        filtered
        φιλτραρισμένο
    
    
        (no stemming)
        (χωρίς επέκταση)
    
    
        (all languages)
        (όλες οι γλώσσες)
    
    
        error retrieving stemming languages
        σφάλμα στη λήψη της λίστας των γλωσσών επέκτασης
    
    
        Document category filter
        Φίλτρο κατηγοριών των εγγράφων
    
    
        Could not open external index. Db not open. Check external indexes list.
        Αδύνατο το άνοιγμα ενός εξωτερικού ευρετηρίου. Η βάση δεδομένων δεν είναι ανοιχτή. Ελέγξτε τη λίστα των εξωτερικών ευρετηρίων.
    
    
        Indexing in progress: 
        Ευρετηρίαση σε εξέλιξη: 
    
    
        None
        Τίποτα
    
    
        Updating
        Ενημέρωση
    
    
        Purge
        Καθαρισμός
    
    
        Stemdb
        Stemdb
    
    
        Closing
        Κλείσιμο
    
    
        Done
        Έγινε
    
    
        Monitor
        Παρακολούθηση
    
    
        Unknown
        Άγνωστο
    
    
        Indexing failed
        Η ευρετηρίαση απέτυχε
    
    
        Stop &Indexing
        Διακοπή της &ευρετηρίασης
    
    
        Update &Index
        Ενημέρωση του &ευρετηρίου
    
    
        Warning
        Προσοχή
    
    
        The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone
        Η διεργασία ευρετηρίασης σε εξέλιξη δεν ξεκίνησε από αυτή τη διεπαφή. Κάντε κλικ στο Εντάξει για να τη σκοτώσετε όπως και να 'χει, ή στο Ακύρωση για να την αφήσετε ήσυχη
    
    
        Erasing index
        Διαγραφή του ευρετηρίου
    
    
        Reset the index and start from scratch ?
        Διαγραφή του ευρετηρίου και επανέναρξη από το μηδέν;
    
    
        Query results
        Αποτελέσματα της αναζήτησης
    
    
        Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program
        Αίτημα σε εξέλιξη.<br>Λόγω εσωτερικών περιορισμών,<br>η ακύρωση θα τερματίσει την εκτέλεση του προγράμματος
    
    
        Result count (est.)
        Αριθμός αποτελεσμάτων (εκτίμ.)
    
    
        No results found
        Δεν βρέθηκαν αποτελέσματα
    
    
        About Recoll
        Σχετικά με το Recoll
    
    
        External applications/commands needed and not found for indexing your file types:


        Απαιτούνται εξωτερικές εφαρμογές/εντολές που δεν βρέθηκαν για την ευρετηρίαση των τύπων των αρχείων σας:


    
    
        No helpers found missing
        Δεν λείπει καμιά εφαρμογή
    
    
        Missing helper programs
        Εφαρμογές που λείπουν
    
    
        Error
        Σφάλμα
    
    
        Index not open
        Το ευρετήριο δεν είναι ανοιχτό
    
    
        Index query error
        Σφάλμα στην αναζήτηση στο ευρετήριο
    
    
        Indexed MIME Types
        Τύποι MIME με ευρετήριο
    
    
        Content has been indexed for these mime types:
        Το περιεχόμενο έχει δεικτοδοτηθεί για αυτούς τους τύπους MIME:
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry. Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel.
        Το ευρετήριο δεν είναι ενημερωμένο για αυτό το αρχείο. Υπάρχει κίνδυνος εμφάνισης μιας εσφαλμένης καταχώρησης. Κάντε κλικ στο Εντάξει για να ενημερώσετε το ευρετήριο για αυτό το αρχείο, και επανεκκινήστε το αίτημα μετά την ολοκλήρωση της ενημέρωσης του ευρετηρίου. Διαφορετικά, κάντε κλικ στο Ακύρωση.
    
    
        Can't create preview window
        Αδύνατη η δημιουργία του παραθύρου προεπισκόπησης
    
    
        Can't update index: indexer running
        Αδύνατη η ενημέρωση του ευρετηρίου: μια εργασία ευρετηρίασης βρίσκεται σε εξέλιξη
    
    
        This search is not active any more
        Η αναζήτηση δεν είναι ενεργή πια
    
    
        Cannot retrieve document info from database
        Αδύνατη η πρόσβαση στο έγγραφο στη βάση δεδομένων
    
    
        Save file
        Αποθήκευση του αρχείου
    
    
        Cannot extract document or create temporary file
        Αδύνατη η εξαγωγή του εγγράφου ή η δημιουργία ενός προσωρινού αρχείου
    
    
        No external viewer configured for mime type [
        Κανένας ρυθμισμένος προβολέας για τον τύπο MIME [
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeconf file
        Κακοδιατυπωμένη εντολή για %1: [%2]
Παρακαλώ ελέγξτε το αρχείο mimeconf
    
    
        The viewer specified in mimeview for %1: %2 is not found.
Do you want to start the  preferences dialog ?
        Ο καθορισμένος προβολέας στο mimeview για %1: %2 δεν βρέθηκε.
Θέλετε να ξεκινήσετε το διάλογο με τις προτιμήσεις;
    
    
        Can't access file: 
        Αδύνατη η πρόσβαση στο αρχείο:
    
    
        Can't uncompress file: 
        Αδύνατη η αποσυμπίεση του αρχείου: 
    
    
        Executing: [
        Εκτέλεση του: [
    
    
        History data
        Δεδομένα του ιστορικού
    
    
        Document history
        Ιστορικό των ανοιγμένων εγγράφων
    
    
        Query details
        Λεπτομέρειες της αναζήτησης
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeview file
        Λανθασμένη γραμμή εντολής για %1: [%2]
Παρακαλώ ελέγξτε το αρχείο mimeview
    
    
        Viewer command line for %1 specifies both file and parent file value: unsupported
        Η γραμμή εντολής για %1 καθορίζει την ίδια στιγμή το αρχείο και τον γονέα του: δεν υποστηρίζεται
    
    
        Cannot find parent document
        Αδύνατη η εύρεση του γονικού εγγράφου
    
    
        Indexing did not run yet
        Η δεικτοδότηση δεν έχει εκτελεστεί ακόμα
    
    
        External applications/commands needed for your file types and not found, as stored by the last indexing pass in 
        Εξωτερικές εφαρμογές και εντολές απαραίτητες για τους τύπους των εγγράφων σας, και που δεν έχουν βρεθεί, όπως έχουν αποθηκευτεί από την τελευταία δεικτοδότηση που έλαβε χώρα στις 
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry.
        Η δεικτοδότηση δεν είναι ενημερωμένη για αυτό το αρχείο. Πιθανός κίνδυνος εμφάνισης μιας λανθασμένης καταχώρησης.
    
    
        Click Ok to update the index for this file, then re-run the query when indexing is done. Else, Cancel.
        Κάντε κλικ στο Εντάξει για να ενημερώσετε τη δεικτοδότηση για αυτό το αρχείο, και στη συνέχεια επανεκκινήστε την αναζήτηση όταν θα έχει ολοκληρωθεί η δημιουργία του ευρετηρίου. Διαφορετικά, κλικ στο Ακύρωση.
    
    
        Indexer running so things should improve when it's done
        Η δημιουργία του ευρετηρίου βρίσκεται σε εξέλιξη, το αρχείο θα ενημερωθεί μετά το πέρας της ενημέρωσης
    
    
        Sub-documents and attachments
        Υπο-έγγραφα και συνημμένα
    
    
        Document filter
        Φίλτρο εγγράφου
    
    
        Index not up to date for this file. Refusing to risk showing the wrong entry. 
        Το ευρετήριο δεν είναι ενημερωμένο για αυτό το αρχείο. Άρνηση της διακινδυνευμένης εμφάνισης μιας λανθασμένης καταχώρησης. 
    
    
        Click Ok to update the index for this file, then you will need to re-run the query when indexing is done. 
        Κάντε κλικ στο Εντάξει για να ενημερώσετε το ευρετήριο για αυτό το αρχείο, στη συνέχεια θα πρέπει να εκτελέσετε εκ νέου το ερώτημα μετά το πέρας της δεικτοδότησης.
    
    
        The indexer is running so things should improve when it's done. 
        Τα πράγματα θα βελτιωθούν μετά το πέρας της δεικτοδότησης.
    
    
        The document belongs to an external index which I can't update. 
        Το έγγραφο ανήκει σε ένα εξωτερικό ευρετήριο το οποίο δεν μπορώ να ενημερώσω.
    
    
        Click Cancel to return to the list. Click Ignore to show the preview anyway. 
        Κάντε κλικ στο Ακύρωση για να επιστρέψετε στον κατάλογο. Κάντε κλικ στο Αγνόηση για την εμφάνιση της προεπισκόπησης ούτως ή άλλως.
    
    
        Duplicate documents
        Διπλότυπα έγγραφα
    
    
        These Urls ( | ipath) share the same content:
        Αυτά τα Url (| ipath) μοιράζονται το ίδιο περιεχόμενο:
    
    
        Bad desktop app spec for %1: [%2]
Please check the desktop file
        Κακοδιατυπωμένος προσδιορισμός εφαρμογής επιφάνειας εργασίας για το %1: [%2]
Παρακαλώ ελέγξτε το αρχείο της επιφάνειας εργασίας
    
    
        Indexing interrupted
        Η ευρετηρίαση διεκόπη
    
    
         with additional message: 
        
    
    
        Non-fatal indexing message: 
        
    
    
        Index locked
        
    
    
        Bad paths
        
    
    
        Selection patterns need topdir
        
    
    
        Selection patterns can only be used with a start directory
        
    
    
        The document belongs to an external index which I can't update. 
        
    
    
        No search
        
    
    
        No preserved previous search
        
    
    
        Choose file to save
        
    
    
        Saved Queries (*.rclq)
        
    
    
        Write failed
        
    
    
        Could not write to file
        
    
    
        Read failed
        
    
    
        Could not open file: 
        
    
    
        Load error
        
    
    
        Could not load saved query
        
    
    
        Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location.
        
    
    
        Do not show this warning next time (use GUI preferences to restore).
        
    
    
        Unknown indexer state. Can't access webcache file.
        
    
    
        Indexer is running. Can't access webcache file.
        
    
    
        Index scheduling
        
    
    
        Sorry, not available under Windows for now, use the File menu entries to update the index
        
    
    
        Disabled because the real time indexer was not compiled in.
        
    
    
        This configuration tool only works for the main index.
        
    
    
        Types list empty: maybe wait for indexing to progress?
        
    
    
        Can't set synonyms file (parse error?)
        
    
    
        Viewer command line for %1 specifies parent file but URL is http[s]: unsupported
        
    
    
        Tools
        
    
    
        Results
        
    
    
        Content has been indexed for these MIME types:
        
    
    
        Empty or non-existent paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index):

        
    
    
        Indexing done
        
    
    
        Can't update index: internal error
        
    
    
        Index not up to date for this file.<br>
        
    
    
        <em>Also, it seems that the last index update for the file failed.</em><br/>
        
    
    
        Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br>
        
    
    
        Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/>
        
    
    
        documents
        
    
    
        document
        
    
    
        files
        
    
    
        file
        
    
    
        errors
        
    
    
        error
        
    
    
        total files)
        
    
    
        No information: initial indexing not yet performed.
        
    


    RclMainBase
    
        Recoll
        Recoll
    
    
        All
        Όλα
    
    
        Search tools
        Εργαλεία αναζήτησης
    
    
        Result list
        Λίστα αποτελεσμάτων
    
    
        &File
        &Αρχείο
    
    
        &Tools
        &Εργαλεία
    
    
        &Preferences
        &Προτιμήσεις
    
    
        &Help
        &Βοήθεια
    
    
        E&xit
        Έ&ξοδος
    
    
        Ctrl+Q
        Ctrl+Q
    
    
        Update &index
        Ε&νημέρωση ευρετηρίου
    
    
        &Rebuild index
        Α&νακατασκευή του ευρετηρίου
    
    
        &Erase document history
        &Διαγραφή του ιστορικού εγγράφων
    
    
        &Erase search history
        Δια&γραφή του ιστορικού αναζητήσεων
    
    
        &Show missing helpers
        Ε&μφάνιση των ελλειπουσών εφαρμογών
    
    
        &Show indexed types
        Εμ&φάνιση των τύπων με ευρετήριο
    
    
        &About Recoll
        &Σχετικά με το Recoll
    
    
        &User manual
        Ε&γχειρίδιο
    
    
        Document &History
        &Ιστορικό των εγγράφων
    
    
        Document  History
        Ιστορικό των εγγράφων
    
    
        &Advanced Search
        &Προχωρημένη αναζήτηση
    
    
        Advanced/complex  Search
        Προχωρημένη αναζήτηση
    
    
        &Sort parameters
        &Ρυθμίσεις ταξινόμησης
    
    
        Sort parameters
        Ρυθμίσεις ταξινόμησης
    
    
        Term &explorer
        Ε&ξερευνητής του ευρετηρίου
    
    
        Term explorer tool
        Εργαλείο εξερεύνησης του ευρετηρίου
    
    
        Next page
        Επόμενη σελίδα
    
    
        Next page of results
        Επόμενη σελίδα των αποτελεσμάτων
    
    
        PgDown
        PgDown
    
    
        First page
        Πρώτη σελίδα
    
    
        Go to first page of results
        Μετάβαση στην πρώτη σελίδα αποτελεσμάτων
    
    
        Shift+PgUp
        Shift+PgUp
    
    
        Previous page
        Προηγούμενη σελίδα
    
    
        Previous page of results
        Προηγούμενη σελίδα αποτελεσμάτων
    
    
        PgUp
        PgUp
    
    
        &Indexing configuration
        Διαμόρφωση ευρετηρίασης
    
    
        &Indexing schedule
        &Προγραμματισμός της ευρετηρίασης
    
    
        &Query configuration
        Δια&μόρφωση της αναζήτησης
    
    
        E&xternal index dialog
        Δια&μόρφωση των εξωτερικών ευρετηρίων
    
    
        External index dialog
        Εξωτερικά ευρετήρια
    
    
        &Full Screen
        Π&λήρης οθόνη
    
    
        Full Screen
        Πλήρης οθόνη
    
    
        F11
        F11
    
    
        sortByDateAsc
        sortByDateAsc
    
    
        Sort by dates from oldest to newest
        Ταξινόμηση ανά ημερομηνία από την παλαιότερη στη νεότερη
    
    
        sortByDateDesc
        sortByDateDesc
    
    
        Sort by dates from newest to oldest
        Ταξινόμηση ανά ημερομηνία από τη νεότερη στην παλαιότερη
    
    
        Show Query Details
        Εμφάνιση της αναζήτησης λεπτομερειακά
    
    
        Show results as table
        Εμφάνιση των αποτελεσμάτων σε πίνακα
    
    
        &Index configuration
        Διαμόρφωση &Ευρετηρίου
    
    
        &GUI configuration
        Διαμόρφωση &Περιβάλλοντος
    
    
        &Results
        Αποτε&λέσματα
    
    
        Sort by date, oldest first
        Ταξινόμηση ανά ημερομηνία, τα παλαιότερα πρώτα
    
    
        Sort by date, newest first
        Ταξινόμηση ανά ημερομηνία, τα νεότερα πρώτα
    
    
        Show as table
        Εμφάνιση ως πίνακας
    
    
        Show results in a spreadsheet-like table
        Εμφάνιση των αποτελεσμάτων σε έναν πίνακα ως φύλλο εργασίας
    
    
        Save as CSV (spreadsheet) file
        Αποθήκευση ως αρχείο CSV (φύλλο εργασίας)
    
    
        Saves the result into a file which you can load in a spreadsheet
        Αποθηκεύει το αποτέλεσμα σε ένα αρχείο το οποίο μπορείτε να φορτώσετε σε ένα φύλλο εργασίας
    
    
        Next Page
        Επόμενη σελίδα
    
    
        Previous Page
        Προηγούμενη σελίδα
    
    
        First Page
        Πρώτη σελίδα
    
    
        Query Fragments
        Θραύσματα ερωτήματος
    
    
            With failed files retrying
            Προσπάθεια εκ νέου με αποτυχημένα αρχεία
    
    
        Next update will retry previously failed files
        Η επόμενη ενημέρωση θα επιχειρήσει ξανά με τα αποτυχημένα αρχεία
    
    
        &View
        
    
    
        Missing &helpers
        
    
    
        Indexed &MIME types
        
    
    
        Indexing &schedule
        
    
    
        Enable synonyms
        
    
    
        Save last query
        
    
    
        Load saved query
        
    
    
        Special Indexing
        
    
    
        Indexing with special options
        
    
    
        Index &statistics
        
    
    
        Webcache Editor
        
    
    
        Trigger incremental pass
        
    


    RclTrayIcon
    
        Restore
        Επαναφορά
    
    
        Quit
        Έξοδος
    


    RecollModel
    
        Abstract
        Απόσπασμα
    
    
        Author
        Συγγραφέας
    
    
        Document size
        Μέγεθος εγγράφου
    
    
        Document date
        Ημερομηνία εγγράφου
    
    
        File size
        Μέγεθος αρχείου
    
    
        File name
        Όνομα αρχείου
    
    
        File date
        Ημερομηνία αρχείου
    
    
        Ipath
        Ipath
    
    
        Keywords
        Λέξεις κλειδιά
    
    
        MIME type
        Τύπος MIME
    
    
        Original character set
        Αρχικό σύνολο χαρακτήρων
    
    
        Relevancy rating
        Εγγύτητα
    
    
        Title
        Τίτλος
    
    
        URL
        URL
    
    
        Mtime
        Mtime
    
    
        Date
        Ημερομηνία
    
    
        Date and time
        Ημερομηνία και ώρα
    
    
        Can't sort by inverse relevance
        
    


    ResList
    
        <p><b>No results found</b><br>
        <p><b>Κανένα αποτέλεσμα</b><br>
    
    
        Documents
        Έγγραφα
    
    
        out of at least
        από τουλάχιστον
    
    
        for
        για
    
    
        Previous
        Προηγούμενο
    
    
        Next
        Επόμενο
    
    
        Unavailable document
        Μη διαθέσιμο έγγραφο
    
    
        Preview
        Προεπισκόπηση
    
    
        Open
        Άνοιγμα
    
    
        (show query)
        (εμφάνιση ερωτήματος)
    
    
        <p><i>Alternate spellings (accents suppressed): </i>
        <p><i>Προτεινόμενη ορθογραφία (χωρίς τόνους): </i>
    
    
        Document history
        Ιστορικό των ανοιγμένων εγγράφων
    
    
        Result list
        Λίστα αποτελεσμάτων
    
    
        &Preview
        &Προεπισκόπηση
    
    
        &Open
        Ά&νοιγμα
    
    
        Copy &File Name
        Αντιγραφή του ονόματος του α&ρχείου
    
    
        Copy &URL
        Αντιγραφή URL
    
    
        &Write to File
        Απο&θήκευση σε
    
    
        Find &similar documents
        Αναζήτηση παρό&μοιων εγγράφων
    
    
        Preview P&arent document/folder
        Προεπισκόπηση του &γονικού εγγράφου/καταλόγου
    
    
        &Open Parent document/folder
        &Άνοιγμα του γονικού εγγράφου/καταλόγου
    
    
        <p><i>Alternate spellings: </i>
        <p><i>Εναλλακτικές ορθογραφίες: </i>
    
    
        Duplicate documents
        Διπλότυπα έγγραφα
    
    
        These Urls ( | ipath) share the same content:
        Αυτά τα URL (| ipath) μοιράζονται το ίδιο περιεχόμενο:
    
    
        Result count (est.)
        Αριθμός αποτελεσμάτων (εκτίμ.)
    
    
        Query details
        Λεπτομέρειες της αναζήτησης
    
    
        Snippets
        Αποσπάσματα
    


    ResTable
    
        Save table to CSV file
        Αποθήκευση σε ένα αρχείο CSV
    
    
        Can't open/create file: 
        Αδύνατο το άνοιγμα/δημιουργία του αρχείου:
    
    
        &Preview
        &Προεπισκόπηση
    
    
        &Open
        Ά&νοιγμα
    
    
        Copy &File Name
        Αντιγραφή του ονόματος του α&ρχείου
    
    
        Copy &URL
        Αντιγραφή URL
    
    
        &Write to File
        Απο&θήκευση σε
    
    
        Find &similar documents
        Αναζήτηση παρό&μοιων εγγράφων
    
    
        Preview P&arent document/folder
        Προεπισκόπηση του &γονικού εγγράφου/καταλόγου
    
    
        &Open Parent document/folder
        &Άνοιγμα του γονικού εγγράφου/καταλόγου
    
    
        &Reset sort
        &Επαναφορά της ταξινόμησης
    
    
        &Save as CSV
        &Αποθήκευση ως CSV
    
    
        &Delete column
        &Αφαίρεση της στήλης
    
    
        Add "%1" column
        Προσθήκη μιας στήλης «%1»
    


    ResTableDetailArea
    
        &Preview
        &Προεπισκόπηση
    
    
        &Open
        Ά&νοιγμα
    
    
        Copy &File Name
        Αντιγραφή του ονόματος του α&ρχείου
    
    
        Copy &URL
        Αντιγραφή URL
    
    
        &Write to File
        Απο&θήκευση σε
    
    
        Find &similar documents
        Αναζήτηση παρό&μοιων εγγράφων
    
    
        Preview P&arent document/folder
        Προεπισκόπηση του &γονικού εγγράφου/καταλόγου
    
    
        &Open Parent document/folder
        &Άνοιγμα του γονικού εγγράφου/καταλόγου
    


    ResultPopup
    
        &Preview
        &Προεπισκόπηση
    
    
        &Open
        Ά&νοιγμα
    
    
        Copy &File Name
        Αντιγραφή του ονόματος του α&ρχείου
    
    
        Copy &URL
        Αντιγραφή URL
    
    
        &Write to File
        Απο&θήκευση σε
    
    
        Save selection to files
        Αποθήκευση της επιλογής σε αρχεία
    
    
        Preview P&arent document/folder
        Προεπισκόπηση του &γονικού εγγράφου/καταλόγου
    
    
        &Open Parent document/folder
        &Άνοιγμα του γονικού εγγράφου/καταλόγου
    
    
        Find &similar documents
        Αναζήτηση παρό&μοιων εγγράφων
    
    
        Open &Snippets window
        Άνοιγμα του παραθύρου απο&σπασμάτων
    
    
        Show subdocuments / attachments
        Εμφάνιση των υπο-εγγράφων / συνημμένων
    


    SSearch
    
        Any term
        Οποιοσδήποτε όρος
    
    
        All terms
        Όλοι οι όροι
    
    
        File name
        Όνομα αρχείου
    
    
        Query language
        Γλώσσα ερωτημάτων
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date.<br>
 Two date interval examples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  No actual parentheses allowed.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        Εισάγετε μια έκφραση γλώσσας ερωτήματος. Σκονάκι:<br>
<i>term1 term2</i> : 'term1' ΚΑΙ 'term2' σε οποιοδήποτε πεδίο.<br>
<i>field:term1</i> : 'term1' αναζήτηση στο πεδίο 'field'.<br>
 Πρότυπα ονόματα/συνώνυμα πεδίων (χρήση αγγλικών λέξεων):<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date.<br>
 Παράδειγμα διαστημάτων ημερομηνιών: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  ΜΗΝ βάλετε τις παρενθέσεις.<br>
<i>"term1 term2"</i> : ακριβής πρόταση. Επιλογές:<br>
<i>"term1 term2"p</i> : εγγύτητα (χωρίς σειρά).<br>
Χρησιμοποιήστε το δεσμό <b>Λεπτομερειακή εμφάνιση του ερωτήματος</b> σε περίπτωση που υπάρχει αμφιβολία στα αποτελέσματα και δείτε το εγχειρίδιο (στα αγγλικά) (&lt;F1>) για περισσότερες λεπτομέρειες.
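
        A few hedged, hypothetical query strings built only from the constructs listed in this cheat sheet (field names stay in English whatever the interface language):

            author:dockes title:release date:2009-03-01/P2M
            recoll xapian OR sphinx
            "query language"p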

    
    
        Enter file name wildcard expression.
        Εισάγετε ένα όνομα αρχείου (επιτρέπονται και σύμβολα υποκατάστασης).
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Εισάγετε εδώ τους όρους της αναζήτησης. Πατήστε ESC SPC για να εμφανίσετε τις λέξεις που αρχίζουν με τον τρέχοντα όρο.
    
    
        Bad query string
        Μη αναγνωρισμένο ερώτημα
    
    
        Out of memory
        Δεν υπάρχει διαθέσιμη μνήμη
    
    
        Too many completions
        Πολλές πιθανές συμπληρώσεις
    
    
        Completions
        Συμπληρώσεις
    
    
        Select an item:
        Επιλέξτε ένα αντικείμενο:
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br>
 Two date interval examples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  You can use parentheses to make things clearer.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        Εισαγωγή έκφρασης γλώσσας ερωτήματος. «Σκονάκι»:<br>
<i>όρος1 όρος2</i> : 'όρος1' και 'όρος2' σε οποιοδήποτε πεδίο.<br>
<i>πεδίο:όρος1</i> : 'όρος1' στο πεδίο 'πεδίο'.<br>
 Τυπικά ονόματα/συνώνυμα πεδίων (χρήση αγγλικών λέξεων):<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Ψευδο-πεδία: dir, mime/format, type/rclcat, date, size.<br>
 Παραδείγματα δύο διαστημάτων ημερομηνιών: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>όρος1 όρος2 OR όρος3</i> : όρος1 AND (όρος2 OR όρος3).<br>
  Μπορείτε να χρησιμοποιείτε παρενθέσεις για πιο ευανάγνωστες εκφράσεις.<br>
<i>"όρος1 όρος2"</i> : φράση (πρέπει να αντιστοιχεί ακριβώς). Πιθανοί τροποποιητές:<br>
<i>"όρος1 όρος2"p</i> : αταξινόμητη και κατά προσέγγιση αναζήτηση με προκαθορισμένη απόσταση.<br>
Χρησιμοποιήστε τον δεσμό <b>Εμφάνιση ερωτήματος</b> σε περίπτωση αμφιβολίας σχετικά με το αποτέλεσμα και ανατρέξτε στο εγχειρίδιο χρήσης (&lt;F1>) για περισσότερες λεπτομέρειες.

    
    
        Stemming languages for stored query: 
        
    
    
         differ from current preferences (kept)
        
    
    
        Auto suffixes for stored query: 
        
    
    
        External indexes for stored query: 
        
    
    
        Autophrase is set but it was unset for stored query
        
    
    
        Autophrase is unset but it was set for stored query
        
    
    
        Enter search terms here.
        
    


    SSearchBase
    
        SSearchBase
        SSearchBase
    
    
        Erase search entry
        Καθαρισμός της καταχώρησης
    
    
        Clear
        Καθαρισμός
    
    
        Ctrl+S
        Ctrl+S
    
    
        Start query
        Έναρξη της αναζήτησης
    
    
        Search
        Αναζήτηση
    
    
        Choose search type.
        Επιλογή του τύπου αναζήτησης.
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Εισάγετε εδώ τους όρους αναζήτησης. Πατήστε ESC SPC για να εμφανίσετε τις λέξεις που αρχίζουν από τον τρέχοντα όρο.
    
    
        Show query history
        
    


    SearchClauseW
    
        Any
        Οποιοδήποτε
    
    
        All
        Όλα
    
    
        None
        Κανένα
    
    
        Phrase
        Φράση
    
    
        Proximity
        Εγγύτητα
    
    
        File name
        Όνομα του αρχείου
    
    
        No field
        Χωρίς πεδίο
    
    
        Select the type of query that will be performed with the words
        Επιλέξτε τον τύπο του ερωτήματος που θα πραγματοποιηθεί με τις λέξεις
    
    
        Number of additional words that may be interspersed with the chosen ones
        Αριθμός των επιπρόσθετων λέξεων που μπορούν να βρεθούν μεταξύ των αναζητηθέντων όρων
    


    Snippets
    
        Snippets
        Αποσπάσματα
    
    
        Find:
        Εύρεση:
    
    
        Next
        Επόμενο
    
    
        Prev
        Προηγούμενο
    


    SnippetsW
    
        Search
        Αναζήτηση
    
    
        <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p>
        <p>Λυπάμαι, δεν βρέθηκε μια ακριβής αντιστοιχία εντός ορίων. Πιθανώς το έγγραφο να είναι ογκώδες και ο δημιουργός αποσπασμάτων χάθηκε σε έναν λαβύρινθο...</p>
    
    
        Sort By Relevance
        
    
    
        Sort By Page
        
    


    SpecIdxW
    
        Special Indexing
        
    
    
        Else only modified or failed files will be processed.
        
    
    
        Erase selected files data before indexing.
        
    
    
        Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs).
        
    
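
        The "regular indexed area" mentioned above is the one defined by the topdirs setting. A minimal, hypothetical excerpt (the paths are placeholders):

            # ~/.recoll/recoll.conf -- hypothetical excerpt
            topdirs = ~/Documents ~/mail ~/projects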
    
        Browse
        Περιήγηση
    
    
        Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set.
        
    
    
        Selection patterns:
        
    
    
        Top indexed entity
        
    
    
        Retry previously failed files.
        
    
    
        Start directory. Must be part of the indexed tree. Use full indexed area if empty.
        
    


    SpellBase
    
        Term Explorer
        Εξερευνητής όρων
    
    
        &Expand 
        &Ανάπτυξη
    
    
        Alt+E
        Alt+E
    
    
        &Close
        &Κλείσιμο
    
    
        Alt+C
        Alt+C
    
    
        No db info.
        Δεν υπάρχουν πληροφορίες για τη βάση δεδομένων.
    
    
        Match
        Ταίριασμα
    
    
        Case
        Πεζά/κεφαλαία
    
    
        Accents
        Τόνοι
    


    SpellW
    
        Wildcards
        Χαρακτήρες υποκατάστασης
    
    
        Regexp
        Κανονική έκφραση
    
    
        Stem expansion
        Γραμματική επέκταση
    
    
        Spelling/Phonetic
        Ορθογραφία/Φωνητικό
    
    
        error retrieving stemming languages
        σφάλμα κατά τη λήψη των γλωσσών επέκτασης
    
    
        Term
        Όρος
    
    
        Doc. / Tot.
        Doc. / Tot.
    
    
        Index: %1 documents, average length %2 terms
        Ευρετήριο: %1 έγγραφα, μέσο μήκος %2 όροι
    
    
        Aspell init failed. Aspell not installed?
        Σφάλμα στην αρχικοποίηση του aspell. Μήπως δεν είναι εγκατεστημένο;
    
    
        Aspell expansion error. 
        Σφάλμα επέκτασης του aspell.
    
    
        No expansion found
        Κανένα αποτέλεσμα
    
    
        Index: %1 documents, average length %2 terms.%3 results
        Ευρετήριο: %1 έγγραφα, μέσο μήκος %2 όροι.%3 αποτελέσματα
    
    
        %1 results
        %1 αποτελέσματα
    
    
        List was truncated alphabetically, some frequent 
        Η λίστα έχει κοπεί αλφαβητικά, μερικοί συχνοί 
    
    
        terms may be missing. Try using a longer root.
        όροι μπορεί να λείπουν. Προσπαθήστε να χρησιμοποιήσετε μια πιο μακριά ρίζα.
    
    
        Show index statistics
        Εμφάνιση στατιστικών του ευρετηρίου
    
    
        Number of documents
        Αριθμός εγγράφων
    
    
        Average terms per document
        Μέσος όρος όρων ανά έγγραφο
    
    
        Smallest document length
        Μικρότερο μήκος εγγράφου
    
    
        Longest document length
        Μεγαλύτερο μήκος εγγράφου
    
    
        Database directory size
        Μέγεθος καταλόγου βάσης δεδομένων
    
    
        MIME types:
        Τύποι MIME:
    
    
        Item
        Αντικείμενο
    
    
        Value
        Τιμή
    
    
        List files which could not be indexed (slow)
        
    
    
        Spell expansion error. 
        
    
    
        Smallest document length (terms)
        
    
    
        Longest document length (terms)
        
    
    
        Results from last indexing:
        
    
    
          Documents created/updated
        
    
    
          Files tested
        
    
    
          Unindexed files
        
    


    UIPrefsDialog
    
        Choose
        Επιλέξτε
    
    
        error retrieving stemming languages
        σφάλμα κατά τη λήψη των γλωσσών επέκτασης
    
    
        Select xapian index directory (ie: /home/buddy/.recoll/xapiandb)
        Επιλέξτε έναν κατάλογο που περιέχει ένα ευρετήριο Xapian (π.χ. /home/buddy/.recoll/xapiandb)
    
    
        The selected directory does not appear to be a Xapian index
        Ο επιλεγμένος κατάλογος δεν φαίνεται ότι είναι ένα ευρετήριο Xapian
    
    
        This is the main/local index!
        Αυτό είναι το κύριο ευρετήριο!
    
    
        The selected directory is already in the index list
        Ο επιλεγμένος κατάλογος βρίσκεται ήδη στη λίστα
    
    
        Result list paragraph format (erase all to reset to default)
        Μορφή λίστας παραγράφου αποτελεσμάτων (διαγραφή όλων για επαναφορά στην εξ' ορισμού)
    
    
        Result list header (default is empty)
        Επικεφαλίδα λίστας αποτελεσμάτων (η εξ' ορισμού είναι κενή)
    
    
        Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb)
        Επιλέξτε τον κατάλογο διαμόρφωσης του recoll ή του καταλόγου ευρετηρίου του xapian (π.χ.: /home/me/.recoll ή /home/me/.recoll/xapiandb)
    
    
        The selected directory looks like a Recoll configuration directory but the configuration could not be read
        Ο επιλεγμένος κατάλογος φαίνεται ως ένας κατάλογος διαμόρφωσης του Recoll αλλά δεν είναι δυνατή η ανάγνωση της διαμόρφωσης
    
    
        At most one index should be selected
        Θα πρέπει να επιλεχθεί το πολύ ένα ευρετήριο
    
    
        Can't add index with different case/diacritics stripping option
        Αδύνατη η προσθήκη ευρετηρίου με διαφορετικές επιλογές διάκρισης πεζών/κεφαλαίων και αφαίρεσης τόνων
    
    
        Default QtWebkit font
        Γραμματοσειρά εξ ορισμού QtWebkit
    
    
        Any term
        Οποιοσδήποτε όρος
    
    
        All terms
        Όλοι οι όροι
    
    
        File name
        
    
    
        Query language
        Γλώσσα ερωτημάτων
    
    
        Value from previous program exit
        
    


    ViewAction
    
        MIME type
        Τύπος MIME
    
    
        Command
        Εντολή
    
    
        Changing actions with different current values
        Αλλαγή των ενεργειών με διαφορετικές τρέχουσες τιμές
    
    
        Desktop Default
        Προκαθορισμένο Επιφάνειας εργασίας
    
    
        Changing entries with different current values
        Αλλαγή αντικειμένων με τρέχουσες τιμές διαφορετικές
    


    ViewActionBase
    
        Native Viewers
        Εφαρμογές απεικόνισης
    
    
        Select one or several mime types then click "Change Action"<br>You can also close this dialog and check "Use desktop preferences"<br>in the main panel to ignore this list and use your desktop defaults.
        Επιλέξτε έναν ή περισσότερους τύπους MIME και κάντε κλικ στο «Αλλαγή της ενέργειας»<br>Μπορείτε επίσης να κλείσετε το διάλογο και να επιλέξετε «Χρήση των προτιμήσεων του περιβάλλοντος εργασίας»<br>στο κύριο παράθυρο για να αγνοήσετε αυτή τη λίστα και να χρησιμοποιήσετε τις προεπιλογές του περιβάλλοντος εργασίας σας.
    
    
        Select one or several file types, then click Change Action to modify the program used to open them
        Επιλέξτε έναν ή περισσότερους τύπους αρχείων και κάντε κλικ στο «Αλλαγή» για να αλλάξετε το πρόγραμμα που χρησιμοποιείται για το άνοιγμά τους
    
    
        Change Action
        Αλλαγή
    
    
        Close
        Κλείσιμο
    
    
        Select one or several mime types then use the controls in the bottom frame to change how they are processed.
        Επιλέξτε έναν ή περισσότερους τύπους αρχείων, και στη συνέχεια χρησιμοποιήστε τα κουμπιά ελέγχου στο πλαίσιο στο κάτω μέρος για να αλλάξετε τον τρόπο επεξεργασίας.
    
    
        Use Desktop preferences by default
        Χρήση εξ' ορισμού των προτιμήσεων της Επιφάνειας εργασίας
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        Επιλέξτε έναν ή περισσότερους τύπους αρχείων, και στη συνέχεια χρησιμοποιήστε τα κουμπιά ελέγχου στο παρακάτω πλαίσιο για να αλλάξετε τον τρόπο επεξεργασίας
    
    
        Exception to Desktop preferences
        Εξαίρεση των προτιμήσεων Επιφάνειας εργασίας
    
    
        Action (empty -> recoll default)
        Ενέργεια (κενό -> προκαθορισμένη του recoll)
    
    
        Apply to current selection
        Εφαρμογή στην τρέχουσα επιλογή
    
    
        Recoll action:
        Ενέργεια Recoll:
    
    
        current value
        τρέχουσα τιμή
    
    
        Select same
        Επιλογή ανά τιμή
    
    
        <b>New Values:</b>
        <b>Νέες τιμές:</b>
    


    Webcache
    
        Webcache editor
        
    
    
        Search regexp
        
    


    WebcacheEdit
    
        Copy URL
        
    
    
        Unknown indexer state. Can't edit webcache file.
        
    
    
        Indexer is running. Can't edit webcache file.
        
    
    
        Delete selection
        
    
    
        Webcache was modified, you will need to run the indexer after closing this window.
        
    


    WebcacheModel
    
        MIME
        
    
    
        Url
        
    


    confgui::ConfBeaglePanelW
    
        Steal Beagle indexing queue
        Κλέψιμο της ουράς ευρετηρίασης του Beagle
    
    
        Beagle MUST NOT be running. Enables processing the beagle queue to index Firefox web history.<br>(you should also install the Firefox Beagle plugin)
        Το Beagle ΔΕΝ ΠΡΕΠΕΙ να εκτελείται. Επιτρέπει την επεξεργασία της ουράς του Beagle για ευρετηρίαση του ιστορικού των ιστοσελίδων του Firefox.<br>(θα πρέπει επίσης να εγκαταστήσετε το πρόσθετο του Beagle για το Firefox)
    
    
        Web page store directory name
        Όνομα καταλόγου αποθήκευσης ιστοσελίδων
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        Το όνομα του καταλόγου αποθήκευσης αντιγράφων των επισκεφθέντων ιστοσελίδων.<br>Μια σχετική διαδρομή αναφερόμενη στη διαδρομή διαμόρφωσης.
    
    
        Max. size for the web store (MB)
        Μέγ. μέγεθος της λανθάνουσας μνήμης ιστού (MB)
    
    
        Entries will be recycled once the size is reached
        Θα γίνεται αντικατάσταση των καταχωρήσεων όταν επιτευχθεί το καθορισμένο μέγεθος
    
    
        Process the WEB history queue
        Επεξεργασία της ουράς ιστορικού του Ιστού
    
    
        Enables indexing Firefox visited pages.<br>(you also need to install the Firefox Recoll plugin)
        Ενεργοποιεί τη δεικτοδότηση των επισκεπτόμενων σελίδων στον Firefox.<br>(θα πρέπει να εγκαταστήσετε και το πρόσθετο Firefox Recoll)
    


    confgui::ConfIndexW
    
        Can't write configuration file
        Αδύνατη η εγγραφή του αρχείου διαμόρφωσης
    


    confgui::ConfParamFNW
    
        Choose
        Επιλέξτε
    


    confgui::ConfParamSLW
    
        +
        +
    
    
        -
        -
    
    
        Add entry
        
    
    
        Delete selected entries
        
    
    
        ~
        
    
    
        Edit selected entries
        
    


    confgui::ConfSearchPanelW
    
        Automatic diacritics sensitivity
        Αυτόματη ευαισθησία στους τόνους
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        <p>Αυτόματη εναλλαγή ευαισθησίας τονισμού αν ο όρος αναζήτησης διαθέτει τονισμένους χαρακτήρες (εκτός αυτών του unac_except_trans). Διαφορετικά θα πρέπει να χρησιμοποιήσετε τη γλώσσα της αναζήτησης και τον τροποποιητή <i>D</i> για τον καθορισμό της ευαισθησίας τονισμών.
    
    
        Automatic character case sensitivity
        Αυτόματη ευαισθησία πεζών/κεφαλαίων
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        <p>Αυτόματη εναλλαγή ευαισθησίας διάκρισης πεζών/κεφαλαίων αν ο όρος αναζήτησης διαθέτει κεφαλαία γράμματα (εκτός του πρώτου γράμματος). Διαφορετικά θα πρέπει να χρησιμοποιήσετε τη γλώσσα της αναζήτησης και τον τροποποιητή <i>C</i> για τον καθορισμό της ευαισθησίας διάκρισης πεζών / κεφαλαίων.
    
    
        Maximum term expansion count
        Μέγιστο μέγεθος επέκτασης ενός όρου
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        <p>Μέγιστος αριθμός επέκτασης για έναν όρο (π.χ.: κατά τη χρήση χαρακτήρων υποκατάστασης). Η προκαθορισμένη τιμή 10000 είναι λογική και θα αποφύγει ερωτήματα που εμφανίζονται σαν παγωμένα την ίδια στιγμή που η μηχανή διαπερνά τη λίστα όρων.
    
    
        Maximum Xapian clauses count
        Μέγιστος αριθμός ρητρών Xapian 
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        <p>Μέγιστος αριθμός στοιχειωδών ρητρών που προσθέτουμε σε ένα απλό ερώτημα Xapian. Σε μερικές περιπτώσεις, το αποτέλεσμα της επέκτασης των όρων μπορεί να είναι πολλαπλασιαστικό, και θα χρησιμοποιούσε υπερβολική μνήμη. Η προκαθορισμένη τιμή 100000 θα πρέπει να είναι επαρκής και συμβατή με μια τυπική διαμόρφωση υλικού.
    


    confgui::ConfSubPanelW
    
        Max. compressed file size (KB)
        Μεγ. μέγεθος για τα συμπιεσμένα αρχεία (KB)
    
    
        This value sets a threshold beyond which compressed files will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        Αυτή η τιμή καθορίζει ένα όριο πέραν του οποίου τα συμπιεσμένα αρχεία δεν θα επεξεργάζονται. Χρησιμοποιήστε -1 για κανένα όριο, 0 για να μην επεξεργάζονται τα συμπιεσμένα αρχεία.
    
    
        Max. text file size (MB)
        Μεγ. μέγεθος αρχείων κειμένου (MB)
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        Αυτή η τιμή ορίζει ένα όριο πέραν του οποίου δεν θα γίνεται ευρετηρίαση για τα αρχεία κειμένου. Ορίστε -1 για κανένα όριο.
Αυτό χρησιμεύει για τον αποκλεισμό από την ευρετηρίαση τεράστιων αρχείων καταγραφών.
    
    
        Text file page size (KB)
        Μέγεθος κοπής για τα αρχεία κειμένου (KB)
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text files (ie: log files).
        Αν αυτή η τιμή έχει οριστεί και είναι θετική, τα αρχεία κειμένου θα κοπούν σε κομμάτια αυτού του μεγέθους για την ευρετηρίαση.
Αυτό βοηθά στη μείωση των καταναλωμένων πόρων από την ευρετηρίαση και βοηθά τη φόρτωση για την προεπισκόπηση.
    
    
        Max. filter exec. time (S)
        Μεγ. χρόνος εκτέλεσης για ένα φίλτρο (S)
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        Τα εξωτερικά φίλτρα σε λειτουργία μεγαλύτερη από αυτό θα διακόπτονται. Χρήσιμο για τη σπάνια περίπτωση (π.χ. postscript) όπου ένα έγγραφο μπορεί να προκαλέσει ένα βρόχο στο φίλτρο. Ορίστε το σε -1 για να αφαιρέσετε το όριο.
    
    
        Global
        Γενικά
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        Τα εξωτερικά φίλτρα σε λειτουργία μεγαλύτερη από αυτό θα διακόπτονται. Χρήσιμο για τη σπάνια περίπτωση (π.χ. postscript) όπου ένα έγγραφο μπορεί να προκαλέσει ένα βρόχο στο φίλτρο. Ορίστε το σε -1 για να αφαιρέσετε το όριο.
    
    
        Only mime types
        Μόνο οι τύποι MIME
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        Μια αποκλειστική λίστα δεικτοδοτημένων τύπων mime.<br>Δεν θα δεικτοδοτηθεί τίποτα άλλο. Φυσιολογικά κενό και αδρανές
    
    
        Exclude mime types
        Αποκλεισμός τύπων αρχείων
    
    
        Mime types not to be indexed
        Οι τύποι Mime που δεν θα δεικτοδοτηθούν
    


    confgui::ConfTopPanelW
    
        Top directories
        Κατάλογοι εκκίνησης
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Η λίστα των καταλόγων για την έναρξη της αναδρομικής ευρετηρίασης. Προεπιλογή: ο προσωπικός σας κατάλογος.
    
    
        Skipped paths
        Παραλειπόμενες διαδρομές
    
    
        These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Πρόκειται για ονόματα καταλόγων που δεν θα δεικτοδοτηθούν.<br>Μπορούν να περιέχουν χαρακτήρες υποκατάστασης. Οι διαδρομές πρέπει να αντιστοιχούν με αυτές που είδε ο δεικτοδότης (π.χ: αν ένας κατάλογος έναρξης είναι '/home/me' και το '/home' είναι ένας δεσμός στο '/usr/home', μια σωστή διαδρομή εδώ θα ήταν '/home/me/tmp*' , όχι '/usr/home/me/tmp*')
    
    
        Stemming languages
        Γλώσσες για την επέκταση των όρων
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Οι γλώσσες για τις οποίες θα δημιουργηθούν τα λεξικά επεκτάσεων<br>των όρων.
    
    
        Log file name
        Όνομα του αρχείου καταγραφών
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        Το αρχείο που θα εγγραφούν τα μηνύματα.<br>Χρησιμοποιήστε 'stderr' για την έξοδο τερματικού
    
    
        Log verbosity level
        Επίπεδο ανάλυσης των καταγραφών
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Αυτή η τιμή ρυθμίζει την ποσότητα των απεσταλμένων μηνυμάτων,<br>από μόνο τα σφάλματα μέχρι πολλά δεδομένα αποσφαλμάτωσης.
    
    
        Index flush megabytes interval
        Καθυστέρηση εγγραφής του ευρετηρίου σε megabyte
    
    
        This value adjusts the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Αυτή η τιμή ρυθμίζει την ποσότητα των δεδομένων που δεικτοδοτούνται μεταξύ των εγγραφών στο δίσκο.<br>Βοηθά στον έλεγχο χρήσης της μνήμης. Προεπιλογή: 10MB 
    
    
        Max disk occupation (%)
        Μεγ. χρήση του δίσκου (%)
    
    
        This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default).
        Το ποσοστό χρήσης του δίσκου που θα σταματήσει η ευρετηρίαση (για να αποφευχθεί η υπερβολική πλήρωση).<br>0 σημαίνει χωρίς όριο (προεπιλογή).
    
    
        Use system's 'file' command
        Χρήση της εντολής 'file' του συστήματος
    
    
        Use the system's 'file' command if internal<br>mime type identification fails.
        Χρήση της εντολής 'file' αν ο εσωτερικός εντοπισμός<br>του τύπου mime δεν επιφέρει αποτελέσματα.
    
    
        No aspell usage
        Χωρίς χρήση του aspell
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        Απενεργοποιεί τη χρήση του aspell για τη δημιουργία των ορθογραφικών προσεγγίσεων.<br>Χρήσιμο αν το aspell δεν είναι εγκατεστημένο ή δεν λειτουργεί. 
    
    
        Aspell language
        Γλώσσα του aspell
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Η γλώσσα για το λεξικό aspell. Αυτό θα πρέπει να είναι του τύπου «en» ή «el» ...<br> Αν αυτή η τιμή δεν οριστεί, χρησιμοποιείται το εθνικό περιβάλλον NLS για να την υπολογίσει, που συνήθως δουλεύει. Για να πάρετε μια ιδέα του τι είναι εγκατεστημένο στο σύστημά σας, πληκτρολογήστε «aspell config» και παρατηρήστε τα αρχεία .dat στον κατάλογο «data-dir». 
    
    
        Database directory name
        Κατάλογος αποθήκευσης του ευρετηρίου
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Το όνομα του καταλόγου αποθήκευσης του ευρετηρίου<br>Μια σχετική διαδρομή αναφερόμενη στη διαδρομή διαμόρφωσης. Η εξ' ορισμού είναι «xapiandb». 
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Η γλώσσα για το λεξικό aspell. Αυτό θα πρέπει να είναι του τύπου «en» ή «el» ...<br> Αν αυτή η τιμή δεν οριστεί, χρησιμοποιείται το εθνικό περιβάλλον NLS για να την υπολογίσει, που συνήθως δουλεύει. Για να πάρετε μια ιδέα του τι είναι εγκατεστημένο στο σύστημά σας, πληκτρολογήστε «aspell config» και παρατηρήστε τα αρχεία .dat στον κατάλογο «data-dir». 
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        Το όνομα του καταλόγου αποθήκευσης του ευρετηρίου<br>Μια σχετική διαδρομή αναφερόμενη στη διαδρομή διαμόρφωσης. Η εξ' ορισμού είναι «xapiandb». 
    
    
        Unac exceptions
        Εξαιρέσεις unac
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        <p>Αυτές είναι εξαιρέσεις για τον μηχανισμό unac, ο οποίος εξ' ορισμού, αφαιρεί όλους τους τονισμούς, και πραγματοποιεί κανονική αποσύνθεση. Μπορείτε να αναιρέσετε την αφαίρεση των τονισμών για ορισμένους χαρακτήρες, ανάλογα με τη γλώσσα σας, και να διευκρινίσετε επιπλέον αποσυνθέσεις, για παράδειγμα για συμπλεγμένους χαρακτήρες. Σε κάθε καταχώρηση διαχωρισμένη με κενά, ο πρώτος χαρακτήρας είναι η πηγή και το υπόλοιπο είναι η μετάφραση.
    


    uiPrefsDialogBase
    
        User preferences
        Προτιμήσεις χρήστη
    
    
        User interface
        Περιβάλλον χρήστη
    
    
        Highlight color for query terms
        Χρώμα τονισμού των όρων αναζήτησης
    
    
        Style sheet
        Φύλλο στυλ
    
    
        Opens a dialog to select the style sheet file
        Ανοίγει έναν διάλογο για την επιλογή ενός αρχείου φύλλου στυλ
    
    
        Choose
        Επιλογή
    
    
        Resets the style sheet to default
        Επαναφέρει την προκαθορισμένη τιμή για το φύλλο στυλ
    
    
        Reset
        Επαναφορά
    
    
        Texts over this size will not be highlighted in preview (too slow).
        Τα κείμενα μεγαλύτερα από αυτό το μέγεθος δεν θα τονιστούν στην προεπισκόπηση (πολύ αργό).
    
    
        Maximum text size highlighted for preview (megabytes)
        Μεγ. μέγεθος τονισμένων κειμένων προς προεπισκόπηση (MB)
    
    
        Prefer Html to plain text for preview.
        Χρήση της μορφής HTML για την προεπισκόπηση.
    
    
        Lines in PRE text are not folded. Using BR loses some indentation.
        Οι γραμμές στις ενότητες PRE δεν αναδιπλώνονται. Η χρήση BR οδηγεί σε απώλεια ορισμένων εσοχών.
    
    
        Use <PRE> tags instead of <BR> to display plain text as html in preview.
        Χρήση <PRE> αντί <BR> για απλό κείμενο αντί html στην προεπισκόπηση.
    
    
        Use desktop preferences to choose document editor.
        Χρήση ρυθμίσεων του περιβάλλοντος για την επιλογή της εφαρμογής προβολής.
    
    
        Choose editor applications
        Επιλέξτε τους επεξεργαστές για τους διάφορους τύπους αρχείων
    
    
        Display category filter as toolbar instead of button panel (needs restart).
        Εμφάνιση φίλτρ. κατηγορίας ως γρ. εργαλείων αντί για πίν. κουμπιών (απαιτεί επανεκκίνηση).
    
    
        Auto-start simple search on whitespace entry.
        Αυτόματη έναρξη μιας απλής αναζήτησης όταν εισαχθεί ένα κενό.
    
    
        Start with advanced search dialog open.
        Εκκίνηση με τον διάλογο της προχωρημένης αναζήτησης ανοιχτό.
    
    
        Remember sort activation state.
        Απομνημόνευση της κατάστασης ενεργοποίησης της ταξινόμησης.
    
    
        Result List
        Λίστα αποτελεσμάτων
    
    
        Number of entries in a result page
        Αριθμός αποτελεσμάτων ανά σελίδα
    
    
        Result list font
        Γραμματοσειρά λίστας
    
    
        Opens a dialog to select the result list font
        Ανοίγει έναν διάλογο για την επιλογή της γραμματοσειράς για τη λίστα αποτελεσμάτων
    
    
        Helvetica-10
        Helvetica-10
    
    
        Resets the result list font to the system default
        Επαναφέρει τη γραμματοσειρά της λίστας αποτελεσμάτων στην προκαθορισμένη του συστήματος
    
    
        Edit result paragraph format string
        Επεξεργασία της μορφής της παραγράφου αποτελεσμάτων
    
    
        Edit result page html header insert
        Επεξεργασία του τμήματος για εισαγωγή στην κεφαλίδα HTML
    
    
        Date format (strftime(3))
        Μορφή της ημερομηνίας (strftime(3))
    
    
        Abstract snippet separator
        Διαχωριστής αποσπάσματος
    
    
        Search parameters
        Ρυθμίσεις αναζήτησης
    
    
        If checked, results with the same content under different names will only be shown once.
        Εμφανίζει μια μόνο καταχώρηση για τα αποτελέσματα με πανομοιότυπο περιεχόμενο.
    
    
        Hide duplicate results.
        Απόκρυψη των διπλοεγγραφών.
    
    
        Stemming language
        Γλώσσα για την ανάπτυξη των όρων
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        Μια αναζήτηση για [χωριάτικη σαλάτα] (2 όροι) θα συμπληρωθεί ως [χωριάτικη Ή σαλάτα Ή (χωριάτικη ΦΡΑΣΗ 2 σαλάτα)].<br>
Αυτό θα πρέπει να αποδώσει μια καλύτερη εγγύτητα των αποτελεσμάτων όπου οι αναζητούμενοι όροι εμφανίζονται ακριβώς με τη σειρά.
    
    
        Automatically add phrase to simple searches
        Προσθήκη αυτόματα μιας φράσης στις απλές αναζητήσεις 
    
    
        Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). 
        Όριο συχνότητας (ποσοστό) πέραν του οποίου οι όροι δεν θα χρησιμοποιούνται.
Οι φράσεις που περιέχουν πολύ συχνούς όρους δημιουργούν προβλήματα στην απόδοση.
Οι αγνοημένοι όροι αυξάνουν την απόσταση της φράσης, και μειώνουν την αποτελεσματικότητα της λειτουργίας αναζήτησης αυτόματης φράσης.
Η προκαθορισμένη τιμή είναι 2%. 
    
    
        Autophrase term frequency threshold percentage
        Όριο συχνότητας του όρου (ποσοστό) για την αυτόματη δημιουργία φράσεων
    
    
        Do we try to build abstracts for result list entries by using the context of query terms? 
May be slow for big documents.
        Αποφασίζει αν θα δημιουργούνται αποσπάσματα από το περιεχόμενο των όρων αναζήτησης.
Μπορεί να επιβραδύνει την απεικόνιση αν τα έγγραφα είναι μεγάλα.
    
    
        Dynamically build abstracts
        Δυναμική δημιουργία των αποσπασμάτων
    
    
        Do we synthesize an abstract even if the document seemed to have one?
        Θα πρέπει να γίνεται σύνθεση μιας σύνοψης ακόμα και αν το αρχικό έγγραφο διαθέτει μια;
    
    
        Replace abstracts from documents
        Αντικατάσταση των υπαρχόντων αποσπασμάτων στα έγγραφα
    
    
        Synthetic abstract size (characters)
        Μέγεθος του συνθετικού αποσπάσματος (χαρακτήρες)
    
    
        Synthetic abstract context words
        Αριθμός σχετικών λέξεων ανά εμφάνιση του όρου στο απόσπασμα
    
    
        The words in the list will be automatically turned to ext:xxx clauses in the query language entry.
        Οι λέξεις στη λίστα θα αλλάξουν αυτόματα σε ρήτρες ext:xxx στις καταχωρήσεις σε γλώσσα ερωτημάτων.
    
    
        Query language magic file name suffixes.
        Αυτόματα επιθήματα για τη γλώσσα ερωτημάτων.
    
    
        Enable
        Ενεργό
    
    
        External Indexes
        Εξωτερικά ευρετήρια
    
    
        Toggle selected
        Αλλαγή κατάστασης επιλεγμένων
    
    
        Activate All
        Ενεργοποίηση όλων
    
    
        Deactivate All
        Απενεργοποίηση όλων
    
    
        Remove from list. This has no effect on the disk index.
        Αφαίρεση από τη λίστα. Δεν έχει επίπτωση στο αποθηκευμένο ευρετήριο.
    
    
        Remove selected
        Αφαίρεση των επιλεγμένων
    
    
        Click to add another index directory to the list
        Κάντε κλικ για να προσθέσετε ένα άλλο ευρετήριο στη λίστα
    
    
        Add index
        Προσθήκη ευρετηρίου
    
    
        Apply changes
        Εφαρμογή των αλλαγών
    
    
        &OK
        &Εντάξει
    
    
        Discard changes
        Απόρριψη των αλλαγών
    
    
        &Cancel
        &Ακύρωση
    
    
        Plain text to HTML line style
        Στυλ μετάφρασης απλό κείμενο σε HTML
    
    
        Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.
        Οι γραμμές που είναι έγκλειστες στα PRE δεν αναδιπλώνονται. Η χρήση BR οδηγεί σε απώλεια ορισμένων εσοχών. Το στυλ PRE + αναδίπλωση φαίνεται να είναι η καλύτερη επιλογή αλλά η σωστή του λειτουργία εξαρτάται από την έκδοση της Qt.
    
    
        <BR>
        <BR>
    
    
        <PRE>
        <PRE>
    
    
        <PRE> + wrap
        <PRE> + αναδίπλωση
    
    
        Disable Qt autocompletion in search entry.
        Απενεργοποίηση της αυτόματης συμπλήρωσης Qt στην εισαγωγή αναζήτησης.
    
    
        Search as you type.
        Αναζήτηση κατά την πληκτρολόγηση.
    
    
        Path translations
        Μεταφράσεις διαδρομών
    
    
        Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.
        Κάντε κλικ για να προσθέσετε έναν κατάλογο ευρετηρίου στη λίστα. Μπορείτε να επιλέξετε είτε έναν κατάλογο διαμόρφωσης Recoll ή ένα ευρετήριο Xapian.
    
    
        Snippets window CSS file
        Αρχείο CSS παραθύρου αποσπάσματος
    
    
        Opens a dialog to select the Snippets window CSS style sheet file
        Ανοίγει έναν διάλογο που σας επιτρέπει την επιλογή του αρχείου στυλ CSS για το αναδυόμενο παράθυρο αποσπασμάτων
    
    
        Resets the Snippets window style
        Επαναφορά του στυλ του παραθύρου αποσπασμάτων
    
    
        Decide if document filters are shown as radio buttons, toolbar combobox, or menu.
        Καθορίζει αν τα φίλτρα των εγγράφων θα εμφανίζονται ως κουμπιά επιλογών, γραμμή εργαλείων πλαισίων συνδυασμών, ή μενού.
    
    
        Document filter choice style:
        Τεχνοτροπία επιλογής φίλτρου εγγράφων:
    
    
        Buttons Panel
        Πίνακας κουμπιών
    
    
        Toolbar Combobox
        Γραμμή εργαλείων πλαισίων συνδυασμών
    
    
        Menu
        Μενού
    
    
        Show system tray icon.
        Εμφάνιση του εικονιδίου πλαισίου συστήματος.
    
    
        Close to tray instead of exiting.
        Αντί για έξοδο, καταχώνιασμα στο πλαίσιο συστήματος.
    
    
        Highlight CSS style for query terms
        
    
    
        Start with simple search mode
        
    
    
        Show warning when opening temporary file.
        
    
    
        User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header.
        
    
    
        Synonyms file
        
    
    
        Recoll - User Preferences
        
    
    
        Set path translations for the selected index or for the main one if no selection exists.
        
    
    
        Activate links in preview.
        
    
    
        Make links inside the preview window clickable, and start an external browser when they are clicked.
        
    
    
        Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue...
        
    
    
        Start search on completer popup activation.
        
    
    
        Maximum number of snippets displayed in the snippets window
        
    
    
        Sort snippets by page number (default: by weight).
        
    
    
        Suppress all beeps.
        
    


recoll-1.26.3/qtgui/i18n/recoll_tr.ts0000644000175000017500000041042313566424763014275 00000000000000



    AdvSearch
    
        All clauses
        Tüm ifadeler
    
    
        Any clause
        İfadelerin herhangi biri
    
    
        texts
        metinler
    
    
        spreadsheets
        hesap tabloları
    
    
        presentations
        sunumlar
    
    
        media
        ortamlar
    
    
        messages
        iletiler
    
    
        other
        diğer
    
    
        Bad multiplier suffix in size filter
        
    
    
        text
        
    
    
        spreadsheet
        
    
    
        presentation
        
    
    
        message
        
    


    AdvSearchBase
    
        Advanced search
        Gelişmiş arama
    
    
        Search for <br>documents<br>satisfying:
        Uyan <br>belgeleri<br>ara:
    
    
        Delete clause
        İfadeyi sil
    
    
        Add clause
        İfade ekle
    
    
        Restrict file types
        Dosya tiplerini sınırlandır
    
    
        Check this to enable filtering on file types
        Dosya tipleri üzerinde filtreleme kullanmak için bunu işaretleyin
    
    
        By categories
        Kategorilere göre
    
    
        Check this to use file categories instead of raw mime types
        Ham mime tipleri yerine dosya kategorilerini kullanmak için bunu işaretleyin
    
    
        Save as default
        Öntanımlı olarak kaydet
    
    
        Searched file types
        Aranan dosya tipleri
    
    
        All ---->
        Tümü ---->
    
    
        Sel ----->
        Seç ----->
    
    
        <----- Sel
        <----- Seç
    
    
        <----- All
        <----- Tümü
    
    
        Ignored file types
        Yoksayılan dosya tipleri
    
    
        Enter top directory for search
        Arama için en üst dizini girin
    
    
        Browse
        Gözat
    
    
        Restrict results to files in subtree:
        Arama sonuçlarını bu dizin ve aşağısı ile sınırlandır:
    
    
        Start Search
        Aramayı Başlat
    
    
        Close
        Kapat
    
    
        All non empty fields on the right will be combined with AND ("All clauses" choice) or OR ("Any clause" choice) conjunctions. <br>"Any" "All" and "None" field types can accept a mix of simple words, and phrases enclosed in double quotes.<br>Fields with no data are ignored.
        
    
    
        Invert
        
    
    
        Minimum size. You can use k/K,m/M,g/G as multipliers
        
    
    
        Min. Size
        
    
    
        Maximum size. You can use k/K,m/M,g/G as multipliers
        
    
    
        Max. Size
        
    
    
        Filter
        
    
    
        From
        
    
    
        To
        
    
    
        Check this to enable filtering on dates
        
    
    
        Filter dates
        
    
    
        Find
        
    
    
        Check this to enable filtering on sizes
        
    
    
        Filter sizes
        
    


    ConfIndexW
    
        Can't write configuration file
        Yapılandırma dosyası yazılamadı
    
    
        Global parameters
        Genel parametreler
    
    
        Local parameters
        Yerel parametreler
    
    
        Search parameters
        Arama parametreleri
    
    
        Top directories
        Üst dizinler
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Özyinelemeli indekslemenin başlayacağı dizinlerin listesi. Öntanımlı: ev dizininiz.
    
    
        Skipped paths
        Atlanan yollar
    
    
        These are pathnames of directories which indexing will not enter.<br>Path elements may contain wildcards. The entries must match the paths seen by the indexer (e.g.: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        
    
    
        Stemming languages
        Sözcük kökleri ayrıştırılabilir diller
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Kök ayrıştırma genişlemesi için sözlükleri<br>inşa edilecek olan diller.
    
    
        Log file name
        Günlük dosyasının adı
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        İletilerin yazılacağı dosya.<br>Uçbirim çıktısı için 'stderr' kullanın
    
    
        Log verbosity level
        Günlük dosyası ayrıntı düzeyi
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Bu değer ileti boyutunu ayarlar,<br>sadece hatalardan hata ayıklama verilerine kadar.
    
    
        Index flush megabytes interval
        İndex düzeltme MB aralığı
    
    
        This value adjusts the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Bu değer diske gönderilecek indekslenmiş veri miktarını ayarlar.<br>Bu indeksleyicinin bellek kullanımını kontrol etmeye yarar. Öntanımlı 10MB 
    
    
        Disk full threshold to stop indexing<br>(e.g. 90%, 0 means no limit)
        
    
    
        This is the percentage of disk usage - total disk usage, not index size - at which indexing will fail and stop.<br>The default value of 0 removes any limit.
        
    
    
        No aspell usage
        Aspell kullanımı yok
    
    
        Disables use of aspell to generate spelling approximation in the term explorer tool.<br> Useful if aspell is absent or does not work. 
        
    
    
        Aspell language
        Aspell dili
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        
    
    
        Database directory name
        Veritabanı dizininin adı
    
    
        The name for a directory where to store the index<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        
    
    
        Unac exceptions
        
    
    
        <p>These are exceptions to the unac mechanism which, by default, removes all diacritics, and performs canonic decomposition. You can override unaccenting for some characters, depending on your language, and specify additional decompositions, e.g. for ligatures. In each space-separated entry, the first character is the source one, and the rest is the translation.
        
    
    
        Process the WEB history queue
        
    
    
        Enables indexing Firefox visited pages.<br>(you also need to install the Firefox Recoll plugin)
        
    
    
        Web page store directory name
        
    
    
        The name for a directory where to store the copies of visited web pages.<br>A non-absolute path is taken relative to the configuration directory.
        
    
    
        Max. size for the web store (MB)
        
    
    
        Entries will be recycled once the size is reached.<br>Only increasing the size really makes sense because reducing the value will not truncate an existing file (only waste space at the end).
        
    
    
        Automatic diacritics sensitivity
        
    
    
        <p>Automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the <i>D</i> modifier to specify diacritics sensitivity.
        
    
    
        Automatic character case sensitivity
        
    
    
        <p>Automatically trigger character case sensitivity if the entry has upper-case characters in any but the first position. Else you need to use the query language and the <i>C</i> modifier to specify character-case sensitivity.
        
    
    
        Maximum term expansion count
        
    
    
        <p>Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10 000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list.
        
    
    
        Maximum Xapian clauses count
        
    
    
        <p>Maximum number of elementary clauses we add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations.
        
    


    ConfSubPanelW
    
        Only mime types
        
    
    
        An exclusive list of indexed mime types.<br>Nothing else will be indexed. Normally empty and inactive
        
    
    
        Exclude mime types
        
    
    
        Mime types not to be indexed
        
    
    
        Max. compressed file size (KB)
        
    
    
        This value sets a threshold beyond which compressed files will not be processed. Set to -1 for no limit, to 0 for no decompression ever.
        
    
    
        Max. text file size (MB)
        
    
    
        This value sets a threshold beyond which text files will not be processed. Set to -1 for no limit. 
This is for excluding monster log files from the index.
        
    
    
        Text file page size (KB)
        
    
    
        If this value is set (not equal to -1), text files will be split in chunks of this size for indexing.
This will help searching very big text files (ie: log files).
        
    
    
        Max. filter exec. time (s)
        
    
    
        External filters working longer than this will be aborted. This is for the rare case (ie: postscript) where a document could cause a filter to loop. Set to -1 for no limit.

        
    
    
        Global
        Genel
    


    CronToolW
    
        Cron Dialog
        
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> batch indexing schedule (cron) </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Each field can contain a wildcard (*), a single numeric value, comma-separated lists (1,3,5) and ranges (1-7). More generally, the fields will be used <span style=" font-style:italic;">as is</span> inside the crontab file, and the full crontab syntax can be used, see crontab(5).</p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br />For example, entering <span style=" font-family:'Courier New,courier';">*</span> in <span style=" font-style:italic;">Days, </span><span style=" font-family:'Courier New,courier';">12,19</span> in <span style=" font-style:italic;">Hours</span> and <span style=" font-family:'Courier New,courier';">15</span> in <span style=" font-style:italic;">Minutes</span> would start recollindex every day at 12:15 PM and 7:15 PM</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">A schedule with very frequent activations is probably less efficient than real time indexing.</p></body></html>
        
    
    
        Days of week (* or 0-7, 0 or 7 is Sunday)
        
    
    
        Hours (* or 0-23)
        
    
    
        Minutes (0-59)
        
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Click <span style=" font-style:italic;">Disable</span> to stop automatic batch indexing, <span style=" font-style:italic;">Enable</span> to activate it, <span style=" font-style:italic;">Cancel</span> to change nothing.</p></body></html>
        
    
    
        Enable
        
    
    
        Disable
        
    
    
        It seems that manually edited entries exist for recollindex, cannot edit crontab
        
    
    
        Error installing cron entry. Bad syntax in fields ?
        
    


    EditDialog
    
        Dialog
        
    


    EditTrans
    
        Source path
        
    
    
        Local path
        
    
    
        Config error
        
    
    
        Original path
        
    


    EditTransBase
    
        Path Translations
        
    
    
        Setting path translations for 
        
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        
    
    
        Add
        
    
    
        Delete
        
    
    
        Cancel
        İptal
    
    
        Save
        
    


    FirstIdxDialog
    
        First indexing setup
        
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">It appears that the index for this configuration does not exist.</span><br /><br />If you just want to index your home directory with a set of reasonable defaults, press the <span style=" font-style:italic;">Start indexing now</span> button. You will be able to adjust the details later. </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">If you want more control, use the following links to adjust the indexing configuration and schedule.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">These tools can be accessed later from the <span style=" font-style:italic;">Preferences</span> menu.</p></body></html>
        
    
    
        Indexing configuration
        
    
    
        This will let you adjust the directories you want to index, and other parameters like excluded file paths or names, default character sets, etc.
        
    
    
        Indexing schedule
        
    
    
        This will let you choose between batch and real-time indexing, and set up an automatic schedule for batch indexing (using cron).
        
    
    
        Start indexing now
        
    


    FragButs
    
        %1 not found.
        
    
    
        %1:
 %2
        
    
    
        Query Fragments
        
    


    IdxSchedW
    
        Index scheduling setup
        
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p>
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html>
        
    
    
        Cron scheduling
        
    
    
        The tool will let you decide at what time indexing should run and will install a crontab entry.
        
    
    
        Real time indexing start up
        
    
    
        Decide if real time indexing will be started when you log in (only for the default index).
        
    


    ListDialog
    
        Dialog
        
    
    
        GroupBox
        
    


    Main
    
        Configuration problem (dynconf
        Yapılandırma sorunu
    
    
        No db directory in configuration
        Yapılandırma içerisinde veritabanı dizini yok
    
    
        Could not open database in 
        Veritabanı açılamadı
    
    
        .
Click Cancel if you want to edit the configuration file before indexing starts, or Ok to let it proceed.
        .
İndekseleme başlamadan yapılandırmayı düzenlemek için İptal düğmesine basın ya da Tamam düğmesine basarak işleme izin verin.
    
    
        "history" file is damaged, please check or remove it: 
        
    


    Preview
    
        Close Tab
        Sekmeyi Kapat
    
    
        Cannot create temporary directory
        Geçici dizin oluşturulamadı
    
    
        Cancel
        İptal
    
    
        Missing helper program: 
        Yardımcı program kayıp: 
    
    
        Can't turn doc into internal representation for 
        Şunun için iç gösterim yapılamıyor
    
    
        Creating preview text
        Önizleme metni oluşturuluyor
    
    
        Loading preview text into editor
        Önizleme metni düzenleyiciye yükleniyor
    
    
        &Search for:
        A&ra:
    
    
        &Next
        &Sonraki
    
    
        &Previous
        &Önceki
    
    
        Clear
        Temizle
    
    
        Match &Case
        Eşleşme Şa&rtı
    
    
        Form
        
    
    
        Tab 1
        
    
    
        Open
        
    
    
        Canceled
        
    
    
        Error loading the document: file missing.
        
    
    
        Error loading the document: no permission.
        
    
    
        Error loading: backend not configured.
        
    
    
        Error loading the document: other handler error<br>Maybe the application is locking the file ?
        
    
    
        Error loading the document: other handler error.
        
    
    
        <br>Attempting to display from stored text.
        
    
    
        Could not fetch stored text
        
    


    PreviewTextEdit
    
        Show fields
        
    
    
        Show main text
        
    
    
        Print
        
    
    
        Print Current Preview
        
    
    
        Show image
        
    
    
        Select All
        
    
    
        Copy
        
    
    
        Save document to file
        
    
    
        Fold lines
        
    
    
        Preserve indentation
        
    
    
        Open document
        
    


    QObject
    
        Global parameters
        Genel parametreler
    
    
        Local parameters
        Yerel parametreler
    
    
        <b>Customised subtrees
        <b>Özelleştirilmiş alt ağaçlar
    
    
        The list of subdirectories in the indexed hierarchy <br>where some parameters need to be redefined. Default: empty.
        İndekslenmiş sıralama içerisindeki alt dizinlerin listesi <br>ki burada bazı parametrelerin yeniden tanımlanması gerekir. Öntanımlı: boş.
    
    
        <i>The parameters that follow are set either at the top level, if nothing<br>or an empty line is selected in the listbox above, or for the selected subdirectory.<br>You can add or remove directories by clicking the +/- buttons.
        <i>Aşağıdaki parametreler, ya seçili alt dizin için uygulanır ya da üst düzeyde veya üstteki metin kutusunda hiçbir şey seçilmediğinde yada boş bir satır seçildiğinde uygulanır.<br>+/- düğmelerine tıklayarak dizinleri ekleyip çıkarabilirsiniz.
    
    
        Skipped names
        Atlanan isimler
    
    
        These are patterns for file or directory  names which should not be indexed.
        Bu nitelikler indekslenmemesi gereken dosya ve dizinler içindir.
    
    
        Default character set
        Öntanımlı karakter seti
    
    
        This is the character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environment is used.
        Bu karakter seti, karakter kodlaması uygulama tarafından belirlenemeyen dosyalar için kullanılır, örneğin salt metin dosyaları.<br>Öntanımlı değer boştur ve NLS çevresel değişkeni kullanılır.
    
    
        Follow symbolic links
        Sembolik bağlantıları izle
    
    
        Follow symbolic links while indexing. The default is no, to avoid duplicate indexing
        İndekslerken sembolik bağlantıları izle. Aynı ögelerin yeniden indekslenmesinden kaçınmak için öntanımlı değer hayır
    
    
        Index all file names
        Tüm dosya isimlerini indeksle
    
    
        Index the names of files for which the contents cannot be identified or processed (no or unsupported mime type). Default true
        İçeriği tanınmayan ya da işlenemeyen (ya da desteklenmeyen mime tipi) dosyaları indeksle. Öntanımlı evet
    
    
        Search parameters
        Arama parametreleri
    
    
        Default<br>character set
        
    
    
        Character set used for reading files which do not identify the character set internally, for example pure text files.<br>The default value is empty, and the value from the NLS environment is used.
        
    
    
        Ignored endings
        
    
    
        These are file name endings for files which will be indexed by name only 
(no MIME type identification attempt, no decompression, no content indexing).
        
    
    
        <i>The parameters that follow are set either at the top level, if nothing or an empty line is selected in the listbox above, or for the selected subdirectory. You can add or remove directories by clicking the +/- buttons.
        
    


    QWidget
    
        Create or choose save directory
        
    
    
        Choose exactly one directory
        
    
    
        Could not read directory: 
        
    
    
        Unexpected file name collision, cancelling.
        
    
    
        Cannot extract document: 
        
    
    
        &Preview
        &Önizle
    
    
        &Open
        
    
    
        Open With
        
    
    
        Run Script
        
    
    
        Copy &File Name
        &Dosya Adını Kopyala
    
    
        Copy &URL
        &Adresi Kopyala
    
    
        &Write to File
        
    
    
        Save selection to files
        
    
    
        Preview P&arent document/folder
        
    
    
        &Open Parent document/folder
        
    
    
        Find &similar documents
        Benzer belgeleri &bul
    
    
        Open &Snippets window
        
    
    
        Show subdocuments / attachments
        
    


    QxtConfirmationMessage
    
        Do not show again.
        
    


    RTIToolW
    
        Real time indexing automatic start
        
    
    
        <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">
<html><head><meta name="qrichtext" content="1" /><style type="text/css">
p, li { white-space: pre-wrap; }
</style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;">
<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can be set up to run as a daemon, updating the index as files change, in real time. You gain an always up to date index, but system resources are used permanently.</p>
<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p></body></html>
        
    
    
        Start indexing daemon with my desktop session.
        
    
    
        Also start indexing daemon right now.
        
    
    
        Replacing: 
        
    
    
        Replacing file
        
    
    
        Can't create: 
        
    
    
        Warning
        Uyarı
    
    
        Could not execute recollindex
        
    
    
        Deleting: 
        
    
    
        Deleting file
        
    
    
        Removing autostart
        
    
    
        Autostart file deleted. Kill current process too ?
        
    


    RclMain
    
        (no stemming)
        (kök ayrıştırma kullanma)
    
    
        (all languages)
        (tüm diller)
    
    
        error retrieving stemming languages
        sözcük kökleri ayrıştırılabilir diller alınırken hata oluştu
    
    
        Indexing in progress: 
        İndeksleme devam ediyor: 
    
    
        Files
        Dosyalar
    
    
        Purge
        Temizle
    
    
        Stemdb
        KökAyrıştırmaVeritabanı
    
    
        Closing
        Kapatılıyor
    
    
        Unknown
        Bilinmeyen
    
    
        Can't start query: 
        Sorgu başlatılamadı: 
    
    
        Query results
        Arama Sonuçları
    
    
        Cannot retrieve document info from database
        Veritabanından belge bilgileri alınamadı
    
    
        Warning
        Uyarı
    
    
        Can't create preview window
        Önizleme penceresi oluşturulamıyor
    
    
        This search is not active any more
        Bu arama artık etkin değil
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeconf file
        %1 için uygun olmayan komut: [%2]
Lütfen mimeconf dosyasını kontrol edin
    
    
        Cannot extract document or create temporary file
        Belge açılamadı ya da geçici dosya oluşturulamadı
    
    
        Executing: [
        Çalıştırılıyor: [
    
    
        About Recoll
        Recoll Hakkında
    
    
        History data
        Geçmiş verileri
    
    
        Document history
        Belge geçmişi
    
    
        Update &Index
        
    
    
        Stop &Indexing
        
    
    
        All
        
    
    
        media
        ortamlar
    
    
        message
        
    
    
        other
        diğer
    
    
        presentation
        
    
    
        spreadsheet
        
    
    
        text
        
    
    
        sorted
        
    
    
        filtered
        
    
    
        No helpers found missing
        
    
    
        Missing helper programs
        
    
    
        No external viewer configured for mime type [
        
    
    
        The viewer specified in mimeview for %1: %2 is not found.
Do you want to start the preferences dialog ?
        
    
    
        Can't access file: 
        
    
    
        Can't uncompress file: 
        
    
    
        Save file
        
    
    
        Result count (est.)
        
    
    
        Query details
        Sorgu detayları
    
    
        Could not open external index. Db not open. Check external indexes list.
        
    
    
        No results found
        
    
    
        None
        
    
    
        Updating
        
    
    
        Done
        
    
    
        Monitor
        
    
    
        Indexing failed
        
    
    
        The current indexing process was not started from this interface. Click Ok to kill it anyway, or Cancel to leave it alone
        
    
    
        Erasing index
        
    
    
        Reset the index and start from scratch ?
        
    
    
        Query in progress.<br>Due to limitations of the indexing library,<br>cancelling will exit the program
        
    
    
        Error
        
    
    
        Index query error
        
    
    
        Can't update index: indexer running
        
    
    
        Indexed MIME Types
        
    
    
        Bad viewer command line for %1: [%2]
Please check the mimeview file
        
    
    
        Viewer command line for %1 specifies both file and parent file value: unsupported
        
    
    
        Cannot find parent document
        
    
    
        External applications/commands needed for your file types and not found, as stored by the last indexing pass in 
        
    
    
        Sub-documents and attachments
        
    
    
        Document filter
        
    
    
        The indexer is running so things should improve when it's done. 
        
    
    
        Duplicate documents
        
    
    
        These Urls ( | ipath) share the same content:
        
    
    
        Bad desktop app spec for %1: [%2]
Please check the desktop file
        
    
    
        Indexing interrupted
        
    
    
        Bad paths
        
    
    
        Selection patterns need topdir
        
    
    
        Selection patterns can only be used with a start directory
        
    
    
        No search
        
    
    
        No preserved previous search
        
    
    
        Choose file to save
        
    
    
        Saved Queries (*.rclq)
        
    
    
        Write failed
        
    
    
        Could not write to file
        
    
    
        Read failed
        
    
    
        Could not open file: 
        
    
    
        Load error
        
    
    
        Could not load saved query
        
    
    
        Index scheduling
        
    
    
        Sorry, not available under Windows for now, use the File menu entries to update the index
        
    
    
        Disabled because the real time indexer was not compiled in.
        
    
    
        This configuration tool only works for the main index.
        
    
    
        Can't set synonyms file (parse error?)
        
    
    
        The document belongs to an external index which I can't update. 
        
    
    
        Opening a temporary copy. Edits will be lost if you don't save<br/>them to a permanent location.
        
    
    
        Do not show this warning next time (use GUI preferences to restore).
        
    
    
        Index locked
        
    
    
        Unknown indexer state. Can't access webcache file.
        
    
    
        Indexer is running. Can't access webcache file.
        
    
    
         with additional message: 
        
    
    
        Non-fatal indexing message: 
        
    
    
        Types list empty: maybe wait for indexing to progress?
        
    
    
        Viewer command line for %1 specifies parent file but URL is http[s]: unsupported
        
    
    
        Tools
        
    
    
        Results
        
    
    
        Content has been indexed for these MIME types:
        
    
    
        Empty or non-existent paths in configuration file. Click Ok to start indexing anyway (absent data will not be purged from the index):

        
    
    
        Indexing done
        
    
    
        Can't update index: internal error
        
    
    
        Index not up to date for this file.<br>
        
    
    
        <em>Also, it seems that the last index update for the file failed.</em><br/>
        
    
    
        Click Ok to try to update the index for this file. You will need to run the query again when indexing is done.<br>
        
    
    
        Click Cancel to return to the list.<br>Click Ignore to show the preview anyway (and remember for this session). There is a risk of showing the wrong entry.<br/>
        
    
    
        documents
        
    
    
        document
        
    
    
        files
        
    
    
        file
        
    
    
        errors
        
    
    
        error
        
    
    
        total files)
        
    
    
        No information: initial indexing not yet performed.
        
    


    RclMainBase
    
        Recoll
        Recoll
    
    
        Search tools
        Arama araçları
    
    
        Result list
        Sonuç listesi
    
    
        &File
        &Dosya
    
    
        &Tools
        &Araçlar
    
    
        &Preferences
        &Tercihler
    
    
        &Help
        &Yardım
    
    
        E&xit
        &Çık
    
    
        Ctrl+Q
        Ctrl+Q
    
    
        Update &index
        İndeksi g&üncelle
    
    
        &Erase document history
        &Belge geçmişini temizle
    
    
        &About Recoll
        &Recoll Hakkında
    
    
        &User manual
        &Kullanıcı El Kitabı
    
    
        Document &History
        Belge &Geçmişi
    
    
        Document  History
        Belge  Geçmişi
    
    
        &Advanced Search
        &Gelişmiş arama
    
    
        Advanced/complex  Search
        Gelişmiş/karmaşık   Arama
    
    
        &Sort parameters
        &Sıralama Ölçütleri
    
    
        Sort parameters
        Sıralama ölçütleri
    
    
        Term &explorer
        İfade g&österici
    
    
        Term explorer tool
        İfade gösterme aracı
    
    
        Next page
        Sonraki sayfa
    
    
        Next page of results
        Sonuçların sonraki sayfası
    
    
        First page
        İlk sayfa
    
    
        Go to first page of results
        Sonuçların ilk sayfasına git
    
    
        Previous page
        Önceki sayfa
    
    
        Previous page of results
        Sonuçların önceki sayfası
    
    
        &Query configuration
        &Sorgu yapılandırması
    
    
        External index dialog
        Dış indeksler penceresi
    
    
        &Indexing configuration
        İ&ndeksleme yapılandırması 
    
    
        PgDown
        
    
    
        PgUp
        
    
    
        &Full Screen
        
    
    
        F11
        
    
    
        Full Screen
        
    
    
        &Erase search history
        
    
    
        Sort by dates from oldest to newest
        
    
    
        Sort by dates from newest to oldest
        
    
    
        Show Query Details
        
    
    
        &Rebuild index
        
    
    
        Shift+PgUp
        
    
    
        E&xternal index dialog
        
    
    
        &Index configuration
        
    
    
        &GUI configuration
        
    
    
        &Results
        
    
    
        Sort by date, oldest first
        
    
    
        Sort by date, newest first
        
    
    
        Show as table
        
    
    
        Show results in a spreadsheet-like table
        
    
    
        Save as CSV (spreadsheet) file
        
    
    
        Saves the result into a file which you can load in a spreadsheet
        
    
    
        Next Page
        
    
    
        Previous Page
        
    
    
        First Page
        
    
    
        Query Fragments
        
    
    
            With failed files retrying
        
    
    
        Next update will retry previously failed files
        
    
    
        Indexing &schedule
        
    
    
        Enable synonyms
        
    
    
        Save last query
        
    
    
        Load saved query
        
    
    
        Special Indexing
        
    
    
        Indexing with special options
        
    
    
        &View
        
    
    
        Missing &helpers
        
    
    
        Indexed &MIME types
        
    
    
        Index &statistics
        
    
    
        Webcache Editor
        
    
    
        Trigger incremental pass
        
    


    RclTrayIcon
    
        Restore
        
    
    
        Quit
        
    


    RecollModel
    
        File name
        Dosya adı
    
    
        Mime type
        Mime Tipi
    
    
        Date
        Tarih
    
    
        Abstract
        
    
    
        Author
        
    
    
        Document size
        
    
    
        Document date
        
    
    
        File size
        
    
    
        File date
        
    
    
        Keywords
        
    
    
        Original character set
        
    
    
        Relevancy rating
        
    
    
        Title
        
    
    
        URL
        
    
    
        Mtime
        
    
    
        Date and time
        
    
    
        Ipath
        
    
    
        MIME type
        
    
    
        Can't sort by inverse relevance
        
    


    ResList
    
        Result list
        Sonuç listesi
    
    
        <p><b>No results found</b><br>
        <p><b>Sonuç bulunamadı</b><br>
    
    
        (show query)
        (sorguyu göster)
    
    
        Unavailable document
        Erişilemez belge
    
    
        Previous
        Önceki
    
    
        Next
        Sonraki
    
    
        &Preview
        &Önizle
    
    
        Copy &File Name
        &Dosya Adını Kopyala
    
    
        Copy &URL
        &Adresi Kopyala
    
    
        Find &similar documents
        Benzer belgeleri &bul
    
    
        Query details
        Sorgu detayları
    
    
        Document history
        Belge geçmişi
    
    
        Preview
        Önizle
    
    
        Open
        
    
    
        <p><i>Alternate spellings (accents suppressed): </i>
        
    
    
        Documents
        
    
    
        out of at least
        
    
    
        for
        
    
    
        <p><i>Alternate spellings: </i>
        
    
    
        Result count (est.)
        
    
    
        Snippets
        
    


    ResTable
    
        &Reset sort
        
    
    
        &Delete column
        
    
    
        Save table to CSV file
        
    
    
        Can't open/create file: 
        
    
    
        &Preview
        &Önizle
    
    
        Copy &File Name
        &Dosya Adını Kopyala
    
    
        Copy &URL
        &Adresi Kopyala
    
    
        Find &similar documents
        Benzer belgeleri &bul
    
    
        &Save as CSV
        
    
    
        Add "%1" column
        
    


    ResTableDetailArea
    
        &Preview
        &Önizle
    
    
        Copy &File Name
        &Dosya Adını Kopyala
    
    
        Copy &URL
        &Adresi Kopyala
    
    
        Find &similar documents
        Benzer belgeleri &bul
    


    ResultPopup
    
        &Preview
        &Önizle
    
    
        Copy &File Name
        &Dosya Adını Kopyala
    
    
        Copy &URL
        &Adresi Kopyala
    
    
        Find &similar documents
        Benzer belgeleri &bul
    


    SSearch
    
        Any term
        Sözcüklerin herhangi biri
    
    
        All terms
        Tüm sözcükler
    
    
        File name
        Dosya adı
    
    
        Query language
        Arama dili
    
    
        Bad query string
        Uygunsuz arama ifadesi
    
    
        Out of memory
        Yetersiz bellek
    
    
        Too many completions
        Çok fazla tamamlama
    
    
        Completions
        Tamamlamalar
    
    
        Select an item:
        Bir öge seçin:
    
    
        Enter file name wildcard expression.
        
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Aranacak ifadeleri buraya girin. Geçerli sözcüğün tamamlanması için ESC SPACE kullanın.
    
    
        Enter query language expression. Cheat sheet:<br>
<i>term1 term2</i> : 'term1' and 'term2' in any field.<br>
<i>field:term1</i> : 'term1' in field 'field'.<br>
 Standard field names/synonyms:<br>
  title/subject/caption, author/from, recipient/to, filename, ext.<br>
 Pseudo-fields: dir, mime/format, type/rclcat, date, size.<br>
 Two date interval examples: 2009-03-01/2009-05-20  2009-03-01/P2M.<br>
<i>term1 term2 OR term3</i> : term1 AND (term2 OR term3).<br>
  You can use parentheses to make things clearer.<br>
<i>"term1 term2"</i> : phrase (must occur exactly). Possible modifiers:<br>
<i>"term1 term2"p</i> : unordered proximity search with default distance.<br>
Use <b>Show Query</b> link when in doubt about result and see manual (&lt;F1>) for more detail.

        
    
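A few additional query strings, using only the syntax listed in the cheat sheet above (illustrative examples, not strings taken from the program):

    title:linux author:dockes                    ('linux' in the title field, 'dockes' in the author field)
    mime:application/pdf dir:/home/me/projects   (PDF documents below a given directory)
    budget date:2009-03-01/P2M                   ('budget' in documents dated within two months of 2009-03-01)
    "annual report"p OR summary                  (unordered proximity phrase, OR-ed with a plain term)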
    
        Stemming languages for stored query: 
        
    
    
         differ from current preferences (kept)
        
    
    
        Auto suffixes for stored query: 
        
    
    
        External indexes for stored query: 
        
    
    
        Autophrase is set but it was unset for stored query
        
    
    
        Autophrase is unset but it was set for stored query
        
    
    
        Enter search terms here.
        
    


    SSearchBase
    
        SSearchBase
        SSearchBase
    
    
        Clear
        Temizle
    
    
        Ctrl+S
        Ctrl+S
    
    
        Erase search entry
        Arama girdisini temizle
    
    
        Search
        Ara
    
    
        Start query
        Sorguyu başlat
    
    
        Enter search terms here. Type ESC SPC for completions of current term.
        Aranacak ifadeleri buraya girin. Geçerli sözcüğün tamamlanması için ESC SPACE kullanın.
    
    
        Choose search type.
        
    
    
        Show query history
        
    


    SearchClauseW
    
        SearchClauseW
        SearchClauseW
    
    
        Any of these
        Bunların herhangi biri
    
    
        All of these
        Bunların tümü
    
    
        None of these
        Bunların hiçbiri
    
    
        This phrase
        Tam olarak bu ifade
    
    
        Terms in proximity
        Yakın ifadeler
    
    
        File name matching
        Dosya adı eşleşen
    
    
        Select the type of query that will be performed with the words
        Sözcükler ile kullanılacak sorgu biçimini seç
    
    
        Number of additional words that may be interspersed with the chosen ones
        Seçilen sözcüklerin arasında yer alabilecek ek sözcüklerin sayısı
    
    
        No field
        
    
    
        Any
        
    
    
        All
        
    
    
        None
        
    
    
        Phrase
        
    
    
        Proximity
        
    
    
        File name
        Dosya adı
    


    Snippets
    
        Snippets
        
    
    
        Find:
        
    
    
        Next
        Sonraki
    
    
        Prev
        
    


    SnippetsW
    
        Search
        Ara
    
    
        <p>Sorry, no exact match was found within limits. Probably the document is very big and the snippets generator got lost in a maze...</p>
        
    
    
        Sort By Relevance
        
    
    
        Sort By Page
        
    


    SortForm
    
        Date
        Tarih
    
    
        Mime type
        Mime Tipi
    


    SortFormBase
    
        Sort Criteria
        Sıralama Ölçütü
    
    
        Sort the
        Sırala
    
    
        most relevant results by:
        en uygun sonuç veren:
    
    
        Descending
        Azalan
    
    
        Apply
        Uygula
    
    
        Close
        Kapat
    


    SpecIdxW
    
        Special Indexing
        
    
    
        Else only modified or failed files will be processed.
        
    
    
        Erase selected files data before indexing.
        
    
    
        Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs).
        
    
    
        Browse
        Gözat
    
    
        Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set.
        
    
    
        Selection patterns:
        
    
    
        Top indexed entity
        
    
    
        Retry previously failed files.
        
    
    
        Start directory. Must be part of the indexed tree. Use full indexed area if empty.
        
    

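The options above (start directory, selection patterns, erase before indexing, retry failed files) are turned into recollindex command line arguments by RclMain::specialIndex() in rclm_idx.cpp further down. A condensed sketch of that mapping, using ExecCmd as the GUI does (the optional -R reason-file argument is omitted; this mirrors the existing code rather than replacing it):

    // Build a recollindex argument list from the special indexing dialog choices.
    std::vector<std::string> args{"-c", theconfig->getConfDir()};
    std::string top = specidx->toptarg();              // start directory, may be empty
    if (!top.empty())
        args.push_back("-r");                          // recursive pass limited to 'top'
    if (specidx->eraseFirst()) {
        if (top.empty()) {
            args.push_back("-Z");                      // reset the whole index
        } else {
            args.push_back("-e");                      // erase data for the selected files,
            args.push_back("-i");                      // then reindex them
        }
    }
    args.push_back(specidx->noRetryFailed() ? "-K" : "-k");  // retry previously failed files or not
    for (const auto& pat : specidx->selpatterns()) {   // only valid with a start directory
        args.push_back("-p");
        args.push_back(pat);
    }
    if (!top.empty())
        args.push_back(top);
    ExecCmd *idxproc = new ExecCmd;
    idxproc->startExec("recollindex", args, false, false);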

    SpellBase
    
        Term Explorer
        İfade Gösterici
    
    
        &Expand 
        &Genişlet 
    
    
        Alt+E
        Alt+G
    
    
        &Close
        &Kapat
    
    
        Alt+C
        Alt+K
    
    
        Term
        İfade
    
    
        No db info.
        
    
    
        Match
        
    
    
        Case
        
    
    
        Accents
        
    


    SpellW
    
        Wildcards
        Özel karakterler
    
    
        Regexp
        Düzenli ifade
    
    
        Stem expansion
        Kök ayrıştırma genişlemesi
    
    
        Spelling/Phonetic
        Heceleme/Fonetik
    
    
        error retrieving stemming languages
        sözcük kökleri ayrıştırılabilir diller alınırken hata oluştu
    
    
        Aspell init failed. Aspell not installed?
        Aspell başlatılamadı. Yüklenmemiş olabilir mi?
    
    
        Aspell expansion error. 
        Aspell heceleme genişlemesi hatası. 
    
    
        No expansion found
        Hiç genişleme bulunamadı
    
    
        Term
        İfade
    
    
        Doc. / Tot.
        
    
    
        Index: %1 documents, average length %2 terms.%3 results
        
    
    
        %1 results
        
    
    
        List was truncated alphabetically, some frequent 
        
    
    
        terms may be missing. Try using a longer root.
        
    
    
        Show index statistics
        
    
    
        Number of documents
        
    
    
        Average terms per document
        
    
    
        Database directory size
        
    
    
        MIME types:
        
    
    
        Item
        
    
    
        Value
        
    
    
        Smallest document length (terms)
        
    
    
        Longest document length (terms)
        
    
    
        Results from last indexing:
        
    
    
          Documents created/updated
        
    
    
          Files tested
        
    
    
          Unindexed files
        
    
    
        List files which could not be indexed (slow)
        
    
    
        Spell expansion error. 
        
    


    UIPrefsDialog
    
        error retrieving stemming languages
        sözcük kökleri ayrıştırılabilir diller alınırken hata oluştu
    
    
        The selected directory does not appear to be a Xapian index
        Seçilen dizin bir Xapian indeks dizini gibi görünmüyor
    
    
        This is the main/local index!
        Bu ana/yerel veritabanı!
    
    
        The selected directory is already in the index list
        Seçilen dizin zaten indeks listesinde var
    
    
        Select xapian index directory (ie: /home/buddy/.recoll/xapiandb)
        Xapian indeks dizinini seç (/home/kullanıcı_adınız/.recoll/xapiandb gibi.)
    
    
        Choose
        Gözat
    
    
        Result list paragraph format (erase all to reset to default)
        
    
    
        Result list header (default is empty)
        
    
    
        Select recoll config directory or xapian index directory (e.g.: /home/me/.recoll or /home/me/.recoll/xapiandb)
        
    
    
        The selected directory looks like a Recoll configuration directory but the configuration could not be read
        
    
    
        At most one index should be selected
        
    
    
        Can't add index with different case/diacritics stripping option
        
    
    
        Default QtWebkit font
        
    
    
        Any term
        Sözcüklerin herhangi biri
    
    
        All terms
        Tüm sözcükler
    
    
        File name
        Dosya adı
    
    
        Query language
        Arama dili
    
    
        Value from previous program exit
        
    


    UIPrefsDialogBase
    
        User preferences
        Kullanıcı tercihleri
    
    
        User interface
        Kullanıcı arayüzü
    
    
        Number of entries in a result page
        Bir sonuç sayfasındaki sonuç sayısı
    
    
        Result list font
        Sonuç listesi yazıtipi
    
    
        Helvetica-10
        Helvetica-10
    
    
        Opens a dialog to select the result list font
        Sonuç listesi yazıtipini seçmek için bir pencere açar
    
    
        Reset
        Sıfırla
    
    
        Resets the result list font to the system default
        Sonuç listesi yazıtipini sistem ayarlarına döndür
    
    
        Result paragraph<br>format string
        Sonuç paragrafı<br>biçimlendirme ifadesi
    
    
        Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br>
        Tüm sonuç listesi paragraflarını tanımlar. Qt html biçimini ve printf benzeri yer değiştiricileri kullanın:<br>%A Özet<br> %D Tarih<br> %I Simge resminin adı<br> %K Anahtar sözcükler (eğer varsa)<br> %L Önizle ve Düzenle bağlantıları<br> %M Mime tipi<br> %N Sonuç sayısı<br> %R Uyum yüzdesi<br> %S Boyut bilgileri<br> %T Başlık<br> %U Url<br>
    
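As an illustration of the substitutions listed above, a simple paragraph format (a made-up example, not the shipped default) could be:

    <p><img src="%I" align="left">%R <b>%T</b><br>%M&nbsp;%D&nbsp;%S<br>%A<br>%K<br>%L<br><i>%U</i></p>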
    
        Texts over this size will not be highlighted in preview (too slow).
        Bu boyuttan büyük metinler önizlemede vurgulanmayacak (çok yavaş).
    
    
        Maximum text size highlighted for preview (megabytes)
        Önizlemede vurgulanacak en fazla metin boyutu (MB)
    
    
        Auto-start simple search on whitespace entry.
        Beyaz alan girdisi olduğunda basit aramayı otomatik olarak başlat.
    
    
        Start with advanced search dialog open.
        Gelişmiş arama penceresi ile başla.
    
    
        Start with sort dialog open.
        Sıralama penceresi ile başla.
    
    
        Use desktop preferences to choose document editor.
        Belge düzenleyiciyi seçmek için masaüstü tercihlerini kullan.
    
    
        Remember sort activation state.
        Sıralama kurallarını hatırla.
    
    
        Search parameters
        Arama parametreleri
    
    
        Stemming language
        Kök ayrıştırma dili
    
    
        Automatically add phrase to simple searches
        Basit aramalara ifadeyi otomatik olarak ekle
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        [linux kernel] (2 sözcük) araması [linux veya kernel veya (linux ifadesi 2 tane kernel)] olarak değiştirilecektir.  
Bu, aranacak sözcüklerin tam olarak girildiği gibi görüntülendiği sonuçlara yüksek öncelik verilmesini sağlayacaktır.
    
    
        Dynamically build abstracts
        Özetleri dinamik olarak oluştur
    
    
        Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.
        Sorgu sözcükleri kullanılarak sonuç listesi girdileri için özet oluşturulsun mu ? 
Büyük boyutlu belgelerde yavaş olabilir.
    
    
        Replace abstracts from documents
        Belgelerden özetleri kaldır
    
    
        Do we synthesize an abstract even if the document seemed to have one?
        Belgenin bir özeti varsa bile bir yapay özet oluşturulsun mu?
    
    
        Synthetic abstract size (characters)
        Yapay özet boyutu (karakter sayısı)
    
    
        Synthetic abstract context words
        Yapay özet sözcükleri
    
    
        External Indexes
        Dış indeksler
    
    
        External indexes
        Dış indeksler
    
    
        Toggle selected
        Seç /Bırak
    
    
        Activate All
        Tümünü Etkinleştir
    
    
        Deactivate All
        Tümünü Pasifleştir
    
    
        Remove selected
        Seçileni sil
    
    
        Remove from list. This has no effect on the disk index.
        Listeden sil. Bu diskteki indeksi etkilemez.
    
    
        Add index
        İndeks ekle
    
    
        Select the xapiandb directory for the index you want to add, then click Add Index
        İstediğiniz indeksi eklemek için xapiandb (veritabanı) dizinini seçin ve İndeks Ekle düğmesine tıklayın
    
    
        Browse
        Gözat
    
    
        &OK
        &TAMAM
    
    
        Apply changes
        Değişiklikleri uygula
    
    
        &Cancel
        &İptal
    
    
        Discard changes
        Değişiklikleri sil
    


    ViewAction
    
        Changing actions with different current values
        Farklı değerlerle eylemler değiştiriliyor
    
    
        Mime type
        Mime Tipi
    
    
        Command
        
    
    
        MIME type
        
    
    
        Desktop Default
        
    
    
        Changing entries with different current values
        
    


    ViewActionBase
    
        Native Viewers
        Doğal Göstericiler
    
    
        Select one or several file types, then click Change Action to modify the program used to open them
        Bir ya da birkaç dosya tipi seçin ve Eylemi Değiştir düğmesine tıklayarak hangi uygulama ile açılacağını değiştirin
    
    
        File type
        Dosya tipi
    
    
        Action
        Davranış
    
    
        Change Action
        Davranışı Değiştir
    
    
        Close
        Kapat
    
    
        Select one or several mime types then use the controls in the bottom frame to change how they are processed.
        
    
    
        Use Desktop preferences by default
        
    
    
        Select one or several file types, then use the controls in the frame below to change how they are processed
        
    
    
        Exception to Desktop preferences
        
    
    
        Action (empty -> recoll default)
        
    
    
        Apply to current selection
        
    
    
        Recoll action:
        
    
    
        current value
        
    
    
        Select same
        
    
    
        <b>New Values:</b>
        
    


    Webcache
    
        Webcache editor
        
    
    
        Search regexp
        
    


    WebcacheEdit
    
        Copy URL
        
    
    
        Unknown indexer state. Can't edit webcache file.
        
    
    
        Indexer is running. Can't edit webcache file.
        
    
    
        Delete selection
        
    
    
        Webcache was modified, you will need to run the indexer after closing this window.
        
    


    WebcacheModel
    
        MIME
        
    
    
        Url
        
    


    confgui::ConfIndexW
    
        Can't write configuration file
        Yapılandırma dosyası yazılamadı
    


    confgui::ConfParamFNW
    
        Browse
        Gözat
    
    
        Choose
        Gözat
    


    confgui::ConfParamSLW
    
        +
        +
    
    
        -
        -
    
    
        Add entry
        
    
    
        Delete selected entries
        
    
    
        ~
        
    
    
        Edit selected entries
        
    


    confgui::ConfSubPanelW
    
        Global
        Genel
    


    confgui::ConfTopPanelW
    
        Top directories
        Üst dizinler
    
    
        The list of directories where recursive indexing starts. Default: your home.
        Özyinelemeli indekslemenin başlayacağı dizinlerin listesi. Öntanımlı: ev dizininiz.
    
    
        Skipped paths
        Atlanan yollar
    
    
        These are names of directories which indexing will not enter.<br> May contain wildcards. Must match the paths seen by the indexer (ie: if topdirs includes '/home/me' and '/home' is actually a link to '/usr/home', a correct skippedPath entry would be '/home/me/tmp*', not '/usr/home/me/tmp*')
        Bunlar indekslemenin girmeyeceği dizinlerin adlarıdır.<br> * gibi özel karakterler içerebilir. İndeksleyici tarafından görülen yollar ile eşleşmelidir (örneğin: eğer en üst dizinler '/home/ben' ve '/home' içeriyorsa ve home '/usr/home' dizinine bağlantılı ise atlanacak dizin yolu '/home/me/tmp*' olmalıdır, '/usr/home/me/tmp*' değil)
    
    
        Stemming languages
        Sözcük kökleri ayrıştırılabilir diller
    
    
        The languages for which stemming expansion<br>dictionaries will be built.
        Kök ayrıştırma genişlemesi için sözlükleri<br>inşa edilecek olan diller.
    
    
        Log file name
        Günlük dosyasının adı
    
    
        The file where the messages will be written.<br>Use 'stderr' for terminal output
        İletilerin yazılacağı dosya.<br>Uçbirim çıktısı için 'stderr' kullanın
    
    
        Log verbosity level
        Günlük dosyası ayrıntı düzeyi
    
    
        This value adjusts the amount of messages,<br>from only errors to a lot of debugging data.
        Bu değer ileti miktarını ayarlar,<br>sadece hatalardan ayrıntılı hata ayıklama verilerine kadar.
    
    
        Index flush megabytes interval
        İndeks diske yazma aralığı (MB)
    
    
        This value adjusts the amount of data which is indexed between flushes to disk.<br>This helps control the indexer memory usage. Default 10MB 
        Bu değer diske gönderilecek indekslenmiş veri miktarını ayarlar.<br>Bu indeksleyicinin bellek kullanımını kontrol etmeye yarar. Öntanımlı 10MB 
    
    
        Max disk occupation (%)
        En yüksek disk kullanımı (%)
    
    
        This is the percentage of disk occupation where indexing will fail and stop (to avoid filling up your disk).<br>0 means no limit (this is the default).
        Bu disk kullanımının yüzdesidir ki bu orana erişildiğinde indeksleme durdurulur (diskin doldurulmasını engellemek için).<br>0 kısıtlama yok demektir (Öntanımlı).
    
    
        No aspell usage
        Aspell kullanımı yok
    
    
        Aspell language
        Aspell dili
    
    
        The language for the aspell dictionary. This should look like 'en' or 'fr' ...<br>If this value is not set, the NLS environment will be used to compute it, which usually works. To get an idea of what is installed on your system, type 'aspell config' and look for .dat files inside the 'data-dir' directory. 
        Aspell sözlükleri için dil. Bu 'en' ya da 'fr' gibi olmalıdır ...<br>Eğer bu değer ayarlanmazsa şu anda kullandığınız NLS çevresel değişkeni kullanılacaktır. Sisteminizde neyin yüklü olduğu hakkında bilgi almak için 'aspell config' yazıp 'data-dir' içerisindeki .dat dosyalarına bakın. 
    
    
        Database directory name
        Veritabanı dizininin adı
    
    
        The name of the directory where the index will be stored.<br>A non-absolute path is taken relative to the configuration directory. The default is 'xapiandb'.
        İndeksin saklanacağı dizinin adı.<br>Tam yol verilmezse, yol yapılandırma dizinine göre belirlenir. Öntanımlı dizin adı 'xapiandb'.
    
    
        Use system's 'file' command
        Sistemdeki 'file' komutunu kullan
    
    
        Use the system's 'file' command if internal<br>mime type identification fails.
        İç mime tipi belirleme işlemi başarısız olursa<br> sistemdeki 'file' komutunu kullan.
    

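The values described by the tooltips above live in Recoll's simple 'name = value' configuration files, which the code reads through the ConfSimple class (the same class is used further down in rclm_idx.cpp to parse the indexing reason file). A minimal sketch of that usage; the 'topdirs' name comes from the tooltips above, while 'loglevel' and the header name are assumptions:

    #include <iostream>
    #include <string>
    #include <vector>

    #include "conftree.h"   // header providing ConfSimple (name assumed)

    int main()
    {
        // A small configuration fragment; 'topdirs' is named in the tooltips above.
        std::string frag = "topdirs = ~/Documents ~/mail\n"
                           "loglevel = 3\n";
        ConfSimple conf(frag);                      // parse from a string, as in rclm_idx.cpp
        std::string value;
        for (const auto& nm : conf.getNames("")) {  // parameter names in the global section
            if (conf.get(nm, value))
                std::cout << nm << " -> " << value << "\n";
        }
        return 0;
    }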

    uiPrefsDialogBase
    
        User preferences
        Kullanıcı tercihleri
    
    
        User interface
        Kullanıcı arayüzü
    
    
        Number of entries in a result page
        Bir sonuç sayfasındaki sonuç sayısı
    
    
        If checked, results with the same content under different names will only be shown once.
        
    
    
        Hide duplicate results.
        
    
    
        Result list font
        Sonuç listesi yazıtipi
    
    
        Opens a dialog to select the result list font
        Sonuç listesi yazıtipini seçmek için bir pencere açar
    
    
        Helvetica-10
        Helvetica-10
    
    
        Resets the result list font to the system default
        Sonuç listesi yazıtipini sistem ayarlarına döndür
    
    
        Reset
        Sıfırla
    
    
        Defines the format for each result list paragraph. Use qt html format and printf-like replacements:<br>%A Abstract<br> %D Date<br> %I Icon image name<br> %K Keywords (if any)<br> %L Preview and Edit links<br> %M Mime type<br> %N Result number<br> %R Relevance percentage<br> %S Size information<br> %T Title<br> %U Url<br>
        Tüm sonuç listesi paragraflarını tanımlar. Qt html biçimini ve printf benzeri yer değiştiricileri kullanın:<br>%A Özet<br> %D Tarih<br> %I Simge resminin adı<br> %K Anahtar sözcükler (eğer varsa)<br> %L Önizle ve Düzenle bağlantıları<br> %M Mime tipi<br> %N Sonuç sayısı<br> %R Uyum yüzdesi<br> %S Boyut bilgileri<br> %T Başlık<br> %U Url<br>
    
    
        Result paragraph<br>format string
        Sonuç paragrafı<br>biçimlendirme ifadesi
    
    
        Texts over this size will not be highlighted in preview (too slow).
        Bu boyuttan büyük metinler önizlemede vurgulanmayacak (çok yavaş).
    
    
        Maximum text size highlighted for preview (megabytes)
        Önizlemede vurgulanacak en fazla metin boyutu (MB)
    
    
        Use desktop preferences to choose document editor.
        Belge düzenleyiciyi seçmek için masaüstü tercihlerini kullan.
    
    
        Choose editor applications
        
    
    
        Auto-start simple search on whitespace entry.
        Beyaz alan girdisi olduğunda basit aramayı otomatik olarak başlat.
    
    
        Start with advanced search dialog open.
        Gelişmiş arama penceresi ile başla.
    
    
        Start with sort dialog open.
        Sıralama penceresi ile başla.
    
    
        Remember sort activation state.
        Sıralama kurallarını hatırla.
    
    
        Prefer Html to plain text for preview.
        
    
    
        Search parameters
        Arama parametreleri
    
    
        Stemming language
        Kök ayrıştırma dili
    
    
        A search for [rolling stones] (2 terms) will be changed to [rolling or stones or (rolling phrase 2 stones)]. 
This should give higher precedence to the results where the search terms appear exactly as entered.
        [linux kernel] (2 sözcük) araması [linux veya kernel veya (linux ifadesi 2 tane kernel)] olarak değiştirilecektir.  
Bu, aranacak sözcüklerin tam olarak girildiği gibi görüntülendiği sonuçlara yüksek öncelik verilmesini sağlayacaktır.
    
    
        Automatically add phrase to simple searches
        Basit aramalara ifadeyi otomatik olarak ekle
    
    
        Do we try to build abstracts for result list entries by using the context of query terms ? 
May be slow for big documents.
        Sorgu sözcükleri kullanılarak sonuç listesi girdileri için özet oluşturulsun mu ? 
Büyük boyutlu belgelerde yavaş olabilir.
    
    
        Dynamically build abstracts
        Özetleri dinamik olarak oluştur
    
    
        Do we synthesize an abstract even if the document seemed to have one?
        Belgenin bir özeti varsa bile bir yapay özet oluşturulsun mu?
    
    
        Replace abstracts from documents
        Belgelerden özetleri kaldır
    
    
        Synthetic abstract size (characters)
        Yapay özet boyutu (karakter sayısı)
    
    
        Synthetic abstract context words
        Yapay özet sözcükleri
    
    
        The words in the list will be automatically turned to ext:xxx clauses in the query language entry.
        
    
    
        Query language magic file name suffixes.
        
    
    
        Enable
        
    
    
        External Indexes
        Dış indeksler
    
    
        Toggle selected
        Seç /Bırak
    
    
        Activate All
        Tümünü Etkinleştir
    
    
        Deactivate All
        Tümünü Pasifleştir
    
    
        Remove from list. This has no effect on the disk index.
        Listeden sil. Bu diskteki indeksi etkilemez.
    
    
        Remove selected
        Seçileni sil
    
    
        Add index
        İndeks ekle
    
    
        Apply changes
        Değişiklikleri uygula
    
    
        &OK
        &TAMAM
    
    
        Discard changes
        Değişiklikleri sil
    
    
        &Cancel
        &İptal
    
    
        Abstract snippet separator
        
    
    
        Style sheet
        
    
    
        Opens a dialog to select the style sheet file
        
    
    
        Choose
        Gözat
    
    
        Resets the style sheet to default
        
    
    
        Result List
        
    
    
        Edit result paragraph format string
        
    
    
        Edit result page html header insert
        
    
    
        Date format (strftime(3))
        
    
    
        Frequency percentage threshold over which we do not use terms inside autophrase. 
Frequent terms are a major performance issue with phrases. 
Skipped terms augment the phrase slack, and reduce the autophrase efficiency.
The default value is 2 (percent). 
        
    
    
        Autophrase term frequency threshold percentage
        
    
    
        Plain text to HTML line style
        
    
    
        Lines in PRE text are not folded. Using BR loses some indentation. PRE + Wrap style may be what you want.
        
    
    
        <BR>
        
    
    
        <PRE>
        
    
    
        <PRE> + wrap
        
    
    
        Disable Qt autocompletion in search entry.
        
    
    
        Paths translations
        
    
    
        Click to add another index directory to the list. You can select either a Recoll configuration directory or a Xapian index.
        
    
    
        Snippets window CSS file
        
    
    
        Opens a dialog to select the Snippets window CSS style sheet file
        
    
    
        Resets the Snippets window style
        
    
    
        Decide if document filters are shown as radio buttons, toolbar combobox, or menu.
        
    
    
        Document filter choice style:
        
    
    
        Buttons Panel
        
    
    
        Toolbar Combobox
        
    
    
        Menu
        
    
    
        Show system tray icon.
        
    
    
        Close to tray instead of exiting.
        
    
    
        Start with simple search mode
        
    
    
        User style to apply to the snippets window.<br> Note: the result page header insert is also included in the snippets window header.
        
    
    
        Synonyms file
        
    
    
        Show warning when opening temporary file.
        
    
    
        Highlight CSS style for query terms
        
    
    
        Recoll - User Preferences
        
    
    
        Set path translations for the selected index or for the main one if no selection exists.
        
    
    
        Activate links in preview.
        
    
    
        Make links inside the preview window clickable, and start an external browser when they are clicked.
        
    
    
        Query terms highlighting in results. <br>Maybe try something like "color:red;background:yellow" for something more lively than the default blue...
        
    
    
        Start search on completer popup activation.
        
    
    
        Maximum number of snippets displayed in the snippets window
        
    
    
        Sort snippets by page number (default: by weight).
        
    
    
        Suppress all beeps.
        
    


recoll-1.26.3/qtgui/rclm_idx.cpp0000644000175000017500000004526313533651561013464 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include 
#include "safeunistd.h"

#include 
#include 

#include "execmd.h"
#include "log.h"
#include "transcode.h"
#include "indexer.h"
#include "rclmain_w.h"
#include "specialindex.h"
#include "readfile.h"
#include "snippets_w.h"
#include "idxstatus.h"

using namespace std;


// This is called from periodic100 if we started an indexer, or from
// the rclmain idxstatus file watcher, every time the file changes.
void RclMain::updateIdxStatus()
{
    DbIxStatus status;
    readIdxStatus(theconfig, status);
    QString msg = tr("Indexing in progress: ");
    QString phs;
    switch (status.phase) {
    case DbIxStatus::DBIXS_NONE:phs=tr("None");break;
    case DbIxStatus::DBIXS_FILES: phs=tr("Updating");break;
    case DbIxStatus::DBIXS_PURGE: phs=tr("Purge");break;
    case DbIxStatus::DBIXS_STEMDB: phs=tr("Stemdb");break;
    case DbIxStatus::DBIXS_CLOSING:phs=tr("Closing");break;
    case DbIxStatus::DBIXS_DONE:phs=tr("Done");break;
    case DbIxStatus::DBIXS_MONITOR:phs=tr("Monitor");break;
    default: phs=tr("Unknown");break;
    }
    msg += phs + " ";
    if (status.phase == DbIxStatus::DBIXS_FILES) {
        QString sdocs = status.docsdone > 1 ?tr("documents") : tr("document");
        QString sfiles = status.filesdone > 1 ? tr("files") : tr("file");
        QString serrors = status.fileerrors > 1 ? tr("errors") : tr("error");
        QString stats;
        if (status.dbtotdocs > 0) {
            stats = QString("(%1 ") + sdocs + "/%2 " + sfiles +
                "/%3 " + serrors + "/%4 " + tr("total files)");
            stats = stats.arg(status.docsdone).arg(status.filesdone).
                arg(status.fileerrors).arg(status.totfiles);
        } else {
            stats = QString("(%1 ") + sdocs + "/%2 " + sfiles +
                "/%3 " + serrors + ") ";
            stats = stats.arg(status.docsdone).arg(status.filesdone).
                arg(status.fileerrors);
        }
        msg += stats + " ";
    }
    string mf;int ecnt = 0;
    string fcharset = theconfig->getDefCharset(true);
    // If already UTF-8 let it be, else try to transcode, or url-encode
    if (!transcode(status.fn, mf, "UTF-8", "UTF-8", &ecnt) || ecnt) {
        if (!transcode(status.fn, mf, fcharset, "UTF-8", &ecnt) || ecnt) {
            mf = url_encode(status.fn, 0);
        }
    }
    msg += QString::fromUtf8(mf.c_str());
    statusBar()->showMessage(msg, 4000);
}
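
// The three-step fallback above (keep the name if it is already valid UTF-8,
// else transcode from the configured default charset, else URL-encode it) can
// be isolated as a small helper. A sketch only, using transcode() and
// url_encode() exactly as they are called above; not used elsewhere in this file:
static string displayableFilename(const string& fn, const string& fcharset)
{
    string out;
    int ecnt = 0;
    if (transcode(fn, out, "UTF-8", "UTF-8", &ecnt) && !ecnt)
        return out;                  // already clean UTF-8
    ecnt = 0;
    if (transcode(fn, out, fcharset, "UTF-8", &ecnt) && !ecnt)
        return out;                  // converted from the default charset
    return url_encode(fn, 0);        // last resort: URL-encoded form
}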

// This is called by a periodic timer to check the status of 
// indexing, a possible need to exit, and cleanup exited viewers
void RclMain::periodic100()
{
    LOGDEB2("Periodic100\n" );
    if (!m_idxreasontmp || !m_idxreasontmp->ok()) {
        // We just store the pointer and let the tempfile cleaner deal
        // with delete on exiting
        TempFile temp(".txt");
        m_idxreasontmp = rememberTempFile(temp);
    }

    if (m_idxproc) {
        // An indexing process was launched. If it's done, check its status.
        int status;
        bool exited = m_idxproc->maybereap(&status);
        if (exited) {
            QString reasonmsg;
            if (m_idxreasontmp && m_idxreasontmp->ok()) {
                string reasons;
                file_to_string(m_idxreasontmp->filename(), reasons);
                if (!reasons.empty()) {
                    ConfSimple rsn(reasons);
                    vector<string> sects = rsn.getNames("");
                    for (const auto& nm : sects) {
                        string val;
                        if (rsn.get(nm, val)) {
                            reasonmsg.append(u8s2qs(string("
") + nm + " : " + val)); } } } } deleteZ(m_idxproc); if (status) { if (m_idxkilled) { QMessageBox::warning(0, "Recoll", tr("Indexing interrupted")); m_idxkilled = false; } else { QString msg(tr("Indexing failed")); if (!reasonmsg.isEmpty()) { msg.append(tr(" with additional message: ")); msg.append(reasonmsg); } QMessageBox::warning(0, "Recoll", msg); } } else { // On the first run, show missing helpers. We only do this once if (m_firstIndexing) showMissingHelpers(); if (!reasonmsg.isEmpty()) { QString msg = tr("Non-fatal indexing message: "); msg.append(reasonmsg); QMessageBox::warning(0, "Recoll", msg); } } string reason; maybeOpenDb(reason, 1); } else { // update/show status even if the status file did not // change (else the status line goes blank during // lengthy operations). updateIdxStatus(); } } // Update the "start/stop indexing" menu entry, can't be done from // the "start/stop indexing" slot itself IndexerState prevstate = m_indexerState; if (m_idxproc) { m_indexerState = IXST_RUNNINGMINE; fileToggleIndexingAction->setText(tr("Stop &Indexing")); fileToggleIndexingAction->setEnabled(true); fileRebuildIndexAction->setEnabled(false); actionSpecial_Indexing->setEnabled(false); periodictimer->setInterval(200); } else { Pidfile pidfile(theconfig->getPidfile()); pid_t pid = pidfile.open(); fileBumpIndexingAction->setEnabled(false); if (pid == getpid()) { // Locked by me m_indexerState = IXST_NOTRUNNING; fileToggleIndexingAction->setText(tr("Index locked")); fileToggleIndexingAction->setEnabled(false); fileRebuildIndexAction->setEnabled(false); actionSpecial_Indexing->setEnabled(false); periodictimer->setInterval(1000); } else if (pid == 0) { m_indexerState = IXST_NOTRUNNING; fileToggleIndexingAction->setText(tr("Update &Index")); fileToggleIndexingAction->setEnabled(true); fileRebuildIndexAction->setEnabled(true); actionSpecial_Indexing->setEnabled(true); periodictimer->setInterval(1000); } else { // Real time or externally started batch indexer running m_indexerState = IXST_RUNNINGNOTMINE; fileToggleIndexingAction->setText(tr("Stop &Indexing")); DbIxStatus status; readIdxStatus(theconfig, status); if (status.hasmonitor) { // Real-time indexer running. We can trigger an // incremental pass fileBumpIndexingAction->setEnabled(true); } fileToggleIndexingAction->setEnabled(true); fileRebuildIndexAction->setEnabled(false); actionSpecial_Indexing->setEnabled(false); periodictimer->setInterval(200); } } if ((prevstate == IXST_RUNNINGMINE || prevstate == IXST_RUNNINGNOTMINE) && m_indexerState == IXST_NOTRUNNING) { showTrayMessage(tr("Indexing done")); } // Possibly cleanup the dead viewers for (vector::iterator it = m_viewers.begin(); it != m_viewers.end(); it++) { int status; if ((*it)->maybereap(&status)) { deleteZ(*it); } } vector v; for (vector::iterator it = m_viewers.begin(); it != m_viewers.end(); it++) { if (*it) v.push_back(*it); } m_viewers = v; if (recollNeedsExit) fileExit(); } bool RclMain::checkIdxPaths() { string badpaths; vector args {"recollindex", "-c", theconfig->getConfDir(), "-E"}; ExecCmd::backtick(args, badpaths); if (!badpaths.empty()) { int rep = QMessageBox::warning( 0, tr("Bad paths"), tr("Empty or non-existant paths in configuration file. " "Click Ok to start indexing anyway " "(absent data will not be purged from the index):\n") + QString::fromLocal8Bit(badpaths.c_str()), QMessageBox::Ok, QMessageBox::Cancel, QMessageBox::NoButton); if (rep == QMessageBox::Cancel) return false; } return true; } // This gets called when the "update index" action is activated. 
It executes // the requested action, and disables the menu entry. This will be // re-enabled by the indexing status check void RclMain::toggleIndexing() { switch (m_indexerState) { case IXST_RUNNINGMINE: if (m_idxproc) { // Indexing was in progress, request stop. Let the periodic // routine check for the results. if (m_idxproc->requestChildExit()) { m_idxkilled = true; } } break; case IXST_RUNNINGNOTMINE: { int rep = QMessageBox::information( 0, tr("Warning"), tr("The current indexing process was not started from this " "interface. Click Ok to kill it " "anyway, or Cancel to leave it alone"), QMessageBox::Ok, QMessageBox::Cancel, QMessageBox::NoButton); if (rep == QMessageBox::Ok) { #ifdef _WIN32 // No simple way to signal the process. Use the stop file ::close(::creat(theconfig->getIdxStopFile().c_str(), 0666)); #else Pidfile pidfile(theconfig->getPidfile()); pid_t pid = pidfile.open(); if (pid > 0) kill(pid, SIGTERM); #endif // !_WIN32 } } break; case IXST_NOTRUNNING: { // Could also mean that no helpers are missing, but then we // won't try to show a message anyway (which is what // firstIndexing is used for) string mhd; m_firstIndexing = !theconfig->getMissingHelperDesc(mhd); if (!checkIdxPaths()) { return; } vector args{"-c", theconfig->getConfDir()}; if (m_idxreasontmp && m_idxreasontmp->ok()) { args.push_back("-R"); args.push_back(m_idxreasontmp->filename()); } m_idxproc = new ExecCmd; m_idxproc->startExec("recollindex", args, false, false); } break; case IXST_UNKNOWN: return; } } #ifndef _WIN32 void RclMain::bumpIndexing() { DbIxStatus status; readIdxStatus(theconfig, status); if (status.hasmonitor) { string cmd("touch "); string path = path_cat(theconfig->getConfDir(), "recoll.conf"); cmd += path; int status; if ((status = system(cmd.c_str()))) { cerr << cmd << " failed with status " << status << endl; } } } #else // Because moc does not understand ifdefs, have to have this as an empty func void RclMain::bumpIndexing() { } #endif static void delay(int millisecondsWait) { QEventLoop loop; QTimer t; t.connect(&t, SIGNAL(timeout()), &loop, SLOT(quit())); t.start(millisecondsWait); loop.exec(); } void RclMain::rebuildIndex() { if (!m_idxreasontmp || !m_idxreasontmp->ok()) { // We just store the pointer and let the tempfile cleaner deal // with delete on exiting TempFile temp(".txt"); m_idxreasontmp = rememberTempFile(temp); } if (m_indexerState == IXST_UNKNOWN) { delay(1500); } switch (m_indexerState) { case IXST_UNKNOWN: case IXST_RUNNINGMINE: case IXST_RUNNINGNOTMINE: return; //?? Should not have been called case IXST_NOTRUNNING: { if (m_idxproc) { LOGERR("RclMain::rebuildIndex: current indexer exec not null\n" ); return; } int rep = QMessageBox::warning(0, tr("Erasing index"), tr("Reset the index and start " "from scratch ?"), QMessageBox::Ok, QMessageBox::Cancel, QMessageBox::NoButton); if (rep == QMessageBox::Ok) { #ifdef _WIN32 // Under windows, it is necessary to close the db here, // else Xapian won't be able to do what it wants with the // (open) files. Of course if there are several GUI // instances, this won't work... Also it's quite difficult // to make sure that there are no more references to the // db because, for example of the Enquire objects inside // Query inside Docsource etc. // // !! At this moment, this does not work if a preview has // !! been opened. Could not find the reason (mysterious // !! Xapian::Database reference somewhere?). The indexing // !! fails, leaving a partial index directory. Then need // !! to restart the GUI to succeed in reindexing. 
if (rcldb) { resetSearch(); deleteZ(m_snippets); rcldb->close(); } #endif // _WIN32 // Could also mean that no helpers are missing, but then we // won't try to show a message anyway (which is what // firstIndexing is used for) string mhd; m_firstIndexing = !theconfig->getMissingHelperDesc(mhd); if (!checkIdxPaths()) { return; } vector args{"-c", theconfig->getConfDir(), "-z"}; if (m_idxreasontmp && m_idxreasontmp->ok()) { args.push_back("-R"); args.push_back(m_idxreasontmp->filename()); } m_idxproc = new ExecCmd; m_idxproc->startExec("recollindex", args, false, false); } } break; } } void SpecIdxW::onBrowsePB_clicked() { QString dir = myGetFileName(true, tr("Top indexed entity"), true); targLE->setText(dir); } bool SpecIdxW::noRetryFailed() { return !retryFailedCB->isChecked(); } bool SpecIdxW::eraseFirst() { return eraseBeforeCB->isChecked(); } std::vector SpecIdxW::selpatterns() { vector pats; string text = qs2utf8s(selPatsLE->text()); if (!text.empty()) { stringToStrings(text, pats); } return pats; } std::string SpecIdxW::toptarg() { return qs2utf8s(targLE->text()); } void SpecIdxW::onTargLE_textChanged(const QString& text) { if (text.isEmpty()) selPatsLE->setEnabled(false); else selPatsLE->setEnabled(true); } static string execToString(const string& cmd, const vector& args) { string command = cmd + " "; for (vector::const_iterator it = args.begin(); it != args.end(); it++) { command += "{" + *it + "} "; } return command; } void RclMain::specialIndex() { LOGDEB("RclMain::specialIndex\n" ); if (!m_idxreasontmp || !m_idxreasontmp->ok()) { // We just store the pointer and let the tempfile cleaner deal // with delete on exiting TempFile temp(".txt"); m_idxreasontmp = rememberTempFile(temp); } switch (m_indexerState) { case IXST_UNKNOWN: case IXST_RUNNINGMINE: case IXST_RUNNINGNOTMINE: return; //?? Should not have been called case IXST_NOTRUNNING: default: break; } if (m_idxproc) { LOGERR("RclMain::rebuildIndex: current indexer exec not null\n" ); return; } if (!specidx) // ?? 
return; vector args{"-c", theconfig->getConfDir()}; if (m_idxreasontmp && m_idxreasontmp->ok()) { args.push_back("-R"); args.push_back(m_idxreasontmp->filename()); } string top = specidx->toptarg(); if (!top.empty()) { args.push_back("-r"); } if (specidx->eraseFirst()) { if (top.empty()) { args.push_back("-Z"); } else { args.push_back("-e"); // -e also needs -i, else we don't reindex, just erase args.push_back("-i"); } } if (!specidx->noRetryFailed()) { args.push_back("-k"); } else { args.push_back("-K"); } vector selpats = specidx->selpatterns(); if (!selpats.empty() && top.empty()) { QMessageBox::warning(0, tr("Selection patterns need topdir"), tr("Selection patterns can only be used with a " "start directory"), QMessageBox::Ok, QMessageBox::NoButton); return; } for (vector::const_iterator it = selpats.begin(); it != selpats.end(); it++) { args.push_back("-p"); args.push_back(*it); } if (!top.empty()) { args.push_back(top); } m_idxproc = new ExecCmd; LOGINFO("specialIndex: exec: " << execToString("recollindex", args) <startExec("recollindex", args, false, false); } void RclMain::updateIdxForDocs(vector& docs) { if (m_idxproc) { QMessageBox::warning(0, tr("Warning"), tr("Can't update index: indexer running"), QMessageBox::Ok, QMessageBox::NoButton); return; } vector paths; if (Rcl::docsToPaths(docs, paths)) { vector args{"-c", theconfig->getConfDir(), "-i",}; if (m_idxreasontmp && m_idxreasontmp->ok()) { args.push_back("-R"); args.push_back(m_idxreasontmp->filename()); } args.insert(args.end(), paths.begin(), paths.end()); m_idxproc = new ExecCmd; m_idxproc->startExec("recollindex", args, false, false); // Call periodic100 to update the menu entries states periodic100(); } else { QMessageBox::warning(0, tr("Warning"), tr("Can't update index: internal error"), QMessageBox::Ok, QMessageBox::NoButton); return; } } recoll-1.26.3/qtgui/idxsched.ui0000644000175000017500000001045713566424763013316 00000000000000 IdxSchedW 0 0 504 403 Index scheduling setup 0 1 <!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd"> <html><head><meta name="qrichtext" content="1" /><style type="text/css"> p, li { white-space: pre-wrap; } </style></head><body style=" font-family:'Sans Serif'; font-size:10pt; font-weight:400; font-style:normal;"> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" font-weight:600;">Recoll</span> indexing can run permanently, indexing files as they change, or run at discrete intervals. </p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Reading the manual may help you to decide between these approaches (press F1). </p> <p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"></p> <p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">This tool can help you set up a schedule to automate batch indexing runs, or start real time indexing when you log in (or both, which rarely makes sense). </p></body></html> Qt::AutoText true Cron scheduling false The tool will let you decide at what time indexing should run and will install a crontab entry. Real time indexing start up false Decide if real time indexing will be started when you log in (only for the default index). 
Qt::Horizontal QDialogButtonBox::Close buttonBox accepted() IdxSchedW accept() 248 254 157 274 buttonBox rejected() IdxSchedW reject() 316 260 286 274 recoll-1.26.3/qtgui/multisave.h0000644000175000017500000000165313533651561013334 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _MULTISAVE_W_H_INCLUDED_ #define _MULTISAVE_W_H_INCLUDED_ #include extern void multiSave(QWidget *parent, vector& docs); #endif /* _MULTISAVE_W_H_INCLUDED_ */ recoll-1.26.3/qtgui/firstidx.h0000644000175000017500000000204013533651561013146 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _FIRSTIDX_H_INCLUDED_ #define _FIRSTIDX_H_INCLUDED_ #include "ui_firstidx.h" class FirstIdxDialog : public QDialog, public Ui::FirstIdxDialog { Q_OBJECT public: FirstIdxDialog(QWidget * parent = 0) : QDialog(parent) { setupUi(this); } }; #endif /* _FIRSTIDX_H_INCLUDED_ */ recoll-1.26.3/qtgui/specialindex.ui0000644000175000017500000001024513533651561014156 00000000000000 SpecIdxW Qt::WindowModal 0 0 610 192 Special Indexing Retry previously failed files. Else only modified or failed files will be processed. Erase selected files data before indexing. 8 0 300 0 Directory to recursively index. This must be inside the regular indexed area<br> as defined in the configuration file (topdirs). Browse false Start directory. Must be part of the indexed tree. Use full indexed area if empty. false Leave empty to select all files. You can use multiple space-separated shell-type patterns.<br>Patterns with embedded spaces should be quoted with double quotes.<br>Can only be used if the start target is set. Selection patterns: Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok buttonBox accepted() SpecIdxW accept() 248 254 157 274 buttonBox rejected() SpecIdxW reject() 316 260 286 274 recoll-1.26.3/qtgui/preview_load.cpp0000644000175000017500000000544013533651561014334 00000000000000/* Copyright (C) 2014 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include "log.h" #include "preview_load.h" #include "internfile.h" #include "rcldoc.h" #include "pathut.h" #include "cancelcheck.h" #include "rclconfig.h" LoadThread::LoadThread(RclConfig *config, const Rcl::Doc& idc, bool pvhtm, QObject *parent) : QThread(parent), status(1), m_idoc(idc), m_previewHtml(pvhtm), m_config(*config) { } void LoadThread::run() { FileInterner interner(m_idoc, &m_config, FileInterner::FIF_forPreview); FIMissingStore mst; interner.setMissingStore(&mst); // Even when previewHtml is set, we don't set the interner's // target mtype to html because we do want the html filter to // do its work: we won't use the text/plain, but we want the // text/html to be converted to utf-8 (for highlight processing) try { string ipath = m_idoc.ipath; FileInterner::Status ret = interner.internfile(fdoc, ipath); if (ret == FileInterner::FIDone || ret == FileInterner::FIAgain) { // FIAgain is actually not nice here. It means that the record // for the *file* of a multidoc was selected. Actually this // shouldn't have had a preview link at all, but we don't know // how to handle it now. Better to show the first doc than // a mysterious error. Happens when the file name matches a // a search term. status = 0; // If we prefer HTML and it is available, replace the // text/plain document text if (m_previewHtml && !interner.get_html().empty()) { fdoc.text = interner.get_html(); fdoc.mimetype = "text/html"; } tmpimg = interner.get_imgtmp(); } else { fdoc.mimetype = interner.getMimetype(); mst.getMissingExternal(missing); explain = FileInterner::tryGetReason(&m_config, m_idoc); status = -1; } } catch (CancelExcept) { LOGDEB("LoadThread: cancelled\n" ); status = -1; } } recoll-1.26.3/qtgui/widgets/0000755000175000017500000000000013570165410012665 500000000000000recoll-1.26.3/qtgui/widgets/listdialog.h0000644000175000017500000000177013533651561015124 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
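The LoadThread above runs document extraction off the GUI thread and prefers the text/html output when available so the preview can highlight UTF-8 text. A minimal usage sketch follows, assuming the members set in run() (status, fdoc) are publicly accessible as declared in preview_load.h; startPreviewLoad() and its lambda are illustrative only and not part of the Recoll sources.

// Hedged usage sketch for LoadThread (assumptions noted above).
#include <QObject>
#include <QThread>
#include "preview_load.h"
#include "rclconfig.h"
#include "rcldoc.h"

static void startPreviewLoad(RclConfig *config, const Rcl::Doc& idoc, QObject *owner)
{
    const bool preferHtml = true; // ask for the text/html version when a filter provides one
    LoadThread *loader = new LoadThread(config, idoc, preferHtml, owner);
    QObject::connect(loader, &QThread::finished, owner, [loader]() {
        if (loader->status == 0) {
            // loader->fdoc.text now holds the extracted text (HTML if
            // requested and available), converted to UTF-8 for highlighting.
        }
        loader->deleteLater();
    });
    loader->start();
}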
*/ #ifndef LISTDIALOG_H #define LISTDIALOG_H #include "ui_listdialog.h" class ListDialog : public QDialog, public Ui::ListDialog { Q_OBJECT public: ListDialog(QWidget * parent = 0) : QDialog(parent) { setupUi(this); } }; #endif // LISTDIALOG_H recoll-1.26.3/qtgui/widgets/qxtconfirmationmessage.cpp0000644000175000017500000003404113303776057020116 00000000000000#include "qxtconfirmationmessage.h" /**************************************************************************** ** Copyright (c) 2006 - 2011, the LibQxt project. ** See the Qxt AUTHORS file for a list of authors and copyright holders. ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions are met: ** * Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** * Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** * Neither the name of the LibQxt project nor the ** names of its contributors may be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY ** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
** ** *****************************************************************************/ #include #include #include #include #include static const QLatin1String DEFAULT_ORGANIZATION("QxtWidgets"); static const QLatin1String DEFAULT_APPLICATION("QxtConfirmationMessage"); class QxtConfirmationMessagePrivate : public QxtPrivate { public: QXT_DECLARE_PUBLIC(QxtConfirmationMessage) void init(const QString& message = QString()); QString key() const; QString applicationName() const; QString organizationName() const; int showAgain(); void doNotShowAgain(int result); void reset(); bool remember; QCheckBox* confirm; QString overrideApp; QString overrideKey; QString overrideOrg; static QString path; static QSettings::Scope scope; static QSettings::Format format; }; QString QxtConfirmationMessagePrivate::path; QSettings::Scope QxtConfirmationMessagePrivate::scope = QSettings::UserScope; QSettings::Format QxtConfirmationMessagePrivate::format = QSettings::NativeFormat; void QxtConfirmationMessagePrivate::init(const QString& message) { remember = false; confirm = new QCheckBox(&qxt_p()); if (!message.isNull()) confirm->setText(message); else confirm->setText(QxtConfirmationMessage::tr("Do not show again.")); QGridLayout* grid = qobject_cast(qxt_p().layout()); QDialogButtonBox* buttons = qxt_p().findChild(); if (grid && buttons) { const int idx = grid->indexOf(buttons); int row, column, rowSpan, columnSpan = 0; grid->getItemPosition(idx, &row, &column, &rowSpan, &columnSpan); QLayoutItem* buttonsItem = grid->takeAt(idx); grid->addWidget(confirm, row, column, rowSpan, columnSpan, Qt::AlignLeft | Qt::AlignTop); grid->addItem(buttonsItem, ++row, column, rowSpan, columnSpan); } } QString QxtConfirmationMessagePrivate::key() const { QString value = overrideKey; if (value.isEmpty()) { const QString all = qxt_p().windowTitle() + qxt_p().text() + qxt_p().informativeText(); const QByteArray data = all.toLocal8Bit(); value = QString::number(qChecksum(data.constData(), data.length())); } return value; } QString QxtConfirmationMessagePrivate::applicationName() const { QString name = overrideApp; if (name.isEmpty()) name = QCoreApplication::applicationName(); if (name.isEmpty()) name = DEFAULT_APPLICATION; return name; } QString QxtConfirmationMessagePrivate::organizationName() const { QString name = overrideOrg; if (name.isEmpty()) name = QCoreApplication::organizationName(); if (name.isEmpty()) name = DEFAULT_ORGANIZATION; return name; } int QxtConfirmationMessagePrivate::showAgain() { QSettings settings(format, scope, organizationName(), applicationName()); if (!path.isEmpty()) settings.beginGroup(path); return settings.value(key(), -1).toInt(); } void QxtConfirmationMessagePrivate::doNotShowAgain(int result) { QSettings settings(format, scope, organizationName(), applicationName()); if (!path.isEmpty()) settings.beginGroup(path); settings.setValue(key(), result); } void QxtConfirmationMessagePrivate::reset() { QSettings settings(format, scope, organizationName(), applicationName()); if (!path.isEmpty()) settings.beginGroup(path); settings.remove(key()); } /*! \class QxtConfirmationMessage \inmodule QxtWidgets \brief The QxtConfirmationMessage class provides a confirmation message. QxtConfirmationMessage is a confirmation message with checkable \bold {"Do not show again."} option. A checked and accepted confirmation message is no more shown until reseted. 
Example usage: \code void MainWindow::closeEvent(QCloseEvent* event) { static const QString text(tr("Are you sure you want to quit?")); if (QxtConfirmationMessage::confirm(this, tr("Confirm"), text) == QMessageBox::No) event->ignore(); } \endcode \image qxtconfirmationmessage.png "QxtConfirmationMessage in action." \bold {Note:} QCoreApplication::organizationName and QCoreApplication::applicationName are used for storing settings. In case these properties are empty, \bold "QxtWidgets" and \bold "QxtConfirmationMessage" are used, respectively. */ /*! Constructs a new QxtConfirmationMessage with \a parent. */ QxtConfirmationMessage::QxtConfirmationMessage(QWidget* parent) : QMessageBox(parent) { QXT_INIT_PRIVATE(QxtConfirmationMessage); qxt_d().init(); } /*! Constructs a new QxtConfirmationMessage with \a icon, \a title, \a text, \a confirmation, \a buttons, \a parent and \a flags. */ QxtConfirmationMessage::QxtConfirmationMessage(QMessageBox::Icon icon, const QString& title, const QString& text, const QString& confirmation, QMessageBox::StandardButtons buttons, QWidget* parent, Qt::WindowFlags flags) : QMessageBox(icon, title, text, buttons, parent, flags) { QXT_INIT_PRIVATE(QxtConfirmationMessage); qxt_d().init(confirmation); } /*! Destructs the confirmation message. */ QxtConfirmationMessage::~QxtConfirmationMessage() { } /*! Opens an confirmation message box with the specified \a title, \a text and \a confirmation. The standard \a buttons are added to the message box. \a defaultButton specifies the button used when Enter is pressed. \a defaultButton must refer to a button that was given in \a buttons. If \a defaultButton is QMessageBox::NoButton, QMessageBox chooses a suitable default automatically. Returns the identity of the standard button that was clicked. If Esc was pressed instead, the escape button is returned. If \a parent is \c 0, the message box is an application modal dialog box. If \a parent is a widget, the message box is window modal relative to \a parent. */ QMessageBox::StandardButton QxtConfirmationMessage::confirm(QWidget* parent, const QString& title, const QString& text, const QString& confirmation, QMessageBox::StandardButtons buttons, QMessageBox::StandardButton defaultButton) { QxtConfirmationMessage msgBox(QMessageBox::NoIcon, title, text, confirmation, QMessageBox::NoButton, parent); QDialogButtonBox* buttonBox = msgBox.findChild(); Q_ASSERT(buttonBox != 0); uint mask = QMessageBox::FirstButton; while (mask <= QMessageBox::LastButton) { uint sb = buttons & mask; mask <<= 1; if (!sb) continue; QPushButton* button = msgBox.addButton((QMessageBox::StandardButton)sb); // Choose the first accept role as the default if (msgBox.defaultButton()) continue; if ((defaultButton == QMessageBox::NoButton && buttonBox->buttonRole(button) == QDialogButtonBox::AcceptRole) || (defaultButton != QMessageBox::NoButton && sb == uint(defaultButton))) msgBox.setDefaultButton(button); } if (msgBox.exec() == -1) return QMessageBox::Cancel; return msgBox.standardButton(msgBox.clickedButton()); } /*! \property QxtConfirmationMessage::confirmationText \brief the confirmation text The default value is \bold {"Do not show again."} */ QString QxtConfirmationMessage::confirmationText() const { return qxt_d().confirm->text(); } void QxtConfirmationMessage::setConfirmationText(const QString& confirmation) { qxt_d().confirm->setText(confirmation); } /*! 
\property QxtConfirmationMessage::overrideSettingsApplication \brief the override application name used for settings QCoreApplication::applicationName is used when no \bold overrideSettingsApplication has been set. The application name falls back to \bold "QxtConfirmationMessage" when no QCoreApplication::applicationName has been set. The default value is an empty string. */ QString QxtConfirmationMessage::overrideSettingsApplication() const { return qxt_d().overrideApp; } void QxtConfirmationMessage::setOverrideSettingsApplication(const QString& application) { qxt_d().overrideApp = application; } /*! \property QxtConfirmationMessage::overrideSettingsKey \brief the override key used for settings When no \bold overrideSettingsKey has been set, the key is calculated with qChecksum() based on title, text and confirmation message. The default value is an empty string. */ QString QxtConfirmationMessage::overrideSettingsKey() const { return qxt_d().overrideKey; } void QxtConfirmationMessage::setOverrideSettingsKey(const QString& key) { qxt_d().overrideKey = key; } /*! \property QxtConfirmationMessage::overrideSettingsOrganization \brief the override organization name used for settings QCoreApplication::organizationName is used when no \bold overrideSettingsOrganization has been set. The organization name falls back to \bold "QxtWidgets" when no QCoreApplication::organizationName has been set. The default value is an empty string. */ QString QxtConfirmationMessage::overrideSettingsOrganization() const { return qxt_d().overrideOrg; } void QxtConfirmationMessage::setOverrideSettingsOrganization(const QString& organization) { qxt_d().overrideOrg = organization; } /*! \property QxtConfirmationMessage::rememberOnReject \brief whether \bold {"Do not show again."} option is stored even if the message box is rejected (eg. user presses Cancel). The default value is \c false. */ bool QxtConfirmationMessage::rememberOnReject() const { return qxt_d().remember; } void QxtConfirmationMessage::setRememberOnReject(bool remember) { qxt_d().remember = remember; } /*! Returns The format used for storing settings. The default value is QSettings::NativeFormat. */ QSettings::Format QxtConfirmationMessage::settingsFormat() { return QxtConfirmationMessagePrivate::format; } /*! Sets the \a format used for storing settings. */ void QxtConfirmationMessage::setSettingsFormat(QSettings::Format format) { QxtConfirmationMessagePrivate::format = format; } /*! Returns The scope used for storing settings. The default value is QSettings::UserScope. */ QSettings::Scope QxtConfirmationMessage::settingsScope() { return QxtConfirmationMessagePrivate::scope; } /*! Sets the \a scope used for storing settings. */ void QxtConfirmationMessage::setSettingsScope(QSettings::Scope scope) { QxtConfirmationMessagePrivate::scope = scope; } /*! Returns the path used for storing settings. The default value is an empty string. */ QString QxtConfirmationMessage::settingsPath() { return QxtConfirmationMessagePrivate::path; } /*! Sets the \a path used for storing settings. */ void QxtConfirmationMessage::setSettingsPath(const QString& path) { QxtConfirmationMessagePrivate::path = path; } /*! Shows the confirmation message if necessary. The confirmation message is not shown in case \bold {"Do not show again."} has been checked while the same confirmation message was earlierly accepted. A confirmation message is identified by the combination of title, QMessageBox::text and optional QMessageBox::informativeText. 
A clicked button with role QDialogButtonBox::AcceptRole or QDialogButtonBox::YesRole is considered as "accepted". \warning This function does not reimplement but shadows QMessageBox::exec(). \sa QWidget::windowTitle, QMessageBox::text, QMessageBox::informativeText */ int QxtConfirmationMessage::exec() { int res = qxt_d().showAgain(); if (res == -1) res = QMessageBox::exec(); return res; } /*! \reimp */ void QxtConfirmationMessage::done(int result) { QDialogButtonBox* buttons = this->findChild(); Q_ASSERT(buttons != 0); int role = buttons->buttonRole(clickedButton()); if (qxt_d().confirm->isChecked() && (qxt_d().remember || role != QDialogButtonBox::RejectRole)) { qxt_d().doNotShowAgain(result); } QMessageBox::done(result); } /*! Resets this instance of QxtConfirmationMessage. A reseted confirmation message is shown again until user checks \bold {"Do not show again."} and accepts the confirmation message. */ void QxtConfirmationMessage::reset() { qxt_d().reset(); } recoll-1.26.3/qtgui/widgets/editdialog.ui0000644000175000017500000000271513303776057015267 00000000000000 EditDialog 0 0 614 509 Dialog Qt::Horizontal QDialogButtonBox::Cancel|QDialogButtonBox::Ok buttonBox accepted() Dialog accept() 248 254 157 274 buttonBox rejected() Dialog reject() 316 260 286 274 recoll-1.26.3/qtgui/widgets/qxtconfirmationmessage.h0000644000175000017500000001052313303776057017562 00000000000000#ifndef QXTCONFIRMATIONMESSAGE_H /**************************************************************************** ** Copyright (c) 2006 - 2011, the LibQxt project. ** See the Qxt AUTHORS file for a list of authors and copyright holders. ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions are met: ** * Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** * Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** * Neither the name of the LibQxt project nor the ** names of its contributors may be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY ** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
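As a complement to the \code example in the class documentation above, here is a hedged usage sketch showing how a caller might pin the remembered "Do not show again." answer to an explicit settings key with setOverrideSettingsKey(), instead of the default qChecksum()-derived key, so that rewording the dialog keeps the stored choice. confirmReindex() and the key string are made up for illustration and are not part of the Recoll or LibQxt sources.

// Sketch only, using the QxtConfirmationMessage API shown above.
#include <QMessageBox>
#include <QObject>
#include "qxtconfirmationmessage.h"

static bool confirmReindex(QWidget* parent)
{
    QxtConfirmationMessage msg(QMessageBox::Question,
                               QObject::tr("Recoll"),
                               QObject::tr("Erase and rebuild the whole index?"),
                               QObject::tr("Do not ask again."),
                               QMessageBox::Yes | QMessageBox::No,
                               parent);
    msg.setOverrideSettingsKey(QStringLiteral("confirm/reindex"));
    // exec() returns the stored answer immediately if "Do not ask again."
    // was previously checked and the message accepted.
    return msg.exec() == QMessageBox::Yes;
}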
** ** *****************************************************************************/ #define QXTCONFIRMATIONMESSAGE_H #include #include #include "qxtglobal.h" class QxtConfirmationMessagePrivate; class QXT_GUI_EXPORT QxtConfirmationMessage : public QMessageBox { Q_OBJECT QXT_DECLARE_PRIVATE(QxtConfirmationMessage) Q_PROPERTY(QString confirmationText READ confirmationText WRITE setConfirmationText) Q_PROPERTY(QString overrideSettingsApplication READ overrideSettingsApplication WRITE setOverrideSettingsApplication) Q_PROPERTY(QString overrideSettingsKey READ overrideSettingsKey WRITE setOverrideSettingsKey) Q_PROPERTY(QString overrideSettingsOrganization READ overrideSettingsOrganization WRITE setOverrideSettingsOrganization) Q_PROPERTY(bool rememberOnReject READ rememberOnReject WRITE setRememberOnReject) public: explicit QxtConfirmationMessage(QWidget* parent = 0); virtual ~QxtConfirmationMessage(); QxtConfirmationMessage(QMessageBox::Icon icon, const QString& title, const QString& text, const QString& confirmation = QString(), QMessageBox::StandardButtons buttons = QMessageBox::NoButton, QWidget* parent = 0, Qt::WindowFlags flags = Qt::Dialog | Qt::MSWindowsFixedSizeDialogHint); static QMessageBox::StandardButton confirm(QWidget* parent, const QString& title, const QString& text, const QString& confirmation = QString(), QMessageBox::StandardButtons buttons = QMessageBox::Yes | QMessageBox::No, QMessageBox::StandardButton defaultButton = QMessageBox::NoButton); QString confirmationText() const; void setConfirmationText(const QString& confirmation); QString overrideSettingsApplication() const; void setOverrideSettingsApplication(const QString& application); QString overrideSettingsKey() const; void setOverrideSettingsKey(const QString& key); QString overrideSettingsOrganization() const; void setOverrideSettingsOrganization(const QString& organization); bool rememberOnReject() const; void setRememberOnReject(bool remember); static QSettings::Format settingsFormat(); static void setSettingsFormat(QSettings::Format format); static QSettings::Scope settingsScope(); static void setSettingsScope(QSettings::Scope scope); static QString settingsPath(); static void setSettingsPath(const QString& path); public Q_SLOTS: int exec(); void reset(); virtual void done(int result); }; #endif // QXTCONFIRMATIONMESSAGE_H recoll-1.26.3/qtgui/widgets/editdialog.h0000644000175000017500000000201513533651561015067 00000000000000/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef EDITDIALOG_H #define EDITDIALOG_H #include #include "ui_editdialog.h" class EditDialog : public QDialog, public Ui::EditDialog { Q_OBJECT public: EditDialog(QWidget * parent = 0) : QDialog(parent) { setupUi(this); } }; #endif // EDITDIALOG_H recoll-1.26.3/qtgui/widgets/listdialog.ui0000644000175000017500000000337213303776057015315 00000000000000 ListDialog 0 0 400 300 Dialog GroupBox Qt::Horizontal QDialogButtonBox::Ok true buttonBox accepted() ListDialog accept() 248 254 157 274 buttonBox rejected() ListDialog reject() 316 260 286 274 recoll-1.26.3/qtgui/widgets/qxtglobal.h0000644000175000017500000001564313303776057014775 00000000000000 /**************************************************************************** ** Copyright (c) 2006 - 2011, the LibQxt project. ** See the Qxt AUTHORS file for a list of authors and copyright holders. ** All rights reserved. ** ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions are met: ** * Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** * Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** * Neither the name of the LibQxt project nor the ** names of its contributors may be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ** ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED ** WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE ** DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY ** DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES ** (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; ** LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ** ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS ** SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
** ** *****************************************************************************/ #ifndef QXTGLOBAL_H #define QXTGLOBAL_H #include #define QXT_VERSION 0x000700 #define QXT_VERSION_STR "0.7.0" //--------------------------global macros------------------------------ #ifndef QXT_NO_MACROS #ifndef _countof #define _countof(x) (sizeof(x)/sizeof(*x)) #endif #endif // QXT_NO_MACROS //--------------------------export macros------------------------------ #define QXT_STATIC #define QXT_DLLEXPORT DO_NOT_USE_THIS_ANYMORE #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_CORE) # define QXT_CORE_EXPORT Q_DECL_EXPORT # else # define QXT_CORE_EXPORT Q_DECL_IMPORT # endif #else # define QXT_CORE_EXPORT #endif // BUILD_QXT_CORE #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_GUI) # define QXT_GUI_EXPORT Q_DECL_EXPORT # else # define QXT_GUI_EXPORT Q_DECL_IMPORT # endif #else # define QXT_GUI_EXPORT #endif // BUILD_QXT_GUI #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_NETWORK) # define QXT_NETWORK_EXPORT Q_DECL_EXPORT # else # define QXT_NETWORK_EXPORT Q_DECL_IMPORT # endif #else # define QXT_NETWORK_EXPORT #endif // BUILD_QXT_NETWORK #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_SQL) # define QXT_SQL_EXPORT Q_DECL_EXPORT # else # define QXT_SQL_EXPORT Q_DECL_IMPORT # endif #else # define QXT_SQL_EXPORT #endif // BUILD_QXT_SQL #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_WEB) # define QXT_WEB_EXPORT Q_DECL_EXPORT # else # define QXT_WEB_EXPORT Q_DECL_IMPORT # endif #else # define QXT_WEB_EXPORT #endif // BUILD_QXT_WEB #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_BERKELEY) # define QXT_BERKELEY_EXPORT Q_DECL_EXPORT # else # define QXT_BERKELEY_EXPORT Q_DECL_IMPORT # endif #else # define QXT_BERKELEY_EXPORT #endif // BUILD_QXT_BERKELEY #if !defined(QXT_STATIC) && !defined(QXT_DOXYGEN_RUN) # if defined(BUILD_QXT_ZEROCONF) # define QXT_ZEROCONF_EXPORT Q_DECL_EXPORT # else # define QXT_ZEROCONF_EXPORT Q_DECL_IMPORT # endif #else # define QXT_ZEROCONF_EXPORT #endif // QXT_ZEROCONF_EXPORT #if defined(BUILD_QXT_CORE) || defined(BUILD_QXT_GUI) || defined(BUILD_QXT_SQL) || defined(BUILD_QXT_NETWORK) || defined(BUILD_QXT_WEB) || defined(BUILD_QXT_BERKELEY) || defined(BUILD_QXT_ZEROCONF) # define BUILD_QXT #endif QXT_CORE_EXPORT const char* qxtVersion(); #ifndef QT_BEGIN_NAMESPACE #define QT_BEGIN_NAMESPACE #endif #ifndef QT_END_NAMESPACE #define QT_END_NAMESPACE #endif #ifndef QT_FORWARD_DECLARE_CLASS #define QT_FORWARD_DECLARE_CLASS(Class) class Class; #endif /**************************************************************************** ** This file is derived from code bearing the following notice: ** The sole author of this file, Adam Higerd, has explicitly disclaimed all ** copyright interest and protection for the content within. This file has ** been placed in the public domain according to United States copyright ** statute and case law. In jurisdictions where this public domain dedication ** is not legally recognized, anyone who receives a copy of this file is ** permitted to use, modify, duplicate, and redistribute this file, in whole ** or in part, with no restrictions or conditions. In these jurisdictions, ** this file shall be copyright (C) 2006-2008 by Adam Higerd. 
****************************************************************************/ #define QXT_DECLARE_PRIVATE(PUB) friend class PUB##Private; QxtPrivateInterface qxt_d; #define QXT_DECLARE_PUBLIC(PUB) friend class PUB; #define QXT_INIT_PRIVATE(PUB) qxt_d.setPublic(this); #define QXT_D(PUB) PUB##Private& d = qxt_d() #define QXT_P(PUB) PUB& p = qxt_p() template class QxtPrivate { public: virtual ~QxtPrivate() {} inline void QXT_setPublic(PUB* pub) { qxt_p_ptr = pub; } protected: inline PUB& qxt_p() { return *qxt_p_ptr; } inline const PUB& qxt_p() const { return *qxt_p_ptr; } inline PUB* qxt_ptr() { return qxt_p_ptr; } inline const PUB* qxt_ptr() const { return qxt_p_ptr; } private: PUB* qxt_p_ptr; }; template class QxtPrivateInterface { friend class QxtPrivate; public: QxtPrivateInterface() { pvt = new PVT; } ~QxtPrivateInterface() { delete pvt; } inline void setPublic(PUB* pub) { pvt->QXT_setPublic(pub); } inline PVT& operator()() { return *static_cast(pvt); } inline const PVT& operator()() const { return *static_cast(pvt); } inline PVT * operator->() { return static_cast(pvt); } inline const PVT * operator->() const { return static_cast(pvt); } private: QxtPrivateInterface(const QxtPrivateInterface&) { } QxtPrivateInterface& operator=(const QxtPrivateInterface&) { } QxtPrivate* pvt; }; #endif // QXT_GLOBAL recoll-1.26.3/doc/0000755000175000017500000000000013570165407010641 500000000000000recoll-1.26.3/doc/prog/0000755000175000017500000000000013570165410011602 500000000000000recoll-1.26.3/doc/prog/Makefile0000644000175000017500000000007713303776057013177 00000000000000documentation: doxygen Doxyfile clean: rm -f docprog_html/* recoll-1.26.3/doc/prog/Doxyfile0000644000175000017500000014134213533651561013243 00000000000000# Doxyfile 1.4.1 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project # # All text after a hash (#) is considered a comment and will be ignored # The format is: # TAG = value [value, ...] # For lists items can also be appended using: # TAG += value [value, ...] # Values that contain spaces should be placed between quotes (" ") #--------------------------------------------------------------------------- # Project related configuration options #--------------------------------------------------------------------------- # The PROJECT_NAME tag is a single word (or a sequence of words surrounded # by quotes) that should identify the project. PROJECT_NAME = Recoll # The PROJECT_NUMBER tag can be used to enter a project or revision number. # This could be handy for archiving the generated documentation or # if some version control system is used. PROJECT_NUMBER = # The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) # base path where the generated documentation will be put. # If a relative path is entered, it will be relative to the location # where doxygen was started. If left blank the current directory will be used. OUTPUT_DIRECTORY = # If the CREATE_SUBDIRS tag is set to YES, then doxygen will create # 4096 sub-directories (in 2 levels) under the output directory of each output # format and will distribute the generated files over these directories. # Enabling this option can be useful when feeding doxygen a huge amount of # source files, where putting all generated files in the same directory would # otherwise cause performance problems for the file system. CREATE_SUBDIRS = NO # The OUTPUT_LANGUAGE tag is used to specify the language in which all # documentation generated by doxygen is written. 
Doxygen will use this # information to generate all constant output in the proper language. # The default language is English, other supported languages are: # Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, # Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese, # Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian, # Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish, # Swedish, and Ukrainian. OUTPUT_LANGUAGE = English # This tag can be used to specify the encoding used in the generated output. # The encoding is not always determined by the language that is chosen, # but also whether or not the output is meant for Windows or non-Windows users. # In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES # forces the Windows encoding (this is the default for the Windows binary), # whereas setting the tag to NO uses a Unix-style encoding (the default for # all platforms other than Windows). USE_WINDOWS_ENCODING = NO # If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will # include brief member descriptions after the members that are listed in # the file and class documentation (similar to JavaDoc). # Set to NO to disable this. BRIEF_MEMBER_DESC = YES # If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend # the brief description of a member or function before the detailed description. # Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the # brief descriptions will be completely suppressed. REPEAT_BRIEF = YES # This tag implements a quasi-intelligent brief description abbreviator # that is used to form the text in various listings. Each string # in this list, if found as the leading text of the brief description, will be # stripped from the text and the result after processing the whole list, is # used as the annotated text. Otherwise, the brief description is used as-is. # If left blank, the following values are used ("$name" is automatically # replaced with the name of the entity): "The $name class" "The $name widget" # "The $name file" "is" "provides" "specifies" "contains" # "represents" "a" "an" "the" ABBREVIATE_BRIEF = # If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then # Doxygen will generate a detailed section even if there is only a brief # description. ALWAYS_DETAILED_SEC = NO # If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all # inherited members of a class in the documentation of that class as if those # members were ordinary class members. Constructors, destructors and assignment # operators of the base classes will not be shown. INLINE_INHERITED_MEMB = NO # If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full # path before files name in the file list and in the header files. If set # to NO the shortest path that makes the file name unique will be used. FULL_PATH_NAMES = YES # If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag # can be used to strip a user-defined part of the path. Stripping is # only done if one of the specified strings matches the left-hand part of # the path. The tag can be used to show relative paths in the file list. # If left blank the directory from which doxygen is run is used as the # path to strip. 
STRIP_FROM_PATH = # The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of # the path mentioned in the documentation of a class, which tells # the reader which header file to include in order to use a class. # If left blank only the name of the header file containing the class # definition is used. Otherwise one should specify the include paths that # are normally passed to the compiler using the -I flag. STRIP_FROM_INC_PATH = # If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter # (but less readable) file names. This can be useful is your file systems # doesn't support long names like on DOS, Mac, or CD-ROM. SHORT_NAMES = NO # If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen # will interpret the first line (until the first dot) of a JavaDoc-style # comment as the brief description. If set to NO, the JavaDoc # comments will behave just like the Qt-style comments (thus requiring an # explicit @brief command for a brief description. JAVADOC_AUTOBRIEF = NO # The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen # treat a multi-line C++ special comment block (i.e. a block of //! or /// # comments) as a brief description. This used to be the default behaviour. # The new default is to treat a multi-line C++ comment block as a detailed # description. Set this tag to YES if you prefer the old behaviour instead. MULTILINE_CPP_IS_BRIEF = NO # If the DETAILS_AT_TOP tag is set to YES then Doxygen # will output the detailed description near the top, like JavaDoc. # If set to NO, the detailed description appears after the member # documentation. DETAILS_AT_TOP = NO # If the INHERIT_DOCS tag is set to YES (the default) then an undocumented # member inherits the documentation from any documented member that it # re-implements. INHERIT_DOCS = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC # tag is set to YES, then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. DISTRIBUTE_GROUP_DOC = NO # The TAB_SIZE tag can be used to set the number of spaces in a tab. # Doxygen uses this value to replace tabs by spaces in code fragments. TAB_SIZE = 8 # This tag can be used to specify a number of aliases that acts # as commands in the documentation. An alias has the form "name=value". # For example adding "sideeffect=\par Side Effects:\n" will allow you to # put the command \sideeffect (or @sideeffect) in the documentation, which # will result in a user-defined paragraph with heading "Side Effects:". # You can put \n's in the value part of an alias to insert newlines. ALIASES = # Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C # sources only. Doxygen will then generate output that is more tailored for C. # For instance, some of the names that are used will be different. The list # of all members will be omitted, etc. OPTIMIZE_OUTPUT_FOR_C = NO # Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources # only. Doxygen will then generate output that is more tailored for Java. # For instance, namespaces will be presented as packages, qualified scopes # will look different, etc. OPTIMIZE_OUTPUT_JAVA = NO # Set the SUBGROUPING tag to YES (the default) to allow class member groups of # the same type (for instance a group of public functions) to be put as a # subgroup of that type (e.g. under the Public Functions section). 
Set it to # NO to prevent subgrouping. Alternatively, this can be done per class using # the \nosubgrouping command. SUBGROUPING = YES #--------------------------------------------------------------------------- # Build related configuration options #--------------------------------------------------------------------------- # If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in # documentation are documented, even if no documentation was available. # Private class members and static file members will be hidden unless # the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES EXTRACT_ALL = YES # If the EXTRACT_PRIVATE tag is set to YES all private members of a class # will be included in the documentation. EXTRACT_PRIVATE = YES # If the EXTRACT_STATIC tag is set to YES all static members of a file # will be included in the documentation. EXTRACT_STATIC = YES # If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) # defined locally in source files will be included in the documentation. # If set to NO only classes defined in header files are included. EXTRACT_LOCAL_CLASSES = YES # This flag is only useful for Objective-C code. When set to YES local # methods, which are defined in the implementation section but not in # the interface are included in the documentation. # If set to NO (the default) only methods in the interface are included. EXTRACT_LOCAL_METHODS = NO # If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all # undocumented members of documented classes, files or namespaces. # If set to NO (the default) these members will be included in the # various overviews, but no documentation section is generated. # This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. # If set to NO (the default) these classes will be included in the various # overviews. This option has no effect if EXTRACT_ALL is enabled. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all # friend (class|struct|union) declarations. # If set to NO (the default) these declarations will be included in the # documentation. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any # documentation blocks found inside the body of a function. # If set to NO (the default) these blocks will be appended to the # function's detailed documentation block. HIDE_IN_BODY_DOCS = NO # The INTERNAL_DOCS tag determines if documentation # that is typed after a \internal command is included. If the tag is set # to NO (the default) then the documentation will be excluded. # Set it to YES to include the internal documentation. INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate # file names in lower-case letters. If set to YES upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. CASE_SENSE_NAMES = YES # If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen # will show members with their full class and namespace scopes in the # documentation. If set to YES the scope will be hidden. 
HIDE_SCOPE_NAMES = NO # If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen # will put a list of the files that are included by a file in the documentation # of that file. SHOW_INCLUDE_FILES = YES # If the INLINE_INFO tag is set to YES (the default) then a tag [inline] # is inserted in the documentation for inline members. INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen # will sort the (detailed) documentation of file and class members # alphabetically by member name. If set to NO the members will appear in # declaration order. SORT_MEMBER_DOCS = YES # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the # brief documentation of file, namespace and class members alphabetically # by member name. If set to NO (the default) the members will appear in # declaration order. SORT_BRIEF_DOCS = NO # If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be # sorted by fully-qualified names, including namespaces. If set to # NO (the default), the class list will be sorted only by class name, # not including the namespace part. # Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. # Note: This option applies only to the class list, not to the # alphabetical list. SORT_BY_SCOPE_NAME = NO # The GENERATE_TODOLIST tag can be used to enable (YES) or # disable (NO) the todo list. This list is created by putting \todo # commands in the documentation. GENERATE_TODOLIST = YES # The GENERATE_TESTLIST tag can be used to enable (YES) or # disable (NO) the test list. This list is created by putting \test # commands in the documentation. GENERATE_TESTLIST = YES # The GENERATE_BUGLIST tag can be used to enable (YES) or # disable (NO) the bug list. This list is created by putting \bug # commands in the documentation. GENERATE_BUGLIST = YES # The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or # disable (NO) the deprecated list. This list is created by putting # \deprecated commands in the documentation. GENERATE_DEPRECATEDLIST= YES # The ENABLED_SECTIONS tag can be used to enable conditional # documentation sections, marked by \if sectionname ... \endif. ENABLED_SECTIONS = # The MAX_INITIALIZER_LINES tag determines the maximum number of lines # the initial value of a variable or define consists of for it to appear in # the documentation. If the initializer consists of more lines than specified # here it will be hidden. Use a value of 0 to hide initializers completely. # The appearance of the initializer of individual variables and defines in the # documentation can be controlled using \showinitializer or \hideinitializer # command in the documentation regardless of this setting. MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated # at the bottom of the documentation of classes and structs. If set to YES the # list will mention the files that were used to generate the documentation. SHOW_USED_FILES = YES # If the sources in your project are distributed over multiple directories # then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy # in the documentation. SHOW_DIRECTORIES = YES # The FILE_VERSION_FILTER tag can be used to specify a program or script that # doxygen should invoke to get the current version for each file (typically from the # version control system). 
Doxygen will invoke the program by executing (via # popen()) the command , where is the value of # the FILE_VERSION_FILTER tag, and is the name of an input file # provided by doxygen. Whatever the progam writes to standard output # is used as the file version. See the manual for examples. FILE_VERSION_FILTER = #--------------------------------------------------------------------------- # configuration options related to warning and progress messages #--------------------------------------------------------------------------- # The QUIET tag can be used to turn on/off the messages that are generated # by doxygen. Possible values are YES and NO. If left blank NO is used. QUIET = NO # The WARNINGS tag can be used to turn on/off the warning messages that are # generated by doxygen. Possible values are YES and NO. If left blank # NO is used. WARNINGS = YES # If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings # for undocumented members. If EXTRACT_ALL is set to YES then this flag will # automatically be disabled. WARN_IF_UNDOCUMENTED = YES # If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for # potential errors in the documentation, such as not documenting some # parameters in a documented function, or documenting parameters that # don't exist or using markup commands wrongly. WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be abled to get warnings for # functions that are documented, but have no documentation for their parameters # or return value. If set to NO (the default) doxygen will only warn about # wrong or incomplete parameter documentation, but not about the absence of # documentation. WARN_NO_PARAMDOC = NO # The WARN_FORMAT tag determines the format of the warning messages that # doxygen can produce. The string should contain the $file, $line, and $text # tags, which will be replaced by the file and line number from which the # warning originated and the warning text. Optionally the format may contain # $version, which will be replaced by the version of the file (if it could # be obtained via FILE_VERSION_FILTER) WARN_FORMAT = "$file:$line: $text" # The WARN_LOGFILE tag can be used to specify a file to which warning # and error messages should be written. If left blank the output is written # to stderr. WARN_LOGFILE = #--------------------------------------------------------------------------- # configuration options related to the input files #--------------------------------------------------------------------------- # The INPUT tag can be used to specify the files and/or directories that contain # documented source files. You may enter file names like "myfile.cpp" or # directories like "/usr/src/myproject". Separate the files or directories # with spaces. INPUT = ../../../src top.txt filters.txt # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank the following patterns are tested: # *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx # *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm FILE_PATTERNS = # The RECURSIVE tag can be used to turn specify whether or not subdirectories # should be searched for input files as well. Possible values are YES and NO. # If left blank NO is used. RECURSIVE = YES # The EXCLUDE tag can be used to specify files and/or directories that should # excluded from the INPUT source files. 
This way you can easily exclude a # subdirectory from a directory tree whose root is specified with the INPUT tag. EXCLUDE = .moc .ui # The EXCLUDE_SYMLINKS tag can be used select whether or not files or # directories that are symbolic links (a Unix filesystem feature) are excluded # from the input. EXCLUDE_SYMLINKS = NO # If the value of the INPUT tag contains directories, you can use the # EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude # certain files from those directories. EXCLUDE_PATTERNS = # The EXAMPLE_PATH tag can be used to specify one or more files or # directories that contain example code fragments that are included (see # the \include command). EXAMPLE_PATH = # If the value of the EXAMPLE_PATH tag contains directories, you can use the # EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp # and *.h) to filter out the source-files in the directories. If left # blank all files are included. EXAMPLE_PATTERNS = # If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be # searched for input files to be used with the \include or \dontinclude # commands irrespective of the value of the RECURSIVE tag. # Possible values are YES and NO. If left blank NO is used. EXAMPLE_RECURSIVE = NO # The IMAGE_PATH tag can be used to specify one or more files or # directories that contain image that are included in the documentation (see # the \image command). IMAGE_PATH = # The INPUT_FILTER tag can be used to specify a program that doxygen should # invoke to filter for each input file. Doxygen will invoke the filter program # by executing (via popen()) the command , where # is the value of the INPUT_FILTER tag, and is the name of an # input file. Doxygen will then use the output that the filter program writes # to standard output. If FILTER_PATTERNS is specified, this tag will be # ignored. INPUT_FILTER = # The FILTER_PATTERNS tag can be used to specify filters on a per file pattern # basis. Doxygen will compare the file name with each pattern and apply the # filter if there is a match. The filters are a list of the form: # pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further # info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER # is applied to all files. FILTER_PATTERNS = # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using # INPUT_FILTER) will be used to filter the input files when producing source # files to browse (i.e. when SOURCE_BROWSER is set to YES). FILTER_SOURCE_FILES = NO #--------------------------------------------------------------------------- # configuration options related to source browsing #--------------------------------------------------------------------------- # If the SOURCE_BROWSER tag is set to YES then a list of source files will # be generated. Documented entities will be cross-referenced with these sources. # Note: To get rid of all source code in the generated output, make sure also # VERBATIM_HEADERS is set to NO. SOURCE_BROWSER = NO # Setting the INLINE_SOURCES tag to YES will include the body # of functions and classes directly in the documentation. INLINE_SOURCES = NO # Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct # doxygen to hide any special comment blocks from generated source code # fragments. Normal C and C++ comments will always remain visible. 
STRIP_CODE_COMMENTS = YES # If the REFERENCED_BY_RELATION tag is set to YES (the default) # then for each documented function all documented # functions referencing it will be listed. REFERENCED_BY_RELATION = YES # If the REFERENCES_RELATION tag is set to YES (the default) # then for each documented function all documented entities # called/used by that function will be listed. REFERENCES_RELATION = YES # If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen # will generate a verbatim copy of the header file for each class for # which an include is specified. Set to NO to disable this. VERBATIM_HEADERS = YES #--------------------------------------------------------------------------- # configuration options related to the alphabetical class index #--------------------------------------------------------------------------- # If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index # of all compounds will be generated. Enable this if the project # contains a lot of classes, structs, unions or interfaces. ALPHABETICAL_INDEX = NO # If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then # the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns # in which this list will be split (can be a number in the range [1..20]) COLS_IN_ALPHA_INDEX = 5 # In case all classes in a project start with a common prefix, all # classes will be put under the same header in the alphabetical index. # The IGNORE_PREFIX tag can be used to specify one or more prefixes that # should be ignored while generating the index headers. IGNORE_PREFIX = #--------------------------------------------------------------------------- # configuration options related to the HTML output #--------------------------------------------------------------------------- # If the GENERATE_HTML tag is set to YES (the default) Doxygen will # generate HTML output. GENERATE_HTML = YES # The HTML_OUTPUT tag is used to specify where the HTML docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `html' will be used as the default path. HTML_OUTPUT = docprog_html # The HTML_FILE_EXTENSION tag can be used to specify the file extension for # each generated HTML page (for example: .htm,.php,.asp). If it is left blank # doxygen will generate files with .html extension. HTML_FILE_EXTENSION = .html # The HTML_HEADER tag can be used to specify a personal HTML header for # each generated HTML page. If it is left blank doxygen will generate a # standard header. HTML_HEADER = # The HTML_FOOTER tag can be used to specify a personal HTML footer for # each generated HTML page. If it is left blank doxygen will generate a # standard footer. HTML_FOOTER = # The HTML_STYLESHEET tag can be used to specify a user-defined cascading # style sheet that is used by each HTML page. It can be used to # fine-tune the look of the HTML output. If the tag is left blank doxygen # will generate a default style sheet. Note that doxygen will try to copy # the style sheet file to the HTML output directory, so don't put your own # stylesheet in the HTML output directory as well, or it will be erased! HTML_STYLESHEET = # If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, # files or namespaces will be aligned in HTML using tables. If set to # NO a bullet list will be used. 
HTML_ALIGN_MEMBERS = YES # If the GENERATE_HTMLHELP tag is set to YES, additional index files # will be generated that can be used as input for tools like the # Microsoft HTML help workshop to generate a compressed HTML help file (.chm) # of the generated HTML documentation. GENERATE_HTMLHELP = NO # If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can # be used to specify the file name of the resulting .chm file. You # can add a path in front of the file if the result should not be # written to the html output directory. CHM_FILE = # If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can # be used to specify the location (absolute path including file name) of # the HTML help compiler (hhc.exe). If non-empty doxygen will try to run # the HTML help compiler on the generated index.hhp. HHC_LOCATION = # If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag # controls if a separate .chi index file is generated (YES) or that # it should be included in the master .chm file (NO). GENERATE_CHI = NO # If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag # controls whether a binary table of contents is generated (YES) or a # normal table of contents (NO) in the .chm file. BINARY_TOC = NO # The TOC_EXPAND flag can be set to YES to add extra items for group members # to the contents of the HTML help documentation and to the tree view. TOC_EXPAND = NO # The DISABLE_INDEX tag can be used to turn on/off the condensed index at # top of each HTML page. The value NO (the default) enables the index and # the value YES disables it. DISABLE_INDEX = NO # This tag can be used to set the number of enum values (range [1..20]) # that doxygen will group on one line in the generated HTML documentation. ENUM_VALUES_PER_LINE = 4 # If the GENERATE_TREEVIEW tag is set to YES, a side panel will be # generated containing a tree-like index structure (just like the one that # is generated for HTML Help). For this to work a browser that supports # JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, # Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are # probably better off using the HTML help feature. GENERATE_TREEVIEW = NO # If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be # used to set the initial width (in pixels) of the frame in which the tree # is shown. TREEVIEW_WIDTH = 250 #--------------------------------------------------------------------------- # configuration options related to the LaTeX output #--------------------------------------------------------------------------- # If the GENERATE_LATEX tag is set to YES (the default) Doxygen will # generate Latex output. GENERATE_LATEX = NO # The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `latex' will be used as the default path. LATEX_OUTPUT = latex # The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be # invoked. If left blank `latex' will be used as the default command name. LATEX_CMD_NAME = latex # The MAKEINDEX_CMD_NAME tag can be used to specify the command name to # generate index for LaTeX. If left blank `makeindex' will be used as the # default command name. MAKEINDEX_CMD_NAME = makeindex # If the COMPACT_LATEX tag is set to YES Doxygen generates more compact # LaTeX documents. This may be useful for small projects and may help to # save some trees in general. 
COMPACT_LATEX = NO # The PAPER_TYPE tag can be used to set the paper type that is used # by the printer. Possible values are: a4, a4wide, letter, legal and # executive. If left blank a4wide will be used. PAPER_TYPE = a4wide # The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX # packages that should be included in the LaTeX output. EXTRA_PACKAGES = # The LATEX_HEADER tag can be used to specify a personal LaTeX header for # the generated latex document. The header should contain everything until # the first chapter. If it is left blank doxygen will generate a # standard header. Notice: only use this tag if you know what you are doing! LATEX_HEADER = # If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated # is prepared for conversion to pdf (using ps2pdf). The pdf file will # contain links (just like the HTML output) instead of page references # This makes the output suitable for online browsing using a pdf viewer. PDF_HYPERLINKS = NO # If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of # plain latex in the generated Makefile. Set this option to YES to get a # higher quality PDF documentation. USE_PDFLATEX = NO # If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode. # command to the generated LaTeX files. This will instruct LaTeX to keep # running if errors occur, instead of asking the user for help. # This option is also used when generating formulas in HTML. LATEX_BATCHMODE = NO # If LATEX_HIDE_INDICES is set to YES then doxygen will not # include the index chapters (such as File Index, Compound Index, etc.) # in the output. LATEX_HIDE_INDICES = NO #--------------------------------------------------------------------------- # configuration options related to the RTF output #--------------------------------------------------------------------------- # If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output # The RTF output is optimized for Word 97 and may not look very pretty with # other RTF readers or editors. GENERATE_RTF = NO # The RTF_OUTPUT tag is used to specify where the RTF docs will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `rtf' will be used as the default path. RTF_OUTPUT = rtf # If the COMPACT_RTF tag is set to YES Doxygen generates more compact # RTF documents. This may be useful for small projects and may help to # save some trees in general. COMPACT_RTF = NO # If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated # will contain hyperlink fields. The RTF file will # contain links (just like the HTML output) instead of page references. # This makes the output suitable for online browsing using WORD or other # programs which support those fields. # Note: wordpad (write) and others do not support links. RTF_HYPERLINKS = NO # Load stylesheet definitions from file. Syntax is similar to doxygen's # config file, i.e. a series of assignments. You only have to provide # replacements, missing definitions are set to their default value. RTF_STYLESHEET_FILE = # Set optional variables used in the generation of an rtf document. # Syntax is similar to doxygen's config file. 
RTF_EXTENSIONS_FILE = #--------------------------------------------------------------------------- # configuration options related to the man page output #--------------------------------------------------------------------------- # If the GENERATE_MAN tag is set to YES (the default) Doxygen will # generate man pages GENERATE_MAN = NO # The MAN_OUTPUT tag is used to specify where the man pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `man' will be used as the default path. MAN_OUTPUT = man # The MAN_EXTENSION tag determines the extension that is added to # the generated man pages (default is the subroutine's section .3) MAN_EXTENSION = .3 # If the MAN_LINKS tag is set to YES and Doxygen generates man output, # then it will generate one additional man file for each entity # documented in the real man page(s). These additional files # only source the real man page, but without them the man command # would be unable to find the correct page. The default is NO. MAN_LINKS = NO #--------------------------------------------------------------------------- # configuration options related to the XML output #--------------------------------------------------------------------------- # If the GENERATE_XML tag is set to YES Doxygen will # generate an XML file that captures the structure of # the code including all documentation. GENERATE_XML = NO # The XML_OUTPUT tag is used to specify where the XML pages will be put. # If a relative path is entered the value of OUTPUT_DIRECTORY will be # put in front of it. If left blank `xml' will be used as the default path. XML_OUTPUT = xml # The XML_SCHEMA tag can be used to specify an XML schema, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_SCHEMA = # The XML_DTD tag can be used to specify an XML DTD, # which can be used by a validating XML parser to check the # syntax of the XML files. XML_DTD = # If the XML_PROGRAMLISTING tag is set to YES Doxygen will # dump the program listings (including syntax highlighting # and cross-referencing information) to the XML output. Note that # enabling this will significantly increase the size of the XML output. XML_PROGRAMLISTING = YES #--------------------------------------------------------------------------- # configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- # If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will # generate an AutoGen Definitions (see autogen.sf.net) file # that captures the structure of the code including all # documentation. Note that this feature is still experimental # and incomplete at the moment. GENERATE_AUTOGEN_DEF = NO #--------------------------------------------------------------------------- # configuration options related to the Perl module output #--------------------------------------------------------------------------- # If the GENERATE_PERLMOD tag is set to YES Doxygen will # generate a Perl module file that captures the structure of # the code including all documentation. Note that this # feature is still experimental and incomplete at the # moment. GENERATE_PERLMOD = NO # If the PERLMOD_LATEX tag is set to YES Doxygen will generate # the necessary Makefile rules, Perl scripts and LaTeX code to be able # to generate PDF and DVI output from the Perl module output. 
PERLMOD_LATEX = NO # If the PERLMOD_PRETTY tag is set to YES the Perl module output will be # nicely formatted so it can be parsed by a human reader. This is useful # if you want to understand what is going on. On the other hand, if this # tag is set to NO the size of the Perl module output will be much smaller # and Perl will parse it just the same. PERLMOD_PRETTY = YES # The names of the make variables in the generated doxyrules.make file # are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. # This is useful so different doxyrules.make files included by the same # Makefile don't overwrite each other's variables. PERLMOD_MAKEVAR_PREFIX = #--------------------------------------------------------------------------- # Configuration options related to the preprocessor #--------------------------------------------------------------------------- # If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will # evaluate all C-preprocessor directives found in the sources and include # files. ENABLE_PREPROCESSING = YES # If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro # names in the source code. If set to NO (the default) only conditional # compilation will be performed. Macro expansion can be done in a controlled # way by setting EXPAND_ONLY_PREDEF to YES. MACRO_EXPANSION = NO # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES # then the macro expansion is limited to the macros specified with the # PREDEFINED and EXPAND_AS_PREDEFINED tags. EXPAND_ONLY_PREDEF = NO # If the SEARCH_INCLUDES tag is set to YES (the default) the includes files # in the INCLUDE_PATH (see below) will be search if a #include is found. SEARCH_INCLUDES = YES # The INCLUDE_PATH tag can be used to specify one or more directories that # contain include files that are not input files but should be processed by # the preprocessor. INCLUDE_PATH = # You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard # patterns (like *.h and *.hpp) to filter out the header-files in the # directories. If left blank, the patterns specified with FILE_PATTERNS will # be used. INCLUDE_FILE_PATTERNS = # The PREDEFINED tag can be used to specify one or more macro names that # are defined before the preprocessor is started (similar to the -D option of # gcc). The argument of the tag is a list of macros of the form: name # or name=definition (no spaces). If the definition and the = are # omitted =1 is assumed. To prevent a macro definition from being # undefined via #undef or recursively expanded use the := operator # instead of the = operator. PREDEFINED = # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then # this tag can be used to specify a list of macro names that should be expanded. # The macro definition that is found in the sources will be used. # Use the PREDEFINED tag if you want to use a different macro definition. EXPAND_AS_DEFINED = # If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then # doxygen's preprocessor will remove all function-like macros that are alone # on a line, have an all uppercase name, and do not end with a semicolon. Such # function macros are typically used for boiler-plate code, and will confuse # the parser if not removed. 
SKIP_FUNCTION_MACROS = YES #--------------------------------------------------------------------------- # Configuration::additions related to external references #--------------------------------------------------------------------------- # The TAGFILES option can be used to specify one or more tagfiles. # Optionally an initial location of the external documentation # can be added for each tagfile. The format of a tag file without # this location is as follows: # TAGFILES = file1 file2 ... # Adding location for the tag files is done as follows: # TAGFILES = file1=loc1 "file2 = loc2" ... # where "loc1" and "loc2" can be relative or absolute paths or # URLs. If a location is present for each tag, the installdox tool # does not have to be run to correct the links. # Note that each tag file must have a unique name # (where the name does NOT include the path) # If a tag file is not located in the directory in which doxygen # is run, you must also specify the path to the tagfile here. TAGFILES = # When a file name is specified after GENERATE_TAGFILE, doxygen will create # a tag file that is based on the input files it reads. GENERATE_TAGFILE = # If the ALLEXTERNALS tag is set to YES all external classes will be listed # in the class index. If set to NO only the inherited external classes # will be listed. ALLEXTERNALS = NO # If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed # in the modules index. If set to NO, only the current project's groups will # be listed. EXTERNAL_GROUPS = YES # The PERL_PATH should be the absolute path and name of the perl script # interpreter (i.e. the result of `which perl'). PERL_PATH = /usr/bin/perl #--------------------------------------------------------------------------- # Configuration options related to the dot tool #--------------------------------------------------------------------------- # If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will # generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base # or super classes. Setting the tag to NO turns the diagrams off. Note that # this option is superseded by the HAVE_DOT option below. This is only a # fallback. It is recommended to install and use dot, since it yields more # powerful graphs. CLASS_DIAGRAMS = YES # If set to YES, the inheritance and collaboration graphs will hide # inheritance and usage relations if the target is undocumented # or is not a class. HIDE_UNDOC_RELATIONS = NO # If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is # available from the path. This tool is part of Graphviz, a graph visualization # toolkit from AT&T and Lucent Bell Labs. The other options in this section # have no effect if this option is set to NO (the default) HAVE_DOT = YES # If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect inheritance relations. Setting this tag to YES will force the # the CLASS_DIAGRAMS tag to NO. CLASS_GRAPH = YES # If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen # will generate a graph for each documented class showing the direct and # indirect implementation dependencies (inheritance, containment, and # class references variables) of the class with other documented classes. 
COLLABORATION_GRAPH = YES # If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen # will generate a graph for groups, showing the direct groups dependencies GROUP_GRAPHS = YES # If the UML_LOOK tag is set to YES doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. UML_LOOK = NO # If set to YES, the inheritance and collaboration graphs will show the # relations between templates and their instances. TEMPLATE_RELATIONS = NO # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT # tags are set to YES then doxygen will generate a graph for each documented # file showing the direct and indirect include dependencies of the file with # other documented files. INCLUDE_GRAPH = YES # If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and # HAVE_DOT tags are set to YES then doxygen will generate a graph for each # documented header file showing the documented files that directly or # indirectly include this file. INCLUDED_BY_GRAPH = YES # If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will # generate a call dependency graph for every global function or class method. # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected # functions only using the \callgraph command. CALL_GRAPH = NO # If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen # will graphical hierarchy of all classes instead of a textual one. GRAPHICAL_HIERARCHY = YES # If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES # then doxygen will show the dependencies a directory has on other directories # in a graphical way. The dependency relations are determined by the #include # relations between the files in the directories. DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images # generated by dot. Possible values are png, jpg, or gif # If left blank png will be used. DOT_IMAGE_FORMAT = png # The tag DOT_PATH can be used to specify the path where the dot tool can be # found. If left blank, it is assumed the dot tool can be found in the path. DOT_PATH = # The DOTFILE_DIRS tag can be used to specify one or more directories that # contain dot files that are included in the documentation (see the # \dotfile command). DOTFILE_DIRS = # The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_WIDTH = 1024 # The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allows height # (in pixels) of the graphs generated by dot. If a graph becomes larger than # this value, doxygen will try to truncate the graph, so that it fits within # the specified constraint. Beware that most browsers cannot cope with very # large images. MAX_DOT_GRAPH_HEIGHT = 1024 # The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the # graphs generated by dot. A depth value of 3 means that only nodes reachable # from the root by following a path via at most 3 edges will be shown. Nodes # that lay further from the root node will be omitted. Note that setting this # option to 1 or 2 may greatly reduce the computation time needed for large # code bases. 
Also note that a graph may be further truncated if the graph's # image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH # and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), # the graph is not depth-constrained. MAX_DOT_GRAPH_DEPTH = 0 # Set the DOT_TRANSPARENT tag to YES to generate images with a transparent # background. This is disabled by default, which results in a white background. # Warning: Depending on the platform used, enabling this option may lead to # badly anti-aliased labels on the edges of a graph (i.e. they become hard to # read). DOT_TRANSPARENT = NO # Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). This # makes dot run faster, but since only newer versions of dot (>1.8.10) # support this, this feature is disabled by default. DOT_MULTI_TARGETS = NO # If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will # generate a legend page explaining the meaning of the various boxes and # arrows in the dot generated graphs. GENERATE_LEGEND = YES # If the DOT_CLEANUP tag is set to YES (the default) Doxygen will # remove the intermediate dot files that are used to generate # the various graphs. DOT_CLEANUP = YES #--------------------------------------------------------------------------- # Configuration::additions related to the search engine #--------------------------------------------------------------------------- # The SEARCHENGINE tag specifies whether or not a search engine should be # used. If set to NO the values of all tags below this one will be ignored. SEARCHENGINE = NO recoll-1.26.3/doc/prog/filters.txt0000644000175000017500000000275213515016472013743 00000000000000/*!@filters \page filters About filters \section filtintro Overview Before a document can be processed either for indexing or previewing, it must be translated into an internal common format. The MimeHandler class defines the virtual interface for filters. There are derived classes for text, html (MimeHandlerHtml), and mail folders (MimeHandlerMail) There is also a derived class (MimeHandlerExec) that will execute an external program to translate the document to simple html (to be further processed by MimeHandlerHtml). To extend Recoll for a new document type, you may either subclass the MimeHandler class (look at one of the existing subclasses), or write an external filter, which will probably be the simpler solution in most cases. \section extfilts External filters Filters are programs (usually shell scripts) that will turn a document of foreign type into something that Recoll can understand. HTML was chosen as a pivot format for its ability to carry structured information. The meta-information tags that Recoll will use at the moment are the following: - title - charset - keywords - description For an example, you can take a look at the rclsoff filter which translates openoffice documents. The filter is executed with the input file name as a parameter and should output the result to stdout. \section extassoc Associating a filter to a mime type This is done in the mimeconf configuration file. Take a look at the file, the format is self-explanatory. 
*/ recoll-1.26.3/doc/prog/top.txt0000644000175000017500000000126013303776057013075 00000000000000/*!@mainpage Recoll: A personal/desktop text-search program \author email address: jfd@recoll.org \section intro Introduction Home page: http://www.recoll.org This is the documentation for Recoll, a personal text search tool. Recoll is written in C++, has a Qt-based GUI and uses the Xapian full text search engine. Recoll supports a number of document types, the decoding of which is either performed with internal code or an externally executed program. It should be relatively easy to write a new filter for some yet unsupported file type, and the documentation for this is found here: Writing input filters */ recoll-1.26.3/doc/man/0000755000175000017500000000000013570165407011414 500000000000000recoll-1.26.3/doc/man/xadump.10000644000175000017500000000375513347664027012722 00000000000000.TH XADUMP 1 "18 November 2017" .SH NAME xadump \- low-level access to a Recoll Xapian index. .SH SYNOPSIS .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-i <docid> .B \-D .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-i <docid> .B \-X .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-i <docid> [ .B \-x ] .B \-T .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-i <docid> [ .B \-x ] .B \-r .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-t <term> .B \-E .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-t <term> .B \-F .br .B xadump [ .B \-d <dbdir> ] [ .B \-e ] .B \-t <term> .B \-P .br .B xadump .B \-T [ .B \-f ] [ .B \-n ] [ .B \-l ] .br .B xadump .B \-q term [term ...] .SH DESCRIPTION The .B xadump command is a low-level access and diagnostic tool for a Xapian index as organized by the Recoll indexer. The index directory to be used is specified with option .B \-d. .PP Options -D, -X, -T and -r take a single .B docid argument specified with option .B \-i. .B \-D displays the document data record. .B \-X deletes all index data for the document. .B \-T prints the term list for the document. Without a docid argument, this option will list the whole index term list. .B \-f can be set to precede each term with its occurrence count (only if no docid is specified). .B \-n can be set to omit the enclosing brackets. .B \-l can be set to skip prefixed terms. .B \-r prints the document text as reconstructed from index data. When option .B \-x is set, terms are printed with each character separated by a space, which can be useful to check some encoding issues. .PP Options -E, -F and -P all need a term argument, specified with .B \-t . .B \-E performs a term existence test. .B \-F retrieves the term frequency for the given term. .B \-P displays the postings for the given term. .PP With option .B \-q, xadump performs a simple AND query on the index, using the given term arguments. recoll-1.26.3/doc/man/recollq.10000644000175000017500000000631413347664027013067 00000000000000.\" $Id: recollq.1,v 1.1 2007-11-13 10:07:35 dockes Exp $ (C) 2005 J.F.Dockes\$ .TH RECOLLQ 1 "13 November 2007" .SH NAME recollq \- command line / standard output Recoll query command. .SH SYNOPSIS .B recollq [ .B \-c <configdir> ] [ .B \-o | .B \-f | .B \-a ] [ .B \-b ] [ .B \-d ] [ .B \-A ] [ .B \-e ] [ .B \-m ] [ .B \-n <[first-]cnt> ] [ .B \-Q ] [ .B \-s <stemlang> ] [ .B \-S <field> ] [ .B \-D ] [ .B \-i <dbdir> ] [ .B \-F <fieldspec> ] .B recollq \-P .SH DESCRIPTION The .B recollq command will execute the Recoll query specified on the command line and print the results to the standard output. It is primarily designed for diagnostics, or piping the data to some other program. The basic format and its variations can be useful for command line querying.
The \-F option should be used exclusively when the output is to be processed by another program, as it is the only one for which the output is guaranteed to be fully parseable. .PP The .B \-c option specifies the configuration directory name, overriding the default or $RECOLL_CONFDIR. .PP The query string is built by concatenating all arguments found at the end of the command line (after the options). It will be interpreted by default as a query language string. Quoting should be used as needed to escape characters that might be interpreted by the shell (i.e. wildcards). If .B \-a is specified, the query string will be interpreted as an .I all words simple search query. If .B \-o is specified, the query string will be interpreted as an .I any word simple search query. If .B \-f is specified, the query string will be interpreted as a .I file name simple search query. .PP .B \-b (basic) can be specified to only print the result URLs in the output stream. .PP If .B \-d is set, the text content of the result files will be dumped to stdout. .PP If .B \-m is set, the whole metadata array will be dumped for each document. .PP If .B \-A is set, the document abstracts will be printed. .PP .B \-S sorts the results according to the specified field. Use .B \-D for descending order. .PP .B \-n can be used to set the maximum number of results that should be printed. The default is 2000. Use a value of 0 for no limit. .PP .B \-s selects the word stemming language. The value should match an existing stemming database (as set in the configuration or added with recollindex \-s). .PP .B \-i adds the specified Xapian index to the set used for the query. Can be specified multiple times. .PP .B \-F (one argument, e.g. "author title") should be used for piping the data to another program. After two initial lines showing the actual query and the estimated result counts, it will print one line for each result document. Each line will have exactly the fields requested on the command line. Fields are encoded in base64 and separated by one space character. Empty fields are indicated by consecutive space characters. There is one additional space character at the end of each line. .PP .B recollq \-P (Period) will print the minimum and maximum modification years for documents in the index. .SH SEE ALSO .PP recollindex(1) recollq(1) recoll.conf(5) recoll-1.26.3/doc/man/recoll.10000644000175000017500000000440613303776057012705 00000000000000.\" $Id: recoll.1,v 1.3 2007-11-13 18:42:18 dockes Exp $ (C) 2005 J.F.Dockes\$ .TH RECOLL 1 "8 January 2006" .SH NAME recoll \- user interface for the Recoll full text search system .SH SYNOPSIS .B recoll [ .B \-c <configdir> ] [ .B \-o | .B \-l | .B \-f | .B \-a ] [ .B \-t ] [ .B \-q <query> ] .B recoll [ .B \-c <configdir> ] <url> .SH DESCRIPTION In the first form, the .B recoll command will start the graphical user interface for querying the .B Recoll database. .PP On the first run, .B recoll will create the user configuration, which can be customized before starting the first indexing run. .PP The .B \-c option specifies the configuration directory name, overriding the default or $RECOLL_CONFDIR. .PP The .B \-q option can be used to specify an initial query on the command line. This query will be interpreted by default as a query language string. If .B \-a is specified, the query string will be interpreted as an .I all words simple search query. If .B \-o is specified, the query string will be interpreted as an .I any word simple search query.
If .B \-f is specified, the query string will be interpreted as a .I file name simple search query. If .B \-l is specified, the query string will be interpreted as a .I query language simple search query. .PP If .B \-t is specified, or if .B recoll is called as .B recollq (through a link), the Graphical User Interface will not be started, and results will be printed to the standard output. Additional options understood by the .B recollq command may be specified in this case. These can control the output format and the maximum number of results to be printed. .PP Please refer to online help for a full description. .PP In the second form, the .B recoll command can be used to start a native viewer for a document indexed by Recoll. It will understand a final URL fragment (separated by a '#' character) to indicate an .I ipath , the specifier for the part of the Recoll document access path which is internal to a container such as an mbox folder or a zip archive, and will, if needed, create a temporary file to let a normal system utility display the document. .PP The second form is mostly used for opening embedded documents from the Ubuntu Unity Recoll lens. .SH SEE ALSO .PP recollindex(1) recollq(1) recoll.conf(5) recoll-1.26.3/doc/man/recoll.conf.50000644000175000017500000010123013566424763013633 00000000000000.TH RECOLL.CONF 5 "14 November 2012" .SH NAME recoll.conf \- main personal configuration file for Recoll .SH DESCRIPTION This file defines the index configuration for the Recoll full-text search system. .LP The system-wide configuration file is normally located inside /usr/[local]/share/recoll/examples. Any parameter set in the common file may be overridden by setting it in the personal configuration file, by default: .IR $HOME/.recoll/recoll.conf .LP Please note that while I try to keep this manual page reasonably up to date, it will frequently lag the current state of the software. The best source of information about the configuration is the comments in the system-wide configuration file or the user manual which you can access from the recoll GUI help menu or on the recoll web site. .LP A short extract of the file might look as follows: .IP .nf # Space-separated list of directories to index. topdirs = ~/docs /usr/share/doc [~/somedirectory-with-utf8-txt-files] defaultcharset = utf-8 .fi .LP There are three kinds of lines: .RS .IP \(bu Comment or empty .IP \(bu Parameter assignment .IP \(bu Section definition .RE .LP Empty lines or lines beginning with # are ignored. .LP Assignment lines are in the form 'name = value'. .LP Section lines allow redefining a parameter for a directory subtree. Some of the parameters used for indexing are looked up hierarchically from the more to the less specific. Not all parameters can be meaningfully redefined; this is specified for each in the next section. .LP The tilde character (~) is expanded in file names to the name of the user's home directory. .LP Where values are lists, white space is used for separation, and elements with embedded spaces can be quoted with double-quotes. .SH OPTIONS .TP .BI "topdirs = "string Space-separated list of files or directories to recursively index. Defaults to ~ (indexes $HOME). You can use symbolic links in the list; they will be followed, independently of the value of the followLinks variable. .TP .BI "monitordirs = "string Space-separated list of files or directories to monitor for updates. When running the real-time indexer, this allows monitoring only a subset of the whole indexed area.
The elements must be included in the tree defined by the 'topdirs' members. .TP .BI "skippedNames = "string Files and directories which should be ignored. White space separated list of wildcard patterns (simple ones, not paths, must contain no / ), which will be tested against file and directory names. The list in the default configuration does not exclude hidden directories (names beginning with a dot), which means that it may index quite a few things that you do not want. On the other hand, email user agents like Thunderbird usually store messages in hidden directories, and you probably want this indexed. One possible solution is to have ".*" in "skippedNames", and add things like "~/.thunderbird" "~/.evolution" to "topdirs". Not even the file names are indexed for patterns in this list, see the "noContentSuffixes" variable for an alternative approach which indexes the file names. Can be redefined for any subtree. .TP .BI "skippedNames- = "string List of name endings to remove from the default skippedNames list. .TP .BI "skippedNames+ = "string List of name endings to add to the default skippedNames list. .TP .BI "noContentSuffixes = "string List of name endings (not necessarily dot-separated suffixes) for which we don't try MIME type identification, and don't uncompress or index content. Only the names will be indexed. This complements the now obsoleted recoll_noindex list from the mimemap file, which will go away in a future release (the move from mimemap to recoll.conf allows editing the list through the GUI). This is different from skippedNames because these are name ending matches only (not wildcard patterns), and the file name itself gets indexed normally. This can be redefined for subdirectories. .TP .BI "noContentSuffixes- = "string List of name endings to remove from the default noContentSuffixes list. .TP .BI "noContentSuffixes+ = "string List of name endings to add to the default noContentSuffixes list. .TP .BI "skippedPaths = "string Absolute paths we should not go into. Space-separated list of wildcard expressions for absolute filesystem paths. Must be defined at the top level of the configuration file, not in a subsection. Can contain files and directories. The database and configuration directories will automatically be added. The expressions are matched using 'fnmatch(3)' with the FNM_PATHNAME flag set by default. This means that '/' characters must be matched explicitly. You can set 'skippedPathsFnmPathname' to 0 to disable the use of FNM_PATHNAME (meaning that '/*/dir3' will match '/dir1/dir2/dir3'). The default value contains the usual mount point for removable media to remind you that it is a bad idea to have Recoll work on these (esp. with the monitor: media gets indexed on mount, all data gets erased on unmount). Explicitly adding '/media/xxx' to the 'topdirs' variable will override this. .TP .BI "skippedPathsFnmPathname = "bool Set to 0 to override use of FNM_PATHNAME for matching skipped paths. .TP .BI "nowalkfn = "string File name which will cause its parent directory to be skipped. Any directory containing a file with this name will be skipped as if it was part of the skippedPaths list. Ex: .recoll-noindex .TP .BI "daemSkippedPaths = "string skippedPaths equivalent specific to real time indexing. This enables having parts of the tree which are initially indexed but not monitored. If daemSkippedPaths is not set, the daemon uses skippedPaths. .TP .BI "zipUseSkippedNames = "bool Use skippedNames inside Zip archives. Fetched directly by the rclzip handler. 
Skip the patterns defined by skippedNames inside Zip archives. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html .TP .BI "zipSkippedNames = "string Space-separated list of wildcard expressions for names that should be ignored inside zip archives. This is used directly by the zip handler. If zipUseSkippedNames is not set, zipSkippedNames defines the patterns to be skipped inside archives. If zipUseSkippedNames is set, the two lists are concatenated and used. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html .TP .BI "followLinks = "bool Follow symbolic links during indexing. The default is to ignore symbolic links to avoid multiple indexing of linked files. No effort is made to avoid duplication when this option is set to true. This option can be set individually for each of the 'topdirs' members by using sections. It can not be changed below the 'topdirs' level. Links in the 'topdirs' list itself are always followed. .TP .BI "indexedmimetypes = "string Restrictive list of indexed mime types. Normally not set (in which case all supported types are indexed). If it is set, only the types from the list will have their contents indexed. The names will be indexed anyway if indexallfilenames is set (default). MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases). Can be redefined for subtrees. .TP .BI "excludedmimetypes = "string List of excluded MIME types. Lets you exclude some types from indexing. MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases) Can be redefined for subtrees. .TP .BI "nomd5types = "string Don't compute md5 for these types. md5 checksums are used only for deduplicating results, and can be very expensive to compute on multimedia or other big files. This list lets you turn off md5 computation for selected types. It is global (no redefinition for subtrees). At the moment, it only has an effect for external handlers (exec and execm). The file types can be specified by listing either MIME types (e.g. audio/mpeg) or handler names (e.g. rclaudio). .TP .BI "compressedfilemaxkbs = "int Size limit for compressed files. We need to decompress these in a temporary directory for identification, which can be wasteful in some cases. Limit the waste. Negative means no limit. 0 results in no processing of any compressed file. Default 50 MB. .TP .BI "textfilemaxmbs = "int Size limit for text files. Mostly for skipping monster logs. Default 20 MB. .TP .BI "indexallfilenames = "bool Index the file names of unprocessed files Index the names of files the contents of which we don't index because of an excluded or unsupported MIME type. .TP .BI "usesystemfilecommand = "bool Use a system command for file MIME type guessing as a final step in file type identification This is generally useful, but will usually cause the indexing of many bogus 'text' files. See 'systemfilecommand' for the command used. .TP .BI "systemfilecommand = "string Command used to guess MIME types if the internal methods fails This should be a "file -i" workalike. The file path will be added as a last parameter to the command line. 
"xdg-mime" works better than the traditional "file" command, and is now the configured default (with a hard-coded fallback to "file") .TP .BI "processwebqueue = "bool Decide if we process the Web queue. The queue is a directory where the Recoll Web browser plugins create the copies of visited pages. .TP .BI "textfilepagekbs = "int Page size for text files. If this is set, text/plain files will be divided into documents of approximately this size. Will reduce memory usage at index time and help with loading data in the preview window at query time. Particularly useful with very big files, such as application or system logs. Also see textfilemaxmbs and compressedfilemaxkbs. .TP .BI "membermaxkbs = "int Size limit for archive members. This is passed to the filters in the environment as RECOLL_FILTER_MAXMEMBERKB. .TP .BI "indexStripChars = "bool Decide if we store character case and diacritics in the index. If we do, searches sensitive to case and diacritics can be performed, but the index will be bigger, and some marginal weirdness may sometimes occur. The default is a stripped index. When using multiple indexes for a search, this parameter must be defined identically for all. Changing the value implies an index reset. .TP .BI "indexStoreDocText = "bool Decide if we store the documents' text content in the index. Storing the text allows extracting snippets from it at query time, instead of building them from index position data. Newer Xapian index formats have rendered our use of positions list unacceptably slow in some cases. The last Xapian index format with good performance for the old method is Chert, which is default for 1.2, still supported but not default in 1.4 and will be dropped in 1.6. The stored document text is translated from its original format to UTF-8 plain text, but not stripped of upper-case, diacritics, or punctuation signs. Storing it increases the index size by 10-20% typically, but also allows for nicer snippets, so it may be worth enabling it even if not strictly needed for performance if you can afford the space. The variable only has an effect when creating an index, meaning that the xapiandb directory must not exist yet. Its exact effect depends on the Xapian version. For Xapian 1.4, if the variable is set to 0, the Chert format will be used, and the text will not be stored. If the variable is 1, Glass will be used, and the text stored. For Xapian 1.2, and for versions after 1.5 and newer, the index format is always the default, but the variable controls if the text is stored or not, and the abstract generation method. With Xapian 1.5 and later, and the variable set to 0, abstract generation may be very slow, but this setting may still be useful to save space if you do not use abstract generation at all. .TP .BI "nonumbers = "bool Decides if terms will be generated for numbers. For example "123", "1.5e6", 192.168.1.4, would not be indexed if nonumbers is set ("value123" would still be). Numbers are often quite interesting to search for, and this should probably not be set except for special situations, ie, scientific documents with huge amounts of numbers in them, where setting nonumbers will reduce the index size. This can only be set for a whole index, not for a subtree. .TP .BI "dehyphenate = "bool Determines if we index 'coworker' also when the input is 'co-worker'. This is new in version 1.22, and on by default. Setting the variable to off allows restoring the previous behaviour. 
.TP .BI "backslashasletter = "bool Process backslash as normal letter This may make sense for people wanting to index TeX commands as such but is not of much general use. .TP .BI "maxtermlength = "int Maximum term length. Words longer than this will be discarded. The default is 40 and used to be hard-coded, but it can now be adjusted. You need an index reset if you change the value. .TP .BI "nocjk = "bool Decides if specific East Asian (Chinese Korean Japanese) characters/word splitting is turned off. This will save a small amount of CPU if you have no CJK documents. If your document base does include such text but you are not interested in searching it, setting nocjk may be a significant time and space saver. .TP .BI "cjkngramlen = "int This lets you adjust the size of n-grams used for indexing CJK text. The default value of 2 is probably appropriate in most cases. A value of 3 would allow more precision and efficiency on longer words, but the index will be approximately twice as large. .TP .BI "indexstemminglanguages = "string Languages for which to create stemming expansion data. Stemmer names can be found by executing 'recollindex -l', or this can also be set from a list in the GUI. .TP .BI "defaultcharset = "string Default character set. This is used for files which do not contain a character set definition (e.g.: text/plain). Values found inside files, e.g. a 'charset' tag in HTML documents, will override it. If this is not set, the default character set is the one defined by the NLS environment ($LC_ALL, $LC_CTYPE, $LANG), or ultimately iso-8859-1 (cp-1252 in fact). If for some reason you want a general default which does not match your LANG and is not 8859-1, use this variable. This can be redefined for any sub-directory. .TP .BI "unac_except_trans = "string A list of characters, encoded in UTF-8, which should be handled specially when converting text to unaccented lowercase. For example, in Swedish, the letter a with diaeresis has full alphabet citizenship and should not be turned into an a. Each element in the space-separated list has the special character as first element and the translation following. The handling of both the lowercase and upper-case versions of a character should be specified, as appartenance to the list will turn-off both standard accent and case processing. The value is global and affects both indexing and querying. Examples: Swedish: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl åå Åå . German: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl In French, you probably want to decompose oe and ae and nobody would type a German ß unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl . The default for all until someone protests follows. These decompositions are not performed by unac, but it is unlikely that someone would type the composed forms in a search. unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl .TP .BI "maildefcharset = "string Overrides the default character set for email messages which don't specify one. This is mainly useful for readpst (libpst) dumps, which are utf-8 but do not say so. .TP .BI "localfields = "string Set fields on all files (usually of a specific fs area). Syntax is the usual: name = value ; attr1 = val1 ; [...] value is empty so this needs an initial semi-colon. This is useful, e.g., for setting the rclaptg field for application selection inside mimeview. .TP .BI "testmodifusemtime = "bool Use mtime instead of ctime to test if a file has been modified. 
The time is used in addition to the size, which is always used. Setting this can reduce re-indexing on systems where extended attributes are used (by some other application), but not indexed, because changing extended attributes only affects ctime. Notes: - This may prevent detection of change in some marginal file rename cases (the target would need to have the same size and mtime). - You should probably also set noxattrfields to 1 in this case, except if you still prefer to perform xattr indexing, for example if the local file update pattern makes it of value (as in general, there is a risk for pure extended attributes updates without file modification to go undetected). Perform a full index reset after changing this. .TP .BI "noxattrfields = "bool Disable extended attributes conversion to metadata fields. This probably needs to be set if testmodifusemtime is set. .TP .BI "metadatacmds = "string Define commands to gather external metadata, e.g. tmsu tags. There can be several entries, separated by semi-colons, each defining which field name the data goes into and the command to use. Don't forget the initial semi-colon. All the field names must be different. You can use aliases in the "field" file if necessary. As a not too pretty hack conceded to convenience, any field name beginning with "rclmulti" will be taken as an indication that the command returns multiple field values inside a text blob formatted as a recoll configuration file ("fieldname = fieldvalue" lines). The rclmultixx name will be ignored, and field names and values will be parsed from the data. Example: metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf %f .TP .BI "cachedir = "dfn Top directory for Recoll data. Recoll data directories are normally located relative to the configuration directory (e.g. ~/.recoll/xapiandb, ~/.recoll/mboxcache). If 'cachedir' is set, the directories are stored under the specified value instead (e.g. if cachedir is ~/.cache/recoll, the default dbdir would be ~/.cache/recoll/xapiandb). This affects dbdir, webcachedir, mboxcachedir, aspellDicDir, which can still be individually specified to override cachedir. Note that if you have multiple configurations, each must have a different cachedir, there is no automatic computation of a subpath under cachedir. .TP .BI "maxfsoccuppc = "int Maximum file system occupation over which we stop indexing. The value is a percentage, corresponding to what the "Capacity" df output column shows. The default value is 0, meaning no checking. .TP .BI "dbdir = "dfn Xapian database directory location. This will be created on first indexing. If the value is not an absolute path, it will be interpreted as relative to cachedir if set, or the configuration directory (-c argument or $RECOLL_CONFDIR). If nothing is specified, the default is then ~/.recoll/xapiandb/ .TP .BI "idxstatusfile = "fn Name of the scratch file where the indexer process updates its status. Default: idxstatus.txt inside the configuration directory. .TP .BI "mboxcachedir = "dfn Directory location for storing mbox message offsets cache files. This is normally 'mboxcache' under cachedir if set, or else under the configuration directory, but it may be useful to share a directory between different configurations. .TP .BI "mboxcacheminmbs = "int Minimum mbox file size over which we cache the offsets. There is really no sense in caching offsets for small files. The default is 5 MB. .TP .BI "webcachedir = "dfn Directory where we store the archived web pages. 
This is only used by the web history indexing code. Default: cachedir/webcache if cachedir is set, else $RECOLL_CONFDIR/webcache. .TP .BI "webcachemaxmbs = "int Maximum size in MB of the Web archive. This is only used by the web history indexing code. Default: 40 MB. Reducing the size will not physically truncate the file. .TP .BI "webqueuedir = "fn The path to the Web indexing queue. This used to be hard-coded in the old plugin as ~/.recollweb/ToIndex so there would be no need or possibility to change it, but the WebExtensions plugin now downloads the files to the user Downloads directory, and a script moves them to webqueuedir. The script reads this value from the config so it has become possible to change it. .TP .BI "webdownloadsdir = "fn The path to the browser downloads directory. This is where the new browser add-on extension has to create the files. They are then moved by a script to webqueuedir. .TP .BI "aspellDicDir = "dfn Aspell dictionary storage directory location. The aspell dictionary (aspdict.(lang).rws) is normally stored in the directory specified by cachedir if set, or under the configuration directory. .TP .BI "filtersdir = "dfn Directory location for executable input handlers. If RECOLL_FILTERSDIR is set in the environment, we use it instead. Defaults to $prefix/share/recoll/filters. Can be redefined for subdirectories. .TP .BI "iconsdir = "dfn Directory location for icons. The only reason to change this would be if you want to change the icons displayed in the result list. Defaults to $prefix/share/recoll/images. .TP .BI "idxflushmb = "int Threshold (megabytes of new data) at which we flush from memory to the on-disk index. Setting this allows some control over memory usage by the indexer process. A value of 0 means no explicit flushing, which lets Xapian perform its own thing, meaning flushing every $XAPIAN_FLUSH_THRESHOLD documents created, modified or deleted: as memory usage depends on average document size, not only document count, the Xapian approach is not very useful, and you should let Recoll manage the flushes. The compiled-in default is 0. The configured default value (from this file) is now 50 MB, and should be OK in many cases. You can set it as low as 10 to conserve memory, but if you are looking for maximum speed, you may want to experiment with values between 20 and 200. In my experience, values beyond this are always counterproductive. If you find otherwise, please drop me a note. .TP .BI "filtermaxseconds = "int Maximum external filter execution time in seconds. Default 1200 (20 minutes). Set to 0 for no limit. This is mainly to avoid infinite loops in PostScript files (loop.ps). .TP .BI "filtermaxmbytes = "int Maximum virtual memory space for filter processes (setrlimit(RLIMIT_AS)), in megabytes. Note that this includes any mapped libs (there is no reliable Linux way to limit the data space only), so we need to be a bit generous here. Anything over 2000 will be ignored on 32-bit machines. .TP .BI "thrQSizes = "string Stage input queues configuration. There are three internal queues in the indexing pipeline stages (file data extraction, terms generation, index update). This parameter defines the queue depths for each stage (three integer values). If a value of -1 is given for a given stage, no queue is used, and the thread will go on performing the next stage. In practice, deep queues have not been shown to increase performance.
Default: a value of 0 for the first queue tells Recoll to perform autoconfiguration based on the detected number of CPUs (no need for the two other values in this case). Use thrQSizes = -1 -1 -1 to disable multithreading entirely. .TP .BI "thrTCounts = "string Number of threads used for each indexing stage. The three stages are: file data extraction, terms generation, index update). The use of the counts is also controlled by some special values in thrQSizes: if the first queue depth is 0, all counts are ignored (autoconfigured); if a value of -1 is used for a queue depth, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the Xapian index is necessarily single-threaded (and protected by a mutex). .TP .BI "loglevel = "int Log file verbosity 1-6. A value of 2 will print only errors and warnings. 3 will print information like document updates, 4 is quite verbose and 6 very verbose. .TP .BI "logfilename = "fn Log file destination. Use 'stderr' (default) to write to the console. .TP .BI "idxloglevel = "int Override loglevel for the indexer. .TP .BI "idxlogfilename = "fn Override logfilename for the indexer. .TP .BI "daemloglevel = "int Override loglevel for the indexer in real time mode. The default is to use the idx... values if set, else the log... values. .TP .BI "daemlogfilename = "fn Override logfilename for the indexer in real time mode. The default is to use the idx... values if set, else the log... values. .TP .BI "orgidxconfdir = "dfn Original location of the configuration directory. This is used exclusively for movable datasets. Locating the configuration directory inside the directory tree makes it possible to provide automatic query time path translations once the data set has moved (for example, because it has been mounted on another location). .TP .BI "curidxconfdir = "dfn Current location of the configuration directory. Complement orgidxconfdir for movable datasets. This should be used if the configuration directory has been copied from the dataset to another location, either because the dataset is readonly and an r/w copy is desired, or for performance reasons. This records the original moved location before copy, to allow path translation computations. For example if a dataset originally indexed as '/home/me/mydata/config' has been mounted to '/media/me/mydata', and the GUI is running from a copied configuration, orgidxconfdir would be '/home/me/mydata/config', and curidxconfdir (as set in the copied configuration) would be '/media/me/mydata/config'. .TP .BI "idxrundir = "dfn Indexing process current directory. The input handlers sometimes leave temporary files in the current directory, so it makes sense to have recollindex chdir to some temporary directory. If the value is empty, the current directory is not changed. If the value is (literal) tmp, we use the temporary directory as set by the environment (RECOLL_TMPDIR else TMPDIR else /tmp). If the value is an absolute path to a directory, we go there. .TP .BI "checkneedretryindexscript = "fn Script used to heuristically check if we need to retry indexing files which previously failed. The default script checks the modified dates on /usr/bin and /usr/local/bin. A relative path will be looked up in the filters dirs, then in the path. Use an absolute path to do otherwise. .TP .BI "recollhelperpath = "string Additional places to search for helper executables. This is only used on Windows for now. 
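.IP
As an illustration only (example values, not recommended settings: the autoconfigured defaults are adequate for most installations), the threading and logging parameters described above could be set as follows:
.IP
.nf
# Example: explicit pipeline setup, queue depth 2 for each stage,
# 4 threads for data extraction, 2 for term generation, and a single
# index updater (the last stage is necessarily single-threaded).
thrQSizes = 2 2 2
thrTCounts = 4 2 1
# Example: send indexer messages to a separate, moderately verbose log
idxloglevel = 3
idxlogfilename = ~/.recoll/idxlog.txt
.fi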
.TP .BI "idxabsmlen = "int Length of abstracts we store while indexing. Recoll stores an abstract for each indexed file. The text can come from an actual 'abstract' section in the document or will just be the beginning of the document. It is stored in the index so that it can be displayed inside the result lists without decoding the original file. The idxabsmlen parameter defines the size of the stored abstract. The default value is 250 bytes. The search interface gives you the choice to display this stored text or a synthetic abstract built by extracting text around the search terms. If you always prefer the synthetic abstract, you can reduce this value and save a little space. .TP .BI "idxmetastoredlen = "int Truncation length of stored metadata fields. This does not affect indexing (the whole field is processed anyway), just the amount of data stored in the index for the purpose of displaying fields inside result lists or previews. The default value is 150 bytes, which may be too low if you have custom fields. .TP .BI "idxtexttruncatelen = "int Truncation length for all document texts. Only index the beginning of documents. This is not recommended except if you are sure that the interesting keywords are at the top and have severe disk space issues. .TP .BI "aspellLanguage = "string Language definitions to use when creating the aspell dictionary. The value must match a set of aspell language definition files. You can type "aspell dicts" to see a list. The default, if this is not set, is to use the NLS environment to guess the value. .TP .BI "aspellAddCreateParam = "string Additional option and parameter to aspell dictionary creation command. Some aspell packages may need an additional option (e.g. on Debian Jessie: --local-data-dir=/usr/lib/aspell). See Debian bug 772415. .TP .BI "aspellKeepStderr = "bool Set this to have a look at aspell dictionary creation errors. There are always many, so this is mostly for debugging. .TP .BI "noaspell = "bool Disable aspell use. The aspell dictionary generation takes time, and some combinations of aspell version, language, and local terms result in aspell crashing, so it sometimes makes sense to just disable the thing. .TP .BI "monauxinterval = "int Auxiliary database update interval. The real time indexer only updates the auxiliary databases (stemdb, aspell) periodically, because it would be too costly to do it for every document change. The default period is one hour. .TP .BI "monixinterval = "int Minimum interval (seconds) between processings of the indexing queue. The real time indexer does not process each event when it comes in, but lets the queue accumulate, to diminish overhead and to aggregate multiple events affecting the same file. Default 30 seconds. .TP .BI "mondelaypatterns = "string Timing parameters for the real time indexing. Definitions for files which get a longer delay before reindexing is allowed. This is for fast-changing files, which should only be reindexed once in a while. A list of wildcardPattern:seconds pairs. The patterns are matched with fnmatch(pattern, path, 0). You can quote entries containing white space with double quotes (quote the whole entry, not the pattern). The default is empty. Example: mondelaypatterns = *.log:20 "*with spaces.*:30" .TP .BI "monioniceclass = "int ionice class for the real time indexing process, on platforms where this is supported. The default value is 3. .TP .BI "monioniceclassdata = "string ionice class parameter for the real time indexing process, on platforms where this is supported. 
The default is empty. .TP .BI "autodiacsens = "bool Auto-trigger diacritics sensitivity (raw index only). If the index is not stripped, decide if we automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the "D" modifier to specify diacritics sensitivity. Default is no. .TP .BI "autocasesens = "bool Auto-trigger case sensitivity (raw index only). If the index is not stripped (see indexStripChars), decide if we automatically trigger character case sensitivity if the search term has upper-case characters in any but the first position. Else you need to use the query language and the "C" modifier to specify character-case sensitivity. Default is yes. .TP .BI "maxTermExpand = "int Maximum query expansion count for a single term (e.g.: when using wildcards). This only affects queries, not indexing. We used to not limit this at all (except for filenames where the limit was too low at 1000), but it is unreasonable with a big index. Default 10000. .TP .BI "maxXapianClauses = "int Maximum number of clauses we add to a single Xapian query. This only affects queries, not indexing. In some cases, the result of term expansion can be multiplicative, and we want to avoid eating all the memory. Default 50000. .TP .BI "snippetMaxPosWalk = "int Maximum number of positions we walk while populating a snippet for the result list. The default of 1,000,000 may be insufficient for very big documents; the consequence would be snippets with possibly meaning-altering missing words. .TP .BI "pdfocr = "bool Attempt OCR of PDF files with no text content if both tesseract and pdftoppm are installed. The default is off because OCR is so very slow. .TP .BI "pdfocrlang = "string Language to assume for PDF OCR. This is very important for obtaining a reasonable error rate with tesseract. This can also be set through a configuration variable or directory-local parameters. See the rclpdf.py script. .TP .BI "pdfattach = "bool Enable PDF attachment extraction by executing pdftk (if available). This is normally disabled, because it does slow down PDF indexing a bit even if no attachment is ever found. .TP .BI "pdfextrameta = "string Extract text from selected XMP metadata tags. This is a space-separated list of qualified XMP tag names. Each element can also include a translation to a Recoll field name, separated by a '|' character. If the second element is absent, the tag name is used as the Recoll field name. You will also need to add specifications to the "fields" file to direct processing of the extracted data. .TP .BI "pdfextrametafix = "fn Define name of XMP field editing script. This defines the name of a script to be loaded for editing XMP field values. The script should define a 'MetaFixer' class with a metafix() method which will be called with the qualified tag name and value of each selected field, for editing or erasing. A new instance is created for each document, so that the object can keep state, e.g. for eliminating duplicate values. .TP .BI "mhmboxquirks = "string Enable thunderbird/mozilla-seamonkey mbox format quirks. Set this for the directory where the email mbox files are stored. 
.SH SEE ALSO .PP recollindex(1) recoll(1) recoll-1.26.3/doc/man/recollindex.10000644000175000017500000001621113303776057013732 00000000000000.\" $Id: recollindex.1,v 1.7 2008-09-05 10:25:54 dockes Exp $ (C) 2005 J.F.Dockes\$ .TH RECOLLINDEX 1 "8 January 2006" .SH NAME recollindex \- indexing command for the Recoll full text search system .SH SYNOPSIS .B recollindex \-h .br .B recollindex [ .B \-c ] [ .B \-z|\-Z ] [ .B \-k ] .br .B recollindex [ .B \-c ] .B \-m [ .B \-w ] [ .B \-D ] [ .B \-x ] [ .B \-C ] [ .B \-n|-k ] .br .B recollindex [ .B \-c ] .B \-i [ .B \-Z \-k \-f \-P ] [] .br .B recollindex [ .B \-c ] .B \-r [ .B \-Z \-K \-e \-f ] [ .B \-p pattern ] .br .B recollindex [ .B \-c ] .B \-e [] .br .B recollindex [ .B \-c ] .B \-l .br .B recollindex [ .B \-c ] .B \-s .br .B recollindex [ .B \-c ] .B \-S .br .B recollindex [ .B \-c ] .B \-E .SH DESCRIPTION The .B recollindex command is the Recoll indexer. .PP As indexing can sometimes take a long time, the command can be interrupted by sending an interrupt (Ctrl-C, SIGINT) or terminate (SIGTERM) signal. Some time may elapse before the process exits, because it needs to properly flush and close the index. This can also be done from the recoll GUI (menu entry: File/Stop_Indexing). After such an interruption, the index will be somewhat inconsistent because some operations which are normally performed at the end of the indexing pass will have been skipped (for example, the stemming and spelling databases will be nonexistent or out of date). You just need to restart indexing at a later time to restore consistency. The indexing will restart at the interruption point (the full file tree will be traversed, but files that were indexed up to the interruption and for which the index is still up to date will not need to be reindexed). .PP The .B \-c option specifies the configuration directory name, overriding the default or $RECOLL_CONFDIR. .PP There are several modes of operation. .PP The normal mode will index the set of files described in the configuration file .B recoll.conf. This will incrementally update the database with files that changed since the last run. If option .B \-z is given, the database will be erased before starting. If option .B \-Z is given, the database will not be reset, but all files will be considered as needing reindexing (in place reset). .PP As of version 1.21, .B recollindex usually does not reprocess files which previously failed to index (for example because of a missing helper program). If option .B \-k is given, .B recollindex will try again to process all failed files. Please note that .B recollindex may also decide to retry failed files if the auxiliary checking script defined by the "checkneedretryindexscript" configuration variable indicates that this should happen. .PP If option .B \-m is given, recollindex is started for real time monitoring, using the file system monitoring package it was configured for (either fam, gamin, or inotify). This mode must have been explicitly configured when building the package; it is not available by default. The program will normally detach from the controlling terminal and become a daemon. If option .B \-D is given, it will stay in the foreground. Option .B \-w can be used to specify that the program should sleep for the specified time before indexing begins. The default value is 60. The daemon normally monitors the X11 session and exits when it is reset. Option .B \-x disables this X11 session monitoring (daemon will stay alive even if it cannot connect to the X11 server). 
You need to use this too if you use the daemon without an X11 context. You can use option .B \-n to skip the initial incremental indexing pass which is normally performed before monitoring starts. Once monitoring is started, the daemon normally monitors the configuration and restarts from scratch if a change is made. You can disable this with option .B \-C .PP .B recollindex \-i will index individual files into the database. The stem expansion and aspell databases will not be updated. The skippedPaths and skippedNames configuration variables will be used, so that some files may be skipped. You can tell recollindex to ignore skippedPaths and skippedNames by setting the .B \-f option. This allows fully custom file selection for a given subtree, for which you would add the top directory to skippedPaths, and use any custom tool to generate the file list (ie: a tool from a source code control system). When run this way, the indexer normally does not perform the deleted files purge pass, because it cannot be sure to have seen all the existing files. You can force a purge pass with .B \-P. .PP .B recollindex \-e will erase data for individual files from the database. The stem expansion databases will not be updated. .PP Options .B \-i and .B \-e can be combined. This will first perform the purge, then the indexing. .PP With options .B \-i or .B \-e , if no file names are given on the command line, they will be read from stdin, so that you could for example run: .PP find /path/to/dir \-print | recollindex \-e \-i .PP to force the reindexing of a directory tree (which has to exist inside the file system area defined by .I topdirs in recoll.conf). You could mostly accomplish the same thing with .PP find /path/to/dir \-print | recollindex \-Z \-i .PP The latter will perform a less thorough job of purging stale sub-documents though. .PP .B recollindex \-r mostly works like .B \-i , but the parameter is a single directory, which will be recursively updated. This mostly does nothing more than .B find topdir | recollindex \-i but it may be more convenient to use when started from another program. This retries failed files by default; use option .B \-K to change this. One or multiple .B \-p options can be used to set shell-type selection patterns (e.g.: *.pdf). .PP .B recollindex \-l will list the names of available language stemmers. .PP .B recollindex \-s will build the stem expansion database for a given language, which may or may not be part of the list in the configuration file. If the language is not part of the configuration, the stem expansion database will be deleted at the end of the next normal indexing run. You can get the list of stemmer names from the .B recollindex \-l command. Note that this is mostly for experimental use; the normal way to add a stemming language is to set it in the configuration, either by editing "recoll.conf" or by using the GUI indexing configuration dialog. .br At the time of this writing, the following languages are recognized (out of Xapian's stem.h): .IP \(bu danish .IP \(bu dutch .IP \(bu english Martin Porter's 2002 revision of his stemmer .IP \(bu english_lovins Lovins' stemmer .IP \(bu english_porter Porter's stemmer as described in his 1980 paper .IP \(bu finnish .IP \(bu french .IP \(bu german .IP \(bu italian .IP \(bu norwegian .IP \(bu portuguese .IP \(bu russian .IP \(bu spanish .IP \(bu swedish .PP .B recollindex \-S will rebuild the phonetic/orthographic index. This feature uses the .B aspell package, which must be installed on the system. 
.PP .B recollindex \-E will check the configuration file for the existence of topdirs and other relevant paths (to help catch typos). .SH SEE ALSO .PP recoll(1) recoll.conf(5) recoll-1.26.3/doc/user/0000755000175000017500000000000013570165410011611 500000000000000recoll-1.26.3/doc/user/usermanual.html0000644000175000017500000174573413566424763014636 00000000000000 Recoll user manual

Recoll user manual

Jean-Francois Dockes

Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license can be found at the following location: GNU web site.

This document introduces full text search notions and describes the installation and use of the Recoll application. This version describes Recoll 1.25.


Table of Contents

1. Introduction
1.1. Giving it a try
1.2. Full text search
1.3. Recoll overview
2. Indexing
2.1. Introduction
2.1.1. Indexing modes
2.1.2. Configurations, multiple indexes
2.1.3. Document types
2.1.4. Indexing failures
2.1.5. Recovery
2.2. Index storage
2.2.1. Xapian index formats
2.2.2. Security aspects
2.2.3. Special considerations for big indexes
2.3. Index configuration
2.3.1. Multiple indexes
2.3.2. Index case and diacritics sensitivity
2.3.3. Indexing threads configuration (Unix-like systems)
2.3.4. The index configuration GUI
2.4. Removable volumes
2.5. Unix-like systems: indexing visited WEB pages
2.6. Unix-like systems: using extended attributes
2.7. Unix-like systems: importing external tags
2.8. The PDF input handler
2.8.1. OCR with Tesseract
2.8.2. XMP fields extraction
2.8.3. PDF attachment indexing
2.9. Periodic indexing
2.10. Unix-like systems: real time indexing
3. Searching
3.1. Introduction
3.2. Searching with the Qt graphical user interface
3.2.1. Simple search
3.2.2. The default result list
3.2.3. The result table
3.2.4. Running arbitrary commands on result files (1.20 and later)
3.2.5. Displaying thumbnails
3.2.6. The preview window
3.2.7. The Query Fragments window
3.2.8. Complex/advanced search
3.2.9. The term explorer tool
3.2.10. Multiple indexes
3.2.11. Document history
3.2.12. Sorting search results and collapsing duplicates
3.2.13. Search tips, shortcuts
3.2.14. Saving and restoring queries (1.21 and later)
3.2.15. Customizing the search interface
3.3. Searching with the KDE KIO slave
3.3.1. What's this
3.3.2. Searchable documents
3.4. Searching on the command line
3.5. The query language
3.5.1. Range clauses
3.5.2. Modifiers
3.6. Anchored searches and wildcards
3.6.1. More about wildcards
3.6.2. Anchored searches
3.7. Using Synonyms (1.22)
3.8. Path translations
3.9. Search case and diacritics sensitivity
3.10. Desktop integration
3.10.1. Hotkeying recoll
3.10.2. The KDE Kicker Recoll applet
4. Programming interface
4.1. Writing a document input handler
4.1.1. Simple input handlers
4.1.2. "Multiple" handlers
4.1.3. Telling Recoll about the handler
4.1.4. Input handler output
4.1.5. Page numbers
4.2. Field data processing
4.3. Python API
4.3.1. Introduction
4.3.2. Interface elements
4.3.3. Log messages for Python scripts
4.3.4. Python search interface
4.3.5. Creating Python external indexers
4.3.6. Package compatibility with the previous version
5. Installation and configuration
5.1. Installing a binary copy
5.2. Supporting packages
5.3. Building from source
5.3.1. Prerequisites
5.3.2. Building
5.3.3. Installing
5.3.4. Python API package
5.3.5. Building on Solaris
5.4. Configuration overview
5.4.1. Environment variables
5.4.2. Recoll main configuration file, recoll.conf
5.4.3. The fields file
5.4.4. The mimemap file
5.4.5. The mimeconf file
5.4.6. The mimeview file
5.4.7. The ptrans file
5.4.8. Examples of configuration adjustments

Chapter 1. Introduction

This document introduces full text search notions and describes the installation and use of the Recoll application. It is updated for Recoll 1.25.

Recoll was for a long time dedicated to Unix-like systems. It was only lately (2015) ported to MS-Windows. Many references in this manual, especially file locations, are specific to Unix, and not valid on Windows, where some described features are also not available. The manual will be progressively updated. Until this happens, on Windows, most references to shared files can be translated by looking under the Recoll installation directory (esp. the Share subdirectory). The user configuration is stored by default under AppData/Local/Recoll inside the user directory, along with the index itself.

1.1. Giving it a try

If you do not like reading manuals (who does?) but wish to give Recoll a try, just install the application and start the recoll graphical user interface (GUI), which will ask permission to index your home directory, allowing you to search immediately after indexing completes.

Do not do this if your home directory contains a huge number of documents and you do not want to wait or are very short on disk space. In this case, you may first want to customize the configuration to restrict the indexed area (shortcut: from the recoll GUI go to: Preferences → Indexing configuration, then adjust the Top directories section).

On Unix-like systems, you may need to install the appropriate supporting applications for document types that need them (for example antiword for Microsoft Word files). The Recoll for Windows package is self-contained and includes most useful auxiliary programs.

1.2. Full text search

Recoll is a full text search application, which means that it finds your data by content rather than by external attributes (like the file name). You specify words (terms) which should or should not appear in the text you are looking for, and receive in return a list of matching documents, ordered so that the most relevant documents will appear first.

You do not need to remember in what file or email message you stored a given piece of information. You just ask for related terms, and the tool will return a list of documents where these terms are prominent, in a similar way to Internet search engines.

Full text search applications try to determine which documents are most relevant to the search terms you provide. Computer algorithms for determining relevance can be very complex, and in general are inferior to the power of the human mind to rapidly determine relevance. The quality of relevance guessing is probably the most important aspect when evaluating a search application. Recoll relies on the Xapian probabilistic information retrieval library to determine relevance.

In many cases, you are looking for all the forms of a word, including plurals, different tenses for a verb, or terms derived from the same root or stem (example: floor, floors, floored, flooring...). Queries are usually automatically expanded to all such related terms (words that reduce to the same stem). This can be prevented for searching for a specific form.

Stemming, by itself, does not accommodate for misspellings or phonetic searches. A full text search application may also support this form of approximation. For example, a search for aliterattion returning no result might propose alliteration, alteration, alterations, or altercation as possible replacement terms. Recoll bases its suggestions on the actual index contents, so that suggestions may be made for words which would not appear in a standard dictionary.

1.3. Recoll overview

Recoll uses the Xapian information retrieval library as its storage and retrieval engine. Xapian is a very mature package using a sophisticated probabilistic ranking model.

The Xapian library manages an index database which describes where terms appear in your document files. It efficiently processes the complex queries which are produced by the Recoll query expansion mechanism, and is in charge of the all-important relevance computation task.

Recoll provides the mechanisms and interface to get data into and out of the index. This includes translating the many possible document formats into pure text, handling term variations (using Xapian stemmers), and spelling approximations (using the aspell speller), interpreting user queries and presenting results.

In short, Recoll does the dirty footwork, while Xapian deals with the intelligent parts of the process.

The Xapian index can be big (roughly the size of the original document set), but it is not a document archive. Recoll can only display documents that still exist at the place from which they were indexed.

Recoll stores all internal data in Unicode UTF-8 format, and it can index many types of files with different character sets, encodings, and languages into the same index. It can process documents embedded inside other documents (for example a PDF document stored inside a Zip archive sent as an email attachment...), down to an arbitrary depth.

Stemming is the process by which Recoll reduces words to their radicals so that searching does not depend, for example, on a word being singular or plural (floor, floors), or on a verb tense (flooring, floored). Because the mechanisms used for stemming depend on the specific grammatical rules for each language, there is a separate Xapian stemmer module for most common languages where stemming makes sense.

Recoll stores the unstemmed versions of terms in the main index and uses auxiliary databases for term expansion (one for each stemming language), which means that you can switch stemming languages between searches, or add a language without needing a full reindex.

Storing documents written in different languages in the same index is possible, and commonly done. In this situation, you can specify several stemming languages for the index.

Recoll currently makes no attempt at automatic language recognition, which means that the stemmer will sometimes be applied to terms from other languages with potentially strange results. In practise, even if this introduces possibilities of confusion, this approach has been proven quite useful, and it is much less cumbersome than separating your documents according to what language they are written in.

By default, Recoll strips most accents and diacritics from terms, and converts them to lower case before either storing them in the index or searching for them. As a consequence, it is impossible to search for a particular capitalization of a term (US / us), or to discriminate two terms based on diacritics (sake / saké, mate / maté).

Recoll can optionally store the raw terms, without accent stripping or case conversion. In this configuration, default searches will behave as before, but it is possible to perform searches sensitive to case and diacritics. This is described in more detail in the section about index case and diacritics sensitivity.

Recoll uses many parameters to define exactly what to index, and how to classify and decode the source documents. These are kept in configuration files. A default configuration is copied into a standard location (usually something like /usr/share/recoll/examples) during installation. The default values set by the configuration files in this directory may be overridden by values set inside your personal configuration. With the default configuration, Recoll will index your home directory with generic parameters. The configuration can be customized either by editing the text files or by using configuration menus in the recoll GUI.

The indexing process is started automatically (after asking permission), the first time you execute the recoll GUI. Indexing can also be performed by executing the recollindex command. Recoll indexing is multithreaded by default when appropriate hardware resources are available, and can perform in parallel multiple tasks for text extraction, segmentation and index updates.

Searches are usually performed inside the recoll GUI, which has many options to help you find what you are looking for. However, there are other ways to query the index, which are described later in this manual (the command line tools, the KDE KIO slave, and the programming interfaces).

Chapter 2. Indexing

2.1. Introduction

Indexing is the process by which the set of documents is analyzed and the data entered into the database. Recoll indexing is normally incremental: documents will only be processed if they have been modified since the last run. On the first execution, all documents will need processing. A full index build can be forced later by specifying an option to the indexing command (recollindex -z or -Z).

recollindex skips files which caused an error during a previous pass. This is a performance optimization, and a new behaviour in version 1.21 (failed files were always retried by previous versions). The command line option -k can be set to retry failed files, for example after updating an input handler.
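
For reference, typical command lines for these cases, using the default configuration, would be:

recollindex        # normal incremental update
recollindex -z     # erase the index, then rebuild everything
recollindex -k     # incremental update, retrying previously failed files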

The following sections give an overview of different aspects of the indexing processes and configuration, with links to detailed sections.

Depending on your data, temporary files may be needed during indexing, some of them possibly quite big. You can use the RECOLL_TMPDIR or TMPDIR environment variables to determine where they are created (the default is to use /tmp). Using TMPDIR has the nice property that it may also be taken into account by auxiliary commands executed by recollindex.
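
For example, to have the temporary files created on a file system with more free space (the path below is just an illustration), you could run the indexer as:

RECOLL_TMPDIR=/big/scratch recollindex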

2.1.1. Indexing modes

Recoll indexing can be performed along two main modes:

  • Periodic (or batch) indexing: recollindex is executed at discrete times. On Unix-like systems, the typical usage is to have a nightly run programmed into your cron file (see the example crontab entry after this list). On Windows, this is the only mode available, and the indexer is usually started from the GUI (but there is nothing to prevent starting it from a command script).

  • Real time indexing (only available on Unix-like systems): recollindex runs permanently as a daemon and uses a file system alteration monitor (e.g. inotify) to detect file changes. New or updated files are indexed at once. Monitoring a big file system tree can consume significant system resources.
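
As an illustration of the periodic mode on a Unix-like system, a crontab entry like the following (the 3:00 schedule is arbitrary, and you may need to give the full path to recollindex) would update the default index every night:

0 3 * * * recollindex > /dev/null 2>&1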

Unix-like systems: choosing an indexing mode

The choice between the two methods is mostly a matter of preference, and they can be combined by setting up multiple indexes (ie: use periodic indexing on a big documentation directory, and real time indexing on a small home directory), or, with Recoll 1.24 and newer, by configuring the index so that only a subset of the tree will be monitored.

The choice of method and the parameters used can be configured from the recoll GUI: Preferences → Indexing schedule.

2.1.2. Configurations, multiple indexes

Recoll supports defining multiple indexes, each defined by its own configuration directory. A configuration directory contains several files which describe what should be indexed and how.

When recoll or recollindex is first executed, it creates a default configuration directory. This configuration is the one used for indexing and querying when no specific configuration is specified. It is located in $HOME/.recoll/ for Unix-like systems and %LOCALAPPDATA%\Recoll on Windows (typically C:\Users\[me]\Appdata\Local\Recoll).

All configuration parameters have defaults, defined in system-wide files. Without further customisation, the default configuration will process your complete home directory, with a reasonable set of defaults. It can be adjusted to process a different area of the file system, select files in different ways, and many other things.

In some cases, it may be useful to create additional configuration directories, for example, to separate personal and shared indexes, or to take advantage of the organization of your data to improve search precision.

In order to do this, you would create an empty directory in a location of your choice, and then instruct recoll or recollindex to use it by setting either a command line option (-c /some/directory), or an environment variable (RECOLL_CONFDIR=/some/directory). Any modification performed by the commands (e.g. configuration customisation or searches by recoll or index creation by recollindex) would then apply to the new directory and not to the default one.

Once multiple indexes are created, you can use each of them separately by setting the -c option or the RECOLL_CONFDIR environment variable when starting a command, to select the desired index.
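
For example, reusing the directory from the previous paragraph, the following commands would respectively update and search the additional index (setting RECOLL_CONFDIR=/some/directory in the environment has the same effect as the -c option):

recollindex -c /some/directory
recoll -c /some/directory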

It is also possible to instruct one configuration to query one or several other indexes in addition to its own, by using the External index function in the recoll GUI, or some other functions in the command line and programming tools.

A plausible usage scenario for the multiple index feature would be for a system administrator to set up a central index for shared data, which you may choose to search (or not) in addition to your personal data. Of course, there are other possibilities. For example, there are many cases where you know the subset of files that should be searched, and where narrowing the search can improve the results. You can achieve approximately the same effect with the directory filter in advanced search, but multiple indexes may have better performance and may be worth the trouble in some cases.

A more advanced use case would be to use multiple indexes to improve indexing performance, by updating several indexes in parallel (using multiple CPU cores and disks, or possibly several machines), and then merging them, or querying them in parallel.

See the section about configuring multiple indexes for more detail.

2.1.3. Document types

Recoll knows about quite a few different document types. The parameters for document types recognition and processing are set in configuration files.

Most file types, like HTML or word processing files, only hold one document. Some file types, like email folders or zip archives, can hold many individually indexed documents, which may themselves be compound ones. Such hierarchies can go quite deep, and Recoll can process, for example, a LibreOffice document stored as an attachment to an email message inside an email folder archived in a zip file...

recollindex processes plain text, HTML, OpenDocument (Open/LibreOffice), email formats, and a few others internally.

Other file types (ie: postscript, pdf, ms-word, rtf ...) need external applications for preprocessing. The list is in the installation section. After every indexing operation, Recoll updates a list of commands that would be needed for indexing existing file types. This list can be displayed by selecting the menu option File → Show Missing Helpers in the recoll GUI. It is stored in the missing text file inside the configuration directory.

By default, Recoll will try to index any file type that it has a way to read. This is sometimes not desirable, and there are ways to either exclude some types, or on the contrary define a positive list of types to be indexed. In the latter case, any type not in the list will be ignored.

Excluding files by name is done by adding wildcard name patterns to the skippedNames list, which can be edited from the GUI Index configuration menu. Excluding by type is done by setting the excludedmimetypes list in the configuration file (1.20 and later). This can be redefined for subdirectories.
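
For example, to exclude a couple of document types from indexing (the MIME types below are just an illustration), you could set:

        excludedmimetypes = audio/mpeg image/jpeg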

You can also define an exclusive list of MIME types to be indexed (no others will be indexed), by setting the indexedmimetypes configuration variable. Example:

        indexedmimetypes = text/html application/pdf
      

It is possible to redefine this parameter for subdirectories. Example:

      [/path/to/my/dir]
      indexedmimetypes = application/pdf
    

(When using sections like this, don't forget that they remain in effect until the end of the file or another section indicator).

excludedmimetypes or indexedmimetypes can be set either by editing the configuration file (recoll.conf) for the index, or by using the GUI index configuration tool.

Note about MIME types

When editing the indexedmimetypes or excludedmimetypes lists, you should use the MIME values listed in the mimemap file or in Recoll result lists in preference to file -i output: there are a number of differences. The file -i output should only be used for files without extensions, or for which the extension is not listed in mimemap.

2.1.4. Indexing failures

Indexing may fail for some documents, for a number of reasons: a helper program may be missing, the document may be corrupt, we may fail to uncompress a file because no file system space is available, etc.

Recoll versions prior to 1.21 always retried to index files which had previously caused an error. This guaranteed that anything that may have become indexable (for example because a helper had been installed) would be indexed. However this was bad for performance because some indexing failures may be quite costly (for example failing to uncompress a big file because of insufficient disk space).

The indexer in Recoll versions 1.21 and later does not retry failed files by default. Retrying will only occur if an explicit option (-k) is set on the recollindex command line, or if a script executed when recollindex starts up says so. The script is defined by a configuration variable (checkneedretryindexscript), and makes a rather lame attempt at deciding if a helper command may have been installed, by checking if any of the common bin directories have changed.

2.1.5. Recovery

In the rare case where the index becomes corrupted (which can signal itself by weird search results or crashes), the index files need to be erased before restarting a clean indexing pass. Just delete the xapiandb directory (see next section), or, alternatively, start the next recollindex with the -z option, which will reset the database before indexing. The difference between the two methods is that the second will not change the current index format, which may be undesirable if a newer format is supported by the Xapian version.
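
With the default configuration location, the first method amounts to running the following (make sure that no indexing process is active first; the path is the default described in the next section):

rm -rf ~/.recoll/xapiandb
recollindex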

2.2. Index storage

The default location for the index data is the xapiandb subdirectory of the Recoll configuration directory, typically $HOME/.recoll/xapiandb/. This can be changed via two different methods (with different purposes):

  1. For a given configuration directory, you can specify a non-default storage location for the index by setting the dbdir parameter in the configuration file (see the configuration section; a short example follows this list). This method would mainly be of use if you wanted to keep the configuration directory in its default location, but desired another location for the index, typically out of disk occupation or performance concerns.

  2. You can specify a different configuration directory by setting the RECOLL_CONFDIR environment variable, or using the -c option to the Recoll commands. This method would typically be used to index different areas of the file system to different indexes. For example, if you were to issue the following command:

                  recoll -c ~/.indexes-email

    Then Recoll would use configuration files stored in ~/.indexes-email/ and (unless specified otherwise in recoll.conf) would look for the index in ~/.indexes-email/xapiandb/.

    Using multiple configuration directories and configuration options allows you to tailor multiple configurations and indexes to handle whatever subset of the available data you wish to make searchable.
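
As an example of the first method, the following recoll.conf line would store the index data outside the configuration directory (the path is purely illustrative):

        dbdir = /data/recoll/xapiandb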

The size of the index is determined by the size of the set of documents, but the ratio can vary a lot. For a typical mixed set of documents, the index size will often be close to the data set size. In specific cases (a set of compressed mbox files for example), the index can become much bigger than the documents. It may also be much smaller if the documents contain a lot of images or other non-indexed data (an extreme example being a set of mp3 files where only the tags would be indexed).

Of course, images, sound and video do not increase the index size, which means that in most cases, the space used by the index will be negligible against the total amount of data on the computer.

The index data directory (xapiandb) only contains data that can be completely rebuilt by an index run (as long as the original documents exist), and it can always be destroyed safely.

2.2.1. Xapian index formats

Xapian versions usually support several formats for index storage. A given major Xapian version will have a current format, used to create new indexes, and will also support the format from the previous major version.

Xapian will not convert automatically an existing index from the older format to the newer one. If you want to upgrade to the new format, or if a very old index needs to be converted because its format is not supported any more, you will have to explicitly delete the old index (typically ~/.recoll/xapiandb), then run a normal indexing command. Using recollindex option -z would not work in this situation.

2.2.2. Security aspects

The Recoll index does not hold complete copies of the indexed documents (it almost does after version 1.24). But it does hold enough data to allow for an almost complete reconstruction. If confidential data is indexed, access to the database directory should be restricted.

Recoll will create the configuration directory with a mode of 0700 (access by owner only). As the index data directory is by default a sub-directory of the configuration directory, this should result in appropriate protection.

If you use another setup, you should think of the kind of protection you need for your index, set the directory and files access modes appropriately, and also maybe adjust the umask used during index updates.

2.2.3. Special considerations for big indexes

This need only concern you if your index is going to be bigger than around 5 GBytes. Beyond 10 GBytes, it becomes a serious issue. Most people have much smaller indexes. For reference, 5 GBytes would be around 2000 bibles, a lot of text. If you have a huge text dataset (remember: images don't count, the text content of PDFs is typically less than 5% of the file size), read on.

The amount of writing performed by Xapian during index creation is not linear with the index size (it is somewhere between linear and quadratic). For big indexes this becomes a performance issue, and may even be an SSD disk wear issue.

The problem can be mitigated by observing the following rules:

  • Partition the data set and create several indexes of reasonable size rather than a huge one. These indexes can then be queried in parallel (using the Recoll external indexes facility), or merged using xapian-compact.

  • Have a lot of RAM available and set the idxflushmb Recoll configuration parameter as high as you can without swapping (experimentation will be needed). 200 would be a minimum in this context.

  • Use Xapian 1.4.10 or newer, as this version brought a significant improvement in the amount of writes.

2.3. Index configuration

Variables set inside the Recoll configuration files control which areas of the file system are indexed, and how files are processed. These variables can be set either by editing the text files or by using the dialogs in the recoll GUI.

The first time you start recoll, you will be asked whether or not you would like it to build the index. If you want to adjust the configuration before indexing, just click Cancel at this point, which will get you into the configuration interface. If you exit at this point, recoll will have created a default configuration directory with empty configuration files, which you can then edit.

The configuration is documented inside the installation chapter of this document, or in the recoll.conf(5) manual page. Both documents are automatically generated from the comments inside the configuration file.

The most immediately useful variable is probably topdirs, which lists the subtrees and files to be indexed.
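
For example, a topdirs line restricting indexing to a few areas could look like the following (the paths are illustrative; entries containing white space should be double-quoted):

        topdirs = ~/Documents ~/mail /data/projects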

The applications needed to index file types other than text, HTML or email (ie: pdf, postscript, ms-word...) are described in the external packages section.

As of Recoll 1.18 there are two incompatible types of Recoll indexes, depending on the treatment of character case and diacritics. A further section describes the two types in more detail.

2.3.1. Multiple indexes

Multiple Recoll indexes can be created by using several configuration directories which are typically set to index different areas of the file system.

A specific index can be selected by setting the RECOLL_CONFDIR environment variable or giving the -c option to recoll and recollindex.

The recollindex program, used for creating or updating indexes, always works on a single index. The different configurations are entirely independent (no parameters are ever shared between configurations when indexing).

All the search interfaces (recoll, recollq, the Python API, etc.) operate with a main configuration, from which both configuration and index data are used, and can also query data from multiple additional indexes. Only the index data from the latter is used; their configuration parameters are ignored. This implies that some parameters should be consistent among index configurations which are to be used together.

When searching, the current main index (defined by RECOLL_CONFDIR or -c) is always active. If this is undesirable, you can set up your base configuration to index an empty directory.

Index configuration parameters can be set either by using a text editor on the files, or, for most parameters, by using the recoll index configuration GUI. In the latter case, the configuration directory for which parameters are modified is the one which was selected by RECOLL_CONFDIR or the -c parameter, and there is no way to switch configurations within the GUI.

See the configuration section for a detailed description of the parameters.

Some configuration parameters must be consistent among a set of multiple indexes used together for searches. Most importantly, all indexes to be queried concurrently must have the same option concerning character case and diacritics stripping, but there are other constraints. Most of the relevant parameters affect the term generation.

Using multiple configurations implies a small level of command line or file manager usage. The user must explicitly create additional configuration directories; the GUI will not do it. This is to avoid mistakenly creating additional directories when an argument is mistyped. Also, the GUI or the indexer must be launched with a specific option or environment to work on the right configuration.

In practise: creating and using an additional index

Initially creating the configuration and index:

mkdir /path/to/my/new/config

Configuring the new index can be done from the recoll GUI, launched from the command line to pass the -c option (you could create a desktop file to do it for you), and then using the GUI index configuration tool to set up the index.

recoll -c /path/to/my/new/config

Alternatively, you can just start a text editor on the main configuration file:

someEditor /path/to/my/new/config/recoll.conf

Creating and updating the index can be done from the command line:

recollindex -c /path/to/my/new/config

or from the File menu of a GUI launched with the same option (recoll, see above).

The same GUI would also let you set up batch indexing for the new index. Real time indexing can only be set up from the GUI for the default index (the menu entry will be inactive if the GUI was started with a non-default -c option).

The new index can be queried alone with

recoll -c /path/to/my/new/config

Or, in parallel with the default index, by starting recoll without a -c option, and using the Preferences → External Index Dialog menu.

2.3.2. Index case and diacritics sensitivity

As of Recoll version 1.18 you have a choice of building an index with terms stripped of character case and diacritics, or one with raw terms. For a source term of Résumé, the former will store resume, the latter Résumé.

Each type of index allows performing searches insensitive to case and diacritics: with a raw index, the user entry will be expanded to match all case and diacritics variations present in the index. With a stripped index, the search term will be stripped before searching.

A raw index allows using case and diacritics to discriminate between terms, e.g., returning different results when searching for US and us or resume and résumé. Read the section about search case and diacritics sensitivity for more details.

The type of index to be created is controlled by the indexStripChars configuration variable which can only be changed by editing the configuration file. Any change implies an index reset (not automated by Recoll), and all indexes in a search must be set in the same way (again, not checked by Recoll).

Recoll creates a stripped index by default if indexStripChars is not set.
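
For example, to build a raw index instead, you would set the following in recoll.conf (remembering that this requires resetting and rebuilding the index):

        indexStripChars = 0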

As a cost for added capability, a raw index will be slightly bigger than a stripped one (around 10%). Also, searches will be more complex, so probably slightly slower, and the feature is relatively little used, so that a certain amount of weirdness cannot be excluded.

One of the most adverse consequences of using a raw index is that some phrase and proximity searches may become impossible: because each term needs to be expanded, and all combinations searched for, the multiplicative expansion may become unmanageable.

2.3.3. Indexing threads configuration (Unix-like systems)

The Recoll indexing process recollindex can use multiple threads to speed up indexing on multiprocessor systems. The work done to index files is divided in several stages and some of the stages can be executed by multiple threads. The stages are:

  1. File system walking: this is always performed by the main thread.

  2. File conversion and data extraction.

  3. Text processing (splitting, stemming, etc.).

  4. Xapian index update.

You can also read a longer document about the transformation of Recoll indexing to multithreading.

The threads configuration is controlled by two configuration file parameters.

thrQSizes

This variable defines the job input queues configuration. There are three possible queues for stages 2, 3 and 4, and this parameter should give the queue depth for each stage (three integer values). If a value of -1 is used for a given stage, no queue is used, and the thread will go on performing the next stage. In practise, deep queues have not been shown to increase performance. A value of 0 for the first queue tells Recoll to perform autoconfiguration (no need for anything else in this case, thrTCounts is not used) - this is the default configuration.

thrTCounts

This defines the number of threads used for each stage. If a value of -1 is used for one of the queue depths, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the Xapian index is necessarily single-threaded (and protected by a mutex).

Note

If the first value in thrQSizes is 0, thrTCounts is ignored.

The following example would use three queues (of depth 2), and 4 threads for converting source documents, 2 for processing their text, and one to update the index. This was tested to be the best configuration on the test system (quadri-processor with multiple disks).

          thrQSizes = 2 2 2
          thrTCounts =  4 2 1
        

The following example would use a single queue, and the complete processing for each document would be performed by a single thread (several documents will still be processed in parallel in most cases). The threads will use mutual exclusion when entering the index update stage. In practise the performance would be close to the previous case in general, but worse in certain cases (e.g. a Zip archive would be processed purely sequentially), so the previous approach is preferred. YMMV... The last 2 values for thrTCounts are ignored.

          thrQSizes = 2 -1 -1
          thrTCounts =  6 1 1
        

The following example would disable multithreading. Indexing will be performed by a single thread.

          thrQSizes = -1 -1 -1
        

2.3.4. The index configuration GUI

Most parameters for a given index configuration can be set from a recoll GUI running on this configuration (either as default, or by setting RECOLL_CONFDIR or the -c option.)

The interface is started from the Preferences → Index Configuration menu entry. It is divided into four tabs, Global parameters, Local parameters, Web history (which is explained in the next section) and Search parameters.

The Global parameters tab allows setting global variables, like the lists of top directories, skipped paths, or stemming languages.

The Local parameters tab allows setting variables that can be redefined for subdirectories. This second tab has an initially empty list of customisation directories, to which you can add. The variables are then set for the currently selected directory (or at the top level if the empty line is selected).

The Search parameters section defines parameters which are used at query time, but are global to an index and affect all search tools, not only the GUI.

The meaning for most entries in the interface is self-evident and documented by a ToolTip popup on the text label. For more detail, you will need to refer to the configuration section of this guide.

The configuration tool normally respects the comments and most of the formatting inside the configuration file, so that it is quite possible to use it on hand-edited files, which you might nevertheless want to backup first...

2.4. Removable volumes

Recoll used to have no support for indexing removable volumes (portable disks, USB keys, etc.). Recent versions have improved the situation and support indexing removable volumes in two different ways:

  • By indexing the volume in the main, fixed, index, and ensuring that the volume data is not purged if the indexing runs while the volume is mounted. (Recoll 1.25.2).

  • By storing a volume index on the volume itself (Recoll 1.24).

Indexing removable volumes in the main index

As of version 1.25.2, Recoll provides a simple way to ensure that the index data for an absent volume will not be purged. Two conditions must be met:

  • The volume mount point must be a member of the topdirs list.

  • The mount directory must be empty (when the volume is not mounted).

If recollindex finds that one of the topdirs is empty when starting up, any existing data for the tree will be preserved by the indexing pass (no purge for this area).
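
As an illustration, with a removable volume mounted on /media/me/usbdrive (a hypothetical mount point, whose directory must be empty when the volume is absent), the topdirs line could read:

        topdirs = ~/Documents /media/me/usbdrive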

Self contained volumes

As of Recoll 1.24, it has become possible to build self-contained datasets including a Recoll configuration directory and index together with the indexed documents, and to move such a dataset around (for example copying it to an USB drive), without having to adjust the configuration for querying the index.

Note

This is a query-time feature only. The index must only be updated in its original location. If an update is necessary in a different location, the index must be reset.

The principle of operation is that the configuration stores the location of the original configuration directory, which must reside on the movable volume. If the volume is later mounted elsewhere, Recoll adjusts the paths stored inside the index by the difference between the original and current locations of the configuration directory.

To make a long story short, here follows a script to create a Recoll configuration and index under a given directory (given as single parameter). The resulting data set (files + recoll directory) can later be moved to a CDROM or thumb drive. Longer explanations come after the script.

#!/bin/sh

fatal()
{
    echo $*;exit 1
}
usage()
{
    fatal "Usage: init-recoll-volume.sh <top-directory>"
}

test $# = 1 || usage
topdir=$1
test -d "$topdir" || fatal $topdir should be a directory

confdir="$topdir/recoll-config"
test ! -d "$confdir" || fatal $confdir should not exist

mkdir "$confdir"
cd "$topdir"
topdir=`pwd`
cd "$topdir/recoll-config" # topdir is now absolute, so this also works if a relative path was given
confdir=`pwd`

(echo topdirs = '"'$topdir'"'; \
 echo orgidxconfdir = $topdir/recoll-config) > "$confdir/recoll.conf"

recollindex -c "$confdir"

The examples below will assume that you have a dataset under /home/me/mydata/, with the index configuration and data stored inside /home/me/mydata/recoll-confdir.

In order to be able to run queries after the dataset has been moved, you must ensure the following:

  • The main configuration file must define the orgidxconfdir variable to be the original location of the configuration directory (orgidxconfdir=/home/me/mydata/recoll-confdir must be set inside /home/me/mydata/recoll-confdir/recoll.conf in the example above).

  • The configuration directory must exist with the documents, somewhere under the directory which will be moved. E.g. if you are moving /home/me/mydata around, the configuration directory must exist somewhere below this point, for example /home/me/mydata/recoll-confdir, or /home/me/mydata/sub/recoll-confdir.

  • You should keep the default locations for the index elements which are relative to the configuration directory by default (principally dbdir). Only the paths referring to the documents themselves (e.g. topdirs values) should be absolute (in general, they are only used when indexing anyway).

Only the first point needs an explicit user action, the Recoll defaults are compatible with the third one, and the second is natural.

If, after the move, the configuration directory needs to be copied out of the dataset (for example because the thumb drive is too slow), you can set the curidxconfdir variable inside the copied configuration to define the location of the moved one. For example if /home/me/mydata is now mounted onto /media/me/somelabel, but the configuration directory and index have been copied to /tmp/tempconfig, you would set curidxconfdir to /media/me/somelabel/recoll-confdir inside /tmp/tempconfig/recoll.conf. orgidxconfdir would still be /home/me/mydata/recoll-confdir in the original and the copy.
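
Summing up this example as configuration file contents (the paths are the ones used above, to be adapted to your own layout):

        # /tmp/tempconfig/recoll.conf (the copied configuration)
        orgidxconfdir = /home/me/mydata/recoll-confdir
        curidxconfdir = /media/me/somelabel/recoll-confdir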

If you are regularly copying the configuration out of the dataset, it will be useful to write a script to automate the procedure. This can't really be done inside Recoll because there are probably many possible variants. One example would be to copy the configuration to make it writable, but keep the index data on the medium because it is too big - in this case, the script would also need to set dbdir in the copied configuration.

The same set of modifications (Recoll 1.24) has also made it possible to run queries from a readonly configuration directory (with slightly reduced function of course, such as not recording the query history).

2.5. Unix-like systems: indexing visited WEB pages

With the help of a Firefox extension, Recoll can index the Internet pages that you visit. The extension has a long history: it was initially designed for the Beagle indexer, then adapted to Recoll and the Firefox XUL API. A new version of the addon has been written to work with the WebExtensions API, which is the only one supported after Firefox version 57.

The extension works by copying visited WEB pages to an indexing queue directory, which Recoll then processes, indexing the data, storing it into a local cache, then removing the file from the queue.

Because the WebExtensions API introduces more constraints on what extensions can do, the new version works with one more step: the files are first created in the browser default downloads location (typically $HOME/Downloads), then moved by a script into the old queue location. The script is automatically executed by Recoll indexer versions 1.23.5 and newer. It could conceivably be executed independently to make the new browser extension compatible with an older Recoll version (the script is named recoll-we-move-files.py).

Note

For the WebExtensions-based version to work, it is necessary to set the webdownloadsdir value in the configuration if it was changed from the default $HOME/Downloads in the browser preferences.

The visited WEB pages indexing feature can be enabled on the Recoll side from the GUI Index configuration panel, or by editing the configuration file (set processwebqueue to 1).
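
For example, with ~/.recoll as the configuration directory (the MyDownloads path is only a placeholder):

        # Enable processing of the browser extension queue
        echo "processwebqueue = 1" >> ~/.recoll/recoll.conf
        # Only needed if the downloads location was changed in the browser
        echo "webdownloadsdir = $HOME/MyDownloads" >> ~/.recoll/recoll.conf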

A current pointer to the extension can be found, along with up-to-date instructions, on the Recoll wiki.

A copy of the indexed WEB pages is retained by Recoll in a local cache (from which previews can be fetched). The cache size can be adjusted from the Index configuration / Web history panel. Once the maximum size is reached, old pages are purged - both from the cache and the index - to make room for new ones, so you need to explicitly archive in some other place the pages that you want to keep indefinitely.

2.6. Unix-like systems: using extended attributes

User extended attributes are named pieces of information that most modern file systems can attach to any file.

Recoll processes extended attributes as document fields by default.

A freedesktop standard defines a few special attributes, which are handled as such by Recoll:

mime_type

If set, this overrides any other determination of the file MIME type.

charset

If set, this defines the file character set (mostly useful for plain text files).

By default, other attributes are handled as Recoll fields of the same name.

On Linux, the user prefix is removed from the name.

The name translation can be configured more precisely inside the fields configuration file.
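
As an illustration, on Linux an attribute can be set with the setfattr command (from the attr package); because the user prefix is removed, the value below would end up in a Recoll field named tags (the file path is just an example):

        setfattr -n user.tags -v "travel,2019" /home/me/photos/trip.pdf
        getfattr -d /home/me/photos/trip.pdf     # check the attributes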

2.7. Unix-like systems: importing external tags

During indexing, it is possible to import metadata for each file by executing commands. This allows, for example, extracting tag data from an external application and storing it in a field for indexing.

See the section about the metadatacmds field in the main configuration chapter for a description of the configuration syntax.

For example, if you would want Recoll to use tags managed by tmsu in a field named tags, you would add the following to the configuration file:

[/some/area/of/the/fs]
metadatacmds = ; tags = tmsu tags %f
      

Note

Depending on the tmsu version, you may need/want to add options like --database=/some/db.

You may want to restrict this processing to a subset of the directory tree, because it may slow down indexing a bit; this is the purpose of the [/some/area/of/the/fs] section header in the example.

Note the initial semi-colon after the equal sign.

In the example above, the output of tmsu is used to set a field named tags. The field name is arbitrary and could be tmsu or myfield just the same, but tags is an alias for the standard Recoll keywords field, and the tmsu output will just augment its contents. This will avoid the need to extend the field configuration.

Once re-indexing is performed (you will need to force the file reindexing, Recoll will not detect the need by itself), you will be able to search from the query language, through any of its aliases: tags:some/alternate/values or tags:all,these,values (the compact field search syntax is supported for recoll 1.20 and later. For older versions, you would need to repeat the tags: specifier for each term, e.g. tags:some OR tags:alternate).

Tag changes will not be detected by the indexer if the file itself did not change. One possible workaround would be to update the file ctime when you modify the tags, which would be consistent with how extended attributes function. A pair of chmod commands, or a touch -a, could accomplish this. Alternatively, just couple the tag update with a recollindex -e -i /path/to/the/file.
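
For instance, a tagging operation could be coupled with an immediate index update along these lines (the file path and tag names are just examples):

        tmsu tag /some/area/of/the/fs/report.pdf projectx urgent
        recollindex -e -i /some/area/of/the/fs/report.pdf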

2.8. The PDF input handler

The PDF format is very important for scientific and technical documentation, and document archival. It has extensive facilities for storing metadata along with the document, and these facilities are actually used in the real world.

In consequence, the rclpdf.py PDF input handler has more complex capabilities than most others, and it is also more configurable. Specifically, rclpdf.py can automatically use tesseract to perform OCR if the document text is empty, it can be configured to extract specific metadata tags from an XMP packet, and to extract PDF attachments.

2.8.1. OCR with Tesseract

If both tesseract and pdftoppm (generally from the poppler-utils package) are installed, the PDF handler may attempt OCR on PDF files with no text content. This is controlled by the pdfocr configuration variable, which is false by default because OCR is very slow.

The choice of language is very important for successful OCR. Recoll currently has no way to determine this from the document itself. You can set the language to use through the contents of a .ocrpdflang text file in the same directory as the PDF document, or through the RECOLL_TESSERACT_LANG environment variable, or through the contents of an ocrpdf text file inside the configuration directory. If none of the above are used, Recoll will try to guess the language from the NLS environment.
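
For example, assuming that ~/.recoll is the configuration directory and that most documents are in French, with one German directory as an exception (the directory path is just an example; language codes are the usual Tesseract ones):

        # Enable OCR for PDF files with no text content (slow)
        echo "pdfocr = 1" >> ~/.recoll/recoll.conf
        # Default OCR language for this index
        echo fra > ~/.recoll/ocrpdf
        # Per-directory override, next to the PDF documents themselves
        echo deu > /path/to/german/docs/.ocrpdflang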

2.8.2. XMP fields extraction

The rclpdf.py script in Recoll version 1.23.2 and later can extract XMP metadata fields by executing the pdfinfo command (usually found with poppler-utils). This is controlled by the pdfextrameta configuration variable, which specifies which tags to extract and, possibly, how to rename them.

The pdfextrametafix variable can be used to designate a file with Python code to edit the metadata fields (available for Recoll 1.23.3 and later. 1.23.2 has equivalent code inside the handler script). Example:

        import sys
        import re

        class MetaFixer(object):
            def __init__(self):
                pass

            def metafix(self, nm, txt):
                if nm == 'bibtex:pages':
                    txt = re.sub(r'--', '-', txt)
                elif nm == 'someothername':
                    # do something else
                    pass
                elif nm == 'stillanother':
                    # etc.
                    pass
                return txt

            def wrapup(self, metaheaders):
                pass

If the 'metafix()' method is defined, it is called for each metadata field. A new MetaFixer object is created for each PDF document (so the object can keep state for, for example, eliminating duplicate values). If the 'wrapup()' method is defined, it is called at the end of XMP fields processing with the whole metadata as parameter, as an array of '(nm, val)' pairs, allowing an alternate approach for editing or adding/deleting fields.

2.8.3. PDF attachment indexing

If pdftk is installed, and if the pdfattach configuration variable is set, the PDF input handler will try to extract PDF attachments for indexing as sub-documents of the PDF file. This is disabled by default, because it slows down PDF indexing a bit even if no attachment is ever found (PDF attachments are uncommon in my experience).
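
If you do want attachments indexed, and assuming again that ~/.recoll is the configuration directory:

        echo "pdfattach = 1" >> ~/.recoll/recoll.conf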

2.9. Periodic indexing

Running the indexer

The recollindex program performs index updates. You can start it either from the command line or from the File menu in the recoll GUI program. When started from the GUI, the indexing will run on the same configuration recoll was started on. When started from the command line, recollindex will use the RECOLL_CONFDIR variable or accept a -c confdir option to specify a non-default configuration directory.
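
For example, assuming a non-default configuration directory at ~/.recoll-work (an arbitrary name), the two following invocations are equivalent:

        recollindex -c ~/.recoll-work
        RECOLL_CONFDIR=~/.recoll-work recollindex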

If the recoll program finds no index when it starts, it will automatically start indexing (except if canceled).

The GUI File menu has entries to start or stop the current indexing operation.

When no indexing is running, you have a choice of updating the index or rebuilding it (the first choice only processes changed files, the second one zeroes the index before starting so that all files are processed).

On Linux, the recollindex indexing process can be interrupted by sending an interrupt (Ctrl-C, SIGINT) or terminate (SIGTERM) signal.

On Linux and Windows, the GUI can be used to manage the indexing operation. Stopping the indexer can be done from the recoll GUI File -> Stop Indexing menu entry.

When stopped, some time may elapse before recollindex exits, because it needs to properly flush and close the index.

After an interruption, the index will be somewhat inconsistent because some operations which are normally performed at the end of the indexing pass will have been skipped (for example, the stemming and spelling databases will be nonexistent or out of date). You just need to restart indexing at a later time to restore consistency. The indexing will restart at the interruption point (the full file tree will be traversed, but files that were indexed up to the interruption and for which the index is still up to date will not need to be reindexed).

recollindex has many options which are listed in its manual page. Only a few will be described here.

Option -z will reset the index when starting. This is almost the same as destroying the index files (the nuance is that the Xapian format version will not be changed).

Option -Z will force the update of all documents without resetting the index first. This will not have the "clean start" aspect of -z, but the advantage is that the index will remain available for querying while it is rebuilt, which can be a significant advantage if it is very big (some installations need days for a full index rebuild).

Option -k will force retrying files which previously failed to be indexed, for example because of a missing helper program.
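
A few typical invocations, for illustration (nothing here beyond the options just described):

        recollindex -z     # reset the index, then reindex everything
        recollindex -Z     # reindex all documents, index stays usable for queries
        recollindex -k     # retry files which previously failed to be indexed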

Of special interest also, maybe, are the -i and -f options. -i allows indexing an explicit list of files (given as command line parameters or read on stdin). -f tells recollindex to ignore file selection parameters from the configuration. Together, these options allow building a custom file selection process for some area of the file system, by adding the top directory to the skippedPaths list and using an appropriate file selection method to build the file list to be fed to recollindex -if. Trivial example:

          find . -name indexable.txt -print | recollindex -if
        

recollindex -i will not descend into subdirectories specified as parameters, but just add them as index entries. It is up to the external file selection method to build the complete file list.

Linux: using cron to automate indexing

The most common way to set up indexing is to have a cron task execute it every night. For example the following crontab entry would do it every day at 3:30AM (supposing recollindex is in your PATH):

        30 3 * * * recollindex > /some/tmp/dir/recolltrace 2>&1
        

Or, using anacron:

        1  15  recollindex  su mylogin -c "recollindex > /tmp/rcltraceme 2>&1"
        

The Recoll GUI has dialogs to manage crontab entries for recollindex. You can reach them from the Preferences -> Indexing Schedule menu. They only work with the good old cron, and do not give access to all features of cron scheduling.

The usual command to edit your crontab is crontab -e (which will usually start the vi editor to edit the file). You may have more sophisticated tools available on your system.

Please be aware that there may be differences between your usual interactive command line environment and the one seen by crontab commands. Especially the PATH variable may be of concern. Please check the crontab manual pages about possible issues.
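
If recollindex or its helper programs are not found in the PATH that cron provides, one simple workaround is to set PATH at the top of the crontab (the directories below are only an example):

        PATH=/usr/local/bin:/usr/bin:/bin
        30 3 * * * recollindex > /some/tmp/dir/recolltrace 2>&1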

2.10. Unix-like systems: real time indexing

Real time monitoring/indexing is performed by starting the recollindex -m command. With this option, recollindex will detach from the terminal and become a daemon, permanently monitoring file changes and updating the index.

In this situation, the recoll GUI File menu makes two operations available: 'Stop' and 'Trigger incremental pass'.

While it is convenient that data is indexed in real time, repeated indexing can generate a significant load on the system when files such as email folders change. Also, monitoring large file trees by itself significantly taxes system resources. You probably do not want to enable it if your system is short on resources. Periodic indexing is adequate in most cases.

As of Recoll 1.24, you can set the monitordirs configuration variable to specify that only a subset of your indexed files will be monitored for instant indexing. In this situation, an incremental pass on the full tree can be triggered by either restarting the indexer, or just running recollindex, which will notify the running process. The recoll GUI also has a menu entry for this.
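
For example, to monitor only a mail directory for instant indexing (assuming that ~/.recoll is the configuration directory; the single directory is just an example):

        echo "monitordirs = $HOME/Mail" >> ~/.recoll/recoll.conf
        # Later, trigger an incremental pass on the whole tree; this just
        # notifies the running daemon
        recollindex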

Automatic daemon start

Under KDE, Gnome and some other desktop environments, the daemon can be automatically started when you log in, by creating a desktop file inside the ~/.config/autostart directory. This can be done for you by the Recoll GUI. Use the Preferences->Indexing Schedule menu.
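
If you prefer to create the autostart entry by hand (or your desktop environment is not covered by the GUI), a minimal desktop file along the following lines should work; the file created by the GUI may contain additional keys:

mkdir -p ~/.config/autostart
cat > ~/.config/autostart/recollindex.desktop <<'EOF'
[Desktop Entry]
Type=Application
Name=Recoll real time indexer
Exec=recollindex -m
EOF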

With older X11 setups, starting the daemon is normally performed as part of the user session script.

The rclmon.sh script can be used to easily start and stop the daemon. It can be found in the examples directory (typically /usr/local/[share/]recoll/examples).

For example, a good old xdm-based session could have a .xsession script with the following lines at the end:

recollconf=$HOME/.recoll-home
recolldata=/usr/local/share/recoll
RECOLL_CONFDIR=$recollconf $recolldata/examples/rclmon.sh start
fvwm

The indexing daemon gets started, then the window manager, for which the session waits.

By default the indexing daemon will monitor the state of the X11 session and exit when it ends, so it is not necessary to kill it explicitly. (The X11 server monitoring can be disabled with option -x to recollindex.)

If you use the daemon entirely outside of an X11 session, you need to add option -x to disable X11 session monitoring (else the daemon will not start).
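
For example, on a headless machine or from a non-X11 session startup script:

        recollindex -m -x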

Miscellaneous details

By default, the messages from the indexing daemon will be sent to the same file as those from the interactive commands (logfilename). You may want to change this by setting the daemlogfilename and daemloglevel configuration parameters. Also the log file will only be truncated when the daemon starts. If the daemon runs permanently, the log file may grow quite big, depending on the log level.

Increasing resources for inotify. On Linux systems, monitoring a big tree may need increasing the resources available to inotify, which are normally defined in /etc/sysctl.conf.

### inotify
#
# cat  /proc/sys/fs/inotify/max_queued_events   - 16384
# cat  /proc/sys/fs/inotify/max_user_instances  - 128
# cat  /proc/sys/fs/inotify/max_user_watches    - 16384
#
# -- Change to:
#
fs.inotify.max_queued_events=32768
fs.inotify.max_user_instances=256
fs.inotify.max_user_watches=32768
        

Especially, you will need to trim your tree or adjust the max_user_watches value if indexing exits with a message about errno ENOSPC (28) from inotify_add_watch.
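
After editing /etc/sysctl.conf, the new values can be applied without a reboot and checked (as root):

        sysctl -p /etc/sysctl.conf
        cat /proc/sys/fs/inotify/max_user_watches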

Slowing down the reindexing rate for fast changing files. When using the real time monitor, it may happen that some files need to be indexed, but change so often that they impose an excessive load for the system. Recoll provides a configuration option to specify the minimum time before which a file, specified by a wildcard pattern, cannot be reindexed. See the mondelaypatterns parameter in the configuration section.
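
For example, to avoid reindexing log files more than once every 300 seconds (assuming ~/.recoll is the configuration directory; see the mondelaypatterns description for the exact pattern:seconds syntax):

        echo "mondelaypatterns = *.log:300" >> ~/.recoll/recoll.conf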

Chapter 3. Searching

3.1. Introduction

Getting answers to specific queries is of course the whole point of Recoll. The multiple provided interfaces always understand simple queries made of one or several words, and return appropriate results in most cases.

In order to make the most of Recoll though, it may be worthwhile to understand how it processes your input. Five different modes exist:

  • In All Terms mode, Recoll looks for documents containing all your input terms.

  • Query Language mode behaves like All Terms in the absence of special input, but it can also do much more. This is the best mode for getting the most of Recoll.

  • In Any Term mode, Recoll looks for documents containing any of your input terms, preferring those which contain more.

  • In File Name mode, Recoll will only match file names, not content. Using a small subset of the index allows things like left-hand wildcards without performance issues, and may sometimes be useful.

  • The GUI Advanced Search mode is actually not more powerful than the query language, but it helps you build complex queries without having to remember the language, and avoids any interpretation ambiguity, as it bypasses the user input parser.

These five input modes are supported by the different user interfaces which are described in the following sections.

3.2. Searching with the Qt graphical user interface

The recoll program provides the main user interface for searching. It is based on the Qt library.

recoll has two search modes:

  • Simple search (the default, on the main screen) has a single entry field where you can enter multiple words.

  • Advanced search (a panel accessed through the Tools menu or the toolbox bar icon) has multiple entry fields, which you may use to build a logical condition, with additional filtering on file type, location in the file system, modification date, and size.

In most cases, you can enter the terms as you think them, even if they contain embedded punctuation or other non-textual characters (e.g. Recoll can handle things like email addresses).

The main case where you should enter text differently from how it is printed is for East Asian languages (Chinese, Japanese, Korean). In this case, words composed of single or multiple characters should be entered separated by white space (they would typically be printed without white space).

Some searches can be quite complex, and you may want to re-use them later, perhaps with some tweaking. Recoll can save and restore searches. See Saving and restoring queries.

3.2.1. Simple search

  1. Start the recoll program.

  2. Possibly choose a search mode: Any term, All terms, File name or Query language.

  3. Enter search term(s) in the text field at the top of the window.

  4. Click the Search button or hit the Enter key to start the search.

The initial default search mode is Query language. Without special directives, this will look for documents containing all of the search terms (the ones with more terms will get better scores), just like the All terms mode. Any term will search for documents where at least one of the terms appears. File name will exclusively look for file names, not contents.

All search modes allow terms to be expanded with wildcards characters (*, ?, []). See the section about wildcards for more details.

In all modes except File name, you can search for exact phrases (adjacent words in a given order) by enclosing the input inside double quotes. Ex: "virtual reality".

The Query Language features are described in a separate section.

The File name search mode will specifically look for file names. The point of having a separate file name search is that wild card expansion can be performed more efficiently on a small subset of the index (allowing wild cards on the left of terms without excessive cost). Things to know:

  • White space in the entry should match white space in the file name, and is not treated specially.

  • The search is insensitive to character case and accents, independently of the type of index.

  • An entry without any wild card character and not capitalized will be prepended and appended with '*' (ie: etc -> *etc*, but Etc -> etc).

  • If you have a big index (many files), excessively generic fragments may result in inefficient searches.

When using a stripped index (the default), character case has no influence on search, except that you can disable stem expansion for any term by capitalizing it. Ie: a search for floor will also normally look for flooring, floored, etc., but a search for Floor will only look for floor, in any character case. Stemming can also be disabled globally in the preferences. When using a raw index, the rules are a bit more complicated.

Recoll remembers the last few searches that you performed. You can directly access the search history by clicking the clock button on the right of the search entry, while the latter is empty. Otherwise, the history is used for entry completion (see next). Only the search texts are remembered, not the mode (all/any/file name).

While text is entered in the search area, recoll will display possible completions, filtered from the history and the index search terms. This can be disabled with a GUI Preferences option.

Double-clicking on a word in the result list or a preview window will insert it into the simple search entry field.

You can cut and paste any text into an All terms or Any term search field, punctuation, newlines and all - except for wildcard characters (single ? characters are ok). Recoll will process it and produce a meaningful search. This is what most differentiates this mode from the Query Language mode, where you have to care about the syntax.

You can use the Tools -> Advanced search dialog for more complex searches.

3.2.2. The default result list

After starting a search, a list of results will instantly be displayed in the main list window.

By default, the document list is presented in order of relevance (how well the system estimates that the document matches the query). You can sort the result by ascending or descending date by using the vertical arrows in the toolbar.

Clicking on the Preview link for an entry will open an internal preview window for the document. Further Preview clicks for the same search will open tabs in the existing preview window. You can use Shift+Click to force the creation of another preview window, which may be useful to view the documents side by side. (You can also browse successive results in a single preview window by typing Shift+ArrowUp/Down in the window).

Clicking the Open link will start an external viewer for the document. By default, Recoll lets the desktop choose the appropriate application for most document types (there is a short list of exceptions, see further). If you prefer to completely customize the choice of applications, you can uncheck the Use desktop preferences option in the GUI preferences dialog, and click the Choose editor applications button to adjust the predefined Recoll choices. The tool accepts multiple selections of MIME types (e.g. to set up the editor for the dozens of office file types).

Even when Use desktop preferences is checked, there is a small list of exceptions, for MIME types where the Recoll choice should override the desktop one. These are applications which are well integrated with Recoll, especially evince for viewing PDF and Postscript files because of its support for opening the document at a specific page and passing a search string as an argument. Of course, you can edit the list (in the GUI preferences) if you would prefer to lose the functionality and use the standard desktop tool.

You may also change the choice of applications by editing the mimeview configuration file if you find this more convenient.

Each result entry also has a right-click menu with an Open With entry. This lets you choose an application from the list of those which registered with the desktop for the document MIME type.

The Preview and Open edit links may not be present for all entries, meaning that Recoll has no configured way to preview a given file type (which was indexed by name only), or no configured external editor for the file type. This can sometimes be adjusted simply by tweaking the mimemap and mimeview configuration files (the latter can be modified with the user preferences dialog).

The format of the result list entries is entirely configurable by using the preference dialog to edit an HTML fragment.

You can click on the Query details link at the top of the results page to see the query actually performed, after stem expansion and other processing.

Double-clicking on any word inside the result list or a preview window will insert it into the simple search text.

The result list is divided into pages (the size of which you can change in the preferences). Use the arrow buttons in the toolbar or the links at the bottom of the page to browse the results.

No results: the spelling suggestions

When a search yields no result, and if the aspell dictionary is configured, Recoll will try to check for misspellings among the query terms, and will propose lists of replacements. Clicking on one of the suggestions will replace the word and restart the search. You can hold any of the modifier keys (Ctrl, Shift, etc.) while clicking if you would rather stay on the suggestion screen because several terms need replacement.

The result list right-click menu

Apart from the preview and edit links, you can display a pop-up menu by right-clicking over a paragraph in the result list. This menu has the following entries:

  • Preview

  • Open

  • Open With

  • Run Script

  • Copy File Name

  • Copy Url

  • Save to File

  • Find similar

  • Preview Parent document

  • Open Parent document

  • Open Snippets Window

The Preview and Open entries do the same thing as the corresponding links.

Open With lets you open the document with one of the applications claiming to be able to handle its MIME type (the information comes from the .desktop files in /usr/share/applications).

Run Script allows starting an arbitrary command on the result file. It will only appear for results which are top-level files. See further for a more detailed description.

The Copy File Name and Copy Url copy the relevant data to the clipboard, for later pasting.

Save to File allows saving the contents of a result document to a chosen file. This entry will only appear if the document does not correspond to an existing file, but is a subdocument inside such a file (ie: an email attachment). It is especially useful to extract attachments with no associated editor.

The Open/Preview Parent document entries allow working with the higher level document (e.g. the email message an attachment comes from). Recoll is sometimes not totally accurate as to what it can or can't do in this area. For example the Parent entry will also appear for an email which is part of an mbox folder file, but you can't actually visualize the mbox (there will be an error dialog if you try).

If the document is a top-level file, Open Parent will start the default file manager on the enclosing filesystem directory.

The Find similar entry will select a number of relevant terms from the current document and enter them into the simple search field. You can then start a simple search, with a good chance of finding documents related to the current result. I can't remember a single instance where this function was actually useful to me...

The Open Snippets Window entry will only appear for documents which support page breaks (typically PDF, Postscript, DVI). The snippets window lists extracts from the document, taken around search terms occurrences, along with the corresponding page number, as links which can be used to start the native viewer on the appropriate page. If the viewer supports it, its search function will also be primed with one of the search terms.

3.2.3. The result table

In Recoll 1.15 and newer, the results can be displayed in spreadsheet-like fashion. You can switch to this presentation by clicking the table-like icon in the toolbar (this is a toggle, click again to restore the list).

Clicking on the column headers will allow sorting by the values in the column. You can click again to invert the order, and use the header right-click menu to reset sorting to the default relevance order (you can also use the sort-by-date arrows to do this).

Both the list and the table display the same underlying results. The sort order set from the table is still active if you switch back to the list mode. You can click twice on a date sort arrow to reset it from there.

The header right-click menu allows adding or deleting columns. The columns can be resized, and their order can be changed (by dragging). All the changes are recorded when you quit recoll.

Hovering over a table row will update the detail area at the bottom of the window with the corresponding values. You can click the row to freeze the display. The bottom area is equivalent to a result list paragraph, with links for starting a preview or a native application, and an equivalent right-click menu. Typing Esc (the Escape key) will unfreeze the display.

3.2.4. Running arbitrary commands on result files (1.20 and later)

Apart from the Open and Open With operations, which allow starting an application on a result document (or a temporary copy), based on its MIME type, it is also possible to run arbitrary commands on results which are top-level files, using the Run Script entry in the results pop-up menu.

The commands which will appear in the Run Script submenu must be defined by .desktop files inside the scripts subdirectory of the current configuration directory.

Here follows an example of a .desktop file, which could be named, for example, ~/.recoll/scripts/myscript.desktop (the exact file name inside the directory is irrelevant):

          [Desktop Entry]
          Type=Application
          Name=MyFirstScript
          Exec=/home/me/bin/tryscript %F
          MimeType=*/*
        

The Name attribute defines the label which will appear inside the Run Script menu. The Exec attribute defines the program to be run, which does not need to actually be a script, of course. The MimeType attribute is not used, but needs to exist.

The commands defined this way can also be used from links inside the result paragraph.

As an example, it might make sense to write a script which would move the document to the trash and purge it from the Recoll index.
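
For instance, a very rough sketch of such a script (the file name, and the use of gio for the trash operation, are just assumptions; it would be referenced from the Exec line of a .desktop file as shown above):

        #!/bin/sh
        # ~/.recoll/scripts/trash-and-purge.sh (hypothetical name)
        # Move the result file to the desktop trash, then erase it from the index.
        test $# = 1 || exit 1
        gio trash "$1" && recollindex -e "$1"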

3.2.5. Displaying thumbnails

The default format for the result list entries and the detail area of the result table display an icon for each result document. The icon is either a generic one determined from the MIME type, or a thumbnail of the document appearance. Thumbnails are only displayed if found in the standard freedesktop location, where they would typically have been created by a file manager.

Recoll has no capability to create thumbnails. A relatively simple trick is to use the Open parent document/folder entry in the result list popup menu. This should open a file manager window on the containing directory, which should in turn create the thumbnails (depending on your settings). Restarting the search should then display the thumbnails.

There are also some pointers about thumbnail generation on the Recoll wiki.

3.2.6. The preview window

The preview window opens when you first click a Preview link inside the result list.

Subsequent preview requests for a given search open new tabs in the existing window (except if you hold the Shift key while clicking which will open a new window for side by side viewing).

Starting another search and requesting a preview will create a new preview window. The old one stays open until you close it.

You can close a preview tab by typing Ctrl-W (Ctrl + W) in the window. Closing the last tab for a window will also close the window.

Of course you can also close a preview window by using the window manager button in the top of the frame.

You can display successive or previous documents from the result list inside a preview tab by typing Shift+Down or Shift+Up (Down and Up are the arrow keys).

A right-click menu in the text area allows switching between displaying the main text or the contents of fields associated with the document (ie: author, abstract, etc.). This is especially useful in cases where the term match did not occur in the main text but in one of the fields. In the case of images, you can switch between three displays: the image itself, the image metadata as extracted by exiftool, and the fields, which are the metadata stored in the index.

You can print the current preview window contents by typing Ctrl-P (Ctrl + P) in the window text.

Searching inside the preview

The preview window has an internal search capability, mostly controlled by the panel at the bottom of the window, which works in two modes: as a classical editor incremental search, where we look for the text entered in the entry zone, or as a way to walk the matches between the document and the Recoll query that found it.

Incremental text search

The preview tabs have an internal incremental search function. You initiate the search either by typing a / (slash) or Ctrl-F inside the text area, or by clicking into the Search for: text field and entering the search string. You can then use the Next and Previous buttons to find the next/previous occurrence. You can also type F3 inside the text area to get to the next occurrence.

If you have a search string entered and you use Ctrl-Up/Ctrl-Down to browse the results, the search is initiated for each successive document. If the string is found, the cursor will be positioned at the first occurrence of the search string.

Walking the match lists

If the entry area is empty when you click the Next or Previous buttons, the editor will be scrolled to show the next match to any search term (the next highlighted zone). If you select a search group from the dropdown list and click Next or Previous, the match list for this group will be walked. This is not the same as a text search, because the occurrences will include non-exact matches (as caused by stemming or wildcards). The search will revert to the text mode as soon as you edit the entry area.

3.2.7. The Query Fragments window

Selecting the Tools -> Query Fragments menu entry will open a window with radio- and check-buttons which can be used to activate query language fragments for filtering the current query. This can be useful if you have frequent reusable selectors, for example, filtering on alternate directories, or searching just one category of files, not covered by the standard category selectors.

The contents of the window are entirely customizable, and defined by the contents of the fragbuts.xml file inside the configuration directory. The sample file distributed with Recoll (which you should be able to find under /usr/share/recoll/examples/fragbuts.xml), contains an example which filters the results from the WEB history.

Here follows an example:

          <?xml version="1.0" encoding="UTF-8"?>

          <fragbuts version="1.0">

          <radiobuttons>

          <fragbut>
          <label>Include Web Results</label>
          <frag></frag>
          </fragbut>

          <fragbut>
          <label>Exclude Web Results</label>
          <frag>-rclbes:BGL</frag>
          </fragbut>

          <fragbut>
          <label>Only Web Results</label>
          <frag>rclbes:BGL</frag>
          </fragbut>

          </radiobuttons>

          <buttons>

          <fragbut>
          <label>Year 2010</label>
          <frag>date:2010-01-01/2010-12-31</frag>
          </fragbut>

          <fragbut>
          <label>My Great Directory Only</label>
          <frag>dir:/my/great/directory</frag>
          </fragbut>

          </buttons>
          </fragbuts>
        

Each radiobuttons or buttons section defines a line of checkbuttons or radiobuttons inside the window. Any number of buttons can be selected, but the radiobuttons in a line are exclusive.

Each fragbut section defines the label for a button, and the Query Language fragment which will be added (as an AND filter) before performing the query if the button is active.

This feature is new in Recoll 1.20, and will probably be refined depending on user feedback.

3.2.8. Complex/advanced search

The advanced search dialog helps you build more complex queries without memorizing the search language constructs. It can be opened through the Tools menu or through the main toolbar.

Recoll keeps a history of searches. See Advanced search history.

The dialog has two tabs:

  1. The first tab lets you specify terms to search for, and permits specifying multiple clauses which are combined to build the search.

  2. The second tab lets you filter the results according to file size, date of modification, MIME type, or location.

Click on the Start Search button in the advanced search dialog, or type Enter in any text field to start the search. The button in the main window always performs a simple search.

Click on the Show query details link at the top of the result page to see the query expansion.

Avanced search: the "find" tab

This part of the dialog lets you construct a query by combining multiple clauses of different types. Each entry field is configurable for the following modes:

  • All terms.

  • Any term.

  • None of the terms.

  • Phrase (exact terms in order within an adjustable window).

  • Proximity (terms in any order within an adjustable window).

  • Filename search.

Additional entry fields can be created by clicking the Add clause button.

When searching, the non-empty clauses will be combined either with an AND or an OR conjunction, depending on the choice made on the left (All clauses or Any clause).

Entries of all types except "Phrase" and "Near" accept a mix of single words and phrases enclosed in double quotes. Stemming and wildcard expansion will be performed as for simple search.

Phrases and Proximity searches. These two clauses work in similar ways, with the difference that proximity searches do not impose an order on the words. In both cases, an adjustable number (slack) of non-matched words may be accepted between the searched ones (use the counter on the left to adjust this count). For phrases, the default count is zero (exact match). For proximity it is ten (meaning that two search terms would be matched if found within a window of twelve words). Examples: a phrase search for quick fox with a slack of 0 will match quick fox but not quick brown fox. With a slack of 1 it will match the latter, but not fox quick. A proximity search for quick fox with the default slack will match the latter, and also a fox is a cunning and quick animal.
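
For reference, roughly equivalent searches can be typed directly in Query Language mode, using the phrase modifiers described with the query language (o sets the slack, p turns the phrase into a proximity search):

        "quick fox"       phrase, slack 0: matches quick fox only
        "quick fox"o1     phrase, slack 1: also matches quick brown fox
        "quick fox"p      proximity: terms in any order, default slack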

Avanced search: the "filter" tab

This part of the dialog has several sections which allow filtering the results of a search according to a number of criteria:

  • The first section allows filtering by dates of last modification. You can specify both a minimum and a maximum date. The initial values are set according to the oldest and newest documents found in the index.

  • The next section allows filtering the results by file size. There are two entries for minimum and maximum size. Enter decimal numbers. You can use suffix multipliers: k/K, m/M, g/G, t/T for 1E3, 1E6, 1E9, 1E12 respectively.

  • The next section allows filtering the results by their MIME types, or MIME categories (ie: media/text/message/etc.).

    You can transfer the types between two boxes, to define which will be included or excluded by the search.

    The state of the file type selection can be saved as the default (the file type filter will not be activated at program start-up, but the lists will be in the restored state).

  • The bottom section allows restricting the search results to a sub-tree of the indexed area. You can use the Invert checkbox to search for files not in the sub-tree instead. If you use directory filtering often and on big subsets of the file system, you may think of setting up multiple indexes instead, as the performance may be better.

    You can use relative/partial paths for filtering. Ie, entering dirA/dirB would match either /dir1/dirA/dirB/myfile1 or /dir2/dirA/dirB/someother/myfile2.

Advanced search history

The advanced search tool memorizes the last 100 searches performed. You can walk the saved searches by using the up and down arrow keys while the keyboard focus belongs to the advanced search dialog.

The complex search history can be erased, along with the one for simple search, by selecting the File -> Erase Search History menu entry.

3.2.9. The term explorer tool

Recoll automatically manages the expansion of search terms to their derivatives (ie: plural/singular, verb inflections). But there are other cases where the exact search term is not known. For example, you may not remember the exact spelling, or only know the beginning of the name.

The search will only propose replacement terms with spelling variations when no matching documents were found. In some cases, both proper spellings and misspellings are present in the index, and it may be interesting to look for them explicitly.

The term explorer tool (started from the toolbar icon or from the Term explorer entry of the Tools menu) can be used to search the full index terms list. It has four modes of operation:

Wildcard

In this mode of operation, you can enter a search string with shell-like wildcards (*, ?, []). ie: xapi* would display all index terms beginning with xapi. (More about wildcards here ).

Regular expression

This mode will accept a regular expression as input. Example: word[0-9]+. The expression is implicitly anchored at the beginning. Ie: press will match pression but not expression. You can use .*press to match the latter, but be aware that this will cause a full index term list scan, which can be quite long.

Stem expansion

This mode will perform the usual stem expansion normally done as part of user input processing. As such it is probably mostly useful to demonstrate the process.

Spelling/Phonetic

In this mode, you enter the term as you think it is spelled, and Recoll will do its best to find index terms that sound like your entry. This mode uses the Aspell spelling application, which must be installed on your system for things to work (if your documents contain non-ascii characters, Recoll needs an aspell version newer than 0.60 for UTF-8 support). The language which is used to build the dictionary out of the index terms (which is done at the end of an indexing pass) is the one defined by your NLS environment. Weird things will probably happen if languages are mixed up.

Note that in cases where Recoll does not know the beginning of the string to search for (ie a wildcard expression like *coll), the expansion can take quite a long time because the full index term list will have to be processed. The expansion is currently limited to 10000 results for wildcards and regular expressions. It is possible to change the limit in the configuration file.

Double-clicking on a term in the result list will insert it into the simple search entry field. You can also cut/paste between the result list and any entry field (the end of lines will be taken care of).

3.2.10. Multiple indexes

See the section describing the use of multiple indexes for generalities. Only the aspects concerning the recoll GUI are described here.

A recoll program instance is always associated with a specific index, which is the one to be updated when requested from the File menu, but it can use any number of Recoll indexes for searching. The external indexes can be selected through the external indexes tab in the preferences dialog.

Index selection is performed in two phases. A set of all usable indexes must first be defined, and then the subset of indexes to be used for searching. These parameters are retained across program executions (they are kept separately for each Recoll configuration). The set of all indexes is usually quite stable, while the active ones might typically be adjusted quite frequently.

The main index (defined by RECOLL_CONFDIR) is always active. If this is undesirable, you can set up your base configuration to index an empty directory.

When adding a new index to the set, you can select either a Recoll configuration directory, or directly a Xapian index directory. In the first case, the Xapian index directory will be obtained from the selected configuration.

As building the set of all indexes can be a little tedious when done through the user interface, you can use the RECOLL_EXTRA_DBS environment variable to provide an initial set. This might typically be set up by a system administrator so that every user does not have to do it. The variable should define a colon-separated list of index directories, ie:

          export RECOLL_EXTRA_DBS=/some/place/xapiandb:/some/other/db

Another environment variable, RECOLL_ACTIVE_EXTRA_DBS allows adding to the active list of indexes. This variable was suggested and implemented by a Recoll user. It is mostly useful if you use scripts to mount external volumes with Recoll indexes. By using RECOLL_EXTRA_DBS and RECOLL_ACTIVE_EXTRA_DBS, you can add and activate the index for the mounted volume when starting recoll.

RECOLL_ACTIVE_EXTRA_DBS is available for Recoll versions 1.17.2 and later. A change was made in the same update so that recoll will automatically deactivate unreachable indexes when starting up.
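
For example, a script which mounts an external volume could declare and activate its index before starting the GUI (the mount point and index location are just examples):

        export RECOLL_EXTRA_DBS=/media/me/somelabel/recoll-confdir/xapiandb
        export RECOLL_ACTIVE_EXTRA_DBS=/media/me/somelabel/recoll-confdir/xapiandb
        recoll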

3.2.11. Document history

Documents that you actually view (with the internal preview or an external tool) are entered into the document history, which is remembered.

You can display the history list by using the Tools/Doc History menu entry.

You can erase the document history by using the Erase document history entry in the File menu.

3.2.12. Sorting search results and collapsing duplicates

The documents in a result list are normally sorted in order of relevance. It is possible to specify a different sort order, either by using the vertical arrows in the GUI toolbox to sort by date, or by switching to the result table display and clicking on any header. The sort order chosen inside the result table remains active if you switch back to the result list, until you click the vertical arrows until both are unchecked, at which point you are back to sorting by relevance.

Sort parameters are remembered between program invocations, but result sorting is normally always inactive when the program starts. It is possible to keep the sorting activation state between program invocations by checking the Remember sort activation state option in the preferences.

It is also possible to hide duplicate entries inside the result list (documents with the exact same contents as the displayed one). The test of identity is based on an MD5 hash of the document container, not only of the text contents (so that, for example, a text document with an image added will not be a duplicate of the text-only version). Duplicates hiding is controlled by an entry in the GUI configuration dialog, and is off by default.

As of release 1.19, when a result document does have undisplayed duplicates, a Dups link will be shown with the result list entry. Clicking the link will display the paths (URLs + ipaths) for the duplicate entries.

3.2.13. Search tips, shortcuts

Terms and search expansion

Term completion. Typing Esc Space in the simple search entry field while entering a word will either complete the current word if its beginning matches a unique term in the index, or open a window to propose a list of completions.

Picking up new terms from result or preview text. Double-clicking on a word in the result list or in a preview window will copy it to the simple search entry field.

Wildcards. Wildcards can be used inside search terms in all forms of searches. More about wildcards.

Automatic suffixes. Words like odt or ods can be automatically turned into query language ext:xxx clauses. This can be enabled in the Search preferences panel in the GUI.

Disabling stem expansion. Entering a capitalized word in any search field will prevent stem expansion (no search for gardening if you enter Garden instead of garden). This is the only case where character case should make a difference for a Recoll search. You can also disable stem expansion or change the stemming language in the preferences.

Finding related documents. Selecting the Find similar documents entry in the result list paragraph right-click menu will select a set of "interesting" terms from the current result, and insert them into the simple search entry field. You can then possibly edit the list and start a search to find documents which may be apparented to the current result.

File names. File names are added as terms during indexing, and you can specify them as ordinary terms in normal search fields (Recoll used to index all directories in the file path as terms. This has been abandoned as it did not seem really useful). Alternatively, you can use the specific file name search which will only look for file names, and may be faster than the generic search especially when using wildcards.

Working with phrases and proximity

Phrases and Proximity searches. A phrase can be looked for by enclosing it in double quotes. Example: "user manual" will look only for occurrences of user immediately followed by manual. You can use the This phrase field of the advanced search dialog to the same effect. Phrases can be entered along simple terms in all simple or advanced search entry fields (except This exact phrase).

AutoPhrases. This option can be set in the preferences dialog. If it is set, a phrase will be automatically built and added to simple searches when looking for Any terms. This will not change radically the results, but will give a relevance boost to the results where the search terms appear as a phrase. Ie: searching for virtual reality will still find all documents where either virtual or reality or both appear, but those which contain virtual reality should appear sooner in the list.

Phrase searches can strongly slow down a query if most of the terms in the phrase are common. This is why the autophrase option is off by default for Recoll versions before 1.17. As of version 1.17, autophrase is on by default, but very common terms will be removed from the constructed phrase. The removal threshold can be adjusted from the search preferences.

Phrases and abbreviations. As of Recoll version 1.17, dotted abbreviations like I.B.M. are also automatically indexed as a word without the dots: IBM. Searching for the word inside a phrase (ie: "the IBM company") will only match the dotted abbreviation if you increase the phrase slack (using the advanced search panel control, or the o query language modifier). Literal occurrences of the word will be matched normally.

Others

Using fields. You can use the query language and field specifications to only search certain parts of documents. This can be especially helpful with email, for example only searching emails from a specific originator: search tips from:helpfulgui

Adjusting the result table columns. When displaying results in table mode, you can use a right click on the table headers to activate a pop-up menu which will let you adjust what columns are displayed. You can drag the column headers to adjust their order. You can click them to sort by the field displayed in the column. You can also save the result list in CSV format.

Changing the GUI geometry. It is possible to configure the GUI in wide form factor by dragging the toolbars to one of the sides (their location is remembered between sessions), and moving the category filters to a menu (can be set in the Preferences -> GUI configuration -> User interface panel).

Query explanation. You can get an exact description of what the query looked for, including stem expansion, and Boolean operators used, by clicking on the result list header.

Advanced search history. As of Recoll 1.18, you can display any of the last 100 complex searches performed by using the up and down arrow keys while the advanced search panel is active.

Browsing the result list inside a preview window. Entering Shift-Down or Shift-Up (Shift + an arrow key) in a preview window will display the next or the previous document from the result list. Any secondary search currently active will be executed on the new document.

Scrolling the result list from the keyboard. You can use PageUp and PageDown to scroll the result list, Shift+Home to go back to the first page. These work even while the focus is in the search entry.

Result table: moving the focus to the table. You can use Ctrl-r to move the focus from the search entry to the table, and then use the arrow keys to change the current row. Ctrl-Shift-s returns to the search.

Result table: open / preview. With the focus in the result table, you can use Ctrl-o to open the document from the current row, Ctrl-Shift-o to open the document and close recoll, Ctrl-d to preview the document, and Ctrl-e to open the document snippets window.

Editing a new search while the focus is not in the search entry. You can use the Ctrl-Shift-S shortcut to return the cursor to the search entry (and select the current search text), while the focus is anywhere in the main window.

Forced opening of a preview window. You can use Shift+Click on a result list Preview link to force the creation of a preview window instead of a new tab in the existing one.

Closing previews. Entering Ctrl-W in a tab will close it (and, for the last tab, close the preview window). Entering Esc will close the preview window and all its tabs.

Printing previews. Entering Ctrl-P in a preview window will print the currently displayed text.

Quitting. Entering Ctrl-Q almost anywhere will close the application.

3.2.14. Saving and restoring queries (1.21 and later)

Both simple and advanced query dialogs save recent history, but the amount is limited: old queries will eventually be forgotten. Also, important queries may be difficult to find among others. This is why both types of queries can also be explicitly saved to files, from the GUI menus: File -> Save last query / Load last query.

The default location for saved queries is a subdirectory of the current configuration directory, but saved queries are ordinary files and can be written or moved anywhere.

Some of the saved query parameters are part of the preferences (e.g. autophrase or the active external indexes), and may differ when the query is loaded from the time it was saved. In this case, Recoll will warn of the differences, but will not change the user preferences.

3.2.15. Customizing the search interface

You can customize some aspects of the search interface by using the GUI configuration entry in the Preferences menu.

There are several tabs in the dialog, dealing with the interface itself, the parameters used for searching and returning results, and what indexes are searched.

User interface parameters: 

  • Highlight color for query terms: Terms from the user query are highlighted in the result list samples and the preview window. The color can be chosen here. Any Qt color string should work (ie red, #ff0000). The default is blue.

  • Style sheet: The name of a Qt style sheet text file which is applied to the whole Recoll application on startup. The default value is empty, but there is a skeleton style sheet (recoll.qss) inside the /usr/share/recoll/examples directory. Using a style sheet, you can change most recoll graphical parameters: colors, fonts, etc. See the sample file for a few simple examples.

    You should be aware that parameters (e.g.: the background color) set inside the Recoll GUI style sheet will override global system preferences, with possible strange side effects: for example if you set the foreground to a light color and the background to a dark one in the desktop preferences, but only the background is set inside the Recoll style sheet, and it is light too, then text will appear light-on-light inside the Recoll GUI.

  • Maximum text size highlighted for preview: inserting highlights on search terms inside the text before inserting it in the preview window involves quite a lot of processing, and can be disabled over the given text size to speed up loading.

  • Prefer HTML to plain text for preview: if set, Recoll will display HTML as such inside the preview window. If this causes problems with the Qt HTML display, you can uncheck it to display the plain text version instead.

  • Activate links in preview: if set, Recoll will turn HTTP links found inside plain text into proper HTML anchors, and clicking a link inside a preview window will start the default browser on the link target.

  • Plain text to HTML line style: when displaying plain text inside the preview window, Recoll tries to preserve some of the original text line breaks and indentation. It can either use PRE HTML tags, which will well preserve the indentation but will force horizontal scrolling for long lines, or use BR tags to break at the original line breaks, which will let the editor introduce other line breaks according to the window width, but will lose some of the original indentation. The third option has been available in recent releases and is probably now the best one: use PRE tags with line wrapping.

  • Choose editor application: this opens a dialog which allows you to select the application to be used to open each MIME type. The default is to use the xdg-open utility, but you can use this dialog to override it, setting exceptions for MIME types that will still be opened according to Recoll preferences. This is useful for passing parameters like page numbers or search strings to applications that support them (e.g. evince). This cannot be done with xdg-open which only supports passing one parameter.

  • Disable Qt autocompletion in search entry: this will disable the completion popup. It will only appear, and display the full history, either if you enter only white space in the search area, or if you click the clock button on the right of the area.

  • Document filter choice style: this will let you choose whether the document categories are displayed as a list, a set of buttons, or a menu.

  • Start with simple search mode: this lets you choose the value of the simple search type on program startup: either a fixed value (e.g. Query Language), or the value in use when the program last exited.

  • Start with advanced search dialog open: if you use this dialog frequently, checking this entry will get it to open when recoll starts.

  • Remember sort activation state: if set, Recoll will remember the sort tool state between invocations. It normally starts with sorting disabled.

Result list parameters: 

  • Number of results in a result page

  • Result list font: There is quite a lot of information shown in the result list, and you may want to customize the font and/or font size. The rest of the fonts used by Recoll are determined by your generic Qt config (try the qtconfig command).

  • Edit result list paragraph format string: allows you to change the presentation of each result list entry. See the result list customisation section.

  • Edit result page HTML header insert: allows you to define text inserted at the end of the result page HTML header. More detail in the result list customisation section.

  • Date format: allows specifying the format used for displaying dates inside the result list. This should be specified as an strftime() string (man strftime).

  • Abstract snippet separator: for synthetic abstracts built from index data, which are usually made of several snippets from different parts of the document, this defines the snippet separator, an ellipsis by default.

Search parameters: 

  • Hide duplicate results: decides if result list entries are shown for identical documents found in different places.

  • Stemming language: stemming obviously depends on the document's language. This listbox will let you choose among the stemming databases which were built during indexing (this is set in the main configuration file), or later added with recollindex -s (See the recollindex manual). Stemming languages which are dynamically added will be deleted at the next indexing pass unless they are also added in the configuration file.

  • Automatically add phrase to simple searches: a phrase will be automatically built and added to simple searches when looking for Any terms. This will give a relevance boost to the results where the search terms appear as a phrase (consecutive and in order).

  • Autophrase term frequency threshold percentage: very frequent terms should not be included in automatic phrase searches for performance reasons. The parameter defines the cutoff percentage (percentage of the documents where the term appears).

  • Replace abstracts from documents: this decides if we should synthesize and display an abstract in place of an explicit abstract found within the document itself.

  • Dynamically build abstracts: this decides if Recoll tries to build document abstracts (lists of snippets) when displaying the result list. Abstracts are constructed by taking context from the document information, around the search terms.

  • Synthetic abstract size: adjust to taste...

  • Synthetic abstract context words: how many words should be displayed around each term occurrence.

  • Query language magic file name suffixes: a list of words which automatically get turned into ext:xxx file name suffix clauses when starting a query language query (e.g.: doc xls xlsx...). This will save some typing for people who use file types a lot when querying.

External indexes: This panel will let you browse for additional indexes that you may want to search. External indexes are designated by their database directory (e.g.: /home/someothergui/.recoll/xapiandb, /usr/local/recollglobal/xapiandb).

Once entered, the indexes will appear in the External indexes list, and you can choose which ones you want to use at any moment by checking or unchecking their entries.

Your main database (the one the current configuration indexes to) is always implicitly active. If this is not desirable, you can set up your configuration so that it indexes, for example, an empty directory. An alternative indexer may also need to implement a way of purging the index from stale data.

The result list format

Newer versions of Recoll (from 1.17) normally use WebKit HTML widgets for the result list and the snippets window (this may be disabled at build time). Total customisation is possible with full support for CSS and Javascript. Conversely, there are limits to what you can do with the older Qt QTextBrowser, but still, it is possible to decide what data each result will contain, and how it will be displayed.

The result list presentation can be exhaustively customized by adjusting two elements:

  • The paragraph format

  • HTML code inside the header section. For versions 1.21 and later, this is also used for the snippets window.

The paragraph format and the header fragment can be edited from the Result list tab of the GUI configuration.

The header fragment is used both for the result list and the snippets window. The snippets list is a table and has a snippets class attribute. Each paragraph in the result list is a table, with class respar, but this can be changed by editing the paragraph format.

There are a few examples on the page about customising the result list on the Recoll web site.

The paragraph format

This is an arbitrary HTML string where the following printf-like % substitutions will be performed:

  • %A. Abstract

  • %D. Date

  • %I. Icon image name. This is normally determined from the MIME type. The associations are defined inside the mimeconf configuration file. If a thumbnail for the file is found at the standard Freedesktop location, this will be displayed instead.

  • %K. Keywords (if any)

  • %L. Precooked Preview, Edit, and possibly Snippets links

  • %M. MIME type

  • %N. result Number inside the result page

  • %P. Parent folder Url. In the case of an embedded document, this is the parent folder for the top level container file.

  • %R. Relevance percentage

  • %S. Size information

  • %T. Title, or Filename if the title is not set.

  • %t. Title or empty.

  • %(filename). File name.

  • %U. Url

The format of the Preview, Edit, and Snippets links is <a href="P%N">, <a href="E%N"> and <a href="A%N">, where %N expands to the document number inside the result page.

A link target defined as "F%N" will open the document corresponding to the %P parent folder expansion, usually creating a file manager window on the folder where the container file resides. E.g.:

              <a href="F%N">%P</a>

A link target defined as R%N|scriptname will run the corresponding script on the result file (if the document is embedded, the script will be started on the top-level parent). See the section about defining scripts.

In addition to the predefined values above, all strings like %(fieldname) will be replaced by the value of the field named fieldname for this document. Only stored fields can be accessed in this way; the value of indexed but not stored fields is not known at this point in the search process (see field configuration). There are currently very few fields stored by default, apart from the values above (only author and filename), so this feature will need some custom local configuration to be useful. An example candidate would be the recipient field which is generated by the message input handlers.
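
For instance, assuming that the recipient field mentioned above had been configured as stored (it is not by default), a hypothetical fragment like the following could be added to the paragraph format to display it:

            <i>To: %(recipient)</i><br>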

The default value for the paragraph format string is:

            "<table class=\"respar\">\n"
            "<tr>\n"
            "<td><a href='%U'><img src='%I' width='64'></a></td>\n"
            "<td>%L &nbsp;<i>%S</i> &nbsp;&nbsp;<b>%T</b><br>\n"
            "<span style='white-space:nowrap'><i>%M</i>&nbsp;%D</span>&nbsp;&nbsp;&nbsp; <i>%U</i>&nbsp;%i<br>\n"
            "%A %K</td>\n"
            "</tr></table>\n"
            

You may, for example, try the following for a more web-like experience:

            <u><b><a href="P%N">%T</a></b></u><br>
            %A<font color=#008000>%U - %S</font> - %L
            

Note that the P%N link in the above paragraph makes the title a preview link. Or the clean looking:

            <img src="%I" align="left">%L <font color="#900000">%R</font>
            &nbsp;&nbsp;<b>%T</b><br>%S&nbsp;
            <font color="#808080"><i>%U</i></font>
            <table bgcolor="#e0e0e0">
            <tr><td><div>%A</div></td></tr>
            </table>%K
            

These samples, and some others, are on the web site, with pictures to show how they look.

It is also possible to define the value of the snippet separator inside the abstract section.

3.3. Searching with the KDE KIO slave

3.3.1. What's this

The Recoll KIO slave allows performing a Recoll search by entering an appropriate URL in a KDE open dialog, or with an HTML-based interface displayed in Konqueror.

The HTML-based interface is similar to the Qt-based interface, but slightly less powerful for now. Its advantage is that you can perform your search while staying fully within the KDE framework: drag and drop from the result list works normally and you have your normal choice of applications for opening files.

The alternative interface uses a directory view of search results. Due to limitations in the current KIO slave interface, it is currently not obviously useful (to me).

The interface is described in more detail inside a help file which you can access by entering recoll:/ inside the konqueror URL line (this works only if the recoll KIO slave has been previously installed).

The instructions for building this module are located in the source tree. See: kde/kio/recoll/00README.txt. Some Linux distributions do package the kio-recoll module, so check before diving into the build process, maybe it's already out there ready for one-click installation.

3.3.2. Searchable documents

As a sample application, the Recoll KIO slave could allow preparing a set of HTML documents (for example a manual) so that they become their own search interface inside konqueror.

This can be done by either explicitly inserting <a href="recoll://..."> links around some document areas, or automatically by adding a very small javascript program to the documents, like the following example, which would initiate a search by double-clicking any term:

          <script language="JavaScript">
        function recollsearch() {
        var t = document.getSelection();
        window.location.href = 'recoll://search/query?qtp=a&p=0&q=' +
        encodeURIComponent(t);
        }
        </script>
        ....
        <body ondblclick="recollsearch()">

        

3.4. Searching on the command line

There are several ways to obtain search results as a text stream, without a graphical interface:

  • By passing option -t to the recoll program, or by calling it as recollq (through a link).

  • By using the recollq program.

  • By writing a custom Python program, using the Recoll Python API.

The first two methods work in the same way and accept/need the same arguments (except for the additional -t to recoll). The query to be executed is specified as command line arguments.

recollq is not always built by default. You can use the Makefile in the query directory to build it. This is a very simple program, and if you can program a little C++, you may find it useful to tailor its output format to your needs. Apart from being easily customised, recollq is only really useful on systems where the Qt libraries are not available, else it is redundant with recoll -t.

recollq has a man page. The Usage string follows:

recollq: usage:
 -P: Show the date span for all the documents present in the index
 [-o|-a|-f] [-q] <query string>
 Runs a recoll query and displays result lines. 
  Default: will interpret the argument(s) as a xesam query string
  Query elements: 
   * Implicit AND, exclusion, field spec:  t1 -t2 title:t3
   * OR has priority: t1 OR t2 t3 OR t4 means (t1 OR t2) AND (t3 OR t4)
   * Phrase: "t1 t2" (needs additional quoting on cmd line)
 -o Emulate the GUI simple search in ANY TERM mode
 -a Emulate the GUI simple search in ALL TERMS mode
 -f Emulate the GUI simple search in filename mode
 -q is just ignored (compatibility with the recoll GUI command line)
Common options:
 -c <configdir> : specify config directory, overriding $RECOLL_CONFDIR
 -d also dump file contents
 -n [first-]<cnt> define the result slice. The default value for [first]
    is 0. Without the option, the default max count is 2000.
    Use n=0 for no limit
 -b : basic. Just output urls, no mime types or titles
 -Q : no result lines, just the processed query and result count
 -m : dump the whole document meta[] array for each result
 -A : output the document abstracts
 -S fld : sort by field <fld>
   -D : sort descending
 -s stemlang : set stemming language to use (must exist in index...)
    Use -s "" to turn off stem expansion
 -T <synonyms file>: use the parameter (Thesaurus) for word expansion 
 -i <dbdir> : additional index, several can be given
 -e use url encoding (%xx) for urls
 -F <field name list> : output exactly these fields for each result.
    The field values are encoded in base64, output in one line and 
    separated by one space character. This is the recommended format 
    for use by other programs. Use a normal query with option -m to 
    see the field names. Use -F '' to output all fields, but you probably
    also want option -N in this case
  -N : with -F, print the (plain text) field names before the field values
      

Sample execution:

recollq 'ilur -nautique mime:text/html'
Recoll query: ((((ilur:(wqf=11) OR ilurs) AND_NOT (nautique:(wqf=11) OR nautiques OR nautiqu OR nautiquement)) FILTER Ttext/html))
4 results
text/html       [file:///Users/uncrypted-dockes/projets/bateaux/ilur/comptes.html]      [comptes.html]  18593   bytes   
text/html       [file:///Users/uncrypted-dockes/projets/nautique/webnautique/articles/ilur1/index.html] [Constructio...
text/html       [file:///Users/uncrypted-dockes/projets/pagepers/index.html]    [psxtcl/writemime/recoll]...
text/html       [file:///Users/uncrypted-dockes/projets/bateaux/ilur/factEtCie/recu-chasse-maree....
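
As another example based on the usage text above, the following hypothetical invocation would output only the url and title fields for each result (base64-encoded values, one result per line), which is the recommended format for consumption by another program:

recollq -N -F 'url title' 'ilur -nautique mime:text/html'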
      

3.5. The query language

The query language processor is activated in the GUI simple search entry when the search mode selector is set to Query Language. It can also be used with the KIO slave or the command line search. It broadly has the same capabilities as the complex search interface in the GUI.

The language was based on the now defunct Xesam user search language specification.

If the results of a query language search puzzle you and you doubt what has been actually searched for, you can use the GUI Show Query link at the top of the result list to check the exact query which was finally executed by Xapian.

Here follows a sample request that we are going to explain:

        author:"john doe" Beatles OR Lennon Live OR Unplugged -potatoes
      

This would search for all documents with John Doe appearing as a phrase in the author field (exactly what this means depends on the document type, e.g. the From: header for an email message), and containing either beatles or lennon and either live or unplugged, but not potatoes (in any part of the document).

An element is composed of an optional field specification, and a value, separated by a colon (the field separator is the last colon in the element). Examples: Eugenie, author:balzac, dc:title:grandet dc:title:"eugenie grandet"

The colon, if present, means "contains". Xesam defines other relations, which are mostly unsupported for now (except in special cases, described further down).

All elements in the search entry are normally combined with an implicit AND. It is possible to specify that elements be OR'ed instead, as in Beatles OR Lennon. The OR must be entered literally (capitals), and it has priority over the AND associations: word1 word2 OR word3 means word1 AND (word2 OR word3) not (word1 AND word2) OR word3.

Recoll versions 1.21 and later allow using parentheses to group elements, which will sometimes make things clearer, and may allow expressing combinations which would have been difficult otherwise.

An element preceded by a - specifies a term that should not appear.

As usual, words inside quotes define a phrase (the order of words is significant), so that title:"prejudice pride" is not the same as title:prejudice title:pride, and is unlikely to find a result.

Words inside phrases and capitalized words are not stem-expanded. Wildcards may be used anywhere inside a term. Specifying a wild-card on the left of a term can produce a very slow search (or even an incorrect one if the expansion is truncated because of excessive size). Also see More about wildcards.

To save you some typing, recent Recoll versions (1.20 and later) interpret a comma-separated list of terms for a field as an AND list inside the field. Use slash characters ('/') for an OR list. No white space is allowed. So

author:john,lennon

will search for documents with john and lennon inside the author field (in any order), and

author:john/ringo

would search for john or ringo. This behaviour only happens for field queries (without a field specification, comma- or slash-separated input will produce a phrase search). You can use the text field name to search the main body text this way.

Modifiers can be set on a double-quote value, for example to specify a proximity search (unordered). See the modifier section. No space must separate the final double-quote and the modifiers value, e.g. "two one"po10

Recoll currently manages the following default fields:

  • title, subject or caption are synonyms which specify data to be searched for in the document title or subject.

  • author or from for searching the documents originators.

  • recipient or to for searching the documents recipients.

  • keyword for searching the document-specified keywords (few documents actually have any).

  • filename for the document's file name. This is not necessarily set for all documents: internal documents contained inside a compound one (for example an EPUB section) do not inherit the container file name any more, this was replaced by an explicit field (see next). Sub-documents can still have a specific filename, if it is implied by the document format, for example the attachment file name for an email attachment.

  • containerfilename. This is set for all documents, both top-level and contained sub-documents, and is always the name of the filesystem directory entry which contains the data. The terms from this field can only be matched by an explicit field specification (as opposed to terms from filename which are also indexed as general document content). This avoids getting matches for all the sub-documents when searching for the container file name.

  • ext specifies the file name extension (Ex: ext:html)

Recoll 1.20 and later have a way to specify aliases for the field names, which will save typing, for example by aliasing filename to fn or containerfilename to cfn. See the section about the fields file.
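
As a sketch of what this could look like (the comments inside the fields file itself are the authoritative reference for the syntax), the abbreviations above might be declared in an [aliases] section mapping the canonical field name to its aliases:

        [aliases]
        filename = fn
        containerfilename = cfn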

The document input handlers used while indexing have the possibility to create other fields with arbitrary names, and aliases may be defined in the configuration, so that the exact field search possibilities may be different for you if someone took care of the customisation.

The field syntax also supports a few field-like, but special, criteria:

  • dir for filtering the results on file location (Ex: dir:/home/me/somedir). -dir also works to find results not in the specified directory (release >= 1.15.8). Tilde expansion will be performed as usual (except for a bug in versions 1.19 to 1.19.11p1). Wildcards will be expanded, but please have a look at an important limitation of wildcards in path filters.

    Relative paths also make sense, for example, dir:share/doc would match either /usr/share/doc or /usr/local/share/doc

    Several dir clauses can be specified, both positive and negative. For example the following makes sense:

              dir:recoll dir:src -dir:utils -dir:common
              

    This would select results which have both recoll and src in the path (in any order), and which have not either utils or common.

    You can also use OR conjunctions with dir: clauses.

    A special aspect of dir clauses is that the values in the index are not transcoded to UTF-8, and never lower-cased or unaccented, but stored as binary. This means that you need to enter the values in the exact lower or upper case, and that searches for names with diacritics may sometimes be impossible because of character set conversion issues. Non-ASCII UNIX file paths are an unending source of trouble and are best avoided.

    You need to use double-quotes around the path value if it contains space characters.

  • size for filtering the results on file size. Example: size<10000. You can use <, > or = as operators. You can specify a range like the following: size>100 size<1000. The usual k/K, m/M, g/G, t/T can be used as (decimal) multipliers. Ex: size>1k to search for files bigger than 1000 bytes.

  • date for searching or filtering on dates. The syntax for the argument is based on the ISO8601 standard for dates and time intervals. Only dates are supported, no times. The general syntax is 2 elements separated by a / character. Each element can be a date or a period of time. Periods are specified as PnYnMnD. The n numbers are the respective numbers of years, months or days, any of which may be missing. Dates are specified as YYYY-MM-DD. The days and months parts may be missing. If the / is present but an element is missing, the missing element is interpreted as the lowest or highest date in the index. Examples:

    • 2001-03-01/2002-05-01 the basic syntax for an interval of dates.

    • 2001-03-01/P1Y2M the same specified with a period.

    • 2001/ from the beginning of 2001 to the latest date in the index.

    • 2001 the whole year of 2001

    • P2D/ means 2 days ago up to now if there are no documents with dates in the future.

    • /2003 all documents from 2003 or older.

    Periods can also be specified with small letters (ie: p2y).

  • mime or format for specifying the MIME type. These clauses are processed besides the normal Boolean logic of the search. Multiple values will be OR'ed (instead of the normal AND). You can specify types to be excluded, with the usual -, and use wildcards. Example: mime:text/* -mime:text/plain Specifying an explicit boolean operator before a mime specification is not supported and will produce strange results.

  • type or rclcat for specifying the category (as in text/media/presentation/etc.). The classification of MIME types in categories is defined in the Recoll configuration (mimeconf), and can be modified or extended. The default category names are those which permit filtering results in the main GUI screen. Categories are OR'ed like MIME types above, and can be negated with -.

Note

mime, rclcat, size and date criteria always affect the whole query (they are applied as a final filter), even if set with other terms inside parentheses.

Note

mime (or the equivalent rclcat) is the only field with an OR default. You do need to use OR with ext terms for example.
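
As an illustration combining several of the clauses described above, the following hypothetical query would look for a phrase inside PDF documents located under a given directory and dated from the beginning of 2010 onwards:

        dir:/home/me/projects mime:application/pdf date:2010/ "annual report"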

3.5.1. Range clauses

Recoll 1.24 and later support range clauses on fields which have been configured to support it. No default field uses them currently, so this paragraph is only interesting if you modified the fields configuration and possibly use a custom input handler.

A range clause looks like one of the following:

myfield:small..big
myfield:small..
myfield:..big
        

The nature of the clause is indicated by the two dots .., and the effect is to filter the results for which the myfield value is in the possibly open-ended interval.

See the section about the fields configuration file for the details of configuring a field for range searches (list them in the [values] section).

3.5.2. Modifiers

Some characters are recognized as search modifiers when found immediately after the closing double quote of a phrase, as in "some term"modifierchars. The actual "phrase" can be a single term of course. Supported modifiers:

  • l can be used to turn off stemming (mostly makes sense with p because stemming is off by default for phrases).

  • s can be used to turn off synonym expansion, if a synonyms file is in place (only for Recoll 1.22 and later).

  • o can be used to specify a "slack" for phrase and proximity searches: the number of additional terms that may be found between the specified ones. If o is followed by an integer number, this is the slack, else the default is 10.

  • p can be used to turn the default phrase search into a proximity one (unordered). Example: "order any in"p

  • C will turn on case sensitivity (if the index supports it).

  • D will turn on diacritics sensitivity (if the index supports it).

  • A weight can be specified for a query element by specifying a decimal value at the start of the modifiers. Example: "Important"2.5.
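
As an illustration combining several modifiers from the list above (remember that the weight, if any, must come first), the following hypothetical element gives the clause a weight of 2.0 and turns it into an unordered proximity search with a slack of 5:

        "purchase order"2.0po5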

3.6. Anchored searches and wildcards

Some special characters are interpreted by Recoll in search strings to expand or specialize the search. Wildcards expand a root term in controlled ways. Anchor characters can restrict a search to succeed only if the match is found at or near the beginning of the document or one of its fields.

3.6.1. More about wildcards

All words entered in Recoll search fields will be processed for wildcard expansion before the request is finally executed.

The wildcard characters are:

  • * which matches 0 or more characters.

  • ? which matches a single character.

  • [] which allows defining sets of characters to be matched (ex: [abc] matches a single character which may be 'a' or 'b' or 'c', [0-9] matches any single digit).

You should be aware of a few things when using wildcards.

  • Using a wildcard character at the beginning of a word can make for a slow search because Recoll will have to scan the whole index term list to find the matches. However, this is much less a problem for field searches, and queries like author:*@domain.com can sometimes be very useful.

  • For Recoll version 18 only, when working with a raw index (preserving character case and diacritics), the literal part of a wildcard expression will be matched exactly for case and diacritics. This is not true any more for versions 19 and later.

  • Using a * at the end of a word can produce more matches than you would think, and strange search results. You can use the term explorer tool to check what completions exist for a given term. You can also see exactly what search was performed by clicking on the link at the top of the result list. In general, for natural language terms, stem expansion will produce better results than an ending * (stem expansion is turned off when any wildcard character appears in the term).

Wildcards and path filtering

Due to the way that Recoll processes wildcards inside dir path filtering clauses, they will have a multiplicative effect on the query size. A clause containing wildcards in several path elements, like, for example, dir:/home/me/*/*/docdir, will almost certainly fail if your indexed tree is of any realistic size.

Depending on the case, you may be able to work around the issue by specifying the path elements more narrowly, with a constant prefix, or by using 2 separate dir: clauses instead of multiple wildcards, as in dir:/home/me dir:docdir. The latter query is not equivalent to the initial one because it does not specify a number of directory levels, but that's the best we can do (and it may actually be more useful in some cases).

3.6.2. Anchored searches

Two characters are used to specify that a search hit should occur at the beginning or at the end of the text. ^ at the beginning of a term or phrase constrains the search to happen at the start, $ at the end forces it to happen at the end.

As this function is implemented as a phrase search it is possible to specify a maximum distance at which the hit should occur, either through the controls of the advanced search panel, or using the query language, for example, as in:

"^someterm"o10

which would force someterm to be found within 10 terms of the start of the text. This can be combined with a field search as in somefield:"^someterm"o10 or somefield:someterm$.

This feature can also be used with an actual phrase search, but in this case, the distance applies to the whole phrase and anchor, so that, for example, bla bla my unexpected term at the beginning of the text would be a match for "^my term"o5.

Anchored searches can be very useful for searches inside somewhat structured documents like scientific articles, in case explicit metadata has not been supplied (a most frequent case), for example for looking for matches inside the abstract or the list of authors (which occur at the top of the document).

3.7. Using Synonyms (1.22)

Term synonyms: there are a number of ways to use term synonyms for searching text:

  • At index creation time, they can be used to alter the indexed terms, either increasing or decreasing their number, by expanding the original terms to all synonyms, or by reducing all synonym terms to a canonical one.

  • At query time, they can be used to match texts containing terms which are synonyms of the ones specified by the user, either by expanding the query for all synonyms, or by reducing the user entry to canonical terms (the latter only works if the corresponding processing has been performed while creating the index).

Recoll only uses synonyms at query time. A user query term which is part of a synonym group will be optionally expanded into an OR query for all terms in the group.

Synonym groups are defined inside ordinary text files. Each line in the file defines a group.

Example:

        hi hello "good morning"

        # not sure about "au revoir" though. Is this english ?
        bye goodbye "see you" \
        "au revoir" 
      

As usual, lines beginning with a # are comments, empty lines are ignored, and lines can be continued by ending them with a backslash.

Multi-word synonyms are supported, but be aware that these will generate phrase queries, which may degrade performance and will disable stemming expansion for the phrase terms.

The contents of the synonyms file must be casefolded (not only lowercased), because this is what is expected at the point in the query processing where it is used. There are a few cases where this makes a difference: for example, German sharp s should be expressed as ss, and Greek final sigma as sigma. For reference, Python3 has an easy way to casefold words (str.casefold()).

The synonyms file can be specified in the Search parameters tab of the GUI configuration Preferences menu entry, or as an option for command-line searches.

Once the file is defined, the use of synonyms can be enabled or disabled directly from the Preferences menu.

The synonyms are searched for matches with user terms after the latter are stem-expanded, but the contents of the synonyms file itself are not subjected to stem expansion. This means that a match will not be found if the form present in the synonyms file is not present anywhere in the document set (same with accents when using a raw index).

The synonyms function is probably not going to help you find your letters to Mr. Smith. It is best used for domain-specific searches. For example, it was initially suggested by a user performing searches among historical documents: the synonyms file would contain nicknames and aliases for each of the persons of interest.

3.8. Path translations

In some cases, the document paths stored inside the index do not match the actual ones, so that document previews and accesses will fail. This can occur in a number of circumstances:

  • When using multiple indexes it is a relatively common occurrence that some will actually reside on a remote volume, for example mounted via NFS. In this case, the paths used to access the documents on the local machine are not necessarily the same as the ones used while indexing on the remote machine. For example, /home/me may have been used as a topdirs element while indexing, but the directory might be mounted as /net/server/home/me on the local machine.

  • The case may also occur with removable disks. It is perfectly possible to configure an index to live with the documents on the removable disk, but it may happen that the disk is not mounted at the same place so that the documents paths from the index are invalid.

  • As a last example, one could imagine that a big directory has been moved, but that it is currently inconvenient to run the indexer.

Recoll has a facility for rewriting access paths when extracting the data from the index. The translations can be defined for the main index and for any additional query index.

The path translation facility will be useful whenever the documents paths seen by the indexer are not the same as the ones which should be used at query time.

In the above NFS example, Recoll could be instructed to rewrite any file:///home/me URL from the index to file:///net/server/home/me, allowing accesses from the client.

The translations are defined in the ptrans configuration file, which can be edited by hand or from the GUI external indexes configuration dialog: Preferences → External index dialog, then click the Paths translations button on the right below the index list.
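
As a sketch of what an entry could look like for the NFS example above (the paths here are hypothetical, and the comments inside the ptrans file are the authoritative reference for the syntax), each index gets a section named after its database directory, containing original = translated path pairs:

        [/home/me/.recoll/xapiandb]
        /home/me = /net/server/home/me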

Note

Due to a current bug, the GUI must be restarted after changing the ptrans values (even when they were changed from the GUI).

3.9. Search case and diacritics sensitivity

For Recoll versions 1.18 and later, and when working with a raw index (not the default), searches can be sensitive to character case and diacritics. How this happens is controlled by configuration variables and what search data is entered.

The general default is that searches entered without upper-case or accented characters are insensitive to case and diacritics. An entry of resume will match any of Resume, RESUME, résumé, Résumé etc.

Two configuration variables can automate switching on sensitivity (they were documented but actually did nothing until Recoll 1.22):

autodiacsens

If this is set, search sensitivity to diacritics will be turned on as soon as an accented character exists in a search term. When the variable is set to true, resume will start a diacritics-insensitive search, but résumé will be matched exactly. The default value is false.

autocasesens

If this is set, search sensitivity to character case will be turned on as soon as an upper-case character exists in a search term, except for the first one. When the variable is set to true, us or Us will start a case-insensitive search, but US will be matched exactly. The default value is true (contrary to autodiacsens).

As in the past, capitalizing the first letter of a word will turn off its stem expansion and have no effect on case-sensitivity.

You can also explicitly activate case and diacritics sensitivity by using modifiers with the query language. C will make the term case-sensitive, and D will make it diacritics-sensitive. Examples:

        "us"C
      

will search for the term us exactly (Us will not be a match).

        "resume"D
      

will search for the term resume exactly (résumé will not be a match).

When either case or diacritics sensitivity is activated, stem expansion is turned off. Having both does not make much sense.

3.10. Desktop integration

Being independent of the desktop type has its drawbacks: Recoll desktop integration is minimal. However, a few tools are available, and the following sections describe a few things that may help.

3.10.1. Hotkeying recoll

It is surprisingly convenient to be able to show or hide the Recoll GUI with a single keystroke. Recoll comes with a small Python script, based on the libwnck window manager interface library, which will allow you to do just this. The detailed instructions are on this wiki page.

3.10.2. The KDE Kicker Recoll applet

This is probably obsolete now. Anyway:

The Recoll source tree contains the source code to the recoll_applet, a small application derived from the find_applet. This can be used to add a small Recoll launcher to the KDE panel.

The applet is not automatically built with the main Recoll programs, nor is it included with the main source distribution (because the KDE build boilerplate makes it relatively big). You can download its source from the recoll.org download page. Use the omnipotent configure;make;make install incantation to build and install.

You can then add the applet to the panel by right-clicking the panel and choosing the Add applet entry.

The recoll_applet has a small text window where you can type a Recoll query (in query language form), and an icon which can be used to restrict the search to certain types of files. It is quite primitive, and launches a new recoll GUI instance every time (even if it is already running). You may find it useful anyway.

Chapter 4. Programming interface

Recoll has an Application Programming Interface, usable both for indexing and searching, currently accessible from the Python language.

Another less radical way to extend the application is to write input handlers for new types of documents.

The processing of metadata attributes for documents (fields) is highly configurable.

4.1. Writing a document input handler

Terminology

The small programs or pieces of code which handle the processing of the different document types for Recoll used to be called filters, which is still reflected in the name of the directory which holds them and many configuration variables. They were named this way because one of their primary functions is to filter out the formatting directives and keep the text content. However these modules may have other behaviours, and the term input handler is now progressively substituted in the documentation. filter is still used in many places though.

Recoll input handlers cooperate to translate from the multitude of input document formats, simple ones such as opendocument or acrobat, or compound ones such as Zip or Email, into the final Recoll indexing input format, which is plain text (in many cases the processing pipeline has an intermediary HTML step, which may be used for better previewing presentation). Most input handlers are executable programs or scripts. A few handlers are coded in C++ and live inside recollindex. This latter kind will not be described here.

There are currently (since version 1.13) two kinds of external executable input handlers:

  • Simple exec handlers run once and exit. They can be bare programs like antiword, or scripts using other programs. They are very simple to write, because they just need to print the converted document to the standard output. Their output can be plain text or HTML. HTML is usually preferred because it can store metadata fields and it allows preserving some of the formatting for the GUI preview. However, these handlers have limitations:

    • They can only process one document per file.

    • The output MIME type must be known and fixed.

    • The character encoding, if relevant, must be known and fixed (or possibly just depending on location).

  • Multiple execm handlers can process multiple files (sparing the process startup time which can be very significant), or multiple documents per file (e.g.: for archives or multi-chapter publications). They communicate with the indexer through a simple protocol, but are nevertheless a bit more complicated than the older kind. Most of the new handlers are written in Python (exception: rclimg which is written in Perl because exiftool has no real Python equivalent). The Python handlers use common modules to factor out the boilerplate, which can make them very simple in favorable cases. The subdocuments output by these handlers can be directly indexable (text or HTML), or they can be other simple or compound documents that will need to be processed by another handler.

In both cases, handlers deal with regular file system files, and can process either a single document, or a linear list of documents in each file. Recoll is responsible for performing up-to-date checks, dealing with more complex embedding, and other upper-level issues.

A simple handler returning a document in text/plain format cannot transfer any metadata to the indexer. Generic metadata, like document size or modification date, will be gathered and stored by the indexer.

Handlers that produce text/html format can return an arbitrary amount of metadata inside HTML meta tags. These will be processed according to the directives found in the fields configuration file.

The handlers that can handle multiple documents per file return a single piece of data to identify each document inside the file. This piece of data, called an ipath, will be sent back by Recoll to extract the document at query time, for previewing, or for creating a temporary file to be opened by a viewer. These handlers can also return metadata either as HTML meta tags, or as named data through the communication protocol.

The following section describes the simple handlers, and the next one gives a few explanations about the execm ones. You could conceivably write a simple handler with only the elements in the manual. This will not be the case for the other ones, for which you will have to look at the code.

4.1.1. Simple input handlers

Recoll simple handlers are usually shell-scripts, but this is in no way necessary. Extracting the text from the native format is the difficult part. Outputting the format expected by Recoll is trivial. Happily enough, most document formats have translators or text extractors which can be called from the handler. In some cases the output of the translating program is completely appropriate, and no intermediate shell-script is needed.

Input handlers are called with a single argument which is the source file name. They should output the result to stdout.

When writing a handler, you should decide if it will output plain text or HTML. Plain text is simpler, but you will not be able to add metadata or vary the output character encoding (this will be defined in a configuration file). Additionally, some formatting may be easier to preserve when previewing HTML. Actually the deciding factor is metadata: Recoll has a way to extract metadata from the HTML header and use it for field searches.

The RECOLL_FILTER_FORPREVIEW environment variable (values yes, no) tells the handler if the operation is for indexing or previewing. Some handlers use this to output a slightly different format, for example stripping uninteresting repeated keywords (e.g. Subject: for email) when indexing. This is not essential.

You should look at one of the simple handlers, for example rclps for a starting point.
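
To give an idea of the overall structure, here is a minimal sketch of a simple handler written in Python (purely illustrative, not part of the Recoll distribution): it reads the file named by its single argument and prints an escaped HTML version of its content to the standard output.

        #!/usr/bin/env python3
        # Hypothetical simple input handler: read the source file given as the
        # single command line argument and output an HTML version on stdout.
        import html
        import sys

        def main():
            if len(sys.argv) != 2:
                sys.stderr.write("Usage: rclsample <sourcefile>\n")
                sys.exit(1)
            with open(sys.argv[1], "rb") as f:
                text = f.read().decode("utf-8", errors="replace")
            print('<html><head>')
            print('<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">')
            print('</head><body><pre>')
            # Escape &, < and > as required by the indexer.
            print(html.escape(text))
            print('</pre></body></html>')

        if __name__ == "__main__":
            main()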

Don't forget to make your handler executable before testing!

4.1.2. "Multiple" handlers

If you can program and want to write an execm handler, it should not be too difficult to make sense of one of the existing handlers.

The existing handlers differ in the amount of helper code which they are using:

  • rclimg is written in Perl and handles the execm protocol all by itself (showing how trivial it is).

  • All the Python handlers share at least the rclexecm.py module, which handles the communication. Have a look at, for example, rclzip for a handler which uses rclexecm.py directly.

  • Most Python handlers which process single-document files by executing another command are further abstracted by using the rclexec1.py module. See for example rclrtf.py for a simple one, or rcldoc.py for a slightly more complicated one (possibly executing several commands).

  • Handlers which extract text from an XML document by using an XSLT style sheet are now executed inside recollindex, with only the style sheet stored in the filters/ directory. These can use a single style sheet (e.g. abiword.xsl), or two sheets for the data and metadata (e.g. opendoc-body.xsl and opendoc-meta.xsl). The mimeconf configuration file defines how the sheets are used, have a look. Before the C++ import, the XSLT-based handlers used a common module, rclgenxslt.py; it is still around but unused. The handler for OpenXML presentations is still the Python version because the format did not fit with what the C++ code does. It would be a good base for another similar issue.

There is a sample trivial handler based on rclexecm.py, with many comments, not actually used by Recoll. It would index a text file as one document per line. Look for rcltxtlines.py in the src/filters directory in the online Recoll Git repository (the sample is not in the distributed release at the moment).

You can also have a look at the slightly more complex rclzip which uses Zip file paths as identifiers (ipath).

execm handlers sometimes need to make a choice for the nature of the ipath elements that they use in communication with the indexer. Here are a few guidelines:

  • Use ASCII or UTF-8 (if the identifier is an integer print it, for example, like printf %d would do).

  • If at all possible, the data should make some kind of sense when printed to a log file to help with debugging.

  • Recoll uses a colon (:) as a separator to store a complex path internally (for deeper embedding). Colons inside the ipath elements output by a handler will be escaped, but would be a bad choice as a handler-specific separator (mostly, again, for debugging issues).

In any case, the main goal is that it should be easy for the handler to extract the target document, given the file name and the ipath element.

execm handlers will also produce a document with a null ipath element. Depending on the type of document, this may have some associated data (e.g. the body of an email message), or none (typical for an archive file). If it is empty, this document will be useful anyway for some operations, as the parent of the actual data documents.

4.1.3. Telling Recoll about the handler

There are two elements that link a file to the handler which should process it: the association of file to MIME type and the association of a MIME type with a handler.

The association of files to MIME types is mostly based on name suffixes. The types are defined inside the mimemap file. Example:


            .doc = application/msword
          

If no suffix association is found for the file name, Recoll will try to execute a system command (typically file -i or xdg-mime) to determine a MIME type.

The second element is the association of MIME types to handlers in the mimeconf file. A sample will probably be better than a long explanation:


          [index]
          application/msword = exec antiword -t -i 1 -m UTF-8;\
          mimetype = text/plain ; charset=utf-8

          application/ogg = exec rclogg

          text/rtf = exec unrtf --nopict --html; charset=iso-8859-1; mimetype=text/html

          application/x-chm = execm rclchm
        

The fragment specifies that:

  • application/msword files are processed by executing the antiword program, which outputs text/plain encoded in utf-8.

  • application/ogg files are processed by the rclogg script, with default output type (text/html, with encoding specified in the header, or utf-8 by default).

  • text/rtf is processed by unrtf, which outputs text/html. The iso-8859-1 encoding is specified because it is not the utf-8 default, and not output by unrtf in the HTML header section.

  • application/x-chm is processed by a persistent handler. This is determined by the execm keyword.

4.1.4. Input handler output

Both the simple and persistent input handlers can return any MIME type to Recoll, which will further process the data according to the MIME configuration.

Most input filters produce either text/plain or text/html data. There are exceptions: for example, filters which process archive files (zip, tar, etc.) will usually return the documents as they are found, without processing them further.

There is nothing to say about text/plain output, except that its character encoding should be consistent with what is specified in the mimeconf file.

For filters producing HTML, the output could be very minimal like the following example:

          <html>
          <head>
          <meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
          </head>
          <body>
          Some text content
          </body>
          </html>
        

You should take care to escape some characters inside the text by transforming them into appropriate entities. At the very minimum, "&" should be transformed into "&amp;", "<" should be transformed into "&lt;". This is not always properly done by external helper programs which output HTML, and of course never by those which output plain text.

When encapsulating plain text in an HTML body, the display of a preview may be improved by enclosing the text inside <pre> tags.

The character set needs to be specified in the header. It does not need to be UTF-8 (Recoll will take care of translating it), but it must be accurate for good results.

Recoll will process meta tags inside the header as possible document field candidates. Document fields can be processed by the indexer in different ways, for searching or displaying inside query results. This is described in a following section.

By default, the indexer will process the standard header fields if they are present: title, meta/description, and meta/keywords are both indexed and stored for query-time display.

A predefined non-standard meta tag will also be processed by Recoll without further configuration: if a date tag is present and has the right format, it will be used as the document date (for display and sorting), in preference to the file modification date. The date format should be as follows:

          <meta name="date" content="YYYY-mm-dd HH:MM:SS">
          or
          <meta name="date" content="YYYY-mm-ddTHH:MM:SS">
        

Example:

          <meta name="date" content="2013-02-24 17:50:00">
        

Input handlers also have the possibility to "invent" field names. These should also be output as meta tags:

          <meta name="somefield" content="Some textual data" />
        

You can embed HTML markup inside the content of custom fields, for improving the display inside result lists. In this case, add a (wildly non-standard) markup attribute to tell Recoll that the value is HTML and should not be escaped for display.

          <meta name="somefield" markup="html" content="Some <i>textual</i> data" />
        

As written above, the processing of fields is described in a further section.

Persistent filters can use another, probably simpler, method to produce metadata, by calling the setfield() helper method. This avoids the necessity to produce HTML, and any issue with HTML quoting. See, for example, rclaudio in Recoll 1.23 and later for an example of a handler which outputs text/plain and uses setfield() to produce metadata.

4.1.5. Page numbers

The indexer will interpret ^L characters in the handler output as indicating page breaks, and will record them. At query time, this allows starting a viewer on the right page for a hit or a snippet. Currently, only the PDF, Postscript and DVI handlers generate page breaks.

4.2. Field data processing

Fields are named pieces of information in or about documents, like title, author, abstract.

The field values for documents can appear in several ways during indexing: either output by input handlers as meta fields in the HTML header section, or extracted from file extended attributes, or added as attributes of the Doc object when using the API, or again synthesized internally by Recoll.

The Recoll query language allows searching for text in a specific field.

Recoll defines a number of default fields. Additional ones can be output by handlers, and described in the fields configuration file.

Fields can be:

  • indexed, meaning that their terms are separately stored in inverted lists (with a specific prefix), and that a field-specific search is possible.

  • stored, meaning that their value is recorded in the index data record for the document, and can be returned and displayed with search results.

A field can be either or both indexed and stored. This and other aspects of fields handling is defined inside the fields configuration file.

Some fields may also be designated as supporting range queries, meaning that results can be selected based on an interval of the field's values. See the configuration section for more details.

The sequence of events for field processing is as follows:

  • During indexing, recollindex scans all meta fields in HTML documents (most document types are transformed into HTML at some point). It compares the name of each element to the configuration defining what should be done with fields (the fields file).

  • If the name for the meta element matches one for a field that should be indexed, the contents are processed and the terms are entered into the index with the prefix defined in the fields file.

  • If the name for the meta element matches one for a field that should be stored, the content of the element is stored with the document data record, from which it can be extracted and displayed at query time.

  • At query time, if a field search is performed, the index prefix is computed and the match is only performed against appropriately prefixed terms in the index.

  • At query time, the field can be displayed inside the result list by using the appropriate directive in the definition of the result list paragraph format. All fields are displayed on the fields screen of the preview window (which you can reach through the right-click menu). This is independent of whether the search which produced the results used the field or not.

You can find more information in the section about the fields file, or in comments inside the file.

You can also have a look at the example in the FAQs area, detailing how one could add a page count field to pdf documents for displaying inside result lists.
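
As a very rough sketch of what this involves (the field name and prefix here are hypothetical, and the comments inside the fields file are the reference for the exact syntax), making such a pagecount field both searchable and displayable could use entries like the following, where XPGC is just an arbitrary Xapian term prefix chosen for the example:

        [prefixes]
        pagecount = XPGC

        [stored]
        pagecount =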

4.3. Python API

4.3.1. Introduction

The Recoll Python programming interface can be used both for searching and for creating/updating an index. Bindings exist for Python2 and Python3.

The search interface is used in a number of active projects: the Recoll Gnome Shell Search Provider, the Recoll Web UI, and the upmpdcli UPnP Media Server, in addition to many small scripts.

The index update section of the API may be used to create and update Recoll indexes on specific configurations (separate from the ones created by recollindex). The resulting databases can be queried alone, or in conjunction with regular ones, through the GUI or any of the query interfaces.

The search API is modeled along the Python database API version 2.0 specification (early versions used the version 1.0 spec).

The recoll package contains two modules:

  • The recoll module contains functions and classes used to query (or update) the index.

  • The rclextract module contains functions and classes used at query time to access document data. The recoll module must be imported before rclextract.

There is a good chance that your system repository has packages for the Recoll Python API, sometimes in a package separate from the main one (maybe named something like python-recoll). Else refer to the Building from source chapter.

As an introduction, the following small sample will run a query and list the title and url for each of the results. The python/samples source directory contains several examples of Python programming with Recoll, exercising the extension more completely, and especially its data extraction features.

        #!/usr/bin/env python

        from recoll import recoll

        db = recoll.connect()
        query = db.query()
        nres = query.execute("some query")
        results = query.fetchmany(20)
        for doc in results:
            print("%s %s" % (doc.url, doc.title))
        

You can also take a look at the source for the Recoll WebUI, the upmpdcli local media server, or the Gnome Shell Search Provider.

4.3.2. Interface elements

A few elements in the interface are specific to Recoll and need an explanation.

ipath

This data value (set as a field in the Doc object) is stored, along with the URL, but not indexed by Recoll. Its contents are not interpreted by the index layer, and its use is up to the application. For example, the Recoll file system indexer uses the ipath to store the part of the document access path internal to (possibly nested) container documents. ipath in this case is a vector of access elements (e.g., the first part could be a path inside a zip file to an archive member which happens to be an mbox file, the second element would be the message sequential number inside the mbox, etc.). url and ipath are returned in every search result and define the access to the original document. ipath is empty for top-level documents/files (e.g. a PDF document which is a filesystem file). The Recoll GUI knows about the structure of the ipath values used by the filesystem indexer, and uses it for such functions as opening the parent of a given document.

udi

A udi (unique document identifier) identifies a document. Because of limitations inside the index engine, it is restricted in length (to 200 bytes), which is why a regular URI cannot be used. The structure and contents of the udi are defined by the application and are opaque to the index engine. For example, the internal file system indexer uses the complete document path (file path + internal path), truncated to the maximum length if needed, the suppressed part being replaced by a hash value. The udi is not explicit in the query interface (it is used "under the hood" by the rclextract module), but it is an explicit element of the update interface.

parent_udi

If this attribute is set on a document when entering it in the index, it designates its physical container document. In a multilevel hierarchy, this may not be the immediate parent. parent_udi is optional, but its use by an indexer may simplify index maintenance, as Recoll will automatically delete all children defined by parent_udi == udi when the document designated by udi is destroyed. For example, if a Zip archive contains entries which are themselves containers, like mbox files, all the subdocuments inside the Zip file (mbox, messages, message attachments, etc.) would have the same parent_udi, matching the udi for the Zip file, and all would be destroyed when the Zip file (identified by its udi) is removed from the index. The standard filesystem indexer uses parent_udi.

Stored and indexed fields

The fields file inside the Recoll configuration defines which document fields are either indexed (searchable), stored (retrievable with search results), or both. Apart from a few standard/internal fields, only the stored fields are retrievable through the Python search interface.

4.3.3. Log messages for Python scripts

Two specific configuration variables, pyloglevel and pylogfilename, allow overriding the generic log values for Python programs. Set pyloglevel to 2 to suppress the default startup messages (printed at level 3).
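
For example, in the index configuration file (the log file path is just an illustration):

        pyloglevel = 2
        pylogfilename = /tmp/recoll-python.log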

4.3.4. Python search interface

The recoll module

connect(confdir=None, extra_dbs=None, writable = False)

The connect() function connects to one or several Recoll index(es) and returns a Db object.

This call initializes the recoll module, and it should always be performed before any other call or object creation.

  • confdir may specify a configuration directory. The usual defaults apply.

  • extra_dbs is a list of additional indexes (Xapian directories).

  • writable decides if we can index new data through this connection.
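
As a minimal sketch, opening the default configuration together with an additional index could look as follows (the extra index path is a hypothetical example):

from recoll import recoll

# Connect to the default index (~/.recoll) plus one extra Xapian directory,
# then obtain a Query object.
db = recoll.connect(extra_dbs=["/some/other/confdir/xapiandb"])
query = db.query()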

The Db class

A Db object is created by a connect() call and holds a connection to a Recoll index.

Db.close()

Closes the connection. You can't do anything with the Db object after this.

Db.query(), Db.cursor()

These aliases return a blank Query object for this index.

Db.setAbstractParams(maxchars, contextwords)

Set the parameters used to build snippets (sets of keywords in context text fragments). maxchars defines the maximum total size of the abstract. contextwords defines how many terms are shown around the keyword.

Db.termMatch(match_type, expr, field='', maxlen=-1, casesens=False, diacsens=False, lang='english')

Expand an expression against the index term list. Performs the basic function from the GUI term explorer tool. match_type can be either of wildcard, regexp or stem. Returns a list of terms expanded from the input expression.
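
For example, a minimal sketch expanding a wildcard expression, much as the GUI term explorer does (the expression is arbitrary):

from recoll import recoll

db = recoll.connect()
# Expand a wildcard expression against the index term list and print
# each expansion.
for term in db.termMatch("wildcard", "recol*"):
    print(term)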

The Query class

A Query object (equivalent to a cursor in the Python DB API) is created by a Db.query() call. It is used to execute index searches.

Query.sortby(fieldname, ascending=True)

Sort results by fieldname, in ascending or descending order. Must be called before executing the search.

Query.execute(query_string, stemming=1, stemlang="english", fetchtext=False, collapseduplicates=False)

Starts a search for query_string, a Recoll search language string. If the index stores the document texts and fetchtext is True, store the document extracted text in doc.text.

Query.executesd(SearchData, fetchtext=False, collapseduplicates=False)

Starts a search for the query defined by the SearchData object. If the index stores the document texts and fetchtext is True, store the document extracted text in doc.text.

Query.fetchmany(size=query.arraysize)

Fetches the next Doc objects in the current search results, and returns them as an array of the required size, which is by default the value of the arraysize data member.

Query.fetchone()

Fetches the next Doc object from the current search results. Raises a StopIteration exception if there are no results left.

Query.close()

Closes the query. The object is unusable after the call.

Query.scroll(value, mode='relative')

Adjusts the position in the current result set. mode can be relative or absolute.
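
A small sketch of paging through results with scroll() and fetchmany() (the query string and page geometry are arbitrary):

from recoll import recoll

db = recoll.connect()
query = db.query()
query.execute("some query")

# Jump to an absolute position in the result set, then fetch one page
# of results from there.
query.scroll(40, mode='absolute')
for doc in query.fetchmany(20):
    print(doc.url)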

Query.getgroups()

Retrieves the expanded query terms as a list of pairs. Meaningful only after executexx. In each pair, the first entry is a list of user terms (of size one for simple terms, or more for group and phrase clauses), the second a list of query terms as derived from the user terms and used in the Xapian Query.

Query.getxquery()

Return the Xapian query description as a Unicode string. Meaningful only after executexx.

Query.highlight(text, ishtml = 0, methods = object)

Will insert <span class="rclmatch"> and </span> tags around the match areas in the input text and return the modified text. ishtml can be set to indicate that the input text is HTML and that HTML special characters should not be escaped. methods, if set, should be an object with methods startMatch(i) and endMatch() which will be called for each match and should return a begin and end tag.
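
A minimal sketch of a custom highlighter (the class name, query string and tags are arbitrary):

from recoll import recoll

class BoldTags:
    # Called for each match area; must return the begin and end tags.
    def startMatch(self, idx):
        return "<b>"
    def endMatch(self):
        return "</b>"

db = recoll.connect()
query = db.query()
query.execute("some query")
highlighted = query.highlight("some result text to mark up",
                              ishtml=0, methods=BoldTags())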

Query.makedocabstract(doc, methods = object)

Create a snippets abstract for doc (a Doc object) by selecting text around the match terms. If methods is set, will also perform highlighting. See the highlight method.

Query.__iter__() and Query.next()

So that things like for doc in query: will work.

Query.arraysize

Default number of records processed by fetchmany (r/w).

Query.rowcount

Number of records returned by the last execute.

Query.rownumber

Next index to be fetched from results. Normally increments after each fetchone() call, but can be set/reset before the call to effect seeking (equivalent to using scroll()). Starts at 0.

The Doc class

A Doc object contains index data for a given document. The data is extracted from the index when searching, or set by the indexer program when updating. The Doc object has many attributes to be read or set by its user. It mostly matches the Rcl::Doc C++ object. Some of the attributes are predefined, but, especially when indexing, others can be set, the name of which will be processed as field names by the indexing configuration. Inputs can be specified as Unicode or strings. Outputs are Unicode objects. All dates are specified as Unix timestamps, printed as strings. Please refer to the rcldb/rcldoc.cpp C++ file for a full description of the predefined attributes. Here follows a short list.

  • url the document URL but see also getbinurl()

  • ipath the document ipath for embedded documents.

  • fbytes, dbytes the document file and text sizes.

  • fmtime, dmtime the document file and document times.

  • xdocid the document Xapian document ID. This is useful if you want to access the document through a direct Xapian operation.

  • mtype the document MIME type.

  • Fields stored by default: author, filename, keywords, recipient

At query time, only the fields that are defined as stored, either by default or in the fields configuration file, will be meaningful in the Doc object. The document processed text may be present or not, depending on whether the index stores the text at all, and, if it does, on the fetchtext query execute option. See also the rclextract module for accessing document contents.

get(key), [] operator

Retrieve the named document attribute. You can also use getattr(doc, key) or doc.key.

doc.key = value

Set the named document attribute. You can also use setattr(doc, key, value).

getbinurl()

Retrieve the URL in byte array format (no transcoding), for use as parameter to a system call.

setbinurl(url)

Set the URL in byte array format (no transcoding).

items()

Return a dictionary of doc object keys/values.

keys()

Return a list of doc object keys (attribute names).

The SearchData class

A SearchData object allows building a query by combining clauses, for execution by Query.executesd(). It can be used in replacement of the query language approach. The interface is going to change a little, so no detailed doc for now...

addclause(type='and'|'or'|'excl'|'phrase'|'near'|'sub', qstring=string, slack=0, field='', stemming=1, subSearch=SearchData)

The rclextract module

Prior to Recoll 1.25, index queries could not provide document content because it was never stored. Recoll 1.25 and later usually store the document text, which can be optionally retrieved when running a query (see query.execute() above - the result is always plain text).

The rclextract module gives access to the original document and to its text content (useful if the text is not stored in the index, or to obtain an HTML version of the text). Accessing the original document is particularly useful if it is embedded (e.g. an email attachment).

You need to import the recoll module before the rclextract module.

The Extractor class
Extractor(doc)

An Extractor object is built from a Doc object, output from a query.

Extractor.textextract(ipath)

Extract document defined by ipath and return a Doc object. The doc.text field has the document text converted to either text/plain or text/html according to doc.mimetype. The typical use would be as follows:

from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
doc = extractor.textextract(qdoc.ipath)
# use doc.text, e.g. for previewing

Passing qdoc.ipath to textextract() is redundant, but reflects the fact that the Extractor object actually has the capability to access the other entries in a compound document.

Extractor.idoctofile(ipath, targetmtype, outfile='')

Extracts document into an output file, which can be given explicitly or will be created as a temporary file to be deleted by the caller. Typical use:

from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
filename = extractor.idoctofile(qdoc.ipath, qdoc.mimetype)

In all cases the output is a copy, even if the requested document is a regular system file, which may be wasteful in some cases. If you want to avoid this, you can test for a simple file document as follows:

not doc.ipath and (not "rclbes" in doc.keys() or doc["rclbes"] == "FS")
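
A minimal sketch using this test to avoid the extra copy when possible (the file:// prefix stripping assumes a plain filesystem URL):

from recoll import recoll, rclextract

db = recoll.connect()
query = db.query()
query.execute("some query")
qdoc = query.fetchone()

if not qdoc.ipath and ("rclbes" not in qdoc.keys() or qdoc["rclbes"] == "FS"):
    # Plain filesystem document: use the original file directly.
    filename = qdoc.url.replace("file://", "", 1)
else:
    # Embedded or non-filesystem document: extract a copy.
    extractor = rclextract.Extractor(qdoc)
    filename = extractor.idoctofile(qdoc.ipath, qdoc.mimetype)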

Search API usage example

The following sample would query the index with a user language string. See the python/samples directory inside the Recoll source for other examples. The recollgui subdirectory has a very embryonic GUI which demonstrates the highlighting and data extraction functions.

#!/usr/bin/env python

from recoll import recoll

db = recoll.connect()
db.setAbstractParams(maxchars=80, contextwords=4)

query = db.query()
nres = query.execute("some user question")
print "Result count: ", nres
if nres > 5:
    nres = 5
for i in range(nres):
    doc = query.fetchone()
    print "Result #%d" % (query.rownumber,)
    for k in ("title", "size"):
        print k, ":", getattr(doc, k).encode('utf-8')
    abs = db.makeDocAbstract(doc, query).encode('utf-8')
    print abs
    print

4.3.5. Creating Python external indexers

The update API can be used to create an index from data which is not accessible to the regular Recoll indexer, or structured to present difficulties to the Recoll input handlers.

An indexer created using this API will have work to do equivalent to that of the Recoll file system indexer: look for modified documents, extract their text, call the API for indexing it, and take care of purging the index of data from documents which no longer exist in the document store.

The data for such an external indexer should be stored in an index separate from any used by the Recoll internal file system indexer. The reason is that the main document indexer purge pass (removal of deleted documents) would also remove all the documents belonging to the external indexer, as they were not seen during the filesystem walk. The main indexer documents would also probably be a problem for the external indexer's own purge operation.

While there would be ways to enable multiple foreign indexers to cooperate on a single index, it is just simpler to use separate ones, and use the multiple index access capabilities of the query interface, if needed.

There are two parts in the update interface:

  • Methods inside the recoll module allow inserting data into the index, to make it accessible by the normal query interface.

  • An interface based on scripts execution is defined to allow either the GUI or the rclextract module to access original document data for previewing or editing.

Python update interface

The update methods are part of the recoll module described above. The connect() function is used with a writable=True parameter to obtain a writable Db object. The following Db object methods are then available.

addOrUpdate(udi, doc, parent_udi=None)

Add or update index data for a given document. The udi string must define a unique id for the document. It is an opaque interface element and not interpreted inside Recoll. doc is a Doc object, created from the data to be indexed (the main text should be in doc.text). If parent_udi is set, this is a unique identifier for the top-level container (e.g. for the filesystem indexer, this would be the one which is an actual file).

delete(udi)

Purge the index of all data for udi, and all documents (if any) which have a matching parent_udi.

needUpdate(udi, sig)

Test if the index needs to be updated for the document identified by udi. If this call is to be used, the doc.sig field should contain a signature value when calling addOrUpdate(). The needUpdate() call then compares its parameter value with the stored sig for udi. sig is an opaque value, compared as a string.

The filesystem indexer uses a concatenation of the decimal string values for file size and update time, but a hash of the contents could also be used.

As a side effect, if the return value is false (the index is up to date), the call will set the existence flag for the document (and any subdocument defined by its parent_udi), so that a later purge() call will preserve them.

The use of needUpdate() and purge() is optional, and the indexer may use another method for checking the need to reindex or to delete stale entries.

purge()

Delete all documents that were not touched during the just finished indexing pass (since open-for-write). These are the documents for which the needUpdate() call was not performed, indicating that they no longer exist in the primary storage system.
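
Putting these calls together, a skeleton external indexer might look as follows. This is only a sketch: my_document_source() and its item attributes are hypothetical, the configuration path is an example, and it assumes that a Doc object can be instantiated directly from the recoll module, as the samples in python/samples do.

from recoll import recoll

# Use a configuration directory dedicated to this external index.
db = recoll.connect(confdir="/path/to/external/recollconf", writable=True)

for item in my_document_source():       # hypothetical data source
    udi = item.unique_id                # must stay under the udi length limit
    sig = str(item.modification_time)   # any opaque string works as a signature
    if not db.needUpdate(udi, sig):
        continue                        # up to date; existence flag is now set
    doc = recoll.Doc()
    doc.url = item.url
    doc.mimetype = "text/plain"
    doc.text = item.text
    doc.sig = sig
    db.addOrUpdate(udi, doc)

# Remove index data for documents which were not seen during this pass.
db.purge()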

Query data access for external indexers (1.23)

Recoll has internal methods to access document data for its internal (filesystem) indexer. An external indexer needs to provide data access methods if it needs integration with the GUI (e.g. preview function), or support for the rclextract module.

The index data and the access method are linked by the rclbes (recoll backend storage) Doc field. You should set this to a short string value identifying your indexer (e.g. the filesystem indexer uses either "FS" or an empty value, the Web history indexer uses "BGL").

The link is actually performed inside a backends configuration file (stored in the configuration directory). This defines commands to execute to access data from the specified indexer. Example, for the mbox indexing sample found in the Recoll source (which sets rclbes="MBOX"):

[MBOX]
          fetch = /path/to/recoll/src/python/samples/rclmbox.py fetch
          makesig = /path/to/recoll/src/python/samples/rclmbox.py makesig
          

fetch and makesig define two commands to execute to respectively retrieve the document text and compute the document signature (the example implementation uses the same script with different first parameters to perform both operations).

The scripts are called with three additional arguments: udi, url, ipath, stored with the document when it was indexed, and may use any or all to perform the requested operation. The caller expects the result data on stdout.
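
A skeleton for such a data access script might look as follows. This is a sketch under the assumptions above: the operation name ("fetch" or "makesig") arrives as the first argument because it is part of the configured command line, then udi, url and ipath follow; the retrieval logic itself is just a placeholder.

#!/usr/bin/env python

import sys

def fetch(udi, url, ipath):
    # Retrieve and return the document data for this udi/url/ipath
    # (implementation-specific placeholder).
    return "document content"

def makesig(udi, url, ipath):
    # Recompute and return the signature, consistent with what was
    # stored at indexing time (placeholder).
    return "signature"

op, udi, url, ipath = sys.argv[1:5]
if op == "fetch":
    sys.stdout.write(fetch(udi, url, ipath))
elif op == "makesig":
    sys.stdout.write(makesig(udi, url, ipath))
else:
    sys.exit(1)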

External indexer samples

The Recoll source tree has two samples of external indexers in the src/python/samples directory. The more interesting one is rclmbox.py which indexes a directory containing mbox folder files. It exercises most features in the update interface, and has a data access interface.

See the comments inside the file for more information.

4.3.6. Package compatibility with the previous version

The following code fragments can be used to ensure that code can run with both the old and the new API (as long as it does not use the new abilities of the new API of course).

Adapting to the new package structure:

          
        try:
            from recoll import recoll
            from recoll import rclextract
            hasextract = True
        except:
            import recoll
            hasextract = False
          
        

Adapting to the change of nature of the next Query member. The same test can be used to choose to use the scroll() method (new) or set the next value (old).

          
        rownum = query.next if type(query.next) == int else query.rownumber
          
        

Chapter 5. Installation and configuration

5.1. Installing a binary copy

Recoll binary copies are always distributed as regular packages for your system. They can be obtained either through the system's normal software distribution framework (e.g. Debian/Ubuntu apt, FreeBSD ports, etc.), from some type of "backports" repository providing versions newer than the standard ones, or, in some cases, from the Recoll WEB site. The most up-to-date information about Recoll packages can usually be found on the Recoll WEB site downloads page.

The Windows version of Recoll comes in a self-contained setup file, there is nothing else to install.

On Unix-like systems, the package management tools will automatically install hard dependencies for packages obtained from a proper package repository. You will have to deal with them by hand for downloaded packages (for example, when dpkg complains about missing dependencies).

In all cases, you will have to check or install supporting applications for the file types that you want to index beyond those that are natively processed by Recoll (text, HTML, email files, and a few others).

You may also want to have a look at the configuration section (but this may not be necessary for a quick test with default parameters). Most parameters can be more conveniently set from the GUI interface.

5.2. Supporting packages

Note

The Windows installation of Recoll is self-contained. Windows users can skip this section.

Recoll uses external applications to index some file types. You need to install them for the file types that you wish to have indexed (these are optional run-time dependencies: none is needed for building or running Recoll itself, only for indexing the corresponding file types).

After an indexing pass, the commands that were found missing can be displayed from the recoll File menu. The list is stored in the missing text file inside the configuration directory.

The past has proven that I was unable to maintain an up to date application list in this manual. Please check http://www.recoll.org/features.html for a complete list along with links to the home pages or best source/patches pages, and misc tips. What follows is only a very short extract of the stable essentials.

  • PDF files need pdftotext which is part of Poppler (usually comes with the poppler-utils package). Avoid the original one from Xpdf.

  • MS Word documents need antiword. It is also useful to have wvWare installed as it may be used as a fallback for some files which antiword does not handle.

  • RTF files need unrtf, which, in its older versions, has much trouble with non-western character sets. Many Linux distributions carry outdated unrtf versions. Check http://www.recoll.org/features.html for details.

  • Pictures: Recoll uses the Exiftool Perl package to extract tag information. Most image file formats are supported.

  • Up to Recoll 1.24, many XML-based formats need the xsltproc command, which usually comes with libxslt. These are: abiword, fb2 ebooks, kword, openoffice, opendocument, svg. Recoll 1.25 and later process them internally (using libxslt).

5.3. Building from source

5.3.1. Prerequisites

The following prerequisites are described in broad terms and not as specific package names (which will depend on the exact platform). The dependencies should be available as packages on most common Unix derivatives, and it should be quite uncommon that you would have to build one of them.

If you do not need the GUI, you can avoid all GUI dependencies by disabling its build (see the configure section further down).

The shopping list:

  • If you start from git code, you will need the autoconf, automake and libtool triad. They are not needed for building from tar distributions.

  • C++ compiler. Recent versions require C++11 compatibility (1.23 and later).

  • bison command (for Recoll 1.21 and later).

  • For building the documentation: the xsltproc command, and the Docbook XML and style sheet files. You can avoid this dependency by disabling documentation building with the --disable-userdoc configure option.

  • Development files for Xapian core.

    Important

    If you are building Xapian for an older CPU (before Pentium 4 or Athlon 64), you need to add the --disable-sse flag to the configure command. Otherwise all Xapian applications will crash with an illegal instruction error.

  • Development files for Qt 5 and its own dependencies (X11, etc.).

  • Development files for libxslt

  • Development files for zlib.

  • Development files for Python (or use --disable-python-module).

  • Development files for libchm

  • You may also need libiconv. On Linux systems, the iconv interface is part of libc and you should not need to do anything special.

Check the Recoll download page for up to date version information.

5.3.2. Building

Recoll has been built on Linux, FreeBSD, Mac OS X, and Solaris, most versions after 2005 should be ok, maybe some older ones too (Solaris 8 used to be ok). If you build on another system, and need to modify things, I would very much welcome patches.

Configure options: 

  • --without-aspell will disable the code for phonetic matching of search terms.

  • --with-fam or --with-inotify will enable the code for real time indexing. Inotify support is enabled by default on Linux systems.

  • --with-qzeitgeist will enable sending Zeitgeist events about the visited search results, and needs the qzeitgeist package.

  • --disable-webkit is available from version 1.17 to implement the result list with a Qt QTextBrowser instead of a WebKit widget if you do not want to, or cannot, depend on the latter.

  • --disable-qtgui Disable the Qt interface. Will allow building the indexer and the command line search program in the absence of a Qt environment.

  • --enable-webengine Enable the use of Qt Webengine (only meaningful if the Qt GUI is enabled), in place of Qt Webkit.

  • --disable-idxthreads is available from version 1.19 to suppress multithreading inside the indexing process. You can also use the run-time configuration to restrict recollindex to using a single thread, but the compile-time option may disable a few more unused locks. This only applies to the use of multithreading for the core index processing (data input). The Recoll monitor mode always uses at least two threads of execution.

  • --disable-python-module will avoid building the Python module.

  • --disable-python-chm will avoid building the Python libchm interface used to index CHM files.

  • --disable-xattr will prevent fetching data from file extended attributes. Beyond a few standard attributes, fetching extended attributes data can only be useful if some application stores data in them, and also needs some simple configuration (see comments in the fields configuration file).

  • --enable-camelcase will enable splitting camelCase words. This is not enabled by default as it has the unfortunate side-effect of making some phrase searches quite confusing: e.g., "MySQL manual" would be matched by "MySQL manual" and "my sql manual" but not "mysql manual" (only inside phrase searches).

  • --with-file-command Specify the version of the 'file' command to use (e.g.: --with-file-command=/usr/local/bin/file). Can be useful to use the GNU version on systems where the native one is bad.

  • --disable-x11mon Disable X11 connection monitoring inside recollindex. Together with --disable-qtgui, this allows building recoll without Qt and X11.

  • --disable-userdoc will avoid building the user manual. This avoids having to install the Docbook XML/XSL files and the TeX toolchain used for translating the manual to PDF.

  • --enable-recollq Enable building the recollq command line query tool (recoll -t without need for Qt). This is done by default if --disable-qtgui is set but this option enables forcing it.

  • --disable-pic (Recoll versions up to 1.21 only) will compile Recoll with position-dependent code. This is incompatible with building the KIO or the Python or PHP extensions, but might yield very marginally faster code.

  • Of course the usual autoconf configure options, like --prefix apply.

Normal procedure (for source extracted from a tar distribution):

          cd recoll-xxx
          ./configure
          make
          (practices usual hardship-repelling invocations)
        

When building from source cloned from the git repository, you also need to install autoconf, automake, and libtool and you must execute sh autogen.sh in the top source directory before running configure.

5.3.3. Installing

Use make install in the root of the source tree. This will copy the commands to prefix/bin and the sample configuration files, scripts and other shared data to prefix/share/recoll.

5.3.4. Python API package

The Python interface can be found in the source tree, under the python/recoll directory.

As of Recoll 1.19, the module can be compiled for Python3.

The normal Recoll build procedure (see above) installs the API package for the default system version (python) along with the main code. The package for other Python versions (e.g. python3 if the system default is python2) must be explicitly built and installed.

The python/recoll/ directory contains the usual setup.py. After configuring and building the main Recoll code, you can use the script to build and install the Python module:

          cd recoll-xxx/python/recoll
          pythonX setup.py build
          sudo pythonX setup.py install
        

5.3.5. Building on Solaris

We did not test building the GUI on Solaris for recent versions. You will need at least Qt 4.4. There are some hints on an old web site page; they may still be valid.

Someone did test the 1.19 indexer and Python module build, they do work, with a few minor glitches. Be sure to use GNU make and install.

5.4. Configuration overview

Most of the parameters specific to the recoll GUI are set through the Preferences menu and stored in the standard Qt place ($HOME/.config/Recoll.org/recoll.conf). You probably do not want to edit this by hand.

Recoll indexing options are set inside text configuration files located in a configuration directory. There can be several such directories, each of which defines the parameters for one index.

The configuration files can be edited by hand or through the Index configuration dialog (Preferences menu). The GUI tool will try to respect your formatting and comments as much as possible, so it is quite possible to use both approaches on the same configuration.

The most accurate documentation for the configuration parameters is given by comments inside the default files, and we will just give a general overview here.

For each index, there are at least two sets of configuration files. System-wide configuration files are kept in a directory named like /usr/share/recoll/examples, and define default values, shared by all indexes. For each index, a parallel set of files defines the customized parameters.

The default location of the customized configuration is the .recoll directory in your home. Most people will only use this directory.

This location can be changed, or others can be added with the RECOLL_CONFDIR environment variable or the -c option parameter to recoll and recollindex.

In addition (as of Recoll version 1.19.7), it is possible to specify two additional configuration directories which will be stacked before and after the user configuration directory. These are defined by the RECOLL_CONFTOP and RECOLL_CONFMID environment variables. Values from configuration files inside the top directory will override user ones, values from configuration files inside the middle directory will override system ones and be overridden by user ones. These two variables may be of use to applications which augment Recoll functionality, and need to add configuration data without disturbing the user's files. Please note that the two, currently single, values will probably be interpreted as colon-separated lists in the future: do not use colon characters inside the directory paths.

If the .recoll directory does not exist when recoll or recollindex are started, it will be created with a set of empty configuration files. recoll will give you a chance to edit the configuration file before starting indexing. recollindex will proceed immediately. To avoid mistakes, the automatic directory creation will only occur for the default location, not if -c or RECOLL_CONFDIR were used (in the latter cases, you will have to create the directory).

All configuration files share the same format. For example, a short extract of the main configuration file might look as follows:

        # Space-separated list of files and directories to index.
        topdirs =  ~/docs /usr/share/doc

        [~/somedirectory-with-utf8-txt-files]
        defaultcharset = utf-8
      

There are three kinds of lines:

  • Comment (starts with #) or empty.

  • Parameter affectation (name = value).

  • Section definition ([somedirname]).

Long lines can be broken by ending each incomplete part with a backslash (\).

Depending on the type of configuration file, section definitions either separate groups of parameters or allow redefining some parameters for a directory sub-tree. They stay in effect until another section definition, or the end of file, is encountered. Some of the parameters used for indexing are looked up hierarchically from the current directory location upwards. Not all parameters can be meaningfully redefined, this is specified for each in the next section.

Important

Global parameters must not be defined in a directory subsection, else they will not be found at all by the Recoll code, which looks for them at the top level (e.g. skippedPaths).

When found at the beginning of a file path, the tilde character (~) is expanded to the name of the user's home directory, as a shell would do.

Some parameters are lists of strings. White space is used for separation. List elements with embedded spaces can be quoted using double-quotes. Double quotes inside these elements can be escaped with a backslash.

No value inside a configuration file can contain a newline character. Long lines can be continued by escaping the physical newline with backslash, even inside quoted strings.

        astringlist =  "some string \
        with spaces"
        thesame = "some string with spaces"        
      

Parameters which are not part of string lists can't be quoted, and leading and trailing space characters are stripped before the value is used.

Encoding issues. Most of the configuration parameters are plain ASCII. Two particular sets of values may cause encoding issues:

  • File path parameters may contain non-ascii characters and should use the exact same byte values as found in the file system directory. Usually, this means that the configuration file should use the system default locale encoding.

  • The unac_except_trans parameter should be encoded in UTF-8. If your system locale is not UTF-8, and you need to also specify non-ascii file paths, this poses a difficulty because common text editors cannot handle multiple encodings in a single file. In this relatively unlikely case, you can edit the configuration file as two separate text files with appropriate encodings, and concatenate them to create the complete configuration.

5.4.1. Environment variables

RECOLL_CONFDIR

Defines the main configuration directory.

RECOLL_TMPDIR, TMPDIR

Locations for temporary files, in this order of priority. The default if none of these is set is to use /tmp. Big temporary files may be created during indexing, mostly for decompressing, and also for processing, e.g. email attachments.

RECOLL_CONFTOP, RECOLL_CONFMID

Allow adding configuration directories with priorities below and above the user directory (see above the Configuration overview section for details).

RECOLL_EXTRA_DBS, RECOLL_ACTIVE_EXTRA_DBS

Help for setting up external indexes. See this paragraph for explanations.

RECOLL_DATADIR

Defines a replacement for the default location of Recoll data files, normally found in, e.g., /usr/share/recoll.

RECOLL_FILTERSDIR

Defines a replacement for the default location of Recoll filters, normally found in, e.g., /usr/share/recoll/filters.

ASPELL_PROG

The aspell program to use for creating the spelling dictionary. The result has to be compatible with the libaspell which Recoll is using.

5.4.2. Recoll main configuration file, recoll.conf

Parameters affecting what documents we index

topdirs

Space-separated list of files or directories to recursively index. Defaults to ~ (indexes $HOME). You can use symbolic links in the list; they will be followed, independently of the value of the followLinks variable.

monitordirs

Space-separated list of files or directories to monitor for updates. When running the real-time indexer, this allows monitoring only a subset of the whole indexed area. The elements must be included in the tree defined by the 'topdirs' members.

skippedNames

Files and directories which should be ignored. White space separated list of wildcard patterns (simple ones, not paths, must contain no / ), which will be tested against file and directory names. The list in the default configuration does not exclude hidden directories (names beginning with a dot), which means that it may index quite a few things that you do not want. On the other hand, email user agents like Thunderbird usually store messages in hidden directories, and you probably want this indexed. One possible solution is to have ".*" in "skippedNames", and add things like "~/.thunderbird" "~/.evolution" to "topdirs". Not even the file names are indexed for patterns in this list, see the "noContentSuffixes" variable for an alternative approach which indexes the file names. Can be redefined for any subtree.
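
In recoll.conf form, the approach just described could be sketched as follows (the topdirs value is only an illustration):

        # Skip all hidden files and directories...
        skippedNames+ = .*
        # ...but still index mail stores kept in hidden directories.
        topdirs = ~/docs ~/.thunderbird ~/.evolution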

skippedNames-

List of wildcard patterns to remove from the default skippedNames list.

skippedNames+

List of wildcard patterns to add to the default skippedNames list.

onlyNames

Regular file name filter patterns. If this is set, only the file names not in skippedNames and matching one of the patterns will be considered for indexing. Can be redefined per subtree. Does not apply to directories.

noContentSuffixes

List of name endings (not necessarily dot-separated suffixes) for which we don't try MIME type identification, and don't uncompress or index content. Only the names will be indexed. This complements the now obsolete recoll_noindex list from the mimemap file, which will go away in a future release (the move from mimemap to recoll.conf allows editing the list through the GUI). This is different from skippedNames because these are name ending matches only (not wildcard patterns), and the file name itself gets indexed normally. This can be redefined for subdirectories.

noContentSuffixes-

List of name endings to remove from the default noContentSuffixes list.

noContentSuffixes+

List of name endings to add to the default noContentSuffixes list.

skippedPaths

Absolute paths we should not go into. Space-separated list of wildcard expressions for absolute filesystem paths. Must be defined at the top level of the configuration file, not in a subsection. Can contain files and directories. The database and configuration directories will automatically be added. The expressions are matched using 'fnmatch(3)' with the FNM_PATHNAME flag set by default. This means that '/' characters must be matched explicitly. You can set 'skippedPathsFnmPathname' to 0 to disable the use of FNM_PATHNAME (meaning that '/*/dir3' will match '/dir1/dir2/dir3'). The default value contains the usual mount point for removable media to remind you that it is a bad idea to have Recoll work on these (esp. with the monitor: media gets indexed on mount, all data gets erased on unmount). Explicitly adding '/media/xxx' to the 'topdirs' variable will override this.

skippedPathsFnmPathname

Set to 0 to override use of FNM_PATHNAME for matching skipped paths.
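
For example (the paths are purely illustrative):

        # Never descend into these subtrees. With FNM_PATHNAME active (the
        # default), '*' does not match '/', so each path level must be spelled
        # out explicitly.
        skippedPaths = ~/tmp ~/.cache /media/*
        # Uncomment to let '*' match across '/' separators.
        # skippedPathsFnmPathname = 0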

nowalkfn

File name which will cause its parent directory to be skipped. Any directory containing a file with this name will be skipped as if it was part of the skippedPaths list. Ex: .recoll-noindex

daemSkippedPaths

skippedPaths equivalent specific to real time indexing. This enables having parts of the tree which are initially indexed but not monitored. If daemSkippedPaths is not set, the daemon uses skippedPaths.

zipUseSkippedNames

Use skippedNames inside Zip archives. Fetched directly by the rclzip handler. Skip the patterns defined by skippedNames inside Zip archives. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html

zipSkippedNames

Space-separated list of wildcard expressions for names that should be ignored inside zip archives. This is used directly by the zip handler. If zipUseSkippedNames is not set, zipSkippedNames defines the patterns to be skipped inside archives. If zipUseSkippedNames is set, the two lists are concatenated and used. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html

followLinks

Follow symbolic links during indexing. The default is to ignore symbolic links to avoid multiple indexing of linked files. No effort is made to avoid duplication when this option is set to true. This option can be set individually for each of the 'topdirs' members by using sections. It can not be changed below the 'topdirs' level. Links in the 'topdirs' list itself are always followed.

indexedmimetypes

Restrictive list of indexed mime types. Normally not set (in which case all supported types are indexed). If it is set, only the types from the list will have their contents indexed. The names will be indexed anyway if indexallfilenames is set (default). MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases). Can be redefined for subtrees.

excludedmimetypes

List of excluded MIME types. Lets you exclude some types from indexing. MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases). Can be redefined for subtrees.

nomd5types

Don't compute md5 for these types. md5 checksums are used only for deduplicating results, and can be very expensive to compute on multimedia or other big files. This list lets you turn off md5 computation for selected types. It is global (no redefinition for subtrees). At the moment, it only has an effect for external handlers (exec and execm). The file types can be specified by listing either MIME types (e.g. audio/mpeg) or handler names (e.g. rclaudio).

compressedfilemaxkbs

Size limit for compressed files. We need to decompress these in a temporary directory for identification, which can be wasteful in some cases. Limit the waste. Negative means no limit. 0 results in no processing of any compressed file. Default 50 MB.

textfilemaxmbs

Size limit for text files. Mostly for skipping monster logs. Default 20 MB.

indexallfilenames

Index the file names of unprocessed files. Index the names of files whose contents we don't index because of an excluded or unsupported MIME type.

usesystemfilecommand

Use a system command for file MIME type guessing as a final step in file type identification. This is generally useful, but will usually cause the indexing of many bogus 'text' files. See 'systemfilecommand' for the command used.

systemfilecommand

Command used to guess MIME types when the internal methods fail. This should be a "file -i" workalike. The file path will be added as a last parameter to the command line. "xdg-mime" works better than the traditional "file" command, and is now the configured default (with a hard-coded fallback to "file").

processwebqueue

Decide if we process the Web queue. The queue is a directory where the Recoll Web browser plugins create the copies of visited pages.

textfilepagekbs

Page size for text files. If this is set, text/plain files will be divided into documents of approximately this size. Will reduce memory usage at index time and help with loading data in the preview window at query time. Particularly useful with very big files, such as application or system logs. Also see textfilemaxmbs and compressedfilemaxkbs.

membermaxkbs

Size limit for archive members. This is passed to the filters in the environment as RECOLL_FILTER_MAXMEMBERKB.

Parameters affecting how we generate terms and organize the index

indexStripChars

Decide if we store character case and diacritics in the index. If we do, searches sensitive to case and diacritics can be performed, but the index will be bigger, and some marginal weirdness may sometimes occur. The default is a stripped index. When using multiple indexes for a search, this parameter must be defined identically for all. Changing the value implies an index reset.

indexStoreDocText

Decide if we store the documents' text content in the index. Storing the text allows extracting snippets from it at query time, instead of building them from index position data. Newer Xapian index formats have rendered our use of position lists unacceptably slow in some cases. The last Xapian index format with good performance for the old method is Chert, which is the default for 1.2, still supported but not the default in 1.4, and will be dropped in 1.6. The stored document text is translated from its original format to UTF-8 plain text, but not stripped of upper-case, diacritics, or punctuation signs. Storing it typically increases the index size by 10-20%, but also allows for nicer snippets, so it may be worth enabling even if not strictly needed for performance, if you can afford the space. The variable only has an effect when creating an index, meaning that the xapiandb directory must not exist yet. Its exact effect depends on the Xapian version. For Xapian 1.4, if the variable is set to 0, the Chert format will be used, and the text will not be stored. If the variable is 1, Glass will be used, and the text stored. For Xapian 1.2, and for versions 1.5 and newer, the index format is always the default, but the variable controls whether the text is stored or not, and the abstract generation method. With Xapian 1.5 and later, and the variable set to 0, abstract generation may be very slow, but this setting may still be useful to save space if you do not use abstract generation at all.

nonumbers

Decides if terms will be generated for numbers. For example "123", "1.5e6", 192.168.1.4, would not be indexed if nonumbers is set ("value123" would still be). Numbers are often quite interesting to search for, and this should probably not be set except for special situations, e.g., scientific documents with huge amounts of numbers in them, where setting nonumbers will reduce the index size. This can only be set for a whole index, not for a subtree.

dehyphenate

Determines if we index 'coworker' also when the input is 'co-worker'. This is new in version 1.22, and on by default. Setting the variable to off allows restoring the previous behaviour.

backslashasletter

Process backslash as a normal letter. This may make sense for people wanting to index TeX commands as such, but is not of much general use.

maxtermlength

Maximum term length. Words longer than this will be discarded. The default is 40 and used to be hard-coded, but it can now be adjusted. You need an index reset if you change the value.

nocjk

Decides if specific East Asian (Chinese, Korean, Japanese) character/word splitting is turned off. This will save a small amount of CPU if you have no CJK documents. If your document base does include such text but you are not interested in searching it, setting nocjk may be a significant time and space saver.

cjkngramlen

This lets you adjust the size of n-grams used for indexing CJK text. The default value of 2 is probably appropriate in most cases. A value of 3 would allow more precision and efficiency on longer words, but the index will be approximately twice as large.

indexstemminglanguages

Languages for which to create stemming expansion data. Stemmer names can be found by executing 'recollindex -l', or this can also be set from a list in the GUI.

defaultcharset

Default character set. This is used for files which do not contain a character set definition (e.g.: text/plain). Values found inside files, e.g. a 'charset' tag in HTML documents, will override it. If this is not set, the default character set is the one defined by the NLS environment ($LC_ALL, $LC_CTYPE, $LANG), or ultimately iso-8859-1 (cp-1252 in fact). If for some reason you want a general default which does not match your LANG and is not 8859-1, use this variable. This can be redefined for any sub-directory.

unac_except_trans

A list of characters, encoded in UTF-8, which should be handled specially when converting text to unaccented lowercase. For example, in Swedish, the letter a with diaeresis has full alphabet citizenship and should not be turned into an a. Each element in the space-separated list has the special character as first element and the translation following. The handling of both the lowercase and upper-case versions of a character should be specified, as presence in the list will turn off both standard accent and case processing. The value is global and affects both indexing and querying. Examples: Swedish: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ﬀff ﬁfi ﬂfl åå Åå . German: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ﬀff ﬁfi ﬂfl . In French, you probably want to decompose oe and ae and nobody would type a German ß: unac_except_trans = ßss œoe Œoe æae Æae ﬀff ﬁfi ﬂfl . The default for all, until someone protests, follows. These decompositions are not performed by unac, but it is unlikely that someone would type the composed forms in a search. unac_except_trans = ßss œoe Œoe æae Æae ﬀff ﬁfi ﬂfl

maildefcharset

Overrides the default character set for email messages which don't specify one. This is mainly useful for readpst (libpst) dumps, which are utf-8 but do not say so.

localfields

Set fields on all files (usually of a specific fs area). Syntax is the usual: name = value ; attr1 = val1 ; [...] The value part before the first semi-colon is empty, so the setting needs an initial semi-colon. This is useful, e.g., for setting the rclaptg field for application selection inside mimeview.
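
For example (the field names and values are illustrative; note the leading semi-colon):

        [~/projects/myapp]
        localfields = ; rclaptg = myviewer ; project = myapp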

testmodifusemtime

Use mtime instead of ctime to test if a file has been modified. The time is used in addition to the size, which is always used. Setting this can reduce re-indexing on systems where extended attributes are used (by some other application), but not indexed, because changing extended attributes only affects ctime. Notes: - This may prevent detection of change in some marginal file rename cases (the target would need to have the same size and mtime). - You should probably also set noxattrfields to 1 in this case, except if you still prefer to perform xattr indexing, for example if the local file update pattern makes it of value (as in general, there is a risk for pure extended attributes updates without file modification to go undetected). Perform a full index reset after changing this.

noxattrfields

Disable extended attributes conversion to metadata fields. This probably needs to be set if testmodifusemtime is set.

metadatacmds

Define commands to gather external metadata, e.g. tmsu tags. There can be several entries, separated by semi-colons, each defining which field name the data goes into and the command to use. Don't forget the initial semi-colon. All the field names must be different. You can use aliases in the "field" file if necessary. As a not too pretty hack conceded to convenience, any field name beginning with "rclmulti" will be taken as an indication that the command returns multiple field values inside a text blob formatted as a recoll configuration file ("fieldname = fieldvalue" lines). The rclmultixx name will be ignored, and field names and values will be parsed from the data. Example: metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf %f

Parameters affecting where and how we store things

cachedir

Top directory for Recoll data. Recoll data directories are normally located relative to the configuration directory (e.g. ~/.recoll/xapiandb, ~/.recoll/mboxcache). If 'cachedir' is set, the directories are stored under the specified value instead (e.g. if cachedir is ~/.cache/recoll, the default dbdir would be ~/.cache/recoll/xapiandb). This affects dbdir, webcachedir, mboxcachedir, aspellDicDir, which can still be individually specified to override cachedir. Note that if you have multiple configurations, each must have a different cachedir, there is no automatic computation of a subpath under cachedir.

maxfsoccuppc

Maximum file system occupation over which we stop indexing. The value is a percentage, corresponding to what the "Capacity" df output column shows. The default value is 0, meaning no checking.

dbdir

Xapian database directory location. This will be created on first indexing. If the value is not an absolute path, it will be interpreted as relative to cachedir if set, or the configuration directory (-c argument or $RECOLL_CONFDIR). If nothing is specified, the default is then ~/.recoll/xapiandb/

idxstatusfile

Name of the scratch file where the indexer process updates its status. Default: idxstatus.txt inside the configuration directory.

mboxcachedir

Directory location for storing mbox message offsets cache files. This is normally 'mboxcache' under cachedir if set, or else under the configuration directory, but it may be useful to share a directory between different configurations.

mboxcacheminmbs

Minimum mbox file size over which we cache the offsets. There is really no sense in caching offsets for small files. The default is 5 MB.

webcachedir

Directory where we store the archived web pages. This is only used by the web history indexing code. Default: cachedir/webcache if cachedir is set, else $RECOLL_CONFDIR/webcache.

webcachemaxmbs

Maximum size in MB of the Web archive. This is only used by the web history indexing code. Default: 40 MB. Reducing the size will not physically truncate the file.

webqueuedir

The path to the Web indexing queue. This used to be hard-coded in the old plugin as ~/.recollweb/ToIndex so there would be no need or possibility to change it, but the WebExtensions plugin now downloads the files to the user Downloads directory, and a script moves them to webqueuedir. The script reads this value from the config so it has become possible to change it.

webdownloadsdir

The path to browser downloads directory. This is where the new browser add-on extension has to create the files. They are then moved by a script to webqueuedir.

aspellDicDir

Aspell dictionary storage directory location. The aspell dictionary (aspdict.(lang).rws) is normally stored in the directory specified by cachedir if set, or under the configuration directory.

filtersdir

Directory location for executable input handlers. If RECOLL_FILTERSDIR is set in the environment, we use it instead. Defaults to $prefix/share/recoll/filters. Can be redefined for subdirectories.

iconsdir

Directory location for icons. The only reason to change this would be if you want to change the icons displayed in the result list. Defaults to $prefix/share/recoll/images

Parameters affecting indexing performance and resource usage

idxflushmb

Threshold (megabytes of new data) where we flush from memory to disk index. Setting this allows some control over memory usage by the indexer process. A value of 0 means no explicit flushing, which lets Xapian perform its own thing, meaning flushing every $XAPIAN_FLUSH_THRESHOLD documents created, modified or deleted: as memory usage depends on average document size, not only document count, the Xapian approach is not very useful, and you should let Recoll manage the flushes. The program compiled value is 0. The configured default value (from this file) is now 50 MB, and should be ok in many cases. You can set it as low as 10 to conserve memory, but if you are looking for maximum speed, you may want to experiment with values between 20 and 200. In my experience, values beyond this are always counterproductive. If you find otherwise, please drop me a note.

filtermaxseconds

Maximum external filter execution time in seconds. Default 1200 (20 minutes). Set to 0 for no limit. This is mainly to avoid infinite loops in postscript files (loop.ps).

filtermaxmbytes

Maximum virtual memory space for filter processes (setrlimit(RLIMIT_AS)), in megabytes. Note that this includes any mapped libs (there is no reliable Linux way to limit the data space only), so we need to be a bit generous here. Anything over 2000 will be ignored on 32 bits machines.

thrQSizes

Stage input queues configuration. There are three internal queues in the indexing pipeline stages (file data extraction, terms generation, index update). This parameter defines the queue depths for each stage (three integer values). If a value of -1 is given for a given stage, no queue is used, and the thread will go on performing the next stage. In practice, deep queues have not been shown to increase performance. Default: a value of 0 for the first queue tells Recoll to perform autoconfiguration based on the detected number of CPUs (no need for the two other values in this case). Use thrQSizes = -1 -1 -1 to disable multithreading entirely.

thrTCounts

Number of threads used for each indexing stage. The three stages are: file data extraction, terms generation, index update. The use of the counts is also controlled by some special values in thrQSizes: if the first queue depth is 0, all counts are ignored (autoconfigured); if a value of -1 is used for a queue depth, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the Xapian index is necessarily single-threaded (and protected by a mutex).
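
For example, to rely on autoconfiguration, or conversely to set explicit values (the explicit numbers are only illustrative):

        # Let recollindex choose queue depths and thread counts from the
        # detected number of CPUs.
        thrQSizes = 0
        # Or set explicit queue depths and thread counts for the three stages.
        # thrQSizes = 2 2 2
        # thrTCounts = 4 2 1
        # Or disable multithreading entirely.
        # thrQSizes = -1 -1 -1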

Miscellaneous parameters

loglevel

Log file verbosity 1-6. A value of 2 will print only errors and warnings. 3 will print information like document updates, 4 is quite verbose and 6 very verbose.

logfilename

Log file destination. Use 'stderr' (default) to write to the console.

idxloglevel

Override loglevel for the indexer.

idxlogfilename

Override logfilename for the indexer.

daemloglevel

Override loglevel for the indexer in real time mode. The default is to use the idx... values if set, else the log... values.

daemlogfilename

Override logfilename for the indexer in real time mode. The default is to use the idx... values if set, else the log... values.

pyloglevel

Override loglevel for the python module.

pylogfilename

Override logfilename for the python module.
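
As a sketch of how these overrides combine (the file path is just an example), the following would keep the GUI quiet on the console while sending more verbose indexer messages to a separate file:

        loglevel = 2
        logfilename = stderr
        idxloglevel = 3
        idxlogfilename = /tmp/recollindex.log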

orgidxconfdir

Original location of the configuration directory. This is used exclusively for movable datasets. Locating the configuration directory inside the directory tree makes it possible to provide automatic query time path translations once the data set has moved (for example, because it has been mounted on another location).

curidxconfdir

Current location of the configuration directory. Complements orgidxconfdir for movable datasets. This should be used if the configuration directory has been copied from the dataset to another location, either because the dataset is read-only and an r/w copy is desired, or for performance reasons. This records the original moved location before copy, to allow path translation computations. For example if a dataset originally indexed as '/home/me/mydata/config' has been mounted to '/media/me/mydata', and the GUI is running from a copied configuration, orgidxconfdir would be '/home/me/mydata/config', and curidxconfdir (as set in the copied configuration) would be '/media/me/mydata/config'.
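
Written as configuration lines, the example just described would give, in the copied configuration:

        orgidxconfdir = /home/me/mydata/config
        curidxconfdir = /media/me/mydata/config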

idxrundir

Indexing process current directory. The input handlers sometimes leave temporary files in the current directory, so it makes sense to have recollindex chdir to some temporary directory. If the value is empty, the current directory is not changed. If the value is (literal) tmp, we use the temporary directory as set by the environment (RECOLL_TMPDIR else TMPDIR else /tmp). If the value is an absolute path to a directory, we go there.

checkneedretryindexscript

Script used to heuristically check if we need to retry indexing files which previously failed. The default script checks the modified dates on /usr/bin and /usr/local/bin. A relative path will be looked up in the filters dirs, then in the path. Use an absolute path to do otherwise.

recollhelperpath

Additional places to search for helper executables. This is only used on Windows for now.

idxabsmlen

Length of abstracts we store while indexing. Recoll stores an abstract for each indexed file. The text can come from an actual 'abstract' section in the document or will just be the beginning of the document. It is stored in the index so that it can be displayed inside the result lists without decoding the original file. The idxabsmlen parameter defines the size of the stored abstract. The default value is 250 bytes. The search interface gives you the choice to display this stored text or a synthetic abstract built by extracting text around the search terms. If you always prefer the synthetic abstract, you can reduce this value and save a little space.

idxmetastoredlen

Truncation length of stored metadata fields. This does not affect indexing (the whole field is processed anyway), just the amount of data stored in the index for the purpose of displaying fields inside result lists or previews. The default value is 150 bytes which may be too low if you have custom fields.

idxtexttruncatelen

Truncation length for all document texts. Only index the beginning of documents. This is not recommended except if you are sure that the interesting keywords are at the top and you have severe disk space issues.

aspellLanguage

Language definitions to use when creating the aspell dictionary. The value must match a set of aspell language definition files. You can type "aspell dicts" to see a list. The default, if this is not set, is to use the NLS environment to guess the value.
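
For instance, to force an English dictionary independently of the NLS environment (assuming the corresponding aspell language files are installed), something like the following could be used:

        aspellLanguage = en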

aspellAddCreateParam

Additional option and parameter to aspell dictionary creation command. Some aspell packages may need an additional option (e.g. on Debian Jessie: --local-data-dir=/usr/lib/aspell). See Debian bug 772415.

aspellKeepStderr

Set this to have a look at aspell dictionary creation errors. There are always many, so this is mostly for debugging.

noaspell

Disable aspell use. The aspell dictionary generation takes time, and some combinations of aspell version, language, and local terms, result in aspell crashing, so it sometimes makes sense to just disable the thing.

monauxinterval

Auxiliary database update interval. The real time indexer only updates the auxiliary databases (stemdb, aspell) periodically, because it would be too costly to do it for every document change. The default period is one hour.

monixinterval

Minimum interval (in seconds) between two processing passes of the indexing queue. The real time indexer does not process each event when it comes in, but lets the queue accumulate, to diminish overhead and to aggregate multiple events affecting the same file. Default 30 seconds.

mondelaypatterns

Timing parameters for the real time indexing. Definitions for files which get a longer delay before reindexing is allowed. This is for fast-changing files, that should only be reindexed once in a while. A list of wildcardPattern:seconds pairs. The patterns are matched with fnmatch(pattern, path, 0). You can quote entries containing white space with double quotes (quote the whole entry, not the pattern). The default is empty. Example: mondelaypatterns = *.log:20 "*with spaces.*:30"

monioniceclass

ionice class for the real time indexing process, on platforms where this is supported. The default value is 3.

monioniceclassdata

ionice class parameter for the real time indexing process. On platforms where this is supported. The default is empty.

Query-time parameters (no impact on the index)

autodiacsens

Auto-trigger diacritics sensitivity (raw index only). If the index is not stripped, decide if we automatically trigger diacritics sensitivity when the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the "D" modifier to specify diacritics sensitivity. Default is no.

autocasesens

Auto-trigger case sensitivity (raw index only). If the index is not stripped (see indexStripChars), decide if we automatically trigger character case sensitivity when the search term has upper-case characters in any but the first position. Else you need to use the query language and the "C" modifier to specify character-case sensitivity. Default is yes.
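
For reference, when automatic triggering is off (or to force the behaviour explicitly), sensitivity can be requested in the query language by appending the modifier after a double-quoted term, along these lines (the terms are only examples, and this is meaningful on a raw index only):

        "US"C          (explicit case-sensitive match)
        "café"D        (explicit diacritics-sensitive match)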

maxTermExpand

Maximum query expansion count for a single term (e.g.: when using wildcards). This only affects queries, not indexing. We used to not limit this at all (except for filenames where the limit was too low at 1000), but it is unreasonable with a big index. Default 10000.

maxXapianClauses

Maximum number of clauses we add to a single Xapian query. This only affects queries, not indexing. In some cases, the result of term expansion can be multiplicative, and we want to avoid eating all the memory. Default 50000.

snippetMaxPosWalk

Maximum number of positions we walk while populating a snippet for the result list. The default of 1,000,000 may be insufficient for very big documents, the consequence would be snippets with possibly meaning-altering missing words.

Parameters for the PDF input script

pdfocr

Attempt OCR of PDF files with no text content if both tesseract and pdftoppm are installed. This can be defined in subdirectories. The default is off because OCR is so very slow.

pdfocrlang

Language to assume for PDF OCR. This is very important for having a reasonable rate of errors with tesseract. This can also be set through a configuration variable or directory-local parameters. See the rclpdf.py script.
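
A minimal sketch, assuming the English tesseract language data is installed (tesseract language codes such as eng, fra or deu are expected here):

        pdfocr = 1
        pdfocrlang = eng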

pdfattach

Enable PDF attachment extraction by executing pdftk (if available). This is normally disabled, because it does slow down PDF indexing a bit even if not one attachment is ever found.

pdfextrameta

Extract text from selected XMP metadata tags. This is a space-separated list of qualified XMP tag names. Each element can also include a translation to a Recoll field name, separated by a '|' character. If the second element is absent, the tag name is used as the Recoll field name. You will also need to add specifications to the "fields" file to direct processing of the extracted data.
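
As an illustration (the tag and field names below are only examples), the following would index the dc:source tag under its own name, and the pdf:Producer tag under a custom producer field, which would then also need entries in the fields file:

        pdfextrameta = dc:source pdf:Producer|producer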

pdfextrametafix

Define name of XMP field editing script. This defines the name of a script to be loaded for editing XMP field values. The script should define a 'MetaFixer' class with a metafix() method which will be called with the qualified tag name and value of each selected field, for editing or erasing. A new instance is created for each document, so that the object can keep state for, e.g. eliminating duplicate values.
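
Here is a hypothetical minimal sketch of such a script, assuming that metafix() returns the possibly modified value and that an empty value erases the field (check the comments in rclpdf.py for the exact conventions before relying on this):

        class MetaFixer(object):
            def __init__(self):
                # One instance is created per document, so state such as
                # already-seen values can be kept here.
                self.seen = set()

            def metafix(self, nm, value):
                # Called with the qualified tag name and value of each
                # selected field. Return the value to keep.
                if value in self.seen:
                    return ""        # assumption: empty value erases the field
                self.seen.add(value)
                if nm == "dc:title":
                    return value.strip()
                return value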

Parameters set for specific locations

mhmboxquirks

Enable thunderbird/mozilla-seamonkey mbox format quirks. Set this for the directory where the email mbox files are stored.

5.4.3. The fields file

This file contains information about dynamic fields handling in Recoll. Some very basic fields have hard-wired behaviour and, mostly, you should not change the original data inside the fields file. But you can create custom fields fitting your data and handle them just as if they were native ones.

The fields file has several sections, which each define an aspect of fields processing. Quite often, you'll have to modify several sections to obtain the desired behaviour.

We will only give a short description here; you should refer to the comments inside the default file for more detailed information.

Field names should be lowercase alphabetic ASCII.

[prefixes]

A field becomes indexed (searchable) by having a prefix defined in this section. There is a more complete explanation of what prefixes are, and of the ones used by a standard Recoll installation, in the comments of the default file. In a nutshell: extension prefixes should be all caps, begin with XY, and be short. E.g. XYMFLD.

[values]

Fields listed in this section will be stored as Xapian values inside the index. This makes them available for range queries, making it possible to filter results according to the field value. This feature currently supports string and integer data. See the comments in the file for more detail.

[stored]

A field becomes stored (displayable inside results) by having its name listed in this section (typically with an empty value).

[aliases]

This section defines lists of synonyms for the canonical names used inside the [prefixes] and [stored] sections

[queryaliases]

This section also defines aliases for the canonical field names, with the difference that the substitution will only be used at query time, avoiding any possibility that the value would pick up random metadata from documents.

handler-specific sections

Some input handlers may need specific configuration for handling fields. Only the email message handler currently has such a section (named [mail]). It allows indexing arbitrary email headers in addition to the ones indexed by default. Other such sections may appear in the future.

Here follows a small example of a personal fields file. This would extract a specific email header and use it as a searchable field, with data displayable inside result lists. (Side note: as the email handler does no decoding on the values, only plain ASCII headers can be indexed, and only the first occurrence will be used for headers that occur several times).

[prefixes]
        # Index mailmytag contents (with the given prefix)
        mailmytag = XMTAG

        [stored]
        # Store mailmytag inside the document data record (so that it can be
        # displayed - as %(mailmytag) - in result lists).
        mailmytag = 

        [queryaliases]
        filename = fn
        containerfilename = cfn

        [mail]
        # Extract the X-My-Tag mail header, and use it internally with the
        # mailmytag field name
        x-my-tag = mailmytag
        

Extended attributes in the fields file

Recoll versions 1.19 and later process user extended file attributes as documents fields by default.

Attributes are processed as fields of the same name, after removing the user prefix on Linux.

The [xattrtofields] section of the fields file allows specifying translations from extended attribute names to Recoll field names. An empty translation disables use of the corresponding attribute data.
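
A hypothetical example (the attribute and field names are made up): the following would index the contents of the myattr extended attribute as a mytag field, and ignore the otherattr attribute entirely:

        [xattrtofields]
        myattr = mytag
        otherattr =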

5.4.4. The mimemap file

mimemap specifies the file name extension to MIME type mappings.

For file names without an extension, or with an unknown one, a system command (file -i, or xdg-mime) will be executed to determine the MIME type (this can be switched off, or the command changed inside the main configuration file).

All extension values in mimemap must be entered in lower case. File name extensions are lower-cased for comparison during indexing, meaning that an upper case mimemap entry will never be matched.

The mappings can be specified on a per-subtree basis, which may be useful in some cases. For example, Okular notes have a .xml extension but should be handled specially, which is possible because they are usually all located in one place:

          [~/.kde/share/apps/okular/docdata]
        .xml = application/x-okular-notes

The recoll_noindex mimemap variable has been moved to recoll.conf and renamed to noContentSuffixes, while keeping the same function, as of Recoll version 1.21. For older Recoll versions, see the documentation for noContentSuffixes but use recoll_noindex in mimemap.

5.4.5. The mimeconf file

The main purpose of the mimeconf file is to specify how the different MIME types are handled for indexing. This is done in the [index] section, which should not be modified casually. See the comments in the file.

The file also contains other definitions which affect the query language and the GUI, and which, in retrospect, should have been stored elsewhere.

The [icons] section allows you to change the icons which are displayed by the recoll GUI in the result lists (the values are the basenames of the PNG images inside the iconsdir directory, which is itself defined in recoll.conf).

The [categories] section defines the groupings of MIME types into categories as used when adding an rclcat clause to a query language query. rclcat clauses are also used by the default guifilters buttons in the GUI (see next).

The filter controls appear at the top of the recoll GUI, either as checkboxes just above the result list, or as a drop-down box in the tool area.

By default, they are labeled: media, message, other, presentation, spreadsheet and text, and each maps to a document category. This is determined in the [guifilters] section, where each control is defined by a variable naming a query language fragment.

A simple example will hopefully make things clearer.

[guifilters]

Big Books = dir:"~/My Books" size>10K
My Docs = dir:"~/My Documents"
Small Books = dir:"~/My Books" size<10K
System Docs = dir:/usr/share/doc
        

The above definition would create four filter checkboxes, labelled Big Books, My Docs, etc.

The text after the equal sign must be a valid query language fragment, and, when the button is checked, it will be combined with the rest of the query with an AND conjunction.

Any name text before a colon character will be erased in the display, but used for sorting. You can use this to display the checkboxes in any order you like. For example, the following would do exactly the same as above, but with the checkboxes ordered in reverse.

[guifilters]

d:Big Books = dir:"~/My Books" size>10K
c:My Docs = dir:"~/My Documents"
b:Small Books = dir:"~/My Books" size<10K
a:System Docs = dir:/usr/share/doc
        

As you may have guessed, the default [guifilters] section looks like:

[guifilters]
text = rclcat:text
spreadsheet = rclcat:spreadsheet
presentation = rclcat:presentation
media = rclcat:media
message = rclcat:message
other = rclcat:other
        

5.4.6. The mimeview file

mimeview specifies which programs are started when you click on an Open link in a result list. For example, HTML is normally displayed using Firefox, but you may prefer Konqueror; your openoffice.org program might be named oofice instead of openoffice, etc.

Changes to this file can be done by direct editing, or through the recoll GUI preferences dialog.

If Use desktop preferences to choose document editor is checked in the Recoll GUI preferences, all mimeview entries will be ignored except the one labelled application/x-all (which is set to use xdg-open by default).

In this case, the xallexcepts top level variable defines a list of MIME type exceptions which will be processed according to the local entries instead of being passed to the desktop. This is so that specific Recoll options such as a page number or a search string can be passed to applications that support them, such as the evince viewer.

As for the other configuration files, the normal usage is to have a mimeview inside your own configuration directory, with just the non-default entries, which will override those from the central configuration file.

All viewer definition entries must be placed under a [view] section.

The keys in the file are normally MIME types. You can add an application tag to specialize the choice for an area of the filesystem (using a localfields specification in mimeconf). The syntax for the key is mimetype|tag

The nouncompforviewmts entry, (placed at the top level, outside of the [view] section), holds a list of MIME types that should not be uncompressed before starting the viewer (if they are found compressed, ie: mydoc.doc.gz).

The right side of each assignment holds a command to be executed for opening the file. The following substitutions are performed:

  • %D. Document date

  • %f. File name. This may be the name of a temporary file if it was necessary to create one (ie: to extract a subdocument from a container).

  • %i. Internal path, for subdocuments of containers. The format depends on the container type. If this appears in the command line, Recoll will not create a temporary file to extract the subdocument, expecting the called application (possibly a script) to be able to handle it.

  • %M. MIME type

  • %p. Page index. Only significant for a subset of document types, currently only PDF, Postscript and DVI files. Can be used to start the editor at the right page for a match or snippet.

  • %s. Search term. The value will only be set for documents with indexed page numbers (ie: PDF). The value will be one of the matched search terms. It would allow pre-setting the value in the "Find" entry inside Evince for example, for easy highlighting of the term.

  • %u. Url.

In addition to the predefined values above, all strings like %(fieldname) will be replaced by the value of the field named fieldname for the document. This could be used in combination with field customisation to help with opening the document.
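
As an illustration (not necessarily the stock entry, and assuming an evince version which supports the --page-index and --find options), a PDF entry making use of the page and search term substitutions could look like:

        [view]
        application/pdf = evince --page-index=%p --find=%s %f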

5.4.7. The ptrans file

ptrans specifies query-time path translations. These can be useful in multiple cases.

The file has a section for any index which needs translations, either the main one or additional query indexes. The sections are named with the Xapian index directory names. No slash character should exist at the end of the paths (all comparisons are textual). An example should make things sufficiently clear:

          [/home/me/.recoll/xapiandb]
          /this/directory/moved = /to/this/place

          [/path/to/additional/xapiandb]
          /server/volume1/docdir = /net/server/volume1/docdir
          /server/volume2/docdir = /net/server/volume2/docdir
        

5.4.8. Examples of configuration adjustments

Adding an external viewer for a non-indexed type

Imagine that you have some kind of file which does not have indexable content, but for which you would like to have a functional Open link in the result list (when found by file name). The file names end in .blob and can be displayed by application blobviewer.

You need two entries in the configuration files for this to work:

  • In $RECOLL_CONFDIR/mimemap (typically ~/.recoll/mimemap), add the following line:

                .blob = application/x-blobapp
              

    Note that the MIME type is made up here, and you could call it diesel/oil just the same.

  • In $RECOLL_CONFDIR/mimeview under the [view] section, add:

                  application/x-blobapp = blobviewer %f
                

    We are supposing that blobviewer wants a file name parameter here; you would use %u if it liked URLs better.

If you just wanted to change the application used by Recoll to display a MIME type which it already knows, you would just need to edit mimeview. The entries you add in your personal file override those in the central configuration, which you do not need to alter. mimeview can also be modified from the GUI.

Adding indexing support for a new file type

Let us now imagine that the above .blob files actually contain indexable text and that you know how to extract it with a command line program. Getting Recoll to index the files is easy. You need to perform the above alteration, and also to add data to the mimeconf file (typically in ~/.recoll/mimeconf):

  • Under the [index] section, add the following line (more about the rclblob indexing script later):

    application/x-blobapp = exec rclblob

    Or if the files are mostly text and you don't need to process them for indexing:

    application/x-blobapp = internal text/plain
  • Under the [icons] section, you should choose an icon to be displayed for the files inside the result lists. Icons are normally 64x64 pixel PNG files which live in /usr/share/recoll/images.

  • Under the [categories] section, you should add the MIME type where it makes sense (you can also create a category). Categories may be used for filtering in advanced search.

The rclblob handler should be an executable program or script which exists inside /usr/share/recoll/filters. It will be given a file name as argument and should output the text or html contents on the standard output.
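
A hypothetical minimal rclblob, written in Python here and assuming (for the sake of the sketch) that the .blob payload is in fact plain UTF-8 text, could be as simple as:

        #!/usr/bin/env python3
        # Hypothetical minimal handler for the made-up .blob type: Recoll
        # passes the file name as the single argument and expects the
        # extracted text (or HTML) on standard output.
        import sys

        with open(sys.argv[1], "rb") as f:
            data = f.read()

        # Assume the .blob payload is plain UTF-8 text.
        sys.stdout.write(data.decode("utf-8", errors="replace"))

The script must of course be made executable; whatever it prints on standard output will be indexed as the document content.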

The filter programming section describes in more detail how to write an input handler.

recoll-1.26.3/doc/user/docbook-xsl.css0000644000175000017500000000743313303776057014507 00000000000000/* * Copyright (c) 2001, 2003, 2010 The FreeBSD Documentation Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: doc/share/misc/docbook.css,v 1.15 2010/03/20 04:15:01 hrs Exp $ */ body address { line-height: 1.3; margin: .6em 0; } body blockquote { margin-top: .75em; line-height: 1.5; margin-bottom: .75em; } html body { margin: 1em 8% 1em 10%; line-height: 1.2; } .legalnotice { font-size: small; font-variant: small-caps; } body div { margin: 0; } dl { margin: .8em 0; line-height: 1.2; } body form { margin: .6em 0; } h1, h2, h3, h4, h5, h6, div.example p b, .question, div.table p b, div.procedure p b { color: #990000; } body h1, body h2, body h3, body h4, body h5, body h6 { line-height: 1.3; margin-left: 0; } body h1, body h2 { margin: .8em 0 0 -4%; } body h3, body h4 { margin: .8em 0 0 -3%; } body h5 { margin: .8em 0 0 -2%; } body h6 { margin: .8em 0 0 -1%; } body hr { margin: .6em; border-width: 0 0 1px 0; border-style: solid; border-color: #cecece; } body img.navheader { margin: 0 0 0 -4%; } ol { margin: 0 0 0 5%; line-height: 1.2; } body pre { margin: .75em 0; line-height: 1.0; font-family: monospace; } body td, body th { line-height: 1.2; } ul, body dir, body menu { margin: 0 0 0 5%; line-height: 1.2; } html { margin: 0; padding: 0; } body p b.application { color: #000000; } body p span.application { font-weight: bold; color: #000000; } .filename { color: #007a00; } .guimenu, .guimenuitem, .guisubmenu, .guilabel, .interface, .shortcut, .shortcut .keycap { font-weight: bold; } .guibutton { background-color: #cfcfcf; padding: 2px; } .accel { background-color: #f0f0f0; text-decoration: underline; } .screen { padding: 1ex; } .programlisting { padding: 1ex; background-color: #eee; border: 1px solid #ccc; } @media screen { /* hide from ie3 */ a[href]:hover { background: #ffa } } blockquote.note { color: #222; background: #eee; border: 1px solid #ccc; padding: 0.4em 0.4em; width: 85%; } blockquote.tip { color: #004f00; background: #d8ecd6; border: 1px solid green; padding: 0.2em 2em; width: 85%; } blockquote.important { font-style:italic; border: 1px solid #a00; border-left: 12px solid #c00; padding: 0.1em 1em; } blockquote.warning { color: #9f1313; background: 
#f8e8e8; border: 1px solid #e59595; padding: 0.2em 2em; width: 85%; } .example { background: #fefde6; border: 1px solid #f1bb16; margin: 1em 0; padding: 0.2em 2em; width: 90%; } .informaltable table.calstable tr td { padding-left: 1em; padding-right: 1em; } recoll-1.26.3/doc/user/docbook.css0000644000175000017500000000733213303776057013701 00000000000000/* * Copyright (c) 2001, 2003, 2010 The FreeBSD Documentation Project * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: doc/share/misc/docbook.css,v 1.15 2010/03/20 04:15:01 hrs Exp $ */ BODY ADDRESS { line-height: 1.3; margin: .6em 0; } BODY BLOCKQUOTE { margin-top: .75em; line-height: 1.5; margin-bottom: .75em; } HTML BODY { margin: 1em 8% 1em 10%; line-height: 1.2; } .LEGALNOTICE { font-size: small; font-variant: small-caps; } BODY DIV { margin: 0; } DL { margin: .8em 0; line-height: 1.2; } BODY FORM { margin: .6em 0; } H1, H2, H3, H4, H5, H6, DIV.EXAMPLE P B, .QUESTION, DIV.TABLE P B, DIV.PROCEDURE P B { color: #990000; } BODY H1, BODY H2, BODY H3, BODY H4, BODY H5, BODY H6 { line-height: 1.3; margin-left: 0; } BODY H1, BODY H2 { margin: .8em 0 0 -4%; } BODY H3, BODY H4 { margin: .8em 0 0 -3%; } BODY H5 { margin: .8em 0 0 -2%; } BODY H6 { margin: .8em 0 0 -1%; } BODY HR { margin: .6em; border-width: 0 0 1px 0; border-style: solid; border-color: #cecece; } BODY IMG.NAVHEADER { margin: 0 0 0 -4%; } OL { margin: 0 0 0 5%; line-height: 1.2; } BODY PRE { margin: .75em 0; line-height: 1.0; font-family: monospace; } BODY TD, BODY TH { line-height: 1.2; } UL, BODY DIR, BODY MENU { margin: 0 0 0 5%; line-height: 1.2; } HTML { margin: 0; padding: 0; } BODY P B.APPLICATION { color: #000000; } .FILENAME { color: #007a00; } .GUIMENU, .GUIMENUITEM, .GUISUBMENU, .GUILABEL, .INTERFACE, .SHORTCUT, .SHORTCUT .KEYCAP { font-weight: bold; } .GUIBUTTON { background-color: #CFCFCF; padding: 2px; } .ACCEL { background-color: #F0F0F0; text-decoration: underline; } .SCREEN { padding: 1ex; } .PROGRAMLISTING { padding: 1ex; background-color: #eee; border: 1px solid #ccc; } @media screen { /* hide from IE3 */ a[href]:hover { background: #ffa } } BLOCKQUOTE.NOTE { color: #222; background: #eee; border: 1px solid #ccc; padding: 0.4em 0.4em; width: 85%; } BLOCKQUOTE.TIP { color: #004F00; background: #d8ecd6; border: 1px solid green; padding: 0.2em 
2em; width: 85%; } BLOCKQUOTE.IMPORTANT { font-style:italic; border: 1px solid #a00; border-left: 12px solid #c00; padding: 0.1em 1em; } BLOCKQUOTE.WARNING { color: #9F1313; background: #f8e8e8; border: 1px solid #e59595; padding: 0.2em 2em; width: 85%; } .EXAMPLE { background: #fefde6; border: 1px solid #f1bb16; margin: 1em 0; padding: 0.2em 2em; width: 90%; } .INFORMALTABLE TABLE.CALSTABLE TR TD { padding-left: 1em; padding-right: 1em; } recoll-1.26.3/doc/user/Makefile0000644000175000017500000000541013533651561013177 00000000000000 # Wherever docbook.xsl and chunk.xsl live. # NOTE: THIS IS HARDCODED inside custom.xsl (for changing the output # charset), which needs to change if the stylesheet location changes. # Necessity of custom.xsl: # http://www.sagehill.net/docbookxsl/OutputEncoding.html # Fbsd #XSLDIR="/usr/local/share/xsl/docbook/" # Mac #XSLDIR="/opt/local/share/xsl/docbook-xsl/" #Linux XSLDIR="/usr/share/xml/docbook/stylesheet/docbook-xsl/" # Options common to the single-file and chunked versions commonoptions=--stringparam section.autolabel 1 \ --stringparam section.autolabel.max.depth 2 \ --stringparam section.label.includes.component.label 1 \ --stringparam toc.max.depth 3 \ --stringparam autotoc.label.in.hyperlink 0 \ --stringparam abstract.notitle.enabled 1 \ --stringparam html.stylesheet docbook-xsl.css \ --stringparam generate.toc "book toc,title,figure,table,example,equation" # index.html chunk format target replaced by nicer webhelp (needs separate # make) in webhelp/ subdir all: usermanual.html webh usermanual.pdf webh: make -C webhelp usermanual.html: usermanual.xml recoll.conf.xml xsltproc --xinclude ${commonoptions} \ -o tmpfile.html custom.xsl $< -tidy -indent tmpfile.html > usermanual.html rm -f tmpfile.html index.html: usermanual.xml recoll.conf.xml xsltproc ${commonoptions} \ --stringparam use.id.as.filename 1 \ --stringparam root.filename index \ "${XSLDIR}/html/chunk.xsl" $< usermanual.pdf: usermanual.xml recoll.conf.xml dblatex --xslt-opts="--xinclude" -tpdf $< UTILBUILDS=/home/dockes/tmp/builds/medocutils/ recoll-conf-xml: $(UTILBUILDS)/confxml --docbook \ --idprefix=RCL.INSTALL.CONFIG.RECOLLCONF \ ../../sampleconf/recoll.conf > recoll.conf.xml # Generating a restructured text version, for uploading to readthedocs. # Does not really work, the internal links are botched. pandoc # generates something like: # `configuration <#RCL.INDEXING.CONFIG>`__ # when it should be: # :ref:`RCL.INDEXING.CONFIG` # Also with the second form, the link text is the section heading (can't be # chosen), which is not nice. Else, the change could probably be done by a # script. # Also could not get readthedocs to generate the left pane TOC? could # probably be fixed... #usermanual-rst: recoll-conf-xml # tail -n +2 recoll.conf.xml > rcl-conf-tail.xml # sed -e '/xi:include/r rcl-conf-tail.xml' \ # < usermanual.xml > full-man.xml # sed -i -e '/xi:include/d' -e '//d' full-man.xml # pandoc -s -f docbook -t rst full-man.xml > \ # ../../../docs/index.rst # rm -f rcl-conf-tail.xml full-man.xml # not needed with pandoc 2.x -@echo fix termmatch and execute clean: rm -f RCL.*.html usermanual.pdf usermanual.html index.html \ tmpfile.html rcl-conf-tail.xml full-man.xml recoll-1.26.3/doc/user/recoll.conf.xml0000644000175000017500000013143113566424763014500 00000000000000 Recoll main configuration file, recoll.conf Parameters affecting what documents we index topdirs Space-separated list of files or directories to recursively index. Default to ~ (indexes $HOME). 
You can use symbolic links in the list, they will be followed, independantly of the value of the followLinks variable. monitordirs Space-separated list of files or directories to monitor for updates. When running the real-time indexer, this allows monitoring only a subset of the whole indexed area. The elements must be included in the tree defined by the 'topdirs' members. skippedNames Files and directories which should be ignored. White space separated list of wildcard patterns (simple ones, not paths, must contain no / ), which will be tested against file and directory names. The list in the default configuration does not exclude hidden directories (names beginning with a dot), which means that it may index quite a few things that you do not want. On the other hand, email user agents like Thunderbird usually store messages in hidden directories, and you probably want this indexed. One possible solution is to have ".*" in "skippedNames", and add things like "~/.thunderbird" "~/.evolution" to "topdirs". Not even the file names are indexed for patterns in this list, see the "noContentSuffixes" variable for an alternative approach which indexes the file names. Can be redefined for any subtree. skippedNames- List of name endings to remove from the default skippedNames list. skippedNames+ List of name endings to add to the default skippedNames list. onlyNames Regular file name filter patterns If this is set, only the file names not in skippedNames and matching one of the patterns will be considered for indexing. Can be redefined per subtree. Does not apply to directories. noContentSuffixes List of name endings (not necessarily dot-separated suffixes) for which we don't try MIME type identification, and don't uncompress or index content. Only the names will be indexed. This complements the now obsoleted recoll_noindex list from the mimemap file, which will go away in a future release (the move from mimemap to recoll.conf allows editing the list through the GUI). This is different from skippedNames because these are name ending matches only (not wildcard patterns), and the file name itself gets indexed normally. This can be redefined for subdirectories. noContentSuffixes- List of name endings to remove from the default noContentSuffixes list. noContentSuffixes+ List of name endings to add to the default noContentSuffixes list. skippedPaths Absolute paths we should not go into. Space-separated list of wildcard expressions for absolute filesystem paths. Must be defined at the top level of the configuration file, not in a subsection. Can contain files and directories. The database and configuration directories will automatically be added. The expressions are matched using 'fnmatch(3)' with the FNM_PATHNAME flag set by default. This means that '/' characters must be matched explicitely. You can set 'skippedPathsFnmPathname' to 0 to disable the use of FNM_PATHNAME (meaning that '/*/dir3' will match '/dir1/dir2/dir3'). The default value contains the usual mount point for removable media to remind you that it is a bad idea to have Recoll work on these (esp. with the monitor: media gets indexed on mount, all data gets erased on unmount). Explicitely adding '/media/xxx' to the 'topdirs' variable will override this. skippedPathsFnmPathname Set to 0 to override use of FNM_PATHNAME for matching skipped paths. nowalkfn File name which will cause its parent directory to be skipped. Any directory containing a file with this name will be skipped as if it was part of the skippedPaths list. 
Ex: .recoll-noindex daemSkippedPaths skippedPaths equivalent specific to real time indexing. This enables having parts of the tree which are initially indexed but not monitored. If daemSkippedPaths is not set, the daemon uses skippedPaths. zipUseSkippedNames Use skippedNames inside Zip archives. Fetched directly by the rclzip handler. Skip the patterns defined by skippedNames inside Zip archives. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html zipSkippedNames Space-separated list of wildcard expressions for names that should be ignored inside zip archives. This is used directly by the zip handler. If zipUseSkippedNames is not set, zipSkippedNames defines the patterns to be skipped inside archives. If zipUseSkippedNames is set, the two lists are concatenated and used. Can be redefined for subdirectories. See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html followLinks Follow symbolic links during indexing. The default is to ignore symbolic links to avoid multiple indexing of linked files. No effort is made to avoid duplication when this option is set to true. This option can be set individually for each of the 'topdirs' members by using sections. It can not be changed below the 'topdirs' level. Links in the 'topdirs' list itself are always followed. indexedmimetypes Restrictive list of indexed mime types. Normally not set (in which case all supported types are indexed). If it is set, only the types from the list will have their contents indexed. The names will be indexed anyway if indexallfilenames is set (default). MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases). Can be redefined for subtrees. excludedmimetypes List of excluded MIME types. Lets you exclude some types from indexing. MIME type names should be taken from the mimemap file (the values may be different from xdg-mime or file -i output in some cases) Can be redefined for subtrees. nomd5types Don't compute md5 for these types. md5 checksums are used only for deduplicating results, and can be very expensive to compute on multimedia or other big files. This list lets you turn off md5 computation for selected types. It is global (no redefinition for subtrees). At the moment, it only has an effect for external handlers (exec and execm). The file types can be specified by listing either MIME types (e.g. audio/mpeg) or handler names (e.g. rclaudio). compressedfilemaxkbs Size limit for compressed files. We need to decompress these in a temporary directory for identification, which can be wasteful in some cases. Limit the waste. Negative means no limit. 0 results in no processing of any compressed file. Default 50 MB. textfilemaxmbs Size limit for text files. Mostly for skipping monster logs. Default 20 MB. indexallfilenames Index the file names of unprocessed files Index the names of files the contents of which we don't index because of an excluded or unsupported MIME type. usesystemfilecommand Use a system command for file MIME type guessing as a final step in file type identification This is generally useful, but will usually cause the indexing of many bogus 'text' files. See 'systemfilecommand' for the command used. systemfilecommand Command used to guess MIME types if the internal methods fails This should be a "file -i" workalike. The file path will be added as a last parameter to the command line. 
"xdg-mime" works better than the traditional "file" command, and is now the configured default (with a hard-coded fallback to "file") processwebqueue Decide if we process the Web queue. The queue is a directory where the Recoll Web browser plugins create the copies of visited pages. textfilepagekbs Page size for text files. If this is set, text/plain files will be divided into documents of approximately this size. Will reduce memory usage at index time and help with loading data in the preview window at query time. Particularly useful with very big files, such as application or system logs. Also see textfilemaxmbs and compressedfilemaxkbs. membermaxkbs Size limit for archive members. This is passed to the filters in the environment as RECOLL_FILTER_MAXMEMBERKB. Parameters affecting how we generate terms and organize the index indexStripChars Decide if we store character case and diacritics in the index. If we do, searches sensitive to case and diacritics can be performed, but the index will be bigger, and some marginal weirdness may sometimes occur. The default is a stripped index. When using multiple indexes for a search, this parameter must be defined identically for all. Changing the value implies an index reset. indexStoreDocText Decide if we store the documents' text content in the index. Storing the text allows extracting snippets from it at query time, instead of building them from index position data. Newer Xapian index formats have rendered our use of positions list unacceptably slow in some cases. The last Xapian index format with good performance for the old method is Chert, which is default for 1.2, still supported but not default in 1.4 and will be dropped in 1.6. The stored document text is translated from its original format to UTF-8 plain text, but not stripped of upper-case, diacritics, or punctuation signs. Storing it increases the index size by 10-20% typically, but also allows for nicer snippets, so it may be worth enabling it even if not strictly needed for performance if you can afford the space. The variable only has an effect when creating an index, meaning that the xapiandb directory must not exist yet. Its exact effect depends on the Xapian version. For Xapian 1.4, if the variable is set to 0, the Chert format will be used, and the text will not be stored. If the variable is 1, Glass will be used, and the text stored. For Xapian 1.2, and for versions after 1.5 and newer, the index format is always the default, but the variable controls if the text is stored or not, and the abstract generation method. With Xapian 1.5 and later, and the variable set to 0, abstract generation may be very slow, but this setting may still be useful to save space if you do not use abstract generation at all. nonumbers Decides if terms will be generated for numbers. For example "123", "1.5e6", 192.168.1.4, would not be indexed if nonumbers is set ("value123" would still be). Numbers are often quite interesting to search for, and this should probably not be set except for special situations, ie, scientific documents with huge amounts of numbers in them, where setting nonumbers will reduce the index size. This can only be set for a whole index, not for a subtree. dehyphenate Determines if we index 'coworker' also when the input is 'co-worker'. This is new in version 1.22, and on by default. Setting the variable to off allows restoring the previous behaviour. 
backslashasletter Process backslash as normal letter This may make sense for people wanting to index TeX commands as such but is not of much general use. maxtermlength Maximum term length. Words longer than this will be discarded. The default is 40 and used to be hard-coded, but it can now be adjusted. You need an index reset if you change the value. nocjk Decides if specific East Asian (Chinese Korean Japanese) characters/word splitting is turned off. This will save a small amount of CPU if you have no CJK documents. If your document base does include such text but you are not interested in searching it, setting nocjk may be a significant time and space saver. cjkngramlen This lets you adjust the size of n-grams used for indexing CJK text. The default value of 2 is probably appropriate in most cases. A value of 3 would allow more precision and efficiency on longer words, but the index will be approximately twice as large. indexstemminglanguages Languages for which to create stemming expansion data. Stemmer names can be found by executing 'recollindex -l', or this can also be set from a list in the GUI. defaultcharset Default character set. This is used for files which do not contain a character set definition (e.g.: text/plain). Values found inside files, e.g. a 'charset' tag in HTML documents, will override it. If this is not set, the default character set is the one defined by the NLS environment ($LC_ALL, $LC_CTYPE, $LANG), or ultimately iso-8859-1 (cp-1252 in fact). If for some reason you want a general default which does not match your LANG and is not 8859-1, use this variable. This can be redefined for any sub-directory. unac_except_trans A list of characters, encoded in UTF-8, which should be handled specially when converting text to unaccented lowercase. For example, in Swedish, the letter a with diaeresis has full alphabet citizenship and should not be turned into an a. Each element in the space-separated list has the special character as first element and the translation following. The handling of both the lowercase and upper-case versions of a character should be specified, as appartenance to the list will turn-off both standard accent and case processing. The value is global and affects both indexing and querying. Examples: Swedish: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl åå Åå . German: unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl In French, you probably want to decompose oe and ae and nobody would type a German ß unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl . The default for all until someone protests follows. These decompositions are not performed by unac, but it is unlikely that someone would type the composed forms in a search. unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl maildefcharset Overrides the default character set for email messages which don't specify one. This is mainly useful for readpst (libpst) dumps, which are utf-8 but do not say so. localfields Set fields on all files (usually of a specific fs area). Syntax is the usual: name = value ; attr1 = val1 ; [...] value is empty so this needs an initial semi-colon. This is useful, e.g., for setting the rclaptg field for application selection inside mimeview. testmodifusemtime Use mtime instead of ctime to test if a file has been modified. The time is used in addition to the size, which is always used. 
Setting this can reduce re-indexing on systems where extended attributes are used (by some other application), but not indexed, because changing extended attributes only affects ctime. Notes: - This may prevent detection of change in some marginal file rename cases (the target would need to have the same size and mtime). - You should probably also set noxattrfields to 1 in this case, except if you still prefer to perform xattr indexing, for example if the local file update pattern makes it of value (as in general, there is a risk for pure extended attributes updates without file modification to go undetected). Perform a full index reset after changing this. noxattrfields Disable extended attributes conversion to metadata fields. This probably needs to be set if testmodifusemtime is set. metadatacmds Define commands to gather external metadata, e.g. tmsu tags. There can be several entries, separated by semi-colons, each defining which field name the data goes into and the command to use. Don't forget the initial semi-colon. All the field names must be different. You can use aliases in the "field" file if necessary. As a not too pretty hack conceded to convenience, any field name beginning with "rclmulti" will be taken as an indication that the command returns multiple field values inside a text blob formatted as a recoll configuration file ("fieldname = fieldvalue" lines). The rclmultixx name will be ignored, and field names and values will be parsed from the data. Example: metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf %f Parameters affecting where and how we store things cachedir Top directory for Recoll data. Recoll data directories are normally located relative to the configuration directory (e.g. ~/.recoll/xapiandb, ~/.recoll/mboxcache). If 'cachedir' is set, the directories are stored under the specified value instead (e.g. if cachedir is ~/.cache/recoll, the default dbdir would be ~/.cache/recoll/xapiandb). This affects dbdir, webcachedir, mboxcachedir, aspellDicDir, which can still be individually specified to override cachedir. Note that if you have multiple configurations, each must have a different cachedir, there is no automatic computation of a subpath under cachedir. maxfsoccuppc Maximum file system occupation over which we stop indexing. The value is a percentage, corresponding to what the "Capacity" df output column shows. The default value is 0, meaning no checking. dbdir Xapian database directory location. This will be created on first indexing. If the value is not an absolute path, it will be interpreted as relative to cachedir if set, or the configuration directory (-c argument or $RECOLL_CONFDIR). If nothing is specified, the default is then ~/.recoll/xapiandb/ idxstatusfile Name of the scratch file where the indexer process updates its status. Default: idxstatus.txt inside the configuration directory. mboxcachedir Directory location for storing mbox message offsets cache files. This is normally 'mboxcache' under cachedir if set, or else under the configuration directory, but it may be useful to share a directory between different configurations. mboxcacheminmbs Minimum mbox file size over which we cache the offsets. There is really no sense in caching offsets for small files. The default is 5 MB. webcachedir Directory where we store the archived web pages. This is only used by the web history indexing code Default: cachedir/webcache if cachedir is set, else $RECOLL_CONFDIR/webcache webcachemaxmbs Maximum size in MB of the Web archive. 
This is only used by the web history indexing code. Default: 40 MB. Reducing the size will not physically truncate the file. webqueuedir The path to the Web indexing queue. This used to be hard-coded in the old plugin as ~/.recollweb/ToIndex so there would be no need or possibility to change it, but the WebExtensions plugin now downloads the files to the user Downloads directory, and a script moves them to webqueuedir. The script reads this value from the config so it has become possible to change it. webdownloadsdir The path to browser downloads directory. This is where the new browser add-on extension has to create the files. They are then moved by a script to webqueuedir. aspellDicDir Aspell dictionary storage directory location. The aspell dictionary (aspdict.(lang).rws) is normally stored in the directory specified by cachedir if set, or under the configuration directory. filtersdir Directory location for executable input handlers. If RECOLL_FILTERSDIR is set in the environment, we use it instead. Defaults to $prefix/share/recoll/filters. Can be redefined for subdirectories. iconsdir Directory location for icons. The only reason to change this would be if you want to change the icons displayed in the result list. Defaults to $prefix/share/recoll/images Parameters affecting indexing performance and resource usage idxflushmb Threshold (megabytes of new data) where we flush from memory to disk index. Setting this allows some control over memory usage by the indexer process. A value of 0 means no explicit flushing, which lets Xapian perform its own thing, meaning flushing every $XAPIAN_FLUSH_THRESHOLD documents created, modified or deleted: as memory usage depends on average document size, not only document count, the Xapian approach is is not very useful, and you should let Recoll manage the flushes. The program compiled value is 0. The configured default value (from this file) is now 50 MB, and should be ok in many cases. You can set it as low as 10 to conserve memory, but if you are looking for maximum speed, you may want to experiment with values between 20 and 200. In my experience, values beyond this are always counterproductive. If you find otherwise, please drop me a note. filtermaxseconds Maximum external filter execution time in seconds. Default 1200 (20mn). Set to 0 for no limit. This is mainly to avoid infinite loops in postscript files (loop.ps) filtermaxmbytes Maximum virtual memory space for filter processes (setrlimit(RLIMIT_AS)), in megabytes. Note that this includes any mapped libs (there is no reliable Linux way to limit the data space only), so we need to be a bit generous here. Anything over 2000 will be ignored on 32 bits machines. thrQSizes Stage input queues configuration. There are three internal queues in the indexing pipeline stages (file data extraction, terms generation, index update). This parameter defines the queue depths for each stage (three integer values). If a value of -1 is given for a given stage, no queue is used, and the thread will go on performing the next stage. In practise, deep queues have not been shown to increase performance. Default: a value of 0 for the first queue tells Recoll to perform autoconfiguration based on the detected number of CPUs (no need for the two other values in this case). Use thrQSizes = -1 -1 -1 to disable multithreading entirely. thrTCounts Number of threads used for each indexing stage. The three stages are: file data extraction, terms generation, index update). 
The use of the counts is also controlled by some special values in thrQSizes: if the first queue depth is 0, all counts are ignored (autoconfigured); if a value of -1 is used for a queue depth, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the Xapian index is necessarily single-threaded (and protected by a mutex). Miscellaneous parameters loglevel Log file verbosity 1-6. A value of 2 will print only errors and warnings. 3 will print information like document updates, 4 is quite verbose and 6 very verbose. logfilename Log file destination. Use 'stderr' (default) to write to the console. idxloglevel Override loglevel for the indexer. idxlogfilename Override logfilename for the indexer. daemloglevel Override loglevel for the indexer in real time mode. The default is to use the idx... values if set, else the log... values. daemlogfilename Override logfilename for the indexer in real time mode. The default is to use the idx... values if set, else the log... values. pyloglevel Override loglevel for the python module. pylogfilename Override logfilename for the python module. orgidxconfdir Original location of the configuration directory. This is used exclusively for movable datasets. Locating the configuration directory inside the directory tree makes it possible to provide automatic query time path translations once the data set has moved (for example, because it has been mounted on another location). curidxconfdir Current location of the configuration directory. Complement orgidxconfdir for movable datasets. This should be used if the configuration directory has been copied from the dataset to another location, either because the dataset is readonly and an r/w copy is desired, or for performance reasons. This records the original moved location before copy, to allow path translation computations. For example if a dataset originally indexed as '/home/me/mydata/config' has been mounted to '/media/me/mydata', and the GUI is running from a copied configuration, orgidxconfdir would be '/home/me/mydata/config', and curidxconfdir (as set in the copied configuration) would be '/media/me/mydata/config'. idxrundir Indexing process current directory. The input handlers sometimes leave temporary files in the current directory, so it makes sense to have recollindex chdir to some temporary directory. If the value is empty, the current directory is not changed. If the value is (literal) tmp, we use the temporary directory as set by the environment (RECOLL_TMPDIR else TMPDIR else /tmp). If the value is an absolute path to a directory, we go there. checkneedretryindexscript Script used to heuristically check if we need to retry indexing files which previously failed. The default script checks the modified dates on /usr/bin and /usr/local/bin. A relative path will be looked up in the filters dirs, then in the path. Use an absolute path to do otherwise. recollhelperpath Additional places to search for helper executables. This is only used on Windows for now. idxabsmlen Length of abstracts we store while indexing. Recoll stores an abstract for each indexed file. The text can come from an actual 'abstract' section in the document or will just be the beginning of the document. It is stored in the index so that it can be displayed inside the result lists without decoding the original file. The idxabsmlen parameter defines the size of the stored abstract. The default value is 250 bytes. 
The search interface gives you the choice to display this stored text or a synthetic abstract built by extracting text around the search terms. If you always prefer the synthetic abstract, you can reduce this value and save a little space. idxmetastoredlen Truncation length of stored metadata fields. This does not affect indexing (the whole field is processed anyway), just the amount of data stored in the index for the purpose of displaying fields inside result lists or previews. The default value is 150 bytes which may be too low if you have custom fields. idxtexttruncatelen Truncation length for all document texts. Only index the beginning of documents. This is not recommended except if you are sure that the interesting keywords are at the top and have severe disk space issues. aspellLanguage Language definitions to use when creating the aspell dictionary. The value must match a set of aspell language definition files. You can type "aspell dicts" to see a list The default if this is not set is to use the NLS environment to guess the value. aspellAddCreateParam Additional option and parameter to aspell dictionary creation command. Some aspell packages may need an additional option (e.g. on Debian Jessie: --local-data-dir=/usr/lib/aspell). See Debian bug 772415. aspellKeepStderr Set this to have a look at aspell dictionary creation errors. There are always many, so this is mostly for debugging. noaspell Disable aspell use. The aspell dictionary generation takes time, and some combinations of aspell version, language, and local terms, result in aspell crashing, so it sometimes makes sense to just disable the thing. monauxinterval Auxiliary database update interval. The real time indexer only updates the auxiliary databases (stemdb, aspell) periodically, because it would be too costly to do it for every document change. The default period is one hour. monixinterval Minimum interval (seconds) between processings of the indexing queue. The real time indexer does not process each event when it comes in, but lets the queue accumulate, to diminish overhead and to aggregate multiple events affecting the same file. Default 30 S. mondelaypatterns Timing parameters for the real time indexing. Definitions for files which get a longer delay before reindexing is allowed. This is for fast-changing files, that should only be reindexed once in a while. A list of wildcardPattern:seconds pairs. The patterns are matched with fnmatch(pattern, path, 0) You can quote entries containing white space with double quotes (quote the whole entry, not the pattern). The default is empty. Example: mondelaypatterns = *.log:20 "*with spaces.*:30" monioniceclass ionice class for the real time indexing process On platforms where this is supported. The default value is 3. monioniceclassdata ionice class parameter for the real time indexing process. On platforms where this is supported. The default is empty. Query-time parameters (no impact on the index) autodiacsens auto-trigger diacritics sensitivity (raw index only). IF the index is not stripped, decide if we automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the "D" modifier to specify diacritics sensitivity. Default is no. autocasesens auto-trigger case sensitivity (raw index only). 
IF the index is not stripped (see indexStripChars), decide if we automatically trigger character case sensitivity if the search term has upper-case characters in any but the first position. Else you need to use the query language and the "C" modifier to specify character-case sensitivity. Default is yes. maxTermExpand Maximum query expansion count for a single term (e.g.: when using wildcards). This only affects queries, not indexing. We used to not limit this at all (except for filenames where the limit was too low at 1000), but it is unreasonable with a big index. Default 10000. maxXapianClauses Maximum number of clauses we add to a single Xapian query. This only affects queries, not indexing. In some cases, the result of term expansion can be multiplicative, and we want to avoid eating all the memory. Default 50000. snippetMaxPosWalk Maximum number of positions we walk while populating a snippet for the result list. The default of 1,000,000 may be insufficient for very big documents, the consequence would be snippets with possibly meaning-altering missing words. Parameters for the PDF input script pdfocr Attempt OCR of PDF files with no text content if both tesseract and pdftoppm are installed. This can be defined in subdirectories. The default is off because OCR is so very slow. pdfocrlang Language to assume for PDF OCR. This is very important for having a reasonable rate of errors with tesseract. This can also be set through a configuration variable or directory-local parameters. See the rclpdf.py script. pdfattach Enable PDF attachment extraction by executing pdftk (if available). This is normally disabled, because it does slow down PDF indexing a bit even if not one attachment is ever found. pdfextrameta Extract text from selected XMP metadata tags. This is a space-separated list of qualified XMP tag names. Each element can also include a translation to a Recoll field name, separated by a '|' character. If the second element is absent, the tag name is used as the Recoll field names. You will also need to add specifications to the "fields" file to direct processing of the extracted data. pdfextrametafix Define name of XMP field editing script. This defines the name of a script to be loaded for editing XMP field values. The script should define a 'MetaFixer' class with a metafix() method which will be called with the qualified tag name and value of each selected field, for editing or erasing. A new instance is created for each document, so that the object can keep state for, e.g. eliminating duplicate values. Parameters set for specific locations mhmboxquirks Enable thunderbird/mozilla-seamonkey mbox format quirks Set this for the directory where the email mbox files are stored. recoll-1.26.3/doc/user/custom.xsl0000644000175000017500000000067313303776057013612 00000000000000 recoll-1.26.3/doc/user/usermanual.xml0000644000175000017500000116720013566424763014454 00000000000000 Recoll"> http://www.recoll.org/features.html"> Xapian"> Windows"> Unix-like systems"> ]> Recoll user manual Jean-Francois Dockes
jfd@recoll.org
2005-2019 Jean-Francois Dockes. Permission is granted to copy, distribute and/or modify this document under the terms of the GNU Free Documentation License, Version 1.3 or any later version published by the Free Software Foundation; with no Invariant Sections, no Front-Cover Texts, and no Back-Cover Texts. A copy of the license can be found on the GNU web site. This document introduces full text search notions and describes the installation and use of the &RCL; application. This version describes &RCL; &RCLVERSION;. 
Introduction This document introduces full text search notions and describes the installation and use of the &RCL; application. It is updated for &RCL; &RCLVERSION;. &RCL; was for a long time dedicated to Unix-like systems. It was only lately (2015) ported to MS-Windows. Many references in this manual, especially file locations, are specific to Unix, and not valid on &WIN;, where some described features are also not available. The manual will be progressively updated. Until this happens, on &WIN;, most references to shared files can be translated by looking under the Recoll installation directory (esp. the Share subdirectory). The user configuration is stored by default under AppData/Local/Recoll inside the user directory, along with the index itself. Giving it a try If you do not like reading manuals (who does?) but wish to give &RCL; a try, just install the application and start the recoll graphical user interface (GUI), which will ask permission to index your home directory, allowing you to search immediately after indexing completes. Do not do this if your home directory contains a huge number of documents and you do not want to wait or are very short on disk space. In this case, you may first want to customize the configuration to restrict the indexed area (shortcut: from the recoll GUI go to: Preferences Indexing configuration , then adjust the Top directories section). On &LIN;, you may need to install the appropriate supporting applications for document types that need them (for example antiword for Microsoft Word files). The &RCL; for &WIN; package is self-contained and includes most useful auxiliary programs. Full text search &RCL; is a full text search application, which means that it finds your data by content rather than by external attributes (like the file name). You specify words (terms) which should or should not appear in the text you are looking for, and receive in return a list of matching documents, ordered so that the most relevant documents will appear first. You do not need to remember in what file or email message you stored a given piece of information. You just ask for related terms, and the tool will return a list of documents where these terms are prominent, in a similar way to Internet search engines. Full text search applications try to determine which documents are most relevant to the search terms you provide. Computer algorithms for determining relevance can be very complex, and in general are inferior to the power of the human mind to rapidly determine relevance. The quality of relevance guessing is probably the most important aspect when evaluating a search application. &RCL; relies on the &XAP; probabilistic information retrieval library to determine relevance. In many cases, you are looking for all the forms of a word, including plurals, different tenses for a verb, or terms derived from the same root or stem (example: floor, floors, floored, flooring...). Queries are usually automatically expanded to all such related terms (words that reduce to the same stem). This can be prevented for searching for a specific form. Stemming, by itself, does not accommodate for misspellings or phonetic searches. A full text search application may also support this form of approximation. For example, a search for aliterattion returning no result might propose alliteration, alteration, alterations, or altercation as possible replacement terms. 
&RCL; bases its suggestions on the actual index contents, so that suggestions may be made for words which would not appear in a standard dictionary. Recoll overview &RCL; uses the &XAP; information retrieval library as its storage and retrieval engine. &XAP; is a very mature package using a sophisticated probabilistic ranking model. The &XAP; library manages an index database which describes where terms appear in your document files. It efficiently processes the complex queries which are produced by the &RCL; query expansion mechanism, and is in charge of the all-important relevance computation task. &RCL; provides the mechanisms and interface to get data into and out of the index. This includes translating the many possible document formats into pure text, handling term variations (using &XAP; stemmers), and spelling approximations (using the aspell speller), interpreting user queries and presenting results. In a shorter way, &RCL; does the dirty footwork, &XAP; deals with the intelligent parts of the process. The &XAP; index can be big (roughly the size of the original document set), but it is not a document archive. &RCL; can only display documents that still exist at the place from which they were indexed. &RCL; stores all internal data in Unicode UTF-8 format, and it can index many types of files with different character sets, encodings, and languages into the same index. It can process documents embedded inside other documents (for example a PDF document stored inside a Zip archive sent as an email attachment...), down to an arbitrary depth. Stemming is the process by which &RCL; reduces words to their radicals so that searching does not depend, for example, on a word being singular or plural (floor, floors), or on a verb tense (flooring, floored). Because the mechanisms used for stemming depend on the specific grammatical rules for each language, there is a separate &XAP; stemmer module for most common languages where stemming makes sense. &RCL; stores the unstemmed versions of terms in the main index and uses auxiliary databases for term expansion (one for each stemming language), which means that you can switch stemming languages between searches, or add a language without needing a full reindex. Storing documents written in different languages in the same index is possible, and commonly done. In this situation, you can specify several stemming languages for the index. &RCL; currently makes no attempt at automatic language recognition, which means that the stemmer will sometimes be applied to terms from other languages with potentially strange results. In practise, even if this introduces possibilities of confusion, this approach has been proven quite useful, and it is much less cumbersome than separating your documents according to what language they are written in. By default, &RCL; strips most accents and diacritics from terms, and converts them to lower case before either storing them in the index or searching for them. As a consequence, it is impossible to search for a particular capitalization of a term (US / us), or to discriminate two terms based on diacritics (sake / saké, mate / maté). &RCL; can optionally store the raw terms, without accent stripping or case conversion. In this configuration, default searches will behave as before, but it is possible to perform searches sensitive to case and diacritics. This is described in more detail in the section about index case and diacritics sensitivity. 
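As a concrete illustration (the details are given in the section about index case and diacritics sensitivity), a raw index is obtained by setting indexStripChars to false in the index configuration file before the initial indexing pass; changing it later implies resetting and rebuilding the index:

indexStripChars = 0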
&RCL; uses many parameters to define exactly what to index, and how to classify and decode the source documents. These are kept in configuration files. A default configuration is copied into a standard location (usually something like /usr/share/recoll/examples) during installation. The default values set by the configuration files in this directory may be overridden by values set inside your personal configuration. With the default configuration, &RCL; will index your home directory with generic parameters. The configuration can be customized either by editing the text files or by using configuration menus in the recoll GUI. The indexing process is started automatically (after asking permission), the first time you execute the recoll GUI. Indexing can also be performed by executing the recollindex command. &RCL; indexing is multithreaded by default when appropriate hardware resources are available, and can perform in parallel multiple tasks for text extraction, segmentation and index updates. Searches are usually performed inside the recoll GUI, which has many options to help you find what you are looking for. However, there are other ways to query the index: A command line interface. A Python programming interface A KDE KIO slave module. A Ubuntu Unity Scope module. A Gnome Shell Search Provider. A WEB interface. Indexing Introduction Indexing is the process by which the set of documents is analyzed and the data entered into the database. &RCL; indexing is normally incremental: documents will only be processed if they have been modified since the last run. On the first execution, all documents will need processing. A full index build can be forced later by specifying an option to the indexing command (recollindex or ). recollindex skips files which caused an error during a previous pass. This is a performance optimization, and a new behaviour in version 1.21 (failed files were always retried by previous versions). The command line option can be set to retry failed files, for example after updating an input handler. The following sections give an overview of different aspects of the indexing processes and configuration, with links to detailed sections. Depending on your data, temporary files may be needed during indexing, some of them possibly quite big. You can use the RECOLL_TMPDIR or TMPDIR environment variables to determine where they are created (the default is to use /tmp). Using TMPDIR has the nice property that it may also be taken into account by auxiliary commands executed by recollindex. Indexing modes &RCL; indexing can be performed along two main modes: <link linkend="RCL.INDEXING.PERIODIC">Periodic (or batch) indexing</link> recollindex is executed at discrete times. On &LIN;, the typical usage is to have a nightly run programmed into your cron file. On &WIN;, this is the only mode available, and the indexer is usually started from the GUI (but there is nothing to prevent starting it from a command script). <link linkend="RCL.INDEXING.MONITOR">Real time indexing</link> (Only available on &LIN;). recollindex runs permanently as a daemon and uses a file system alteration monitor (e.g. inotify) to detect file changes. New or updated files are indexed at once. Monitoring a big file system tree can consume significant system resources. 
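Whichever mode is chosen, the temporary file location mentioned earlier can be set in the environment when starting the indexer manually (the directory path is just an example):

RECOLL_TMPDIR=/var/tmp/recoll-tmp recollindex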
&LIN;: choosing an indexing mode The choice between the two methods is mostly a matter of preference, and they can be combined by setting up multiple indexes (ie: use periodic indexing on a big documentation directory, and real time indexing on a small home directory), or, with &RCL; 1.24 and newer, by configuring the index so that only a subset of the tree will be monitored. The choice of method and the parameters used can be configured from the recoll GUI: Preferences Indexing schedule Configurations, multiple indexes &RCL; supports defining multiple indexes, each defined by its own configuration directory. A configuration directory contains several files which describe what should be indexed and how. When recoll or recollindex is first executed, it creates a default configuration directory. This configuration is the one used for indexing and querying when no specific configuration is specified. It is located in $HOME/.recoll/ for &LIN; and %LOCALAPPDATA%\Recoll on &WIN; (typically C:\Users\[me]\Appdata\Local\Recoll). All configuration parameters have defaults, defined in system-wide files. Without further customisation, the default configuration will process your complete home directory, with a reasonable set of defaults. It can be adjusted to process a different area of the file system, select files in different ways, and many other things. In some cases, it may be useful to create additional configuration directories, for example, to separate personal and shared indexes, or to take advantage of the organization of your data to improve search precision. In order to do this, you would create an empty directory in a location of your choice, and then instruct recoll or recollindex to use it by setting either a command line option (-c /some/directory), or an environment variable (RECOLL_CONFDIR=/some/directory). Any modification performed by the commands (e.g. configuration customisation or searches by recoll or index creation by recollindex) would then apply to the new directory and not to the default one. Once multiple indexes are created, you can use each of them separately by setting the -c option or the RECOLL_CONFDIR environment variable when starting a command, to select the desired index. It is also possible to instruct one configuration to query one or several other indexes in addition to its own, by using the External index function in the recoll GUI, or some other functions in the command line and programming tools. A plausible usage scenario for the multiple index feature would be for a system administrator to set up a central index for shared data, that you choose to search or not in addition to your personal data. Of course, there are other possibilities. for example, there are many cases where you know the subset of files that should be searched, and where narrowing the search can improve the results. You can achieve approximately the same effect with the directory filter in advanced search, but multiple indexes may have better performance and may be worth the trouble in some cases. A more advanced use case would be to use multiple index to improve indexing performance, by updating several indexes in parallel (using multiple CPU cores and disks, or possibly several machines), and then merging them, or querying them in parallel. See the section about configuring multiple indexes for more detail Document types &RCL; knows about quite a few different document types. The parameters for document types recognition and processing are set in configuration files. 
Most file types, like HTML or word processing files, only hold one document. Some file types, like email folders or zip archives, can hold many individually indexed documents, which may themselves be compound ones. Such hierarchies can go quite deep, and &RCL; can process, for example, a LibreOffice document stored as an attachment to an email message inside an email folder archived in a zip file... recollindex processes plain text, HTML, OpenDocument (Open/LibreOffice), email formats, and a few others internally. Other file types (ie: postscript, pdf, ms-word, rtf ...) need external applications for preprocessing. The list is in the installation section. After every indexing operation, &RCL; updates a list of commands that would be needed for indexing existing files types. This list can be displayed by selecting the menu option File Show Missing Helpers in the recoll GUI. It is stored in the missing text file inside the configuration directory. By default, &RCL; will try to index any file type that it has a way to read. This is sometimes not desirable, and there are ways to either exclude some types, or on the contrary define a positive list of types to be indexed. In the latter case, any type not in the list will be ignored. Excluding files by name can be done by adding wildcard name patterns to the skippedNames list, which can be done from the GUI Index configuration menu. Excluding by type can be done by setting the excludedmimetypes list in the configuration file (1.20 and later). This can be redefined for subdirectories. You can also define an exclusive list of MIME types to be indexed (no others will be indexed), by settting the indexedmimetypes configuration variable. Example: indexedmimetypes = text/html application/pdf It is possible to redefine this parameter for subdirectories. Example: [/path/to/my/dir] indexedmimetypes = application/pdf (When using sections like this, don't forget that they remain in effect until the end of the file or another section indicator). excludedmimetypes or indexedmimetypes, can be set either by editing the configuration file (recoll.conf) for the index, or by using the GUI index configuration tool. Note about MIME types When editing the indexedmimetypes or excludedmimetypes lists, you should use the MIME values listed in the mimemap file or in Recoll result lists in preference to file -i output: there are a number of differences. The file -i output should only be used for files without extensions, or for which the extension is not listed in mimemap Indexing failures Indexing may fail for some documents, for a number of reasons: a helper program may be missing, the document may be corrupt, we may fail to uncompress a file because no file system space is available, etc. &RCL; versions prior to 1.21 always retried to index files which had previously caused an error. This guaranteed that anything that may have become indexable (for example because a helper had been installed) would be indexed. However this was bad for performance because some indexing failures may be quite costly (for example failing to uncompress a big file because of insufficient disk space). The indexer in &RCL; versions 1.21 and later does not retry failed files by default. Retrying will only occur if an explicit option () is set on the recollindex command line, or if a script executed when recollindex starts up says so. 
The script is defined by a configuration variable (checkneedretryindexscript), and makes a rather lame attempt at deciding if a helper command may have been installed, by checking if any of the common bin directories have changed. Recovery In the rare case where the index becomes corrupted (which can signal itself by weird search results or crashes), the index files need to be erased before restarting a clean indexing pass. Just delete the xapiandb directory (see next section), or, alternatively, start the next recollindex with the option, which will reset the database before indexing. The difference between the two methods is that the second will not change the current index format, which may be undesirable if a newer format is supported by the &XAP; version. Index storage The default location for the index data is the xapiandb subdirectory of the &RCL; configuration directory, typically $HOME/.recoll/xapiandb/. This can be changed via two different methods (with different purposes): For a given configuration directory, you can specify a non-default storage location for the index by setting the dbdir parameter in the configuration file (see the configuration section). This method would mainly be of use if you wanted to keep the configuration directory in its default location, but desired another location for the index, typically out of disk occupation or performance concerns. You can specify a different configuration directory by setting the RECOLL_CONFDIR environment variable, or using the option to the &RCL; commands. This method would typically be used to index different areas of the file system to different indexes. For example, if you were to issue the following command: recoll -c ~/.indexes-email Then &RCL; would use configuration files stored in ~/.indexes-email/ and, (unless specified otherwise in recoll.conf) would look for the index in ~/.indexes-email/xapiandb/. Using multiple configuration directories and configuration options allows you to tailor multiple configurations and indexes to handle whatever subset of the available data you wish to make searchable. The size of the index is determined by the size of the set of documents, but the ratio can vary a lot. For a typical mixed set of documents, the index size will often be close to the data set size. In specific cases (a set of compressed mbox files for example), the index can become much bigger than the documents. It may also be much smaller if the documents contain a lot of images or other non-indexed data (an extreme example being a set of mp3 files where only the tags would be indexed). Of course, images, sound and video do not increase the index size, which means that in most cases, the space used by the index will be negligible against the total amount of data on the computer. The index data directory (xapiandb) only contains data that can be completely rebuilt by an index run (as long as the original documents exist), and it can always be destroyed safely. &XAP; index formats &XAP; versions usually support several formats for index storage. A given major &XAP; version will have a current format, used to create new indexes, and will also support the format from the previous major version. &XAP; will not convert automatically an existing index from the older format to the newer one. 
If you want to upgrade to the new format, or if a very old index needs to be converted because its format is not supported any more, you will have to explicitly delete the old index (typically ~/.recoll/xapiandb), then run a normal indexing command. Using recollindex option would not work in this situation. Security aspects The &RCL; index does not hold complete copies of the indexed documents (it almost does after version 1.24). But it does hold enough data to allow for an almost complete reconstruction. If confidential data is indexed, access to the database directory should be restricted. &RCL; will create the configuration directory with a mode of 0700 (access by owner only). As the index data directory is by default a sub-directory of the configuration directory, this should result in appropriate protection. If you use another setup, you should think of the kind of protection you need for your index, set the directory and files access modes appropriately, and also maybe adjust the umask used during index updates. Special considerations for big indexes This only needs concern you if your index is going to be bigger than around 5 GBytes. Beyond 10 GBytes, it becomes a serious issue. Most people have much smaller indexes. For reference, 5 GBytes would be around 2000 bibles, a lot of text. If you have a huge text dataset (remember: images don't count, the text content of PDFs is typically less than 5% of the file size), read on. The amount of writing performed by Xapian during index creation is not linear with the index size (it is somewhere between linear and quadratic). For big indexes this becomes a performance issue, and may even be an SSD disk wear issue. The problem can be mitigated by observing the following rules: Partition the data set and create several indexes of reasonable size rather than a huge one. These indexes can then be queried in parallel (using the &RCL; external indexes facility), or merged using xapian-compact. Have a lot of RAM available and set the idxflushmb &RCL; configuration parameter as high as you can without swapping (experimentation will be needed). 200 would be a minimum in this context. Use Xapian 1.4.10 or newer, as this version brought a significant improvement in the amount of writes. Index configuration Variables set inside the &RCL; configuration files control which areas of the file system are indexed, and how files are processed. These variables can be set either by editing the text files or by using the dialogs in the recoll GUI. The first time you start recoll, you will be asked whether or not you would like it to build the index. If you want to adjust the configuration before indexing, just click Cancel at this point, which will get you into the configuration interface. If you exit at this point, recoll will have created a default configuration directory with empty configuration files, which you can then edit. The configuration is documented inside the installation chapter of this document, or in the recoll.conf5 manual page.Both documents are automatically generated from the comments inside the configuration file. The most immediately useful variable is probably topdirs, which lists the subtrees and files to be indexed. The applications needed to index file types other than text, HTML or email (ie: pdf, postscript, ms-word...) are described in the external packages section. As of Recoll 1.18 there are two incompatible types of Recoll indexes, depending on the treatment of character case and diacritics. 
A further section describes the two types in more detail. Multiple indexes Multiple &RCL; indexes can be created by using several configuration directories which are typically set to index different areas of the file system. A specific index can be selected by setting the RECOLL_CONFDIR environment variable or giving the option to recoll and recollindex. The recollindex program, used for creating or updating indexes, always works on a single index. The different configurations are entirely independant (no parameters are ever shared between configurations when indexing). All the search interfaces (recoll, recollq, the Python API, etc.) operate with a main configuration, from which both configuration and index data are used, and can also query data from multiple additional indexes. Only the index data from the latter is used, their configuration parameters are ignored. This implies that some parameters should be consistent among index configurations which are to be used together. When searching, the current main index (defined by RECOLL_CONFDIR or ) is always active. If this is undesirable, you can set up your base configuration to index an empty directory. Index configuration parameters can be set either by using a text editor on the files, or, for most parameters, by using the recoll index configuration GUI. In the latter case, the configuration directory for which parameters are modified is the one which was selected by RECOLL_CONFDIR or the parameter, and there is no way to switch configurations within the GUI. See the configuration section for a detailed description of the parameters Some configuration parameters must be consistent among a set of multiple indexes used together for searches. Most importantly, all indexes to be queried concurrently must have the same option concerning character case and diacritics stripping, but there are other constraints. Most of the relevant parameters affect the term generation. Using multiple configurations implies a small level of command line or file manager usage. The user must explicitely create additional configuration directories, the GUI will not do it. This is to avoid mistakenly creating additional directories when an argument is mistyped. Also, the GUI or the indexer must be launched with a specific option or environment to work on the right configuration. In practise: creating and using an additional index Initially creating the configuration and index: mkdir /path/to/my/new/config Configuring the new index can be done from the recoll GUI, launched from the command line to pass the -c option (you could create a desktop file to do it for you), and then using the GUI index configuration tool to set up the index. recoll -c /path/to/my/new/config Alternatively, you can just start a text editor on the main configuration file: someEditor /path/to/my/new/config/recoll.conf Creating and updating the index can be done from the command line: recollindex -c /path/to/my/new/config or from the File menu of a GUI launched with the same option (recoll, see above). The same GUI would also let you set up batch indexing for the new index. Real time indexing can only be set up from the GUI for the default index (the menu entry will be inactive if the GUI was started with a non-default -c option). The new index can be queried alone with recoll -c /path/to/my/new/config Or, in parallel with the default index, by starting recoll without a -c option, and using the Preferences External Index Dialog menu. 
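To recap the steps above as one shell sequence (the configuration path is a placeholder to adapt):

mkdir /path/to/my/new/config
recoll -c /path/to/my/new/config        # adjust the configuration from the GUI, or edit recoll.conf
recollindex -c /path/to/my/new/config   # create or update the index
recoll -c /path/to/my/new/config        # query the new index alone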
Index case and diacritics sensitivity As of &RCL; version 1.18 you have a choice of building an index with terms stripped of character case and diacritics, or one with raw terms. For a source term of Résumé, the former will store resume, the latter Résumé. Each type of index allows performing searches insensitive to case and diacritics: with a raw index, the user entry will be expanded to match all case and diacritics variations present in the index. With a stripped index, the search term will be stripped before searching. A raw index allows using case and diacritics to discriminate between terms, e.g., returning different results when searching for US and us or resume and résumé. Read the section about search case and diacritics sensitivity for more details. The type of index to be created is controlled by the indexStripChars configuration variable which can only be changed by editing the configuration file. Any change implies an index reset (not automated by &RCL;), and all indexes in a search must be set in the same way (again, not checked by &RCL;). &RCL; creates a stripped index by default if indexStripChars is not set. As a cost for added capability, a raw index will be slightly bigger than a stripped one (around 10%). Also, searches will be more complex, so probably slightly slower, and the feature is relatively little used, so that a certain amount of weirdness cannot be excluded. One of the most adverse consequence of using a raw index is that some phrase and proximity searches may become impossible: because each term needs to be expanded, and all combinations searched for, the multiplicative expansion may become unmanageable. Indexing threads configuration (&LIN;) The &RCL; indexing process recollindex can use multiple threads to speed up indexing on multiprocessor systems. The work done to index files is divided in several stages and some of the stages can be executed by multiple threads. The stages are: File system walking: this is always performed by the main thread. File conversion and data extraction. Text processing (splitting, stemming, etc.). &XAP; index update. You can also read a longer document about the transformation of &RCL; indexing to multithreading. The threads configuration is controlled by two configuration file parameters. thrQSizes This variable defines the job input queues configuration. There are three possible queues for stages 2, 3 and 4, and this parameter should give the queue depth for each stage (three integer values). If a value of -1 is used for a given stage, no queue is used, and the thread will go on performing the next stage. In practise, deep queues have not been shown to increase performance. A value of 0 for the first queue tells &RCL; to perform autoconfiguration (no need for anything else in this case, thrTCounts is not used) - this is the default configuration. thrTCounts This defines the number of threads used for each stage. If a value of -1 is used for one of the queue depths, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the &XAP; index is necessarily single-threaded (and protected by a mutex). If the first value in thrQSizes is 0, thrTCounts is ignored. The following example would use three queues (of depth 2), and 4 threads for converting source documents, 2 for processing their text, and one to update the index. This was tested to be the best configuration on the test system (quadri-processor with multiple disks). 
thrQSizes = 2 2 2 thrTCounts = 4 2 1 The following example would use a single queue, and the complete processing for each document would be performed by a single thread (several documents will still be processed in parallel in most cases). The threads will use mutual exclusion when entering the index update stage. In practise the performance would be close to the precedent case in general, but worse in certain cases (e.g. a Zip archive would be performed purely sequentially), so the previous approach is preferred. YMMV... The 2 last values for thrTCounts are ignored. thrQSizes = 2 -1 -1 thrTCounts = 6 1 1 The following example would disable multithreading. Indexing will be performed by a single thread. thrQSizes = -1 -1 -1 The index configuration GUI Most parameters for a given index configuration can be set from a recoll GUI running on this configuration (either as default, or by setting RECOLL_CONFDIR or the option.) The interface is started from the Preferences Index Configuration menu entry. It is divided in four tabs, Global parameters, Local parameters, Web history (which is explained in the next section) and Search parameters. The Global parameters tab allows setting global variables, like the lists of top directories, skipped paths, or stemming languages. The Local parameters tab allows setting variables that can be redefined for subdirectories. This second tab has an initially empty list of customisation directories, to which you can add. The variables are then set for the currently selected directory (or at the top level if the empty line is selected). The Search parameters section defines parameters which are used at query time, but are global to an index and affect all search tools, not only the GUI. The meaning for most entries in the interface is self-evident and documented by a ToolTip popup on the text label. For more detail, you will need to refer to the configuration section of this guide. The configuration tool normally respects the comments and most of the formatting inside the configuration file, so that it is quite possible to use it on hand-edited files, which you might nevertheless want to backup first... Removable volumes &RCL; used to have no support for indexing removable volumes (portable disks, USB keys, etc.). Recent versions have improved the situation and support indexing removable volumes in two different ways: By indexing the volume in the main, fixed, index, and ensuring that the volume data is not purged if the indexing runs while the volume is mounted. (&RCL; 1.25.2). By storing a volume index on the volume itself (&RCL; 1.24). Indexing removable volumes in the main index As of version 1.25.2, &RCL; provides a simple way to ensure that the index data for an absent volume will not be purged. Two conditions must be met: The volume mount point must be a member of the topdirs list. The mount directory must be empty (when the volume is not mounted). If recollindex finds that one of the topdirs is empty when starting up, any existing data for the tree will be preserved by the indexing pass (no purge for this area). Self contained volumes As of &RCL; 1.24, it has become possible to build self-contained datasets including a &RCL; configuration directory and index together with the indexed documents, and to move such a dataset around (for example copying it to an USB drive), without having to adjust the configuration for querying the index. This is a query-time feature only. The index must only be updated in its original location. 
If an update is necessary in a different location, the index must be reset. The principle of operation is that the configuration stores the location of the original configuration directory, which must reside on the movable volume. If the volume is later mounted elsewhere, &RCL; adjusts the paths stored inside the index by the difference between the original and current locations of the configuration directory. To make a long story short, here follows a script to create a &RCL; configuration and index under a given directory (given as single parameter). The resulting data set (files + recoll directory) can later to be moved to a CDROM or thumb drive. Longer explanations come after the script. #!/bin/sh fatal() { echo $*;exit 1 } usage() { fatal "Usage: init-recoll-volume.sh <top-directory>" } test $# = 1 || usage topdir=$1 test -d "$topdir" || fatal $topdir should be a directory confdir="$topdir/recoll-config" test ! -d "$confdir" || fatal $confdir should not exist mkdir "$confdir" cd "$topdir" topdir=`pwd` cd "$confdir" confdir=`pwd` (echo topdirs = '"'$topdir'"'; \ echo orgidxconfdir = $topdir/recoll-config) > "$confdir/recoll.conf" recollindex -c "$confdir" The examples below will assume that you have a dataset under /home/me/mydata/, with the index configuration and data stored inside /home/me/mydata/recoll-confdir. In order to be able to run queries after the dataset has been moved, you must ensure the following: The main configuration file must define the orgidxconfdir variable to be the original location of the configuration directory (orgidxconfdir=/home/me/mydata/recoll-confdir must be set inside /home/me/mydata/recoll-confdir/recoll.conf in the example above). The configuration directory must exist with the documents, somewhere under the directory which will be moved. E.g. if you are moving /home/me/mydata around, the configuration directory must exist somewhere below this point, for example /home/me/mydata/recoll-confdir, or /home/me/mydata/sub/recoll-confdir. You should keep the default locations for the index elements which are relative to the configuration directory by default (principally dbdir). Only the paths referring to the documents themselves (e.g. topdirs values) should be absolute (in general, they are only used when indexing anyway). Only the first point needs an explicit user action, the &RCL; defaults are compatible with the third one, and the second is natural. If, after the move, the configuration directory needs to be copied out of the dataset (for example because the thumb drive is too slow), you can set the curidxconfdir, variable inside the copied configuration to define the location of the moved one. For example if /home/me/mydata is now mounted onto /media/me/somelabel, but the configuration directory and index has been copied to /tmp/tempconfig, you would set curidxconfdir to /media/me/somelabel/recoll-confdir inside /tmp/tempconfig/recoll.conf. orgidxconfdir would still be /home/me/mydata/recoll-confdir in the original and the copy. If you are regularly copying the configuration out of the dataset, it will be useful to write a script to automate the procedure. This can't really be done inside &RCL; because there are probably many possible variants. One example would be to copy the configuration to make it writable, but keep the index data on the medium because it is too big - in this case, the script would also need to set dbdir in the copied configuration. 
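A minimal sketch of such a script, reusing the example paths above (medium mounted on /media/me/somelabel, writable copy in /tmp/tempconfig) and assuming the default xapiandb layout, might look like this:

#!/bin/sh
# Make a writable copy of the on-medium configuration, but keep using
# the (big, read-only) index data from the medium itself.
mnt=/media/me/somelabel
cp -rp "$mnt/recoll-confdir" /tmp/tempconfig
rm -rf /tmp/tempconfig/xapiandb   # do not duplicate the index data
echo "curidxconfdir = $mnt/recoll-confdir" >> /tmp/tempconfig/recoll.conf
echo "dbdir = $mnt/recoll-confdir/xapiandb" >> /tmp/tempconfig/recoll.conf
recoll -c /tmp/tempconfig

This is only one possible variant, as noted above; adjust it depending on whether the index data should also be copied off the medium.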
The same set of modifications (&RCL; 1.24) has also made it possible to run queries from a readonly configuration directory (with slightly reduced function of course, such as not recording the query history). &LIN;: indexing visited WEB pages With the help of a Firefox extension, &RCL; can index the Internet pages that you visit. The extension has a long history: it was initially designed for the Beagle indexer, then adapted to &RCL; and the Firefox XUL API. A new version of the addon has been written to work with the WebExtensions API, which is the only one supported after Firefox version 57. The extension works by copying visited WEB pages to an indexing queue directory, which &RCL; then processes, indexing the data, storing it into a local cache, then removing the file from the queue. Because the WebExtensions API introduces more constraints to what extensions can do, the new version works with one more step: the files are first created in the browser default downloads location (typically $HOME/Downloads ), then moved by a script in the old queue location. The script is automatically executed by the &RCL; indexer versions 1.23.5 and newer. It could conceivably be executed independantly to make the new browser extension compatible with an older &RCL; version (the script is named recoll-we-move-files.py). For the WebExtensions-based version to work, it is necessary to set the webdownloadsdir value in the configuration if it was changed from the default $HOME/Downloads in the browser preferences. The visited WEB pages indexing feature can be enabled on the &RCL; side from the GUI Index configuration panel, or by editing the configuration file (set processwebqueue to 1). A current pointer to the extension can be found, along with up-to-date instructions, on the Recoll wiki. A copy of the indexed WEB pages is retained by Recoll in a local cache (from which previews can be fetched). The cache size can be adjusted from the Index configuration / Web history panel. Once the maximum size is reached, old pages are purged - both from the cache and the index - to make room for new ones, so you need to explicitly archive in some other place the pages that you want to keep indefinitely. &LIN;: using extended attributes User extended attributes are named pieces of information that most modern file systems can attach to any file. &RCL; processes extended attributes as document fields by default. A freedesktop standard defines a few special attributes, which are handled as such by &RCL;: mime_type If set, this overrides any other determination of the file MIME type. charset If set, this defines the file character set (mostly useful for plain text files). By default, other attributes are handled as &RCL; fields of the same name. On Linux, the user prefix is removed from the name. The name translation can be configured more precisely inside the fields configuration file. &LIN;: importing external tags During indexing, it is possible to import metadata for each file by executing commands. This allows, for example, extracting tag data from an external application and storing it in a field for indexing. See the section about the metadatacmds field in the main configuration chapter for a description of the configuration syntax. 
For example, if you would want &RCL; to use tags managed by tmsu in a field named tags, you would add the following to the configuration file: [/some/area/of/the/fs] metadatacmds = ; tags = tmsu tags %f Depending on the tmsu version, you may need/want to add options like --database=/some/db. You may want to restrict this processing to a subset of the directory tree, because it may slow down indexing a bit ([some/area/of/the/fs]). Note the initial semi-colon after the equal sign. In the example above, the output of tmsu is used to set a field named tags. The field name is arbitrary and could be tmsu or myfield just the same, but tags is an alias for the standard &RCL; keywords field, and the tmsu output will just augment its contents. This will avoid the need to extend the field configuration. Once re-indexing is performed (you will need to force the file reindexing, &RCL; will not detect the need by itself), you will be able to search from the query language, through any of its aliases: tags:some/alternate/values or tags:all,these,values (the compact field search syntax is supported for recoll 1.20 and later. For older versions, you would need to repeat the tags: specifier for each term, e.g. tags:some OR tags:alternate). Tags changes will not be detected by the indexer if the file itself did not change. One possible workaround would be to update the file ctime when you modify the tags, which would be consistent with how extended attributes function. A pair of chmod commands could accomplish this, or a touch -a . Alternatively, just couple the tag update with a recollindex -e -i /path/to/the/file. The PDF input handler The PDF format is very important for scientific and technical documentation, and document archival. It has extensive facilities for storing metadata along with the document, and these facilities are actually used in the real world. In consequence, the rclpdf.py PDF input handler has more complex capabilities than most others, and it is also more configurable. Specifically, rclpdf.py can automatically use tesseract to perform OCR if the document text is empty, it can be configured to extract specific metadata tags from an XMP packet, and to extract PDF attachments. OCR with Tesseract If both tesseract and pdftoppm (generally from the poppler-utils package) are installed, the PDF handler may attempt OCR on PDF files with no text content. This is controlled by the pdfocr configuration variable, which is false by default because OCR is very slow. The choice of language is very important for successfull OCR. Recoll has currently no way to determine this from the document itself. You can set the language to use through the contents of a .ocrpdflang text file in the same directory as the PDF document, or through the RECOLL_TESSERACT_LANG environment variable, or through the contents of an ocrpdf text file inside the configuration directory. If none of the above are used, &RCL; will try to guess the language from the NLS environment. XMP fields extraction The rclpdf.py script in &RCL; version 1.23.2 and later can extract XMP metadata fields by executing the pdfinfo command (usually found with poppler-utils). This is controlled by the pdfextrameta configuration variable, which specifies which tags to extract and, possibly, how to rename them. The pdfextrametafix variable can be used to designate a file with Python code to edit the metadata fields (available for &RCL; 1.23.3 and later. 1.23.2 has equivalent code inside the handler script). 
Example: import sys import re class MetaFixer(object): def __init__(self): pass def metafix(self, nm, txt): if nm == 'bibtex:pages': txt = re.sub(r'--', '-', txt) elif nm == 'someothername': # do something else pass elif nm == 'stillanother': # etc. pass return txt def wrapup(self, metaheaders): pass If the 'metafix()' method is defined, it is called for each metadata field. A new MetaFixer object is created for each PDF document (so the object can keep state for, for example, eliminating duplicate values). If the 'wrapup()' method is defined, it is called at the end of XMP fields processing with the whole metadata as parameter, as an array of '(nm, val)' pairs, allowing an alternate approach for editing or adding/deleting fields. PDF attachment indexing If pdftk is installed, and if the the pdfattach configuration variable is set, the PDF input handler will try to extract PDF attachements for indexing as sub-documents of the PDF file. This is disabled by default, because it slows down PDF indexing a bit even if not one attachment is ever found (PDF attachments are uncommon in my experience). Periodic indexing Running the indexer The recollindex program performs index updates. You can start it either from the command line or from the File menu in the recoll GUI program. When started from the GUI, the indexing will run on the same configuration recoll was started on. When started from the command line, recollindex will use the RECOLL_CONFDIR variable or accept a confdir option to specify a non-default configuration directory. If the recoll program finds no index when it starts, it will automatically start indexing (except if canceled). The GUI File menu has entries to start or stop the current indexing operation. When no indexing is running, you have a choice of updating the index or rebuilding it (the first choice only processes changed files, the second one zeroes the index before starting so that all files are processed). On Linux, the recollindex indexing process can be interrupted by sending an interrupt (Ctrl-C, SIGINT) or terminate (SIGTERM) signal. On Linux and Windows, the GUI can used to manage the indexing operation. Stopping the indexer can be done from the recoll GUI File Stop Indexing menu entry. When stopped, some time may elapse before recollindex exits, because it needs to properly flush and close the index. After an interruption, the index will be somewhat inconsistent because some operations which are normally performed at the end of the indexing pass will have been skipped (for example, the stemming and spelling databases will be inexistant or out of date). You just need to restart indexing at a later time to restore consistency. The indexing will restart at the interruption point (the full file tree will be traversed, but files that were indexed up to the interruption and for which the index is still up to date will not need to be reindexed). recollindex has many options which are listed in its manual page. Only a few will be described here. Option will reset the index when starting. This is almost the same as destroying the index files (the nuance is that the &XAP; format version will not be changed). Option will force the update of all documents without resetting the index first. This will not have the "clean start" aspect of , but the advantage is that the index will remain available for querying while it is rebuilt, which can be a significant advantage if it is very big (some installations need days for a full index rebuild). 
Option will force retrying files which previously failed to be indexed, for example because of a missing helper program. Of special interest also, maybe, are the and options. allows indexing an explicit list of files (given as command line parameters or read on stdin). tells recollindex to ignore file selection parameters from the configuration. Together, these options allow building a custom file selection process for some area of the file system, by adding the top directory to the skippedPaths list and using an appropriate file selection method to build the file list to be fed to recollindex . Trivial example: find . -name indexable.txt -print | recollindex -if recollindex will not descend into subdirectories specified as parameters, but just add them as index entries. It is up to the external file selection method to build the complete file list. Linux: using <command>cron</command> to automate indexing The most common way to set up indexing is to have a cron task execute it every night. For example the following crontab entry would do it every day at 3:30AM (supposing recollindex is in your PATH): /some/tmp/dir/recolltrace 2>&1 ]]> Or, using anacron: /tmp/rcltraceme 2>&1" ]]> The &RCL; GUI has dialogs to manage crontab entries for recollindex. You can reach them from the Preferences Indexing Schedule menu. They only work with the good old cron, and do not give access to all features of cron scheduling. The usual command to edit your crontab is crontab (which will usually start the vi editor to edit the file). You may have more sophisticated tools available on your system. Please be aware that there may be differences between your usual interactive command line environment and the one seen by crontab commands. Especially the PATH variable may be of concern. Please check the crontab manual pages about possible issues. &LIN;: real time indexing Real time monitoring/indexing is performed by starting the recollindex command. With this option, recollindex will detach from the terminal and become a daemon, permanently monitoring file changes and updating the index. In this situation, the recoll GUI File menu makes two operations available: 'Stop' and 'Trigger incremental pass'. While it is convenient that data is indexed in real time, repeated indexing can generate a significant load on the system when files such as email folders change. Also, monitoring large file trees by itself significantly taxes system resources. You probably do not want to enable it if your system is short on resources. Periodic indexing is adequate in most cases. As of &RCL; 1.24, you can set the monitordirs configuration variable to specify that only a subset of your indexed files will be monitored for instant indexing. In this situation, an incremental pass on the full tree can be triggered by either restarting the indexer, or just running recollindex, which will notify the running process. The recoll GUI also has a menu entry for this. Automatic daemon start Under KDE, Gnome and some other desktop environments, the daemon can automatically started when you log in, by creating a desktop file inside the ~/.config/autostart directory. This can be done for you by the &RCL; GUI. Use the Preferences->Indexing Schedule menu. With older X11 setups, starting the daemon is normally performed as part of the user session script. The rclmon.sh script can be used to easily start and stop the daemon. It can be found in the examples directory (typically /usr/local/[share/]recoll/examples). 
For example, a good old xdm-based session could have a .xsession script with the following lines at the end:

recollconf=$HOME/.recoll-home
recolldata=/usr/local/share/recoll
RECOLL_CONFDIR=$recollconf $recolldata/examples/rclmon.sh start
fvwm

The indexing daemon gets started, then the window manager, for which the session waits. By default the indexing daemon will monitor the state of the X11 session, and exit when it finishes, so it is not necessary to kill it explicitly. (The X11 server monitoring can be disabled with option -x to recollindex). If you use the daemon completely out of an X11 session, you need to add option -x to disable X11 session monitoring (else the daemon will not start). Miscellaneous details By default, the messages from the indexing daemon will be sent to the same file as those from the interactive commands (logfilename). You may want to change this by setting the daemlogfilename and daemloglevel configuration parameters. Also the log file will only be truncated when the daemon starts. If the daemon runs permanently, the log file may grow quite big, depending on the log level. Increasing resources for inotify On Linux systems, monitoring a big tree may need increasing the resources available to inotify, which are normally defined in /etc/sysctl.conf.

### inotify
#
# cat /proc/sys/fs/inotify/max_queued_events  - 16384
# cat /proc/sys/fs/inotify/max_user_instances - 128
# cat /proc/sys/fs/inotify/max_user_watches   - 16384
#
# -- Change to:
#
fs.inotify.max_queued_events=32768
fs.inotify.max_user_instances=256
fs.inotify.max_user_watches=32768

Especially, you will need to trim your tree or adjust the max_user_watches value if indexing exits with a message about errno ENOSPC (28) from inotify_add_watch. Slowing down the reindexing rate for fast changing files When using the real time monitor, it may happen that some files need to be indexed, but change so often that they impose an excessive load on the system. &RCL; provides a configuration option to specify the minimum time before which a file, specified by a wildcard pattern, cannot be reindexed. See the mondelaypatterns parameter in the configuration section. Searching Introduction Getting answers to specific queries is of course the whole point of &RCL;. The multiple provided interfaces always understand simple queries made of one or several words, and return appropriate results in most cases. In order to make the most of &RCL; though, it may be worthwhile to understand how it processes your input. Five different modes exist: In All Terms mode, &RCL; looks for documents containing all your input terms. Query Language mode behaves like All Terms in the absence of special input, but it can also do much more. This is the best mode for getting the most out of &RCL;. In Any Term mode, &RCL; looks for documents containing any of your input terms, preferring those which contain more. In File Name mode, &RCL; will only match file names, not content. Using a small subset of the index allows things like left-hand wildcards without performance issues, and may sometimes be useful. The GUI Advanced Search mode is actually not more powerful than the query language, but it helps you build complex queries without having to remember the language, and avoids any interpretation ambiguity, as it bypasses the user input parser. These five input modes are supported by the different user interfaces which are described in the following sections. 
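As a quick, hypothetical illustration of the difference between the two most useful modes (the query language syntax is described in detail further down, and the terms here are of course arbitrary): typing

budget report 2010

in All Terms mode finds the documents which contain all three words, while a Query Language input such as

budget report ext:pdf date:2010/

would additionally restrict the matches to files with a pdf extension, dated 2010 or later.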
Searching with the Qt graphical user interface The recoll program provides the main user interface for searching. It is based on the Qt library. recoll has two search modes: Simple search (the default, on the main screen) has a single entry field where you can enter multiple words. Advanced search (a panel accessed through the Tools menu or the toolbox bar icon) has multiple entry fields, which you may use to build a logical condition, with additional filtering on file type, location in the file system, modification date, and size. In most cases, you can enter the terms as you think them, even if they contain embedded punctuation or other non-textual characters (e.g. &RCL; can handle things like email addresses). The main case where you should enter text differently from how it is printed is for east-asian languages (Chinese, Japanese, Korean). Words composed of single or multiple characters should be entered separated by white space in this case (they would typically be printed without white space). Some searches can be quite complex, and you may want to re-use them later, perhaps with some tweaking. &RCL; can save and restore searches. See Saving and restoring queries. Simple search Start the recoll program. Possibly choose a search mode: Any term, All terms, File name or Query language. Enter search term(s) in the text field at the top of the window. Click the Search button or hit the Enter key to start the search. The initial default search mode is Query language. Without special directives, this will look for documents containing all of the search terms (the ones with more terms will get better scores), just like the All terms mode. Any term will search for documents where at least one of the terms appears. File name will exclusively look for file names, not contents. All search modes allow terms to be expanded with wildcard characters (*, ?, []). See the section about wildcards for more details. In all modes except File name, you can search for exact phrases (adjacent words in a given order) by enclosing the input inside double quotes. Ex: "virtual reality". The Query Language features are described in a separate section. The File name search mode will specifically look for file names. The point of having a separate file name search is that wild card expansion can be performed more efficiently on a small subset of the index (allowing wild cards on the left of terms without excessive cost). Things to know: White space in the entry should match white space in the file name, and is not treated specially. The search is insensitive to character case and accents, independently of the type of index. An entry without any wild card character and not capitalized will be prepended and appended with '*' (ie: etc -> *etc*, but Etc -> Etc). If you have a big index (many files), excessively generic fragments may result in inefficient searches. When using a stripped index (the default), character case has no influence on search, except that you can disable stem expansion for any term by capitalizing it. Ie: a search for floor will also normally look for flooring, floored, etc., but a search for Floor will only look for floor, in any character case. Stemming can also be disabled globally in the preferences. When using a raw index, the rules are a bit more complicated. &RCL; remembers the last few searches that you performed. You can directly access the search history by clicking the clock button on the right of the search entry, while the latter is empty. 
Otherwise, the history is used for entry completion (see next). Only the search texts are remembered, not the mode (all/any/file name). While text is entered in the search area, recoll will display possible completions, filtered from the history and the index search terms. This can be disabled with a GUI Preferences option. Double-clicking on a word in the result list or a preview window will insert it into the simple search entry field. You can cut and paste any text into an All terms or Any term search field, punctuation, newlines and all - except for wildcard characters (single ? characters are ok). &RCL; will process it and produce a meaningful search. This is what most differentiates this mode from the Query Language mode, where you have to care about the syntax. You can use the ToolsAdvanced search dialog for more complex searches. The default result list After starting a search, a list of results will instantly be displayed in the main list window. By default, the document list is presented in order of relevance (how well the system estimates that the document matches the query). You can sort the result by ascending or descending date by using the vertical arrows in the toolbar. Clicking on the Preview link for an entry will open an internal preview window for the document. Further Preview clicks for the same search will open tabs in the existing preview window. You can use Shift+Click to force the creation of another preview window, which may be useful to view the documents side by side. (You can also browse successive results in a single preview window by typing Shift+ArrowUp/Down in the window). Clicking the Open link will start an external viewer for the document. By default, &RCL; lets the desktop choose the appropriate application for most document types (there is a short list of exceptions, see further). If you prefer to completely customize the choice of applications, you can uncheck the Use desktop preferences option in the GUI preferences dialog, and click the Choose editor applications button to adjust the predefined &RCL; choices. The tool accepts multiple selections of MIME types (e.g. to set up the editor for the dozens of office file types). Even when Use desktop preferences is checked, there is a small list of exceptions, for MIME types where the &RCL; choice should override the desktop one. These are applications which are well integrated with &RCL;, especially evince for viewing PDF and Postscript files because of its support for opening the document at a specific page and passing a search string as an argument. Of course, you can edit the list (in the GUI preferences) if you would prefer to lose the functionality and use the standard desktop tool. You may also change the choice of applications by editing the mimeview configuration file if you find this more convenient. Each result entry also has a right-click menu with an Open With entry. This lets you choose an application from the list of those which registered with the desktop for the document MIME type. The Preview and Open edit links may not be present for all entries, meaning that &RCL; has no configured way to preview a given file type (which was indexed by name only), or no configured external editor for the file type. This can sometimes be adjusted simply by tweaking the mimemap and mimeview configuration files (the latter can be modified with the user preferences dialog). The format of the result list entries is entirely configurable by using the preference dialog to edit an HTML fragment. 
You can click on the Query details link at the top of the results page to see the query actually performed, after stem expansion and other processing. Double-clicking on any word inside the result list or a preview window will insert it into the simple search text. The result list is divided into pages (the size of which you can change in the preferences). Use the arrow buttons in the toolbar or the links at the bottom of the page to browse the results. No results: the spelling suggestions When a search yields no result, and if the aspell dictionary is configured, &RCL; will try to check for misspellings among the query terms, and will propose lists of replacements. Clicking on one of the suggestions will replace the word and restart the search. You can hold any of the modifier keys (Ctrl, Shift, etc.) while clicking if you would rather stay on the suggestion screen because several terms need replacement. The result list right-click menu Apart from the preview and edit links, you can display a pop-up menu by right-clicking over a paragraph in the result list. This menu has the following entries: Preview, Open, Open With, Run Script, Copy File Name, Copy Url, Save to File, Find similar, Preview Parent document, Open Parent document, Open Snippets Window. The Preview and Open entries do the same thing as the corresponding links. Open With lets you open the document with one of the applications claiming to be able to handle its MIME type (the information comes from the .desktop files in /usr/share/applications). Run Script allows starting an arbitrary command on the result file. It will only appear for results which are top-level files. See further for a more detailed description. The Copy File Name and Copy Url entries copy the relevant data to the clipboard, for later pasting. Save to File allows saving the contents of a result document to a chosen file. This entry will only appear if the document does not correspond to an existing file, but is a subdocument inside such a file (ie: an email attachment). It is especially useful to extract attachments with no associated editor. The Open/Preview Parent document entries allow working with the higher level document (e.g. the email message an attachment comes from). &RCL; is sometimes not totally accurate as to what it can or can't do in this area. For example the Parent entry will also appear for an email which is part of an mbox folder file, but you can't actually visualize the mbox (there will be an error dialog if you try). If the document is a top-level file, Open Parent will start the default file manager on the enclosing filesystem directory. The Find similar entry will select a number of relevant terms from the current document and enter them into the simple search field. You can then start a simple search, with a good chance of finding documents related to the current result. I can't remember a single instance where this function was actually useful to me... The Open Snippets Window entry will only appear for documents which support page breaks (typically PDF, Postscript, DVI). The snippets window lists extracts from the document, taken around search term occurrences, along with the corresponding page number, as links which can be used to start the native viewer on the appropriate page. If the viewer supports it, its search function will also be primed with one of the search terms. The result table In &RCL; 1.15 and newer, the results can be displayed in spreadsheet-like fashion. 
You can switch to this presentation by clicking the table-like icon in the toolbar (this is a toggle, click again to restore the list). Clicking on the column headers will allow sorting by the values in the column. You can click again to invert the order, and use the header right-click menu to reset sorting to the default relevance order (you can also use the sort-by-date arrows to do this). Both the list and the table display the same underlying results. The sort order set from the table is still active if you switch back to the list mode. You can click twice on a date sort arrow to reset it from there. The header right-click menu allows adding or deleting columns. The columns can be resized, and their order can be changed (by dragging). All the changes are recorded when you quit recoll. Hovering over a table row will update the detail area at the bottom of the window with the corresponding values. You can click the row to freeze the display. The bottom area is equivalent to a result list paragraph, with links for starting a preview or a native application, and an equivalent right-click menu. Typing Esc (the Escape key) will unfreeze the display. Running arbitrary commands on result files (1.20 and later) Apart from the Open and Open With operations, which allow starting an application on a result document (or a temporary copy), based on its MIME type, it is also possible to run arbitrary commands on results which are top-level files, using the Run Script entry in the results pop-up menu. The commands which will appear in the Run Script submenu must be defined by .desktop files inside the scripts subdirectory of the current configuration directory. Here follows an example of a .desktop file, which could be named, for example, ~/.recoll/scripts/myscript.desktop (the exact file name inside the directory is irrelevant):

[Desktop Entry]
Type=Application
Name=MyFirstScript
Exec=/home/me/bin/tryscript %F
MimeType=*/*

The Name attribute defines the label which will appear inside the Run Script menu. The Exec attribute defines the program to be run, which does not need to actually be a script, of course. The MimeType attribute is not used, but needs to exist. The commands defined this way can also be used from links inside the result paragraph. As an example, it might make sense to write a script which would move the document to the trash and purge it from the &RCL; index. Displaying thumbnails The default format for the result list entries and the detail area of the result table display an icon for each result document. The icon is either a generic one determined from the MIME type, or a thumbnail of the document appearance. Thumbnails are only displayed if found in the standard freedesktop location, where they would typically have been created by a file manager. Recoll has no capability to create thumbnails. A relatively simple trick is to use the Open parent document/folder entry in the result list popup menu. This should open a file manager window on the containing directory, which should in turn create the thumbnails (depending on your settings). Restarting the search should then display the thumbnails. There are also some pointers about thumbnail generation on the &RCL; wiki. The preview window The preview window opens when you first click a Preview link inside the result list. Subsequent preview requests for a given search open new tabs in the existing window (except if you hold the Shift key while clicking which will open a new window for side by side viewing). 
Starting another search and requesting a preview will create a new preview window. The old one stays open until you close it. You can close a preview tab by typing Ctrl-W (Ctrl + W) in the window. Closing the last tab for a window will also close the window. Of course you can also close a preview window by using the window manager button in the top of the frame. You can display successive or previous documents from the result list inside a preview tab by typing Shift+Down or Shift+Up (Down and Up are the arrow keys). A right-click menu in the text area allows switching between displaying the main text or the contents of fields associated to the document (ie: author, abstract, etc.). This is especially useful in cases where the term match did not occur in the main text but in one of the fields. In the case of images, you can switch between three displays: the image itself, the image metadata as extracted by exiftool and the fields, which is the metadata stored in the index. You can print the current preview window contents by typing Ctrl-P (Ctrl + P) in the window text. Searching inside the preview The preview window has an internal search capability, mostly controlled by the panel at the bottom of the window, which works in two modes: as a classical editor incremental search, where we look for the text entered in the entry zone, or as a way to walk the matches between the document and the &RCL; query that found it. Incremental text search The preview tabs have an internal incremental search function. You initiate the search either by typing a / (slash) or Ctrl-F inside the text area or by clicking into the Search for: text field and entering the search string. You can then use the Next and Previous buttons to find the next/previous occurrence. You can also type F3 inside the text area to get to the next occurrence. If you have a search string entered and you use Ctrl-Up/Ctrl-Down to browse the results, the search is initiated for each successive document. If the string is found, the cursor will be positioned at the first occurrence of the search string. Walking the match lists If the entry area is empty when you click the Next or Previous buttons, the editor will be scrolled to show the next match to any search term (the next highlighted zone). If you select a search group from the dropdown list and click Next or Previous, the match list for this group will be walked. This is not the same as a text search, because the occurrences will include non-exact matches (as caused by stemming or wildcards). The search will revert to the text mode as soon as you edit the entry area. The Query Fragments window Selecting the Tools Query Fragments menu entry will open a window with radio- and check-buttons which can be used to activate query language fragments for filtering the current query. This can be useful if you have frequent reusable selectors, for example, filtering on alternate directories, or searching just one category of files, not covered by the standard category selectors. The contents of the window are entirely customizable, and defined by the contents of the fragbuts.xml file inside the configuration directory. The sample file distributed with &RCL; (which you should be able to find under /usr/share/recoll/examples/fragbuts.xml) contains an example which filters the results from the WEB history. 
Here follows an example:

<?xml version="1.0" encoding="UTF-8"?>
<fragbuts version="1.0">
  <radiobuttons>
    <fragbut>
      <label>Include Web Results</label>
      <frag></frag>
    </fragbut>
    <fragbut>
      <label>Exclude Web Results</label>
      <frag>-rclbes:BGL</frag>
    </fragbut>
    <fragbut>
      <label>Only Web Results</label>
      <frag>rclbes:BGL</frag>
    </fragbut>
  </radiobuttons>
  <buttons>
    <fragbut>
      <label>Year 2010</label>
      <frag>date:2010-01-01/2010-12-31</frag>
    </fragbut>
    <fragbut>
      <label>My Great Directory Only</label>
      <frag>dir:/my/great/directory</frag>
    </fragbut>
  </buttons>
</fragbuts>

Each radiobuttons or buttons section defines a line of checkbuttons or radiobuttons inside the window. Any number of buttons can be selected, but the radiobuttons in a line are exclusive. Each fragbut section defines the label for a button, and the Query Language fragment which will be added (as an AND filter) before performing the query if the button is active. This feature is new in &RCL; 1.20, and will probably be refined depending on user feedback. Complex/advanced search The advanced search dialog helps you build more complex queries without memorizing the search language constructs. It can be opened through the Tools menu or through the main toolbar. &RCL; keeps a history of searches. See Advanced search history. The dialog has two tabs: The first tab lets you specify terms to search for, and permits specifying multiple clauses which are combined to build the search. The second tab lets you filter the results according to file size, date of modification, MIME type, or location. Click on the Start Search button in the advanced search dialog, or type Enter in any text field to start the search. The button in the main window always performs a simple search. Click on the Show query details link at the top of the result page to see the query expansion. Advanced search: the "find" tab This part of the dialog lets you construct a query by combining multiple clauses of different types. Each entry field is configurable for the following modes: All terms. Any term. None of the terms. Phrase (exact terms in order within an adjustable window). Proximity (terms in any order within an adjustable window). Filename search. Additional entry fields can be created by clicking the Add clause button. When searching, the non-empty clauses will be combined either with an AND or an OR conjunction, depending on the choice made on the left (All clauses or Any clause). Entries of all types except "Phrase" and "Near" accept a mix of single words and phrases enclosed in double quotes. Stemming and wildcard expansion will be performed as for simple search. Phrases and Proximity searches These two clauses work in similar ways, with the difference that proximity searches do not impose an order on the words. In both cases, an adjustable number (slack) of non-matched words may be accepted between the searched ones (use the counter on the left to adjust this count). For phrases, the default count is zero (exact match). For proximity it is ten (meaning that two search terms would be matched if found within a window of twelve words). Examples: a phrase search for quick fox with a slack of 0 will match quick fox but not quick brown fox. With a slack of 1 it will match the latter, but not fox quick. A proximity search for quick fox with the default slack will match the latter, and also a fox is a cunning and quick animal. 
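For reference, and assuming the double-quote modifier syntax described in the query language section further down, the same kinds of searches can also be expressed in Query Language mode by appending modifiers to a quoted group: "quick fox"o1 would request a phrase with a slack of 1, and "quick fox"po10 an unordered proximity search with a slack of 10.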
Advanced search: the "filter" tab This part of the dialog has several sections which allow filtering the results of a search according to a number of criteria. The first section allows filtering by dates of last modification. You can specify both a minimum and a maximum date. The initial values are set according to the oldest and newest documents found in the index. The next section allows filtering the results by file size. There are two entries for minimum and maximum size. Enter decimal numbers. You can use suffix multipliers: k/K, m/M, g/G, t/T for 1E3, 1E6, 1E9, 1E12 respectively. The next section allows filtering the results by their MIME types, or MIME categories (ie: media/text/message/etc.). You can transfer the types between two boxes, to define which will be included or excluded by the search. The state of the file type selection can be saved as the default (the file type filter will not be activated at program start-up, but the lists will be in the restored state). The bottom section allows restricting the search results to a sub-tree of the indexed area. You can use the Invert checkbox to search for files not in the sub-tree instead. If you use directory filtering often and on big subsets of the file system, you may think of setting up multiple indexes instead, as the performance may be better. You can use relative/partial paths for filtering. Ie, entering dirA/dirB would match either /dir1/dirA/dirB/myfile1 or /dir2/dirA/dirB/someother/myfile2. Advanced search history The advanced search tool memorizes the last 100 searches performed. You can walk the saved searches by using the up and down arrow keys while the keyboard focus belongs to the advanced search dialog. The complex search history can be erased, along with the one for simple search, by selecting the File Erase Search History menu entry. The term explorer tool &RCL; automatically manages the expansion of search terms to their derivatives (ie: plural/singular, verb inflections). But there are other cases where the exact search term is not known. For example, you may not remember the exact spelling, or only know the beginning of the name. The search will only propose replacement terms with spelling variations when no matching documents were found. In some cases, both proper spellings and misspellings are present in the index, and it may be interesting to look for them explicitly. The term explorer tool (started from the toolbar icon or from the Term explorer entry of the Tools menu) can be used to search the full index terms list. It has several modes of operation: Wildcard In this mode of operation, you can enter a search string with shell-like wildcards (*, ?, []). ie: xapi* would display all index terms beginning with xapi. (More about wildcards here). Regular expression This mode will accept a regular expression as input. Example: word[0-9]+. The expression is implicitly anchored at the beginning. Ie: press will match pression but not expression. You can use .*press to match the latter, but be aware that this will cause a full index term list scan, which can be quite long. Stem expansion This mode will perform the usual stem expansion normally done as part of user input processing. As such it is probably mostly useful to demonstrate the process. Spelling/Phonetic In this mode, you enter the term as you think it is spelled, and &RCL; will do its best to find index terms that sound like your entry. 
This mode uses the Aspell spelling application, which must be installed on your system for things to work (if your documents contain non-ASCII characters, &RCL; needs an aspell version newer than 0.60 for UTF-8 support). The language which is used to build the dictionary out of the index terms (which is done at the end of an indexing pass) is the one defined by your NLS environment. Weird things will probably happen if languages are mixed up. Note that in cases where &RCL; does not know the beginning of the string to search for (ie a wildcard expression like *coll), the expansion can take quite a long time because the full index term list will have to be processed. The expansion is currently limited to 10000 results for wildcards and regular expressions. It is possible to change the limit in the configuration file. Double-clicking on a term in the result list will insert it into the simple search entry field. You can also cut/paste between the result list and any entry field (the end of lines will be taken care of). Multiple indexes See the section describing the use of multiple indexes for generalities. Only the aspects concerning the recoll GUI are described here. A recoll program instance is always associated with a specific index, which is the one to be updated when requested from the File menu, but it can use any number of &RCL; indexes for searching. The external indexes can be selected through the external indexes tab in the preferences dialog. Index selection is performed in two phases. A set of all usable indexes must first be defined, and then the subset of indexes to be used for searching. These parameters are retained across program executions (they are kept separately for each &RCL; configuration). The set of all indexes is usually quite stable, while the active ones might typically be adjusted quite frequently. The main index (defined by RECOLL_CONFDIR) is always active. If this is undesirable, you can set up your base configuration to index an empty directory. When adding a new index to the set, you can select either a &RCL; configuration directory, or directly a &XAP; index directory. In the first case, the &XAP; index directory will be obtained from the selected configuration. As building the set of all indexes can be a little tedious when done through the user interface, you can use the RECOLL_EXTRA_DBS environment variable to provide an initial set. This might typically be set up by a system administrator so that every user does not have to do it. The variable should define a colon-separated list of index directories, ie: export RECOLL_EXTRA_DBS=/some/place/xapiandb:/some/other/db Another environment variable, RECOLL_ACTIVE_EXTRA_DBS allows adding to the active list of indexes. This variable was suggested and implemented by a &RCL; user. It is mostly useful if you use scripts to mount external volumes with &RCL; indexes. By using RECOLL_EXTRA_DBS and RECOLL_ACTIVE_EXTRA_DBS, you can add and activate the index for the mounted volume when starting recoll (a short example sketch is given below). RECOLL_ACTIVE_EXTRA_DBS is available for &RCL; versions 1.17.2 and later. A change was made in the same update so that recoll will automatically deactivate unreachable indexes when starting up. Document history Documents that you actually view (with the internal preview or an external tool) are entered into the document history, which is remembered. You can display the history list by using the Tools/Doc History menu entry. You can erase the document history by using the Erase document history entry in the File menu. 
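Here is the short sketch mentioned in the multiple indexes paragraph above, for a hypothetical removable volume mounted on /media/backupdisk and carrying a &RCL; configuration (the paths are made up for the example):

mount /media/backupdisk
export RECOLL_EXTRA_DBS=/media/backupdisk/recollconf/xapiandb
export RECOLL_ACTIVE_EXTRA_DBS=/media/backupdisk/recollconf/xapiandb
recoll &

This simply adds the index stored on the volume to the set of usable indexes and activates it for the recoll instance started from the same environment.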
Sorting search results and collapsing duplicates The documents in a result list are normally sorted in order of relevance. It is possible to specify a different sort order, either by using the vertical arrows in the GUI toolbox to sort by date, or switching to the result table display and clicking on any header. The sort order chosen inside the result table remains active if you switch back to the result list, until you click one of the vertical arrows, until both are unchecked (you are back to sort by relevance). Sort parameters are remembered between program invocations, but result sorting is normally always inactive when the program starts. It is possible to keep the sorting activation state between program invocations by checking the Remember sort activation state option in the preferences. It is also possible to hide duplicate entries inside the result list (documents with the exact same contents as the displayed one). The test of identity is based on an MD5 hash of the document container, not only of the text contents (so that ie, a text document with an image added will not be a duplicate of the text only). Duplicates hiding is controlled by an entry in the GUI configuration dialog, and is off by default. As of release 1.19, when a result document does have undisplayed duplicates, a Dups link will be shown with the result list entry. Clicking the link will display the paths (URLs + ipaths) for the duplicate entries. Search tips, shortcuts Terms and search expansion Term completion Typing Esc Space in the simple search entry field while entering a word will either complete the current word if its beginning matches a unique term in the index, or open a window to propose a list of completions. Picking up new terms from result or preview text Double-clicking on a word in the result list or in a preview window will copy it to the simple search entry field. Wildcards Wildcards can be used inside search terms in all forms of searches. More about wildcards. Automatic suffixes Words like odt or ods can be automatically turned into query language ext:xxx clauses. This can be enabled in the Search preferences panel in the GUI. Disabling stem expansion Entering a capitalized word in any search field will prevent stem expansion (no search for gardening if you enter Garden instead of garden). This is the only case where character case should make a difference for a &RCL; search. You can also disable stem expansion or change the stemming language in the preferences. Finding related documents Selecting the Find similar documents entry in the result list paragraph right-click menu will select a set of "interesting" terms from the current result, and insert them into the simple search entry field. You can then possibly edit the list and start a search to find documents which may be apparented to the current result. File names File names are added as terms during indexing, and you can specify them as ordinary terms in normal search fields (&RCL; used to index all directories in the file path as terms. This has been abandoned as it did not seem really useful). Alternatively, you can use the specific file name search which will only look for file names, and may be faster than the generic search especially when using wildcards. Working with phrases and proximity Phrases and Proximity searches A phrase can be looked for by enclosing it in double quotes. Example: "user manual" will look only for occurrences of user immediately followed by manual. 
You can use the This phrase field of the advanced search dialog to the same effect. Phrases can be entered along with simple terms in all simple or advanced search entry fields (except This exact phrase). AutoPhrases This option can be set in the preferences dialog. If it is set, a phrase will be automatically built and added to simple searches when looking for Any terms. This will not change radically the results, but will give a relevance boost to the results where the search terms appear as a phrase. Ie: searching for virtual reality will still find all documents where either virtual or reality or both appear, but those which contain virtual reality should appear sooner in the list. Phrase searches can strongly slow down a query if most of the terms in the phrase are common. This is why the autophrase option is off by default for &RCL; versions before 1.17. As of version 1.17, autophrase is on by default, but very common terms will be removed from the constructed phrase. The removal threshold can be adjusted from the search preferences. Phrases and abbreviations As of &RCL; version 1.17, dotted abbreviations like I.B.M. are also automatically indexed as a word without the dots: IBM. Searching for the word inside a phrase (ie: "the IBM company") will only match the dotted abbreviation if you increase the phrase slack (using the advanced search panel control, or the o query language modifier). Literal occurrences of the word will be matched normally. Others Using fields You can use the query language and field specifications to only search certain parts of documents. This can be especially helpful with email, for example only searching emails from a specific originator: search tips from:helpfulgui Adjusting the result table columns When displaying results in table mode, you can use a right click on the table headers to activate a pop-up menu which will let you adjust what columns are displayed. You can drag the column headers to adjust their order. You can click them to sort by the field displayed in the column. You can also save the result list in CSV format. Changing the GUI geometry It is possible to configure the GUI in wide form factor by dragging the toolbars to one of the sides (their location is remembered between sessions), and moving the category filters to a menu (can be set in the Preferences GUI configuration User interface panel). Query explanation You can get an exact description of what the query looked for, including stem expansion, and Boolean operators used, by clicking on the result list header. Advanced search history As of &RCL; 1.18, you can display any of the last 100 complex searches performed by using the up and down arrow keys while the advanced search panel is active. Browsing the result list inside a preview window Entering Shift-Down or Shift-Up (Shift + an arrow key) in a preview window will display the next or the previous document from the result list. Any secondary search currently active will be executed on the new document. Scrolling the result list from the keyboard You can use PageUp and PageDown to scroll the result list, Shift+Home to go back to the first page. These work even while the focus is in the search entry. Result table: moving the focus to the table You can use Ctrl-r to move the focus from the search entry to the table, and then use the arrow keys to change the current row. Ctrl-Shift-s returns to the search. 
Result table: open / preview With the focus in the result table, you can use Ctrl-o to open the document from the current row, Ctrl-Shift-o to open the document and close recoll, Ctrl-d to preview the document, and Ctrl-e to open the document snippets window. Editing a new search while the focus is not in the search entry You can use the Ctrl-Shift-S shortcut to return the cursor to the search entry (and select the current search text), while the focus is anywhere in the main window. Forced opening of a preview window You can use Shift+Click on a result list Preview link to force the creation of a preview window instead of a new tab in the existing one. Closing previews Entering Ctrl-W in a tab will close it (and, for the last tab, close the preview window). Entering Esc will close the preview window and all its tabs. Printing previews Entering Ctrl-P in a preview window will print the currently displayed text. Quitting Entering Ctrl-Q almost anywhere will close the application. Saving and restoring queries (1.21 and later) Both simple and advanced query dialogs save recent history, but the amount is limited: old queries will eventually be forgotten. Also, important queries may be difficult to find among others. This is why both types of queries can also be explicitely saved to files, from the GUI menus: File Save last query / Load last query The default location for saved queries is a subdirectory of the current configuration directory, but saved queries are ordinary files and can be written or moved anywhere. Some of the saved query parameters are part of the preferences (e.g. autophrase or the active external indexes), and may differ when the query is loaded from the time it was saved. In this case, &RCL; will warn of the differences, but will not change the user preferences. Customizing the search interface You can customize some aspects of the search interface by using the GUI configuration entry in the Preferences menu. There are several tabs in the dialog, dealing with the interface itself, the parameters used for searching and returning results, and what indexes are searched. User interface parameters: Highlight color for query terms: Terms from the user query are highlighted in the result list samples and the preview window. The color can be chosen here. Any Qt color string should work (ie red, #ff0000). The default is blue. Style sheet: The name of a Qt style sheet text file which is applied to the whole Recoll application on startup. The default value is empty, but there is a skeleton style sheet (recoll.qss) inside the /usr/share/recoll/examples directory. Using a style sheet, you can change most recoll graphical parameters: colors, fonts, etc. See the sample file for a few simple examples. You should be aware that parameters (e.g.: the background color) set inside the &RCL; GUI style sheet will override global system preferences, with possible strange side effects: for example if you set the foreground to a light color and the background to a dark one in the desktop preferences, but only the background is set inside the &RCL; style sheet, and it is light too, then text will appear light-on-light inside the &RCL; GUI. Maximum text size highlighted for preview Inserting highlights on search term inside the text before inserting it in the preview window involves quite a lot of processing, and can be disabled over the given text size to speed up loading. Prefer HTML to plain text for preview if set, Recoll will display HTML as such inside the preview window. 
If this causes problems with the Qt HTML display, you can uncheck it to display the plain text version instead. Activate links in preview if set, Recoll will turn HTTP links found inside plain text into proper HTML anchors, and clicking a link inside a preview window will start the default browser on the link target. Plain text to HTML line style: when displaying plain text inside the preview window, &RCL; tries to preserve some of the original text line breaks and indentation. It can either use PRE HTML tags, which will well preserve the indentation but will force horizontal scrolling for long lines, or use BR tags to break at the original line breaks, which will let the editor introduce other line breaks according to the window width, but will lose some of the original indentation. The third option has been available in recent releases and is probably now the best one: use PRE tags with line wrapping. Choose editor application: this opens a dialog which allows you to select the application to be used to open each MIME type. The default is to use the xdg-open utility, but you can use this dialog to override it, setting exceptions for MIME types that will still be opened according to &RCL; preferences. This is useful for passing parameters like page numbers or search strings to applications that support them (e.g. evince). This cannot be done with xdg-open which only supports passing one parameter. Disable Qt autocompletion in search entry: this will disable the completion popup. It will only appear, and display the full history, either if you enter only white space in the search area, or if you click the clock button on the right of the area. Document filter choice style: this will let you choose if the document categories are displayed as a list or a set of buttons, or a menu. Start with simple search mode: this lets you choose the value of the simple search type on program startup. Either a fixed value (e.g. Query Language), or the value in use when the program last exited. Start with advanced search dialog open: If you use this dialog frequently, checking the entries will get it to open when recoll starts. Remember sort activation state if set, Recoll will remember the sort tool state between invocations. It normally starts with sorting disabled. Result list parameters: Number of results in a result page Result list font: There is quite a lot of information shown in the result list, and you may want to customize the font and/or font size. The rest of the fonts used by &RCL; are determined by your generic Qt config (try the qtconfig command). Edit result list paragraph format string: allows you to change the presentation of each result list entry. See the result list customisation section. Edit result page HTML header insert: allows you to define text inserted at the end of the result page HTML header. More detail in the result list customisation section. Date format: allows specifying the format used for displaying dates inside the result list. This should be specified as an strftime() string (man strftime). Abstract snippet separator: for synthetic abstracts built from index data, which are usually made of several snippets from different parts of the document, this defines the snippet separator, an ellipsis by default. Search parameters: Hide duplicate results: decides if result list entries are shown for identical documents found in different places. Stemming language: stemming obviously depends on the document's language. 
This listbox will let you choose among the stemming databases which were built during indexing (this is set in the main configuration file), or later added with recollindex -s (See the recollindex manual). Stemming languages which are dynamically added will be deleted at the next indexing pass unless they are also added in the configuration file. Automatically add phrase to simple searches: a phrase will be automatically built and added to simple searches when looking for Any terms. This will give a relevance boost to the results where the search terms appear as a phrase (consecutive and in order). Autophrase term frequency threshold percentage: very frequent terms should not be included in automatic phrase searches for performance reasons. The parameter defines the cutoff percentage (percentage of the documents where the term appears). Replace abstracts from documents: this decides if we should synthesize and display an abstract in place of an explicit abstract found within the document itself. Dynamically build abstracts: this decides if &RCL; tries to build document abstracts (lists of snippets) when displaying the result list. Abstracts are constructed by taking context from the document information, around the search terms. Synthetic abstract size: adjust to taste... Synthetic abstract context words: how many words should be displayed around each term occurrence. Query language magic file name suffixes: a list of words which automatically get turned into ext:xxx file name suffix clauses when starting a query language query (e.g.: doc xls xlsx...). This will save some typing for people who use file types a lot when querying. External indexes: This panel will let you browse for additional indexes that you may want to search. External indexes are designated by their database directory (ie: /home/someothergui/.recoll/xapiandb, /usr/local/recollglobal/xapiandb). Once entered, the indexes will appear in the External indexes list, and you can choose which ones you want to use at any moment by checking or unchecking their entries. Your main database (the one the current configuration indexes to) is always implicitly active. If this is not desirable, you can set up your configuration so that it indexes, for example, an empty directory. The result list format Newer versions of Recoll (from 1.17) normally use WebKit HTML widgets for the result list and the snippets window (this may be disabled at build time). Total customisation is possible with full support for CSS and Javascript. Conversely, there are limits to what you can do with the older Qt QTextBrowser, but still, it is possible to decide what data each result will contain, and how it will be displayed. The result list presentation can be exhaustively customized by adjusting two elements: the paragraph format, and the HTML code inside the header section. For versions 1.21 and later, the latter is also used for the snippets window. The paragraph format and the header fragment can be edited from the Result list tab of the GUI configuration. The header fragment is used both for the result list and the snippets window. The snippets list is a table and has a snippets class attribute. Each paragraph in the result list is a table, with class respar, but this can be changed by editing the paragraph format. There are a few examples on the page about customising the result list on the &RCL; web site. 
The paragraph format This is an arbitrary HTML string where the following printf-like % substitutions will be performed:

%A  Abstract
%D  Date
%I  Icon image name. This is normally determined from the MIME type. The associations are defined inside the mimeconf configuration file. If a thumbnail for the file is found at the standard Freedesktop location, this will be displayed instead.
%K  Keywords (if any)
%L  Precooked Preview, Edit, and possibly Snippets links
%M  MIME type
%N  result Number inside the result page
%P  Parent folder Url. In the case of an embedded document, this is the parent folder for the top level container file.
%R  Relevance percentage
%S  Size information
%T  Title, or Filename if not set.
%t  Title or empty.
%(filename)  File name.
%U  Url

The format of the Preview, Edit, and Snippets links is <a href="P%N">, <a href="E%N"> and <a href="A%N">, where docnum (%N) expands to the document number inside the result page. A link target defined as "F%N" will open the document corresponding to the %P parent folder expansion, usually creating a file manager window on the folder where the container file resides. E.g.: <a href="F%N">%P</a>. A link target defined as R%N|scriptname will run the corresponding script on the result file (if the document is embedded, the script will be started on the top-level parent). See the section about defining scripts. In addition to the predefined values above, all strings like %(fieldname) will be replaced by the value of the field named fieldname for this document. Only stored fields can be accessed in this way, the value of indexed but not stored fields is not known at this point in the search process (see field configuration). There are currently very few fields stored by default, apart from the values above (only author and filename), so this feature will need some custom local configuration to be useful. An example candidate would be the recipient field which is generated by the message input handlers. The default value for the paragraph format string combines these substitutions as follows:

%L  %S   %T
%M %D    %U %i
%A %K

You may, for example, try the following for a more web-like experience:

<a href="P%N">%T</a>
%A%U - %S - %L

Note that the P%N link in the above paragraph makes the title a preview link. Or the clean looking:

%L %R   %T
%S  %U
%A
%K
These samples, and some others are on the web site, with pictures to show how they look. It is also possible to define the value of the snippet separator inside the abstract section.
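As a further, hypothetical variation, assuming that the author field (stored by default) and a recipient field (which would need to be configured as stored, as explained above) are available, the middle line of the default format could be changed to also show them:

%M %D %(author) %(recipient) %U

The %(fieldname) values are simply substituted wherever they appear in the HTML fragment, like the predefined % codes.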
Searching with the KDE KIO slave What's this The &RCL; KIO slave allows performing a &RCL; search by entering an appropriate URL in a KDE open dialog, or with an HTML-based interface displayed in Konqueror. The HTML-based interface is similar to the Qt-based interface, but slightly less powerful for now. Its advantage is that you can perform your search while staying fully within the KDE framework: drag and drop from the result list works normally and you have your normal choice of applications for opening files. The alternative interface uses a directory view of search results. Due to limitations in the current KIO slave interface, it is currently not obviously useful (to me). The interface is described in more detail inside a help file which you can access by entering recoll:/ inside the konqueror URL line (this works only if the recoll KIO slave has been previously installed). The instructions for building this module are located in the source tree. See: kde/kio/recoll/00README.txt. Some Linux distributions do package the kio-recoll module, so check before diving into the build process, maybe it's already out there ready for one-click installation. Searchable documents As a sample application, the &RCL; KIO slave could allow preparing a set of HTML documents (for example a manual) so that they become their own search interface inside konqueror. This can be done by either explicitly inserting links (using the recoll://search/query URL form shown below) around some document areas, or automatically by adding a very small javascript program to the documents, like the following example, which would initiate a search by double-clicking any term:

<script language="JavaScript">
function recollsearch() {
    var t = document.getSelection();
    window.location.href = 'recoll://search/query?qtp=a&p=0&q=' +
        encodeURIComponent(t);
}
</script>
....
<body ondblclick="recollsearch()">

Searching on the command line There are several ways to obtain search results as a text stream, without a graphical interface: By passing option -t to the recoll program, or by calling it as recollq (through a link). By using the recollq program. By writing a custom Python program, using the Recoll Python API. The first two methods work in the same way and accept/need the same arguments (except for the additional -t to recoll). The query to be executed is specified as command line arguments. recollq is not always built by default. You can use the Makefile in the query directory to build it. This is a very simple program, and if you can program a little C++, you may find it useful to tailor its output format to your needs. Apart from being easily customised, recollq is only really useful on systems where the Qt libraries are not available, else it is redundant with recoll -t. recollq has a man page. The Usage string follows:

Runs a recoll query and displays result lines.
Default: will interpret the argument(s) as a xesam query string.
Query elements:
 * Implicit AND, exclusion, field spec: t1 -t2 title:t3
 * OR has priority: t1 OR t2 t3 OR t4 means (t1 OR t2) AND (t3 OR t4)
 * Phrase: "t1 t2" (needs additional quoting on cmd line)
-o Emulate the GUI simple search in ANY TERM mode
-a Emulate the GUI simple search in ALL TERMS mode
-f Emulate the GUI simple search in filename mode
-q is just ignored (compatibility with the recoll GUI command line)
Common options:
-c : specify config directory, overriding $RECOLL_CONFDIR
-d also dump file contents
-n [first-] define the result slice. The default value for [first] is 0.
   Without the option, the default max count is 2000.
   Use n=0 for no limit
-b : basic. Just output urls, no mime types or titles
-Q : no result lines, just the processed query and result count
-m : dump the whole document meta[] array for each result
-A : output the document abstracts
-S fld : sort by field
-D : sort descending
-s stemlang : set stemming language to use (must exist in index...)
   Use -s "" to turn off stem expansion
-T : use the parameter (Thesaurus) for word expansion
-i : additional index, several can be given
-e use url encoding (%xx) for urls
-F : output exactly these fields for each result. The field values are
   encoded in base64, output in one line and separated by one space
   character. This is the recommended format for use by other programs.
   Use a normal query with option -m to see the field names. Use -F ''
   to output all fields, but you probably also want option -N in this case
-N : with -F, print the (plain text) field names before the field values

Sample execution:

recollq 'ilur -nautique mime:text/html'
Recoll query: ((((ilur:(wqf=11) OR ilurs) AND_NOT (nautique:(wqf=11) OR nautiques OR nautiqu OR nautiquement)) FILTER Ttext/html))
4 results
text/html [file:///Users/uncrypted-dockes/projets/bateaux/ilur/comptes.html] [comptes.html] 18593 bytes
text/html [file:///Users/uncrypted-dockes/projets/nautique/webnautique/articles/ilur1/index.html] [Constructio...
text/html [file:///Users/uncrypted-dockes/projets/pagepers/index.html] [psxtcl/writemime/recoll]...
text/html [file:///Users/uncrypted-dockes/projets/bateaux/ilur/factEtCie/recu-chasse-maree....

The query language The query language processor is activated in the GUI simple search entry when the search mode selector is set to Query Language. It can also be used with the KIO slave or the command line search. It broadly has the same capabilities as the complex search interface in the GUI. The language was based on the now defunct Xesam user search language specification. If the results of a query language search puzzle you and you doubt what has been actually searched for, you can use the GUI Show Query link at the top of the result list to check the exact query which was finally executed by Xapian. Here follows a sample request that we are going to explain:

author:"john doe" Beatles OR Lennon Live OR Unplugged -potatoes

This would search for all documents with John Doe appearing as a phrase in the author field (exactly what this is would depend on the document type, ie: the From: header, for an email message), and containing either beatles or lennon and either live or unplugged but not potatoes (in any part of the document). An element is composed of an optional field specification, and a value, separated by a colon (the field separator is the last colon in the element). Examples: Eugenie, author:balzac, dc:title:grandet dc:title:"eugenie grandet" The colon, if present, means "contains". Xesam defines other relations, which are mostly unsupported for now (except in special cases, described further down). All elements in the search entry are normally combined with an implicit AND. It is possible to specify that elements be OR'ed instead, as in Beatles OR Lennon. The OR must be entered literally (capitals), and it has priority over the AND associations: word1 word2 OR word3 means word1 AND (word2 OR word3) not (word1 AND word2) OR word3. &RCL; versions 1.21 and later allow using parentheses to group elements, which will sometimes make things clearer, and may allow expressing combinations which would have been difficult otherwise. 
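As an illustration of parentheses grouping (assuming a 1.21 or newer version), the sample request explained above can be written in a fully explicit way as:

author:"john doe" (beatles OR lennon) (live OR unplugged) -potatoes

which selects the same documents, but leaves no doubt about how the OR groups are built.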
An element preceded by a - specifies a term that should not appear. As usual, words inside quotes define a phrase (the order of words is significant), so that title:"prejudice pride" is not the same as title:prejudice title:pride, and is unlikely to find a result. Words inside phrases and capitalized words are not stem-expanded. Wildcards may be used anywhere inside a term. Specifying a wild-card on the left of a term can produce a very slow search (or even an incorrect one if the expansion is truncated because of excessive size). Also see More about wildcards. To save you some typing, recent &RCL; versions (1.20 and later) interpret a comma-separated list of terms for a field as an AND list inside the field. Use slash characters ('/') for an OR list. No white space is allowed. So author:john,lennon will search for documents with john and lennon inside the author field (in any order), and author:john/ringo would search for john or ringo. This behaviour only happens for field queries (input without a field, comma- or slash- separated input will produce a phrase search). You can use a text field name to search the main text this way. Modifiers can be set on a double-quote value, for example to specify a proximity search (unordered). See the modifier section. No space must separate the final double-quote and the modifiers value, e.g. "two one"po10 &RCL; currently manages the following default fields: title, subject or caption are synonyms which specify data to be searched for in the document title or subject. author or from for searching the documents originators. recipient or to for searching the documents recipients. keyword for searching the document-specified keywords (few documents actually have any). filename for the document's file name. This is not necessarily set for all documents: internal documents contained inside a compound one (for example an EPUB section) do not inherit the container file name any more, this was replaced by an explicit field (see next). Sub-documents can still have a specific filename, if it is implied by the document format, for example the attachment file name for an email attachment. containerfilename. This is set for all documents, both top-level and contained sub-documents, and is always the name of the filesystem directory entry which contains the data. The terms from this field can only be matched by an explicit field specification (as opposed to terms from filename which are also indexed as general document content). This avoids getting matches for all the sub-documents when searching for the container file name. ext specifies the file name extension (Ex: ext:html) &RCL; 1.20 and later have a way to specify aliases for the field names, which will save typing, for example by aliasing filename to fn or containerfilename to cfn. See the section about the fields file. The document input handlers used while indexing have the possibility to create other fields with arbitrary names, and aliases may be defined in the configuration, so that the exact field search possibilities may be different for you if someone took care of the customisation. The field syntax also supports a few field-like, but special, criteria: dir for filtering the results on file location (Ex: dir:/home/me/somedir). -dir also works to find results not in the specified directory (release >= 1.15.8). Tilde expansion will be performed as usual (except for a bug in versions 1.19 to 1.19.11p1). 
Wildcards will be expanded, but please have a look at an important limitation of wildcards in path filters. Relative paths also make sense, for example, dir:share/doc would match either /usr/share/doc or /usr/local/share/doc Several dir clauses can be specified, both positive and negative. For example the following makes sense: dir:recoll dir:src -dir:utils -dir:common This would select results which have both recoll and src in the path (in any order), and which have not either utils or common. You can also use OR conjunctions with dir: clauses. A special aspect of dir clauses is that the values in the index are not transcoded to UTF-8, and never lower-cased or unaccented, but stored as binary. This means that you need to enter the values in the exact lower or upper case, and that searches for names with diacritics may sometimes be impossible because of character set conversion issues. Non-ASCII UNIX file paths are an unending source of trouble and are best avoided. You need to use double-quotes around the path value if it contains space characters. size for filtering the results on file size. Example: size<10000. You can use <, > or = as operators. You can specify a range like the following: size>100 size<1000. The usual k/K, m/M, g/G, t/T can be used as (decimal) multipliers. Ex: size>1k to search for files bigger than 1000 bytes. date for searching or filtering on dates. The syntax for the argument is based on the ISO8601 standard for dates and time intervals. Only dates are supported, no times. The general syntax is 2 elements separated by a / character. Each element can be a date or a period of time. Periods are specified as PnYnMnD. The n numbers are the respective numbers of years, months or days, any of which may be missing. Dates are specified as YYYY-MM-DD. The days and months parts may be missing. If the / is present but an element is missing, the missing element is interpreted as the lowest or highest date in the index. Examples: 2001-03-01/2002-05-01 the basic syntax for an interval of dates. 2001-03-01/P1Y2M the same specified with a period. 2001/ from the beginning of 2001 to the latest date in the index. 2001 the whole year of 2001 P2D/ means 2 days ago up to now if there are no documents with dates in the future. /2003 all documents from 2003 or older. Periods can also be specified with small letters (ie: p2y). mime or format for specifying the MIME type. These clauses are processed besides the normal Boolean logic of the search. Multiple values will be OR'ed (instead of the normal AND). You can specify types to be excluded, with the usual -, and use wildcards. Example: mime:text/* -mime:text/plain Specifying an explicit boolean operator before a mime specification is not supported and will produce strange results. type or rclcat for specifying the category (as in text/media/presentation/etc.). The classification of MIME types in categories is defined in the &RCL; configuration (mimeconf), and can be modified or extended. The default category names are those which permit filtering results in the main GUI screen. Categories are OR'ed like MIME types above, and can be negated with -. mime, rclcat, size and date criteria always affect the whole query (they are applied as a final filter), even if set with other terms inside a parenthese. mime (or the equivalent rclcat) is the only field with an OR default. You do need to use OR with ext terms for example. Range clauses &RCL; 1.24 and later support range clauses on fields which have been configured to support it. 
No default field uses them currently, so this paragraph is only interesting if you modified the fields configuration and possibly use a custom input handler. A range clause looks like one of the following: myfield:small..big myfield:small.. myfield:..big The nature of the clause is indicated by the two dots .., and the effect is to filter the results for which the myfield value is in the possibly open-ended interval. See the section about the fields configuration file for the details of configuring a field for range searches (list them in the [values] section). Modifiers Some characters are recognized as search modifiers when found immediately after the closing double quote of a phrase, as in "some term"modifierchars. The actual "phrase" can be a single term of course. Supported modifiers: l can be used to turn off stemming (mostly makes sense with p because stemming is off by default for phrases). s can be used to turn off synonym expansion, if a synonyms file is in place (only for &RCL; 1.22 and later). o can be used to specify a "slack" for phrase and proximity searches: the number of additional terms that may be found between the specified ones. If o is followed by an integer number, this is the slack, else the default is 10. p can be used to turn the default phrase search into a proximity one (unordered). Example: "order any in"p C will turn on case sensitivity (if the index supports it). D will turn on diacritics sensitivity (if the index supports it). A weight can be specified for a query element by specifying a decimal value at the start of the modifiers. Example: "Important"2.5. Anchored searches and wildcards Some special characters are interpreted by &RCL; in search strings to expand or specialize the search. Wildcards expand a root term in controlled ways. Anchor characters can restrict a search to succeed only if the match is found at or near the beginning of the document or one of its fields. More about wildcards All words entered in &RCL; search fields will be processed for wildcard expansion before the request is finally executed. The wildcard characters are: * which matches 0 or more characters. ? which matches a single character. [] which allow defining sets of characters to be matched (ex: [abc] matches a single character which may be 'a' or 'b' or 'c', [0-9] matches any number. You should be aware of a few things when using wildcards. Using a wildcard character at the beginning of a word can make for a slow search because &RCL; will have to scan the whole index term list to find the matches. However, this is much less a problem for field searches, and queries like author:*@domain.com can sometimes be very useful. For &RCL; version 18 only, when working with a raw index (preserving character case and diacritics), the literal part of a wildcard expression will be matched exactly for case and diacritics. This is not true any more for versions 19 and later. Using a * at the end of a word can produce more matches than you would think, and strange search results. You can use the term explorer tool to check what completions exist for a given term. You can also see exactly what search was performed by clicking on the link at the top of the result list. In general, for natural language terms, stem expansion will produce better results than an ending * (stem expansion is turned off when any wildcard character appears in the term). 
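As a recap of the query language elements seen so far, and before looking at path filtering in more detail, here is a made-up request combining several of them (all values are purely illustrative):
author:balzac "eugenie grandet"po10 dir:/home/me/books date:2001/ mime:application/pdf -draft
This selects PDF documents located under a directory containing /home/me/books in its path, dated 2001 or later, with balzac in the author field, containing the two phrase terms in any order with at most 10 other terms between them, and not containing the term draft.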
Wildcards and path filtering Due to the way that &RCL; processes wildcards inside dir path filtering clauses, they will have a multiplicative effect on the query size. A clause containing wildcards in several path elements, like, for example, dir:/home/me/*/*/docdir, will almost certainly fail if your indexed tree is of any realistic size. Depending on the case, you may be able to work around the issue by specifying the path elements more narrowly, with a constant prefix, or by using 2 separate dir: clauses instead of multiple wildcards, as in dir:/home/me dir:docdir. The latter query is not equivalent to the initial one because it does not specify a number of directory levels, but that's the best we can do (and it may be actually more useful in some cases). Anchored searches Two characters are used to specify that a search hit should occur at the beginning or at the end of the text. ^ at the beginning of a term or phrase constrains the search to happen at the start, $ at the end forces it to happen at the end. As this function is implemented as a phrase search it is possible to specify a maximum distance at which the hit should occur, either through the controls of the advanced search panel, or using the query language, for example, as in: "^someterm"o10 which would force someterm to be found within 10 terms of the start of the text. This can be combined with a field search as in somefield:"^someterm"o10 or somefield:someterm$. This feature can also be used with an actual phrase search, but in this case, the distance applies to the whole phrase and anchor, so that, for example, bla bla my unexpected term at the beginning of the text would be a match for "^my term"o5. Anchored searches can be very useful for searches inside somewhat structured documents like scientific articles, in case explicit metadata has not been supplied (a most frequent case), for example for looking for matches inside the abstract or the list of authors (which occur at the top of the document). Using Synonyms (1.22) Term synonyms: there are a number of ways to use term synonyms for searching text: At index creation time, they can be used to alter the indexed terms, either increasing or decreasing their number, by expanding the original terms to all synonyms, or by reducing all synonym terms to a canonical one. At query time, they can be used to match texts containing terms which are synonyms of the ones specified by the user, either by expanding the query for all synonyms, or by reducing the user entry to canonical terms (the latter only works if the corresponding processing has been performed while creating the index). &RCL; only uses synonyms at query time. A user query term which is part of a synonym group will be optionally expanded into an OR query for all terms in the group. Synonym groups are defined inside ordinary text files. Each line in the file defines a group. Example:
hi hello "good morning"
# not sure about "au revoir" though. Is this english ?
bye goodbye "see you" \
"au revoir"
As usual, lines beginning with a # are comments, empty lines are ignored, and lines can be continued by ending them with a backslash. Multi-word synonyms are supported, but be aware that these will generate phrase queries, which may degrade performance and will disable stemming expansion for the phrase terms. The contents of the synonyms file must be casefolded (not only lowercased), because this is what is expected at the point in the query processing where it is used.
There are a few cases where this makes a difference, for example, German sharp s should be expressed as ss, Greek final sigma as sigma. For reference, Python3 has an easy way to casefold words (str.casefold()). The synonyms file can be specified in the Search parameters tab of the GUI configuration Preferences menu entry, or as an option for command-line searches. Once the file is defined, the use of synonyms can be enabled or disabled directly from the Preferences menu. The synonyms are searched for matches with user terms after the latter are stem-expanded, but the contents of the synonyms file itself is not subjected to stem expansion. This means that a match will not be found if the form present in the synonyms file is not present anywhere in the document set (same with accents when using a raw index). The synonyms function is probably not going to help you find your letters to Mr. Smith. It is best used for domain-specific searches. For example, it was initially suggested by a user performing searches among historical documents: the synonyms file would contain nicknames and aliases for each of the persons of interest. Path translations In some cases, the document paths stored inside the index do not match the actual ones, so that document previews and accesses will fail. This can occur in a number of circumstances: When using multiple indexes it is a relatively common occurrence that some will actually reside on a remote volume, for example mounted via NFS. In this case, the paths used to access the documents on the local machine are not necessarily the same as the ones used while indexing on the remote machine. For example, /home/me may have been used as a topdirs element while indexing, but the directory might be mounted as /net/server/home/me on the local machine. The case may also occur with removable disks. It is perfectly possible to configure an index to live with the documents on the removable disk, but it may happen that the disk is not mounted at the same place so that the document paths from the index are invalid. As a last example, one could imagine that a big directory has been moved, but that it is currently inconvenient to run the indexer. &RCL; has a facility for rewriting access paths when extracting the data from the index. The translations can be defined for the main index and for any additional query index. The path translation facility will be useful whenever the document paths seen by the indexer are not the same as the ones which should be used at query time. In the above NFS example, &RCL; could be instructed to rewrite any file:///home/me URL from the index to file:///net/server/home/me, allowing accesses from the client. The translations are defined in the ptrans configuration file, which can be edited by hand or from the GUI external indexes configuration dialog: Preferences / External index dialog, then click the Paths translations button on the right below the index list. Due to a current bug, the GUI must be restarted after changing the ptrans values (even when they were changed from the GUI). Search case and diacritics sensitivity For &RCL; versions 1.18 and later, and when working with a raw index (not the default), searches can be sensitive to character case and diacritics. How this happens is controlled by configuration variables and what search data is entered. The general default is that searches entered without upper-case or accented characters are insensitive to case and diacritics.
An entry of resume will match any of Resume, RESUME, résumé, Résumé etc. Two configuration variables can automate switching on sensitivity (they were documented but actually did nothing until &RCL; 1.22): autodiacsens: If this is set, search sensitivity to diacritics will be turned on as soon as an accented character exists in a search term. When the variable is set to true, resume will start a diacritics-insensitive search, but résumé will be matched exactly. The default value is false. autocasesens: If this is set, search sensitivity to character case will be turned on as soon as an upper-case character exists in a search term except for the first one. When the variable is set to true, us or Us will start a case-insensitive search, but US will be matched exactly. The default value is true (contrary to autodiacsens). As in the past, capitalizing the first letter of a word will turn off its stem expansion and have no effect on case-sensitivity. You can also explicitly activate case and diacritics sensitivity by using modifiers with the query language. C will make the term case-sensitive, and D will make it diacritics-sensitive. Examples: "us"C will search for the term us exactly (Us will not be a match). "resume"D will search for the term resume exactly (résumé will not be a match). When either case or diacritics sensitivity is activated, stem expansion is turned off. Having both does not make much sense. Desktop integration Being independent of the desktop type has its drawbacks: &RCL; desktop integration is minimal. However there are a few tools available: The KDE KIO Slave was described in a previous section. If you use a recent version of Ubuntu Linux, you may find the Ubuntu Unity Lens module useful. There is also an independently developed Krunner plugin. Here follow a few other things that may help. Hotkeying recoll It is surprisingly convenient to be able to show or hide the &RCL; GUI with a single keystroke. Recoll comes with a small Python script, based on the libwnck window manager interface library, which will allow you to do just this. The detailed instructions are on this wiki page. The KDE Kicker Recoll applet This is probably obsolete now. Anyway: The &RCL; source tree contains the source code to the recoll_applet, a small application derived from the find_applet. This can be used to add a small &RCL; launcher to the KDE panel. The applet is not automatically built with the main &RCL; programs, nor is it included with the main source distribution (because the KDE build boilerplate makes it relatively big). You can download its source from the recoll.org download page. Use the omnipotent configure;make;make install incantation to build and install. You can then add the applet to the panel by right-clicking the panel and choosing the Add applet entry. The recoll_applet has a small text window where you can type a &RCL; query (in query language form), and an icon which can be used to restrict the search to certain types of files. It is quite primitive, and launches a new recoll GUI instance every time (even if it is already running). You may find it useful anyway.
Programming interface &RCL; has an Application Programming Interface, usable both for indexing and searching, currently accessible from the Python language. Another less radical way to extend the application is to write input handlers for new types of documents. The processing of metadata attributes for documents (fields) is highly configurable. Writing a document input handler Terminology: The small programs or pieces of code which handle the processing of the different document types for &RCL; used to be called filters, which is still reflected in the name of the directory which holds them and many configuration variables. They were named this way because one of their primary functions is to filter out the formatting directives and keep the text content. However these modules may have other behaviours, and the term input handler is now progressively substituted in the documentation. filter is still used in many places though. &RCL; input handlers cooperate to translate from the multitude of input document formats, simple ones such as opendocument or acrobat, or compound ones such as Zip or Email, into the final &RCL; indexing input format, which is plain text (in many cases the processing pipeline has an intermediary HTML step, which may be used for better previewing presentation). Most input handlers are executable programs or scripts. A few handlers are coded in C++ and live inside recollindex. This latter kind will not be described here. There are currently (since version 1.13) two kinds of external executable input handlers: Simple exec handlers run once and exit. They can be bare programs like antiword, or scripts using other programs. They are very simple to write, because they just need to print the converted document to the standard output. Their output can be plain text or HTML. HTML is usually preferred because it can store metadata fields and it allows preserving some of the formatting for the GUI preview. However, these handlers have limitations: They can only process one document per file. The output MIME type must be known and fixed. The character encoding, if relevant, must be known and fixed (or possibly just depending on location). Multiple execm handlers can process multiple files (sparing the process startup time which can be very significant), or multiple documents per file (e.g. for archives or multi-chapter publications). They communicate with the indexer through a simple protocol, but are nevertheless a bit more complicated than the older kind. Most of the new handlers are written in Python (exception: rclimg which is written in Perl because exiftool has no real Python equivalent). The Python handlers use common modules to factor out the boilerplate, which can make them very simple in favorable cases. The subdocuments output by these handlers can be directly indexable (text or HTML), or they can be other simple or compound documents that will need to be processed by another handler. In both cases, handlers deal with regular file system files, and can process either a single document, or a linear list of documents in each file. &RCL; is responsible for performing up-to-date checks, dealing with more complex embedding and other upper level issues. A simple handler returning a document in text/plain format cannot transfer any metadata to the indexer. Generic metadata, like document size or modification date, will be gathered and stored by the indexer. Handlers that produce text/html format can return an arbitrary amount of metadata inside HTML meta tags.
These will be processed according to the directives found in the fields configuration file. The handlers that can handle multiple documents per file return a single piece of data to identify each document inside the file. This piece of data, called an ipath, will be sent back by &RCL; to extract the document at query time, for previewing, or for creating a temporary file to be opened by a viewer. These handlers can also return metadata either as HTML meta tags, or as named data through the communication protocol. The following section describes the simple handlers, and the next one gives a few explanations about the execm ones. You could conceivably write a simple handler with only the elements in the manual. This will not be the case for the other ones, for which you will have to look at the code. Simple input handlers &RCL; simple handlers are usually shell-scripts, but this is in no way necessary. Extracting the text from the native format is the difficult part. Outputting the format expected by &RCL; is trivial. Happily enough, most document formats have translators or text extractors which can be called from the handler. In some cases the output of the translating program is completely appropriate, and no intermediate shell-script is needed. Input handlers are called with a single argument which is the source file name. They should output the result to stdout. When writing a handler, you should decide if it will output plain text or HTML. Plain text is simpler, but you will not be able to add metadata or vary the output character encoding (this will be defined in a configuration file). Additionally, some formatting may be easier to preserve when previewing HTML. Actually the deciding factor is metadata: &RCL; has a way to extract metadata from the HTML header and use it for field searches. The RECOLL_FILTER_FORPREVIEW environment variable (values yes, no) tells the handler if the operation is for indexing or previewing. Some handlers use this to output a slightly different format, for example stripping uninteresting repeated keywords (i.e. Subject: for email) when indexing. This is not essential. You should look at one of the simple handlers, for example rclps, for a starting point; a minimal sketch is also shown a little further down. Don't forget to make your handler executable before testing! "Multiple" handlers If you can program and want to write an execm handler, it should not be too difficult to make sense of one of the existing handlers. The existing handlers differ in the amount of helper code which they are using: rclimg is written in Perl and handles the execm protocol all by itself (showing how trivial it is). All the Python handlers share at least the rclexecm.py module, which handles the communication. Have a look at, for example, rclzip for a handler which uses rclexecm.py directly. Most Python handlers which process single-document files by executing another command are further abstracted by using the rclexec1.py module. See for example rclrtf.py for a simple one, or rcldoc.py for a slightly more complicated one (possibly executing several commands). Handlers which extract text from an XML document by using an XSLT style sheet are now executed inside recollindex, with only the style sheet stored in the filters/ directory. These can use a single style sheet (e.g. abiword.xsl), or two sheets for the data and metadata (e.g. opendoc-body.xsl and opendoc-meta.xsl). The mimeconf configuration file defines how the sheets are used, have a look.
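Coming back to the simple handlers described above, here is a minimal sketch of what one could look like, written in Python for illustration (any language able to write to stdout works). Everything specific in it is hypothetical: a real handler would convert from the native document format, usually by running an external extractor, while this one merely escapes the input file content and wraps it in minimal HTML so that a charset and meta fields can be carried:
#!/usr/bin/env python3
# Hypothetical minimal simple input handler: Recoll runs it with the source
# file name as its single argument and reads the converted document on stdout.
import html
import sys

with open(sys.argv[1], "r", encoding="utf-8", errors="replace") as f:
    text = f.read()

print('<html><head>')
print('<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">')
print('</head><body><pre>')
# Escape &, < etc. so that the output remains valid HTML
print(html.escape(text))
print('</pre></body></html>')
Such a script would then be associated with a MIME type through an exec line in mimeconf, as described in the Telling &RCL; about the handler section below.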
Before the C++ import, the xsl-based handlers used a common module rclgenxslt.py; it is still around but unused. The handler for OpenXML presentations is still the Python version because the format did not fit with what the C++ code does. It would be a good base for another similar issue. There is a sample trivial handler based on rclexecm.py, with many comments, not actually used by &RCL;. It would index a text file as one document per line. Look for rcltxtlines.py in the src/filters directory in the online &RCL; Git repository (the sample is not in the distributed release at the moment). You can also have a look at the slightly more complex rclzip which uses Zip file paths as identifiers (ipath). execm handlers sometimes need to make a choice for the nature of the ipath elements that they use in communication with the indexer. Here are a few guidelines: Use ASCII or UTF-8 (if the identifier is an integer print it, for example, like printf %d would do). If at all possible, the data should make some kind of sense when printed to a log file to help with debugging. &RCL; uses a colon (:) as a separator to store a complex path internally (for deeper embedding). Colons inside the ipath elements output by a handler will be escaped, but would be a bad choice as a handler-specific separator (mostly, again, for debugging issues). In any case, the main goal is that it should be easy for the handler to extract the target document, given the file name and the ipath element. execm handlers will also produce a document with a null ipath element. Depending on the type of document, this may have some associated data (e.g. the body of an email message), or none (typical for an archive file). If it is empty, this document will be useful anyway for some operations, as the parent of the actual data documents. Telling &RCL; about the handler There are two elements that link a file to the handler which should process it: the association of file to MIME type and the association of a MIME type with a handler. The association of files to MIME types is mostly based on name suffixes. The types are defined inside the mimemap file. Example:
.doc = application/msword
If no suffix association is found for the file name, &RCL; will try to execute a system command (typically file -i or xdg-mime) to determine a MIME type. The second element is the association of MIME types to handlers in the mimeconf file. A sample will probably be better than a long explanation:
[index]
application/msword = exec antiword -t -i 1 -m UTF-8;\
      mimetype = text/plain ; charset=utf-8
application/ogg = exec rclogg
text/rtf = exec unrtf --nopict --html; charset=iso-8859-1; mimetype=text/html
application/x-chm = execm rclchm
The fragment specifies that: application/msword files are processed by executing the antiword program, which outputs text/plain encoded in utf-8. application/ogg files are processed by the rclogg script, with default output type (text/html, with encoding specified in the header, or utf-8 by default). text/rtf is processed by unrtf, which outputs text/html. The iso-8859-1 encoding is specified because it is not the utf-8 default, and not output by unrtf in the HTML header section. application/x-chm is processed by a persistent handler. This is determined by the execm keyword. Input handler output Both the simple and persistent input handlers can return any MIME type to Recoll, which will further process the data according to the MIME configuration. Most input handlers produce either text/plain or text/html data.
There are exceptions, for example, filters which process archive file (zip, tar, etc.) will usually return the documents as they are found, without processing them further. There is nothing to say about text/plain output, except that its character encoding should be consistent with what is specified in the mimeconf file. For filters producing HTML, the output could be very minimal like the following example: <html> <head> <meta http-equiv="Content-Type" content="text/html;charset=UTF-8"> </head> <body> Some text content </body> </html> You should take care to escape some characters inside the text by transforming them into appropriate entities. At the very minimum, "&" should be transformed into "&amp;", "<" should be transformed into "&lt;". This is not always properly done by external helper programs which output HTML, and of course never by those which output plain text. When encapsulating plain text in an HTML body, the display of a preview may be improved by enclosing the text inside <pre> tags. The character set needs to be specified in the header. It does not need to be UTF-8 (&RCL; will take care of translating it), but it must be accurate for good results. &RCL; will process meta tags inside the header as possible document fields candidates. Documents fields can be processed by the indexer in different ways, for searching or displaying inside query results. This is described in a following section. By default, the indexer will process the standard header fields if they are present: title, meta/description, and meta/keywords are both indexed and stored for query-time display. A predefined non-standard meta tag will also be processed by &RCL; without further configuration: if a date tag is present and has the right format, it will be used as the document date (for display and sorting), in preference to the file modification date. The date format should be as follows: <meta name="date" content="YYYY-mm-dd HH:MM:SS"> or <meta name="date" content="YYYY-mm-ddTHH:MM:SS"> Example: <meta name="date" content="2013-02-24 17:50:00"> Input handlers also have the possibility to "invent" field names. This should also be output as meta tags: <meta name="somefield" content="Some textual data" /> You can embed HTML markup inside the content of custom fields, for improving the display inside result lists. In this case, add a (wildly non-standard) markup attribute to tell &RCL; that the value is HTML and should not be escaped for display. <meta name="somefield" markup="html" content="Some <i>textual</i> data" /> As written above, the processing of fields is described in a further section. Persistent filters can use another, probably simpler, method to produce metadata, by calling the setfield() helper method. This avoids the necessity to produce HTML, and any issue with HTML quoting. See, for example, rclaudio in &RCL; 1.23 and later for an example of handler which outputs text/plain and uses setfield() to produce metadata. Page numbers The indexer will interpret ^L characters in the handler output as indicating page breaks, and will record them. At query time, this allows starting a viewer on the right page for a hit or a snippet. Currently, only the PDF, Postscript and DVI handlers generate page breaks. Field data processing Fields are named pieces of information in or about documents, like title, author, abstract. 
The field values for documents can appear in several ways during indexing: either output by input handlers as meta fields in the HTML header section, or extracted from file extended attributes, or added as attributes of the Doc object when using the API, or again synthesized internally by &RCL;. The &RCL; query language allows searching for text in a specific field. &RCL; defines a number of default fields. Additional ones can be output by handlers, and described in the fields configuration file. Fields can be: indexed, meaning that their terms are separately stored in inverted lists (with a specific prefix), and that a field-specific search is possible. stored, meaning that their value is recorded in the index data record for the document, and can be returned and displayed with search results. A field can be either or both indexed and stored. This and other aspects of field handling are defined inside the fields configuration file. Some fields may also be designated as supporting range queries, meaning that the results may be selected for an interval of their values. See the configuration section for more details. The sequence of events for field processing is as follows: During indexing, recollindex scans all meta fields in HTML documents (most document types are transformed into HTML at some point). It compares the name for each element to the configuration defining what should be done with fields (the fields file). If the name for the meta element matches one for a field that should be indexed, the contents are processed and the terms are entered into the index with the prefix defined in the fields file. If the name for the meta element matches one for a field that should be stored, the content of the element is stored with the document data record, from which it can be extracted and displayed at query time. At query time, if a field search is performed, the index prefix is computed and the match is only performed against appropriately prefixed terms in the index. At query time, the field can be displayed inside the result list by using the appropriate directive in the definition of the result list paragraph format. All fields are displayed on the fields screen of the preview window (which you can reach through the right-click menu). This is independent of whether the search which produced the results used the field or not. You can find more information in the section about the fields file, or in comments inside the file. You can also have a look at the example in the FAQs area, detailing how one could add a page count field to pdf documents for displaying inside result lists. Python API Introduction The &RCL; Python programming interface can be used both for searching and for creating/updating an index. Bindings exist for Python2 and Python3. The search interface is used in a number of active projects: the &RCL; Gnome Shell Search Provider, the &RCL; Web UI, and the upmpdcli UPnP Media Server, in addition to many small scripts. The index update section of the API may be used to create and update &RCL; indexes on specific configurations (separate from the ones created by recollindex). The resulting databases can be queried alone, or in conjunction with regular ones, through the GUI or any of the query interfaces. The search API is modeled along the Python database API version 2.0 specification (early versions used the version 1.0 spec). The recoll package contains two modules: The recoll module contains functions and classes used to query (or update) the index.
The rclextract module contains functions and classes used at query time to access document data. The recoll module must be imported before rclextract. There is a good chance that your system repository has packages for the Recoll Python API, sometimes in a package separate from the main one (maybe named something like python-recoll). Else refer to the Building from source chapter. As an introduction, the small sample shown in the Search API usage example section further down runs a query and lists a few fields for each of the results. The python/samples source directory contains several examples of Python programming with &RCL;, exercising the extension more completely, and especially its data extraction features. You can also take a look at the source for the Recoll WebUI, the upmpdcli local media server, or the Gnome Shell Search Provider. Interface elements A few elements in the interface are specific and need an explanation. ipath This data value (set as a field in the Doc object) is stored, along with the URL, but not indexed by &RCL;. Its contents are not interpreted by the index layer, and its use is up to the application. For example, the &RCL; file system indexer uses the ipath to store the part of the document access path internal to (possibly nested) container documents. ipath in this case is a vector of access elements (e.g., the first part could be a path inside a zip file to an archive member which happens to be an mbox file, the second element would be the message sequential number inside the mbox etc.). url and ipath are returned in every search result and define the access to the original document. ipath is empty for top-level document/files (e.g. a PDF document which is a filesystem file). The &RCL; GUI knows about the structure of the ipath values used by the filesystem indexer, and uses it for such functions as opening the parent of a given document. udi A udi (unique document identifier) identifies a document. Because of limitations inside the index engine, it is restricted in length (to 200 bytes), which is why a regular URI cannot be used. The structure and contents of the udi are defined by the application and opaque to the index engine. For example, the internal file system indexer uses the complete document path (file path + internal path), truncated to the maximum length, the suppressed part being replaced by a hash value. The udi is not explicit in the query interface (it is used "under the hood" by the rclextract module), but it is an explicit element of the update interface. parent_udi If this attribute is set on a document when entering it in the index, it designates its physical container document. In a multilevel hierarchy, this may not be the immediate parent. parent_udi is optional, but its use by an indexer may simplify index maintenance, as &RCL; will automatically delete all children defined by parent_udi == udi when the document designated by udi is destroyed. For example, if a Zip archive contains entries which are themselves containers, like mbox files, all the subdocuments inside the Zip file (mbox, messages, message attachments, etc.) would have the same parent_udi, matching the udi for the Zip file, and all would be destroyed when the Zip file (identified by its udi) is removed from the index. The standard filesystem indexer uses parent_udi. Stored and indexed fields The fields file inside the &RCL; configuration defines which document fields are either indexed (searchable), stored (retrievable with search results), or both.
Apart from a few standard/internal fields, only the stored fields are retrievable through the Python search interface. Log messages for Python scripts Two specific configuration variables: pyloglevel and pylogfilename allow overriding the generic values for Python programs. Set pyloglevel to 2 to suppress default startup messages (printed at level 3). Python search interface The recoll module connect(confdir=None, extra_dbs=None, writable = False) The connect() function connects to one or several &RCL; index(es) and returns a Db object. This call initializes the recoll module, and it should always be performed before any other call or object creation. confdir may specify a configuration directory. The usual defaults apply. extra_dbs is a list of additional indexes (Xapian directories). writable decides if we can index new data through this connection. The Db class A Db object is created by a connect() call and holds a connection to a Recoll index. Db.close() Closes the connection. You can't do anything with the Db object after this. Db.query(), Db.cursor() These aliases return a blank Query object for this index. Db.setAbstractParams(maxchars, contextwords) Set the parameters used to build snippets (sets of keywords in context text fragments). maxchars defines the maximum total size of the abstract. contextwords defines how many terms are shown around the keyword. Db.termMatch(match_type, expr, field='', maxlen=-1, casesens=False, diacsens=False, lang='english') Expand an expression against the index term list. Performs the basic function from the GUI term explorer tool. match_type can be either of wildcard, regexp or stem. Returns a list of terms expanded from the input expression. The Query class A Query object (equivalent to a cursor in the Python DB API) is created by a Db.query() call. It is used to execute index searches. Query.sortby(fieldname, ascending=True) Sort results by fieldname, in ascending or descending order. Must be called before executing the search. Query.execute(query_string, stemming=1, stemlang="english", fetchtext=False, collapseduplicates=False) Starts a search for query_string, a &RCL; search language string. If the index stores the document texts and fetchtext is True, store the document extracted text in doc.text. Query.executesd(SearchData, fetchtext=False, collapseduplicates=False) Starts a search for the query defined by the SearchData object. If the index stores the document texts and fetchtext is True, store the document extracted text in doc.text. Query.fetchmany(size=query.arraysize) Fetches the next Doc objects in the current search results, and returns them as an array of the required size, which is by default the value of the arraysize data member. Query.fetchone() Fetches the next Doc object from the current search results. Generates a StopIteration exception if there are no results left. Query.close() Closes the query. The object is unusable after the call. Query.scroll(value, mode='relative') Adjusts the position in the current result set. mode can be relative or absolute. Query.getgroups() Retrieves the expanded query terms as a list of pairs. Meaningful only after executexx In each pair, the first entry is a list of user terms (of size one for simple terms, or more for group and phrase clauses), the second a list of query terms as derived from the user terms and used in the Xapian Query. Query.getxquery() Return the Xapian query description as a Unicode string. Meaningful only after executexx. 
Query.highlight(text, ishtml = 0, methods = object) Will insert <span class="rclmatch"> and </span> tags around the match areas in the input text and return the modified text. ishtml can be set to indicate that the input text is HTML and that HTML special characters should not be escaped. methods if set should be an object with methods startMatch(i) and endMatch() which will be called for each match and should return a begin and end tag. Query.makedocabstract(doc, methods = object) Create a snippets abstract for doc (a Doc object) by selecting text around the match terms. If methods is set, will also perform highlighting. See the highlight method. Query.__iter__() and Query.next() So that things like for doc in query: will work. Query.arraysize Default number of records processed by fetchmany (r/w). Query.rowcount Number of records returned by the last execute. Query.rownumber Next index to be fetched from results. Normally increments after each fetchone() call, but can be set/reset before the call to effect seeking (equivalent to using scroll()). Starts at 0. The Doc class A Doc object contains index data for a given document. The data is extracted from the index when searching, or set by the indexer program when updating. The Doc object has many attributes to be read or set by its user. It mostly matches the Rcl::Doc C++ object. Some of the attributes are predefined, but, especially when indexing, others can be set, the name of which will be processed as field names by the indexing configuration. Inputs can be specified as Unicode or strings. Outputs are Unicode objects. All dates are specified as Unix timestamps, printed as strings. Please refer to the rcldb/rcldoc.cpp C++ file for a full description of the predefined attributes. Here follows a short list. url the document URL, but see also getbinurl(). ipath the document ipath for embedded documents. fbytes, dbytes the document file and text sizes. fmtime, dmtime the document file and document times. xdocid the document Xapian document ID. This is useful if you want to access the document through a direct Xapian operation. mtype the document MIME type. Fields stored by default: author, filename, keywords, recipient. At query time, only the fields that are defined as stored either by default or in the fields configuration file will be meaningful in the Doc object. The document processed text may be present or not, depending on whether the index stores the text at all, and if it does, on the fetchtext query execute option. See also the rclextract module for accessing document contents. get(key), [] operator Retrieve the named document attribute. You can also use getattr(doc, key) or doc.key. doc.key = value Set the named document attribute. You can also use setattr(doc, key, value). getbinurl() Retrieve the URL in byte array format (no transcoding), for use as parameter to a system call. setbinurl(url) Set the URL in byte array format (no transcoding). items() Return a dictionary of doc object keys/values. keys() list of doc object keys (attribute names). The SearchData class A SearchData object allows building a query by combining clauses, for execution by Query.executesd(). It can be used as a replacement for the query language approach. The interface is going to change a little, so no detailed doc for now...
addclause(type='and'|'or'|'excl'|'phrase'|'near'|'sub', qstring=string, slack=0, field='', stemming=1, subSearch=SearchData) The rclextract module Prior to &RCL; 1.25, index queries could not provide document content because it was never stored. &RCL; 1.25 and later usually store the document text, which can be optionally retrieved when running a query (see query.execute() above - the result is always plain text). The rclextract module gives access to the original document and to the document text content (useful if the text is not stored by the index, or to obtain an HTML version of the text). Accessing the original document is particularly useful if it is embedded (e.g. an email attachment). You need to import the recoll module before the rclextract module. The Extractor class Extractor(doc) An Extractor object is built from a Doc object, output from a query. Extractor.textextract(ipath) Extract document defined by ipath and return a Doc object. The doc.text field has the document text converted to either text/plain or text/html according to doc.mimetype. The typical use would be as follows:
from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
doc = extractor.textextract(qdoc.ipath)
# use doc.text, e.g. for previewing
Passing qdoc.ipath to textextract() is redundant, but reflects the fact that the Extractor object actually has the capability to access the other entries in a compound document. Extractor.idoctofile(ipath, targetmtype, outfile='') Extracts document into an output file, which can be given explicitly or will be created as a temporary file to be deleted by the caller. Typical use:
from recoll import recoll, rclextract

qdoc = query.fetchone()
extractor = rclextract.Extractor(qdoc)
filename = extractor.idoctofile(qdoc.ipath, qdoc.mimetype)
In all cases the output is a copy, even if the requested document is a regular system file, which may be wasteful in some cases. If you want to avoid this, you can test for a simple file document as follows:
not doc.ipath and (not "rclbes" in doc.keys() or doc["rclbes"] == "FS")
Search API usage example The following sample would query the index with a user language string (the connection and query setup at the top simply follows the interface description above). See the python/samples directory inside the &RCL; source for other examples. The recollgui subdirectory has a very embryonic GUI which demonstrates the highlighting and data extraction functions.
#!/usr/bin/env python
import sys
from recoll import recoll

db = recoll.connect()
query = db.query()
query.execute(" ".join(sys.argv[1:]))
nres = query.rowcount
if nres > 5:
    nres = 5
for i in range(nres):
    doc = query.fetchone()
    print "Result #%d" % (query.rownumber,)
    for k in ("title", "size"):
        print k, ":", getattr(doc, k).encode('utf-8')
    abs = db.makeDocAbstract(doc, query).encode('utf-8')
    print abs
    print
Creating Python external indexers The update API can be used to create an index from data which is not accessible to the regular &RCL; indexer, or structured to present difficulties to the &RCL; input handlers. An indexer created using this API will have equivalent work to do as the Recoll file system indexer: look for modified documents, extract their text, call the API for indexing it, take care of purging from the index the data for documents which do not exist in the document store any more. The data for such an external indexer should be stored in an index separate from any used by the &RCL; internal file system indexer. The reason is that the main document indexer purge pass (removal of deleted documents) would also remove all the documents belonging to the external indexer, as they were not seen during the filesystem walk.
The main indexer documents would also probably be a problem for the external indexer's own purge operation. While there would be ways to enable multiple foreign indexers to cooperate on a single index, it is just simpler to use separate ones, and use the multiple index access capabilities of the query interface, if needed. There are two parts to the update interface: Methods inside the recoll module allow inserting data into the index, to make it accessible by the normal query interface. An interface based on script execution is defined to allow either the GUI or the rclextract module to access original document data for previewing or editing. Python update interface The update methods are part of the recoll module described above. The connect() method is used with a writable=True parameter to obtain a writable Db object. The following Db object methods are then available. addOrUpdate(udi, doc, parent_udi=None) Add or update index data for a given document. The udi string must define a unique id for the document. It is an opaque interface element and not interpreted inside Recoll. doc is a Doc object, created from the data to be indexed (the main text should be in doc.text). If parent_udi is set, this is a unique identifier for the top-level container (e.g. for the filesystem indexer, this would be the one which is an actual file). delete(udi) Purge index from all data for udi, and all documents (if any) which have a matching parent_udi. needUpdate(udi, sig) Test if the index needs to be updated for the document identified by udi. If this call is to be used, the doc.sig field should contain a signature value when calling addOrUpdate(). The needUpdate() call then compares its parameter value with the stored sig for udi. sig is an opaque value, compared as a string. The filesystem indexer uses a concatenation of the decimal string values for file size and update time, but a hash of the contents could also be used. As a side effect, if the return value is false (the index is up to date), the call will set the existence flag for the document (and any subdocument defined by its parent_udi), so that a later purge() call will preserve them. The use of needUpdate() and purge() is optional, and the indexer may use another method for checking the need to reindex or to delete stale entries. purge() Delete all documents that were not touched during the just finished indexing pass (since open-for-write). These are the documents for which the needUpdate() call was not performed, indicating that they no longer exist in the primary storage system. Query data access for external indexers (1.23) &RCL; has internal methods to access document data for its internal (filesystem) indexer. An external indexer needs to provide data access methods if it needs integration with the GUI (e.g. preview function), or support for the rclextract module. The index data and the access method are linked by the rclbes (recoll backend storage) Doc field. You should set this to a short string value identifying your indexer (e.g. the filesystem indexer uses either "FS" or an empty value, the Web history indexer uses "BGL"). The link is actually performed inside a backends configuration file (stored in the configuration directory). This defines commands to execute to access data from the specified indexer.
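To make the update interface more concrete, here is a minimal sketch of a hypothetical external indexer using the methods described above. Everything specific in it (the document store represented by a dict, the configuration directory, the MYAPP backend tag) is a stand-in for what a real indexer would use, and it assumes that a fresh Doc object can be created directly as recoll.Doc():
#!/usr/bin/env python3
# Hypothetical external indexer sketch using the recoll update API.
from recoll import recoll

# Stand-in for a real external document store: udi -> (text, signature)
documents = {
    "myapp://doc/1": ("First document text", "sig-1"),
    "myapp://doc/2": ("Second document text", "sig-2"),
}

# Use a separate configuration/index, as explained above
db = recoll.connect(confdir="/path/to/external-index-config", writable=True)

for udi, (text, sig) in documents.items():
    if not db.needUpdate(udi, sig):
        continue                      # up to date; existence flag is set
    doc = recoll.Doc()
    doc.url = udi                     # the udi doubles as the url here
    doc.mimetype = "text/plain"
    doc.text = text
    doc.sig = sig
    doc.rclbes = "MYAPP"              # backend tag for query-time data access
    db.addOrUpdate(udi, doc)

# Remove index data for documents not seen during this pass
db.purge()
At query time, the MYAPP tag would be matched to fetch and makesig commands in the backends file, as in the mbox example which follows.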
Example, for the mbox indexing sample found in the Recoll source (which sets rclbes="MBOX"):
[MBOX]
fetch = /path/to/recoll/src/python/samples/rclmbox.py fetch
makesig = /path/to/recoll/src/python/samples/rclmbox.py makesig
fetch and makesig define two commands to execute to respectively retrieve the document text and compute the document signature (the example implementation uses the same script with different first parameters to perform both operations). The scripts are called with three additional arguments: udi, url, ipath, stored with the document when it was indexed, and may use any or all to perform the requested operation. The caller expects the result data on stdout. External indexer samples The Recoll source tree has two samples of external indexers in the src/python/samples directory. The more interesting one is rclmbox.py which indexes a directory containing mbox folder files. It exercises most features in the update interface, and has a data access interface. See the comments inside the file for more information. Package compatibility with the previous version The following code fragments can be used to ensure that code can run with both the old and the new API (as long as it does not use the new abilities of the new API of course). Adapting to the new package structure: Adapting to the change of nature of the next Query member. The same test can be used to choose to use the scroll() method (new) or set the next value (old). Installation and configuration Installing a binary copy &RCL; binary copies are always distributed as regular packages for your system. They can be obtained either through the system's normal software distribution framework (e.g. Debian/Ubuntu apt, FreeBSD ports, etc.), or from some type of "backports" repository providing versions newer than the standard ones, or found on the &RCL; WEB site in some cases. The most up-to-date information about Recoll packages can usually be found on the Recoll WEB site downloads page. The &WIN; version of Recoll comes in a self-contained setup file; there is nothing else to install. On &LIN;, the package management tools will automatically install hard dependencies for packages obtained from a proper package repository. You will have to deal with them by hand for downloaded packages (for example, when dpkg complains about missing dependencies). In all cases, you will have to check or install supporting applications for the file types that you want to index beyond those that are natively processed by &RCL; (text, HTML, email files, and a few others). You may also want to have a look at the configuration section (but this may not be necessary for a quick test with default parameters). Most parameters can be more conveniently set from the GUI interface. Supporting packages The &WIN; installation of &RCL; is self-contained. &WIN; users can skip this section. &RCL; uses external applications to index some file types. You need to install them for the file types that you wish to have indexed (these are optional run-time dependencies: none is needed for building or running &RCL; itself, only for indexing the corresponding file types). After an indexing pass, the commands that were found missing can be displayed from the recoll File menu. The list is stored in the missing text file inside the configuration directory. The past has proven that I was unable to maintain an up-to-date application list in this manual. Please check &RCLAPPS; for a complete list along with links to the home pages or best source/patches pages, and misc tips.
What follows is only a very short extract of the stable essentials.

PDF files need pdftotext, which is part of Poppler (it usually comes with the poppler-utils package). Avoid the original one from Xpdf.

MS Word documents need antiword. It is also useful to have wvWare installed, as it may be used as a fallback for some files which antiword does not handle.

RTF files need unrtf, which, in its older versions, has much trouble with non-western character sets. Many Linux distributions carry outdated unrtf versions. Check &RCLAPPS; for details.

Pictures: &RCL; uses the Exiftool Perl package to extract tag information. Most image file formats are supported.

Up to &RCL; 1.24, many XML-based formats need the xsltproc command, which usually comes with libxslt. These are: abiword, fb2 ebooks, kword, openoffice, opendocument, svg. &RCL; 1.25 and later process them internally (using libxslt).

Building from source

Prerequisites

The following prerequisites are described in broad terms and not as specific package names (which will depend on the exact platform). The dependencies should be available as packages on most common Unix derivatives, and it should be quite uncommon that you would have to build one of them. If you do not need the GUI, you can avoid all GUI dependencies by disabling its build (see the configure section further down). The shopping list:

- If you start from git code, you will need the autoconf, automake and libtool triad. They are not needed for building from tar distributions.
- A C++ compiler. Recent versions require C++11 compatibility (1.23 and later).
- The bison command (for &RCL; 1.21 and later).
- For building the documentation: the xsltproc command, and the Docbook XML and style sheet files. You can avoid this dependency by disabling documentation building with the --disable-userdoc configure option.
- Development files for Xapian core. If you are building Xapian for an older CPU (before Pentium 4 or Athlon 64), you need to add the appropriate flag to the configure command, else all Xapian applications will crash with an illegal instruction error.
- Development files for Qt 5 and its own dependencies (X11 etc.).
- Development files for libxslt.
- Development files for zlib.
- Development files for Python (or use --disable-python-module).
- Development files for libchm.
- You may also need libiconv. On Linux systems, the iconv interface is part of libc and you should not need to do anything special.

Check the &RCL; download page for up-to-date version information.

Building

&RCL; has been built on Linux, FreeBSD, Mac OS X, and Solaris. Most versions released after 2005 should be ok, and maybe some older ones too (Solaris 8 used to be ok). If you build on another system, and need to modify things, I would very much welcome patches.

Configure options:
- will disable the code for phonetic matching of search terms.
- or will enable the code for real time indexing. Inotify support is enabled by default on Linux systems.
- will enable sending Zeitgeist events about the visited search results, and needs the qzeitgeist package.
- is available from version 1.17 to implement the result list with a Qt QTextBrowser instead of a WebKit widget, if you do not want to or cannot depend on the latter.
- Disable the Qt interface. This allows building the indexer and the command line search program in the absence of a Qt environment.
- Enable the use of Qt Webengine (only meaningful if the Qt GUI is enabled), in place of Qt Webkit.
- is available from version 1.19 to suppress multithreading inside the indexing process.
You can also use the run-time configuration to restrict recollindex to using a single thread, but the compile-time option may disable a few more unused locks. This only applies to the use of multithreading for the core index processing (data input). The &RCL; monitor mode always uses at least two threads of execution.
- will avoid building the Python module.
- will avoid building the Python libchm interface used to index CHM files.
- will prevent fetching data from file extended attributes. Beyond a few standard attributes, fetching extended attributes data can only be useful if some application stores data in there, and it also needs some simple configuration (see the comments in the fields configuration file).
- will enable splitting camelCase words. This is not enabled by default as it has the unfortunate side-effect of making some phrase searches quite confusing: e.g., "MySQL manual" would be matched by "MySQL manual" and "my sql manual" but not "mysql manual" (only inside phrase searches).
- Specify the version of the file command to use (e.g.: --with-file-command=/usr/local/bin/file). Can be useful to enable the GNU version on systems where the native one is bad.
- Disable X11 connection monitoring inside recollindex. Together with --disable-qtgui, this allows building recoll without Qt and X11.
- will avoid building the user manual. This avoids having to install the Docbook XML/XSL files and the TeX toolchain used for translating the manual to PDF.
- Enable building the recollq command line query tool (recoll -t without the need for Qt). This is done by default if --disable-qtgui is set, but this option allows forcing it.
- (&RCL; versions up to 1.21 only) will compile &RCL; with position-dependent code. This is incompatible with building the KIO or the Python or PHP extensions, but might yield very marginally faster code.

Of course, the usual autoconf configure options apply.

Normal procedure (for source extracted from a tar distribution):

    cd recoll-xxx
    ./configure
    make
    (practice the usual hardship-repelling invocations)

When building from source cloned from the git repository, you also need to install autoconf, automake, and libtool, and you must execute sh autogen.sh in the top source directory before running configure.

Installing

Use make install in the root of the source tree. This will copy the commands to prefix/bin and the sample configuration files, scripts and other shared data to prefix/share/recoll.

Python API package

The Python interface can be found in the source tree, under the python/recoll directory. As of &RCL; 1.19, the module can be compiled for Python3. The normal &RCL; build procedure (see above) installs the API package for the default system version (python) along with the main code. The package for other Python versions (e.g. python3 if the system default is python2) must be explicitly built and installed. The python/recoll/ directory contains the usual setup.py. After configuring and building the main &RCL; code, you can use the script to build and install the Python module:

    cd recoll-xxx/python/recoll
    pythonX setup.py build
    sudo pythonX setup.py install

Building on Solaris

We did not test building the GUI on Solaris for recent versions. You will need at least Qt 4.4. There are some hints on an old web site page; they may still be valid. Someone did test the 1.19 indexer and Python module build; they do work, with a few minor glitches. Be sure to use GNU make and install.
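Coming back to the package-compatibility adjustments mentioned earlier: code which must run with both the old single-module layout and the newer package layout can guard the import and reuse the result of that test when choosing between the scroll() method and the old assignable next value. A minimal sketch (the query string and scroll position are arbitrary placeholders):

    newapi = True
    try:
        from recoll import recoll      # new layout: 'recoll' is a package
    except ImportError:
        import recoll                  # old layout: a single top-level module
        newapi = False

    db = recoll.connect()
    query = db.query()
    query.execute("some search terms")
    if newapi:
        query.scroll(0)                # new interface: position with scroll()
    else:
        query.next = 0                 # old interface: 'next' is a plain value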
Configuration overview

Most of the parameters specific to the recoll GUI are set through the Preferences menu and stored in the standard Qt place ($HOME/.config/Recoll.org/recoll.conf). You probably do not want to edit this by hand.

&RCL; indexing options are set inside text configuration files located in a configuration directory. There can be several such directories, each of which defines the parameters for one index.

The configuration files can be edited by hand or through the Index configuration dialog (Preferences menu). The GUI tool will try to respect your formatting and comments as much as possible, so it is quite possible to use both approaches on the same configuration.

The most accurate documentation for the configuration parameters is given by comments inside the default files, and we will just give a general overview here.

For each index, there are at least two sets of configuration files. System-wide configuration files are kept in a directory named like /usr/share/recoll/examples, and define default values, shared by all indexes. For each index, a parallel set of files defines the customized parameters.

The default location of the customized configuration is the .recoll directory in your home. Most people will only use this directory. This location can be changed, or others can be added, with the RECOLL_CONFDIR environment variable or the corresponding command line option to recoll and recollindex.

In addition (as of &RCL; version 1.19.7), it is possible to specify two additional configuration directories which will be stacked before and after the user configuration directory. These are defined by the RECOLL_CONFTOP and RECOLL_CONFMID environment variables. Values from configuration files inside the top directory will override user ones; values from configuration files inside the middle directory will override system ones and be overridden by user ones. These two variables may be of use to applications which augment &RCL; functionality and need to add configuration data without disturbing the user's files. Please note that the two, currently single, values will probably be interpreted as colon-separated lists in the future: do not use colon characters inside the directory paths.

If the .recoll directory does not exist when recoll or recollindex are started, it will be created with a set of empty configuration files. recoll will give you a chance to edit the configuration file before starting indexing. recollindex will proceed immediately. To avoid mistakes, the automatic directory creation will only occur for the default location, not if the command line option or RECOLL_CONFDIR were used (in the latter cases, you will have to create the directory).

All configuration files share the same format. For example, a short extract of the main configuration file might look as follows:

    # Space-separated list of files and directories to index.
    topdirs = ~/docs /usr/share/doc

    [~/somedirectory-with-utf8-txt-files]
    defaultcharset = utf-8

There are three kinds of lines:
- Comment (starts with #) or empty.
- Parameter affectation (name = value).
- Section definition ([somedirname]).

Long lines can be broken by ending each incomplete part with a backslash (\).

Depending on the type of configuration file, section definitions either separate groups of parameters or allow redefining some parameters for a directory sub-tree. They stay in effect until another section definition, or the end of file, is encountered. Some of the parameters used for indexing are looked up hierarchically from the current directory location upwards.
Not all parameters can be meaningfully redefined; this is specified for each in the next section. Global parameters must not be defined in a directory subsection, else they will not be found at all by the &RCL; code, which looks for them at the top level (e.g. skippedPaths).

When found at the beginning of a file path, the tilde character (~) is expanded to the name of the user's home directory, as a shell would do.

Some parameters are lists of strings. White space is used for separation. List elements with embedded spaces can be quoted using double-quotes. Double quotes inside these elements can be escaped with a backslash. No value inside a configuration file can contain a newline character, but long lines can be continued by escaping the physical newline with a backslash, even inside quoted strings:

    astringlist = "some string \
    with spaces"
    thesame = "some string with spaces"

Parameters which are not part of string lists can't be quoted, and leading and trailing space characters are stripped before the value is used.

Encoding issues

Most of the configuration parameters are plain ASCII. Two particular sets of values may cause encoding issues:
- File path parameters may contain non-ASCII characters and should use the exact same byte values as found in the file system directory. Usually, this means that the configuration file should use the system default locale encoding.
- The unac_except_trans parameter should be encoded in UTF-8.
If your system locale is not UTF-8, and you need to also specify non-ASCII file paths, this poses a difficulty because common text editors cannot handle multiple encodings in a single file. In this relatively unlikely case, you can edit the configuration file as two separate text files with appropriate encodings, and concatenate them to create the complete configuration.

Environment variables

RECOLL_CONFDIR: Defines the main configuration directory.
RECOLL_TMPDIR, TMPDIR: Locations for temporary files, in this order of priority. The default, if none of these is set, is to use /tmp. Big temporary files may be created during indexing, mostly for decompressing, and also for processing, e.g., email attachments.
RECOLL_CONFTOP, RECOLL_CONFMID: Allow adding configuration directories with priorities above and below the user directory respectively (see the Configuration overview section above for details).
RECOLL_EXTRA_DBS, RECOLL_ACTIVE_EXTRA_DBS: Help with setting up external indexes. See the section about external indexes for explanations.
RECOLL_DATADIR: Defines a replacement for the default location of Recoll data files, normally found in, e.g., /usr/share/recoll.
RECOLL_FILTERSDIR: Defines a replacement for the default location of Recoll filters, normally found in, e.g., /usr/share/recoll/filters.
ASPELL_PROG: The aspell program to use for creating the spelling dictionary. The result has to be compatible with the libaspell which &RCL; is using.

&RCLCONF;

The fields file

This file contains information about dynamic fields handling in &RCL;. Some very basic fields have hard-wired behaviour and, mostly, you should not change the original data inside the fields file. But you can create custom fields fitting your data and handle them just as if they were native ones.

The fields file has several sections, each of which defines an aspect of fields processing. Quite often, you will have to modify several sections to obtain the desired behaviour. We will only give a short description here; you should refer to the comments inside the default file for more detailed information.
Field names should be lowercase alphabetic ASCII.

[prefixes]: A field becomes indexed (searchable) by having a prefix defined in this section. There is a more complete explanation elsewhere in the manual of what prefixes are, and of those in use by a standard recoll installation. In a nutshell: extension prefixes should be all caps, begin with XY, and be short. E.g. XYMFLD.

[values]: Fields listed in this section will be stored as &XAP; values inside the index. This makes them available for range queries, allowing filtering of results according to the field value. This feature currently supports string and integer data. See the comments in the file for more detail.

[stored]: A field becomes stored (displayable inside results) by having its name listed in this section (typically with an empty value).

[aliases]: This section defines lists of synonyms for the canonical names used inside the [prefixes] and [stored] sections.

[queryaliases]: This section also defines aliases for the canonical field names, with the difference that the substitution will only be used at query time, avoiding any possibility that the value would pick up random metadata from documents.

handler-specific sections: Some input handlers may need specific configuration for handling fields. Only the email message handler currently has such a section (named [mail]). It allows indexing arbitrary email headers in addition to the ones indexed by default. Other such sections may appear in the future.

Here follows a small example of a personal fields file. This would extract a specific email header and use it as a searchable field, with data displayable inside result lists. (Side note: as the email handler does no decoding on the values, only plain ASCII headers can be indexed, and only the first occurrence will be used for headers that occur several times.)

    [prefixes]
    # Index mailmytag contents (with the given prefix)
    mailmytag = XMTAG

    [stored]
    # Store mailmytag inside the document data record (so that it can be
    # displayed - as %(mailmytag) - in result lists).
    mailmytag =

    [queryaliases]
    filename = fn
    containerfilename = cfn

    [mail]
    # Extract the X-My-Tag mail header, and use it internally with the
    # mailmytag field name
    x-my-tag = mailmytag

Extended attributes in the fields file

&RCL; versions 1.19 and later process user extended file attributes as document fields by default. Attributes are processed as fields of the same name, after removing the user prefix on Linux. The [xattrtofields] section of the fields file allows specifying translations from extended attribute names to &RCL; field names. An empty translation disables use of the corresponding attribute data.

The mimemap file

mimemap specifies the file name extension to MIME type mappings. For file names without an extension, or with an unknown one, a system command (file, or xdg-mime) will be executed to determine the MIME type (this can be switched off, or the command changed, inside the main configuration file).

All extension values in mimemap must be entered in lower case. File name extensions are lower-cased for comparison during indexing, meaning that an upper case mimemap entry will never be matched.

The mappings can be specified on a per-subtree basis, which may be useful in some cases. Example: okular notes have a .xml extension but should be handled specially, which is possible because they are usually all located in one place.
Example:

    [~/.kde/share/apps/okular/docdata]
    .xml = application/x-okular-notes

The recoll_noindex mimemap variable has been moved to recoll.conf and renamed to noContentSuffixes, while keeping the same function, as of &RCL; version 1.21. For older &RCL; versions, see the documentation for noContentSuffixes, but use recoll_noindex in mimemap.

The mimeconf file

The main purpose of the mimeconf file is to specify how the different MIME types are handled for indexing. This is done in the [index] section, which should not be modified casually. See the comments in the file.

The file also contains other definitions which affect the query language and the GUI, and which, in retrospect, should have been stored elsewhere.

The [icons] section allows you to change the icons which are displayed by the recoll GUI in the result lists (the values are the basenames of the png images inside the iconsdir directory, which is itself defined in recoll.conf).

The [categories] section defines the groupings of MIME types into categories as used when adding an rclcat clause to a query language query. rclcat clauses are also used by the default guifilters buttons in the GUI (see next).

The filter controls appear at the top of the recoll GUI, either as checkboxes just above the result list, or as a dropbox in the tool area. By default, they are labeled media, message, other, presentation, spreadsheet and text, and each maps to a document category. This is determined in the [guifilters] section, where each control is defined by a variable naming a query language fragment. A simple example will hopefully make things clearer:

    [guifilters]
    Big Books = dir:"~/My Books" size>10K
    My Docs = dir:"~/My Documents"
    Small Books = dir:"~/My Books" size<10K
    System Docs = dir:/usr/share/doc

The above definition would create four filter checkboxes, labelled Big Books, My Docs, etc. The text after the equal sign must be a valid query language fragment, and, when the button is checked, it will be combined with the rest of the query with an AND conjunction.

Any name text before a colon character will be erased in the display, but used for sorting. You can use this to display the checkboxes in any order you like. For example, the following would do exactly the same as above, but ordering the checkboxes in the reverse order:

    [guifilters]
    d:Big Books = dir:"~/My Books" size>10K
    c:My Docs = dir:"~/My Documents"
    b:Small Books = dir:"~/My Books" size<10K
    a:System Docs = dir:/usr/share/doc

As you may have guessed, the default [guifilters] section looks like:

    [guifilters]
    text = rclcat:text
    spreadsheet = rclcat:spreadsheet
    presentation = rclcat:presentation
    media = rclcat:media
    message = rclcat:message
    other = rclcat:other

The mimeview file

mimeview specifies which programs are started when you click on an Open link in a result list. E.g.: HTML is normally displayed using firefox, but you may prefer Konqueror, your openoffice.org program might be named oofice instead of openoffice, etc.

Changes to this file can be done by direct editing, or through the recoll GUI preferences dialog.

If Use desktop preferences to choose document editor is checked in the &RCL; GUI preferences, all mimeview entries will be ignored except the one labelled application/x-all (which is set to use xdg-open by default). In this case, the xallexcepts top level variable defines a list of MIME type exceptions which will be processed according to the local entries instead of being passed to the desktop.
This is so that specific &RCL; options such as a page number or a search string can be passed to applications that support them, such as the evince viewer.

As for the other configuration files, the normal usage is to have a mimeview inside your own configuration directory, with just the non-default entries, which will override those from the central configuration file. All viewer definition entries must be placed under a [view] section.

The keys in the file are normally MIME types. You can add an application tag to specialize the choice for an area of the filesystem (using a localfields specification in mimeconf). The syntax for the key is mimetype|tag.

The nouncompforviewmts entry (placed at the top level, outside of the [view] section) holds a list of MIME types that should not be uncompressed before starting the viewer (if they are found compressed, e.g. mydoc.doc.gz).

The right side of each assignment holds a command to be executed for opening the file. The following substitutions are performed:

%D: Document date.
%f: File name. This may be the name of a temporary file if it was necessary to create one (e.g. to extract a subdocument from a container).
%i: Internal path, for subdocuments of containers. The format depends on the container type. If this appears in the command line, &RCL; will not create a temporary file to extract the subdocument, expecting the called application (possibly a script) to be able to handle it.
%M: MIME type.
%p: Page index. Only significant for a subset of document types, currently only PDF, Postscript and DVI files. Can be used to start the editor at the right page for a match or snippet.
%s: Search term. The value will only be set for documents with indexed page numbers (i.e. PDF). The value will be one of the matched search terms. It would allow pre-setting the value in the "Find" entry inside Evince, for example, for easy highlighting of the term.
%u: Url.

In addition to the predefined values above, all strings like %(fieldname) will be replaced by the value of the field named fieldname for the document. This could be used in combination with field customisation to help with opening the document.

The ptrans file

ptrans specifies query-time path translations. These can be useful in multiple cases. The file has a section for any index which needs translations, either the main one or additional query indexes. The sections are named with the &XAP; index directory names. No slash character should exist at the end of the paths (all comparisons are textual). An example should make things sufficiently clear:

    [/home/me/.recoll/xapiandb]
    /this/directory/moved = /to/this/place

    [/path/to/additional/xapiandb]
    /server/volume1/docdir = /net/server/volume1/docdir
    /server/volume2/docdir = /net/server/volume2/docdir

Examples of configuration adjustments

Adding an external viewer for a non-indexed type

Imagine that you have some kind of file which does not have indexable content, but for which you would like to have a functional Open link in the result list (when found by file name). The file names end in .blob and can be displayed by the application blobviewer.

You need two entries in the configuration files for this to work. In $RECOLL_CONFDIR/mimemap (typically ~/.recoll/mimemap), add the following line:

    .blob = application/x-blobapp

Note that the MIME type is made up here, and you could call it diesel/oil just the same.
In $RECOLL_CONFDIR/mimeview, under the [view] section, add:

    application/x-blobapp = blobviewer %f

We are supposing that blobviewer wants a file name parameter here; you would use %u if it liked URLs better.

If you just wanted to change the application used by &RCL; to display a MIME type which it already knows, you would just need to edit mimeview. The entries you add in your personal file override those in the central configuration, which you do not need to alter. mimeview can also be modified from the GUI.

Adding indexing support for a new file type

Let us now imagine that the above .blob files actually contain indexable text and that you know how to extract it with a command line program. Getting &RCL; to index the files is easy. You need to perform the above alteration, and also to add data to the mimeconf file (typically in ~/.recoll/mimeconf).

Under the [index] section, add the following line (more about the rclblob indexing script below):

    application/x-blobapp = exec rclblob

Or, if the files are mostly text and you don't need to process them for indexing:

    application/x-blobapp = internal text/plain

Under the [icons] section, you should choose an icon to be displayed for the files inside the result lists. Icons are normally 64x64 pixel PNG files which live in /usr/share/recoll/images.

Under the [categories] section, you should add the MIME type where it makes sense (you can also create a category). Categories may be used for filtering in advanced search.

The rclblob handler should be an executable program or script which exists inside /usr/share/recoll/filters. It will be given a file name as argument and should output the text or html contents on the standard output. The filter programming section describes in more detail how to write an input handler.
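To make the handler contract concrete, here is a minimal sketch of what such an rclblob script could look like. Only the calling convention (one file name argument, text or HTML on standard output) comes from the description above; the way the text is extracted (decoding the raw bytes as UTF-8) is a placeholder assumption, and a real handler would run whatever extraction the .blob format requires:

    #!/usr/bin/env python3
    # rclblob: sketch of an input handler for application/x-blobapp.
    # Called with a single file name argument; writes the document contents
    # (as simple HTML here) to standard output.
    import html
    import sys

    def main():
        if len(sys.argv) != 2:
            print("Usage: rclblob <file>", file=sys.stderr)
            return 1
        with open(sys.argv[1], "rb") as f:
            data = f.read()
        # Placeholder "extraction": pretend the file is plain UTF-8 text.
        text = data.decode("utf-8", errors="replace")
        sys.stdout.write("<html><head><meta charset=\"utf-8\"></head><body><pre>\n")
        sys.stdout.write(html.escape(text))
        sys.stdout.write("\n</pre></body></html>\n")
        return 0

    if __name__ == "__main__":
        sys.exit(main())

The script would be installed executable under /usr/share/recoll/filters, as stated above, so that the exec rclblob line in mimeconf can find it.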
recoll-1.26.3/aclocal.m40000644000175000017500000025271413566731623011673 00000000000000# generated automatically by aclocal 1.15 -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY, to the extent permitted by law; without # even the implied warranty of MERCHANTABILITY or FITNESS FOR A # PARTICULAR PURPOSE. m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])]) m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],, [m4_warning([this file was generated for autoconf 2.69. You have another version of autoconf. It may work, but is not guaranteed to. If you have problems, you may need to regenerate the build system entirely. To do so, use the procedure documented by the package, typically 'autoreconf'.])]) # lib-ld.m4 serial 6 dnl Copyright (C) 1996-2003, 2009-2016 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl Subroutines of libtool.m4, dnl with replacements s/_*LT_PATH/AC_LIB_PROG/ and s/lt_/acl_/ to avoid dnl collision with libtool.m4. dnl From libtool-2.4. Sets the variable with_gnu_ld to yes or no. AC_DEFUN([AC_LIB_PROG_LD_GNU], [AC_CACHE_CHECK([if the linker ($LD) is GNU ld], [acl_cv_prog_gnu_ld], [# I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 /dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. AC_MSG_CHECKING([for ld used by $CC]) case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [[\\/]]* | ?:[[\\/]]*) re_direlt='/[[^/]][[^/]]*/\.\./' # Canonicalize the pathname of ld ac_prog=`echo "$ac_prog"| sed 's%\\\\%/%g'` while echo "$ac_prog" | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then AC_MSG_CHECKING([for GNU ld]) else AC_MSG_CHECKING([for non-GNU ld]) fi AC_CACHE_VAL([acl_cv_path_LD], [if test -z "$LD"; then acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$acl_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 = 1.10 to complain if config.rpath is missing. 
m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([config.rpath])]) AC_REQUIRE([AC_PROG_CC]) dnl we use $CC, $GCC, $LDFLAGS AC_REQUIRE([AC_LIB_PROG_LD]) dnl we use $LD, $with_gnu_ld AC_REQUIRE([AC_CANONICAL_HOST]) dnl we use $host AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT]) dnl we use $ac_aux_dir AC_CACHE_CHECK([for shared library run path origin], [acl_cv_rpath], [ CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done ]) wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" dnl Determine whether the user wants rpath handling at all. AC_ARG_ENABLE([rpath], [ --disable-rpath do not hardcode runtime library paths], :, enable_rpath=yes) ]) dnl AC_LIB_FROMPACKAGE(name, package) dnl declares that libname comes from the given package. The configure file dnl will then not have a --with-libname-prefix option but a dnl --with-package-prefix option. Several libraries can come from the same dnl package. This declaration must occur before an AC_LIB_LINKFLAGS or similar dnl macro call that searches for libname. AC_DEFUN([AC_LIB_FROMPACKAGE], [ pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_frompackage_]NAME, [$2]) popdef([NAME]) pushdef([PACK],[$2]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) define([acl_libsinpackage_]PACKUP, m4_ifdef([acl_libsinpackage_]PACKUP, [m4_defn([acl_libsinpackage_]PACKUP)[, ]],)[lib$1]) popdef([PACKUP]) popdef([PACK]) ]) dnl AC_LIB_LINKFLAGS_BODY(name [, dependencies]) searches for libname and dnl the libraries corresponding to explicit and implicit dependencies. dnl Sets the LIB${NAME}, LTLIB${NAME} and INC${NAME} variables. dnl Also, sets the LIB${NAME}_PREFIX variable to nonempty if libname was found dnl in ${LIB${NAME}_PREFIX}/$acl_libdirstem. AC_DEFUN([AC_LIB_LINKFLAGS_BODY], [ AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) pushdef([NAME],[m4_translit([$1],[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACK],[m4_ifdef([acl_frompackage_]NAME, [acl_frompackage_]NAME, lib[$1])]) pushdef([PACKUP],[m4_translit(PACK,[abcdefghijklmnopqrstuvwxyz./+-], [ABCDEFGHIJKLMNOPQRSTUVWXYZ____])]) pushdef([PACKLIBS],[m4_ifdef([acl_frompackage_]NAME, [acl_libsinpackage_]PACKUP, lib[$1])]) dnl Autoconf >= 2.61 supports dots in --with options. pushdef([P_A_C_K],[m4_if(m4_version_compare(m4_defn([m4_PACKAGE_VERSION]),[2.61]),[-1],[m4_translit(PACK,[.],[_])],PACK)]) dnl By default, look in $includedir and $libdir. 
use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_ARG_WITH(P_A_C_K[-prefix], [[ --with-]]P_A_C_K[[-prefix[=DIR] search for ]PACKLIBS[ in DIR/include and DIR/lib --without-]]P_A_C_K[[-prefix don't search for ]PACKLIBS[ in includedir and libdir]], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" if test "$acl_libdirstem2" != "$acl_libdirstem" \ && ! test -d "$withval/$acl_libdirstem"; then additional_libdir="$withval/$acl_libdirstem2" fi fi fi ]) dnl Search the library and its dependencies in $additional_libdir and dnl $LDFLAGS. Using breadth-first-seach. LIB[]NAME= LTLIB[]NAME= INC[]NAME= LIB[]NAME[]_PREFIX= dnl HAVE_LIB${NAME} is an indicator that LIB${NAME}, LTLIB${NAME} have been dnl computed. So it has to be reset here. HAVE_LIB[]NAME= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='$1 $2' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" dnl See if it was already located by an earlier AC_LIB_LINKFLAGS dnl or AC_LIB_HAVE_LINKFLAGS call. uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$value" else dnl An earlier call to AC_LIB_HAVE_LINKFLAGS has determined dnl that this library doesn't exist. So just drop it. : fi else dnl Search the library lib$name in $additional_libdir and $LDFLAGS dnl and the already constructed $LIBNAME/$LTLIBNAME. found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" dnl The same code as in the loop below: dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. 
if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` dnl First look for a shared library. if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi dnl Then look for a static library. if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then dnl Found the library. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then dnl Linking with a shared library. We attempt to hardcode its dnl directory into the executable's runpath, unless it's the dnl standard /usr/lib. if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2"; then dnl No hardcoding is needed. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl Use an explicit option to hardcode DIR into the resulting dnl binary. dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi dnl The hardcoding into $LIBNAME is system dependent. if test "$acl_hardcode_direct" = yes; then dnl Using DIR/libNAME.so during linking hardcodes DIR into the dnl resulting binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode DIR into the resulting dnl binary. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else dnl Rely on "-L$found_dir". dnl But don't add it if it's already contained in the LDFLAGS dnl or the already constructed $LIBNAME haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. 
LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_so" else dnl We cannot use $acl_hardcode_runpath_var and LD_RUN_PATH dnl here, because this doesn't fit in flags passed to the dnl compiler. So give up. No hardcoding. This affects only dnl very old systems. dnl FIXME: Not sure whether we should use dnl "-L$found_dir -l$name" or "-L$found_dir $found_so" dnl here. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then dnl Linking with a static library. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$found_a" else dnl We shouldn't come here, but anyway it's good to have a dnl fallback. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$found_dir -l$name" fi fi dnl Assume the include files are nearby. additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = '$1'; then LIB[]NAME[]_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then dnl Potentially add $additional_includedir to $INCNAME. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's /usr/local/include and we are using GCC on Linux, dnl 3. if it's already present in $CPPFLAGS or the already dnl constructed $INCNAME, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INC[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $INCNAME. INC[]NAME="${INC[]NAME}${INC[]NAME:+ }-I$additional_includedir" fi fi fi fi fi dnl Look for dependencies. if test -n "$found_la"; then dnl Read the .la file. It defines the variables dnl dlname, library_names, old_library, dependency_libs, current, dnl age, revision, installed, dlopen, dlpreopen, libdir. save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" dnl We use only dependency_libs. for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` dnl Potentially add $additional_libdir to $LIBNAME and $LTLIBNAME. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's /usr/local/lib and we are using GCC on Linux, dnl 3. if it's already present in $LDFLAGS or the already dnl constructed $LIBNAME, dnl 4. if it doesn't exist as a directory. 
if test "X$additional_libdir" != "X/usr/$acl_libdirstem" \ && test "X$additional_libdir" != "X/usr/$acl_libdirstem2"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$additional_libdir" = "X/usr/local/$acl_libdirstem2"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LIBNAME. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIB[]NAME; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LTLIBNAME. LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then dnl Potentially add DIR to rpathdirs. dnl The rpathdirs will be appended to $LIBNAME at the end. haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi dnl Potentially add DIR to ltrpathdirs. dnl The ltrpathdirs will be appended to $LTLIBNAME at the end. haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) dnl Handle this in the next round. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) dnl Handle this in the next round. Throw away the .la's dnl directory; it is already contained in a preceding -L dnl option. names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) dnl Most likely an immediate library name. LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$dep" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }$dep" ;; esac done fi else dnl Didn't find the library; assume it is in the system directories dnl known to the linker and runtime loader. (All the system dnl directories known to the linker should also be known to the dnl runtime loader, otherwise the system is severely misconfigured.) LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }-l$name" LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user must dnl pass all path elements in one option. We can arrange that for a dnl single library, but not when more than one $LIBNAMEs are used. alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done dnl Note: acl_hardcode_libdir_flag_spec uses $libdir and $wl. acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" else dnl The -rpath options are cumulative. for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIB[]NAME="${LIB[]NAME}${LIB[]NAME:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then dnl When using libtool, the option that works for both libraries and dnl executables is -R. 
The -R options are cumulative. for found_dir in $ltrpathdirs; do LTLIB[]NAME="${LTLIB[]NAME}${LTLIB[]NAME:+ }-R$found_dir" done fi popdef([P_A_C_K]) popdef([PACKLIBS]) popdef([PACKUP]) popdef([PACK]) popdef([NAME]) ]) dnl AC_LIB_APPENDTOVAR(VAR, CONTENTS) appends the elements of CONTENTS to VAR, dnl unless already present in VAR. dnl Works only for CPPFLAGS, not for LIB* variables because that sometimes dnl contains two or three consecutive elements that belong together. AC_DEFUN([AC_LIB_APPENDTOVAR], [ for element in [$2]; do haveit= for x in $[$1]; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then [$1]="${[$1]}${[$1]:+ }$element" fi done ]) dnl For those cases where a variable contains several -L and -l options dnl referring to unknown libraries and directories, this macro determines the dnl necessary additional linker options for the runtime path. dnl AC_LIB_LINKFLAGS_FROM_LIBS([LDADDVAR], [LIBSVALUE], [USE-LIBTOOL]) dnl sets LDADDVAR to linker options needed together with LIBSVALUE. dnl If USE-LIBTOOL evaluates to non-empty, linking with libtool is assumed, dnl otherwise linking without libtool is assumed. AC_DEFUN([AC_LIB_LINKFLAGS_FROM_LIBS], [ AC_REQUIRE([AC_LIB_RPATH]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) $1= if test "$enable_rpath" != no; then if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then dnl Use an explicit option to hardcode directories into the resulting dnl binary. rpathdirs= next= for opt in $2; do if test -n "$next"; then dir="$next" dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2"; then rpathdirs="$rpathdirs $dir" fi next= else case $opt in -L) next=yes ;; -L*) dir=`echo "X$opt" | sed -e 's,^X-L,,'` dnl No need to hardcode the standard /usr/lib. if test "X$dir" != "X/usr/$acl_libdirstem" \ && test "X$dir" != "X/usr/$acl_libdirstem2"; then rpathdirs="$rpathdirs $dir" fi next= ;; *) next= ;; esac fi done if test "X$rpathdirs" != "X"; then if test -n ""$3""; then dnl libtool is used for linking. Use -R options. for dir in $rpathdirs; do $1="${$1}${$1:+ }-R$dir" done else dnl The linker is used for linking directly. if test -n "$acl_hardcode_libdir_separator"; then dnl Weird platform: only the last -rpath option counts, the user dnl must pass all path elements in one option. alldirs= for dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="$flag" else dnl The -rpath options are cumulative. for dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" $1="${$1}${$1:+ }$flag" done fi fi fi fi fi AC_SUBST([$1]) ]) # lib-prefix.m4 serial 7 (gettext-0.18) dnl Copyright (C) 2001-2005, 2008-2016 Free Software Foundation, Inc. dnl This file is free software; the Free Software Foundation dnl gives unlimited permission to copy and/or distribute it, dnl with or without modifications, as long as this notice is preserved. dnl From Bruno Haible. dnl AC_LIB_ARG_WITH is synonymous to AC_ARG_WITH in autoconf-2.13, and dnl similar to AC_ARG_WITH in autoconf 2.52...2.57 except that is doesn't dnl require excessive bracketing. 
ifdef([AC_HELP_STRING], [AC_DEFUN([AC_LIB_ARG_WITH], [AC_ARG_WITH([$1],[[$2]],[$3],[$4])])], [AC_DEFUN([AC_][LIB_ARG_WITH], [AC_ARG_WITH([$1],[$2],[$3],[$4])])]) dnl AC_LIB_PREFIX adds to the CPPFLAGS and LDFLAGS the flags that are needed dnl to access previously installed libraries. The basic assumption is that dnl a user will want packages to use other packages he previously installed dnl with the same --prefix option. dnl This macro is not needed if only AC_LIB_LINKFLAGS is used to locate dnl libraries, but is otherwise very convenient. AC_DEFUN([AC_LIB_PREFIX], [ AC_BEFORE([$0], [AC_LIB_LINKFLAGS]) AC_REQUIRE([AC_PROG_CC]) AC_REQUIRE([AC_CANONICAL_HOST]) AC_REQUIRE([AC_LIB_PREPARE_MULTILIB]) AC_REQUIRE([AC_LIB_PREPARE_PREFIX]) dnl By default, look in $includedir and $libdir. use_additional=yes AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) AC_LIB_ARG_WITH([lib-prefix], [ --with-lib-prefix[=DIR] search for libraries in DIR/include and DIR/lib --without-lib-prefix don't search for libraries in includedir and libdir], [ if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then AC_LIB_WITH_FINAL_PREFIX([ eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" ]) else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" fi fi ]) if test $use_additional = yes; then dnl Potentially add $additional_includedir to $CPPFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/include, dnl 2. if it's already present in $CPPFLAGS, dnl 3. if it's /usr/local/include and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_includedir" != "X/usr/include"; then haveit= for x in $CPPFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_includedir"; then dnl Really add $additional_includedir to $CPPFLAGS. CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }-I$additional_includedir" fi fi fi fi dnl Potentially add $additional_libdir to $LDFLAGS. dnl But don't add it dnl 1. if it's the standard /usr/lib, dnl 2. if it's already present in $LDFLAGS, dnl 3. if it's /usr/local/lib and we are using GCC on Linux, dnl 4. if it doesn't exist as a directory. if test "X$additional_libdir" != "X/usr/$acl_libdirstem"; then haveit= for x in $LDFLAGS; do AC_LIB_WITH_FINAL_PREFIX([eval x=\"$x\"]) if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem"; then if test -n "$GCC"; then case $host_os in linux*) haveit=yes;; esac fi fi if test -z "$haveit"; then if test -d "$additional_libdir"; then dnl Really add $additional_libdir to $LDFLAGS. LDFLAGS="${LDFLAGS}${LDFLAGS:+ }-L$additional_libdir" fi fi fi fi fi ]) dnl AC_LIB_PREPARE_PREFIX creates variables acl_final_prefix, dnl acl_final_exec_prefix, containing the values to which $prefix and dnl $exec_prefix will expand at the end of the configure script. AC_DEFUN([AC_LIB_PREPARE_PREFIX], [ dnl Unfortunately, prefix and exec_prefix get only finally determined dnl at the end of configure. 
if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" ]) dnl AC_LIB_WITH_FINAL_PREFIX([statement]) evaluates statement, with the dnl variables prefix and exec_prefix bound to the values they will have dnl at the end of the configure script. AC_DEFUN([AC_LIB_WITH_FINAL_PREFIX], [ acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" $1 exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" ]) dnl AC_LIB_PREPARE_MULTILIB creates dnl - a variable acl_libdirstem, containing the basename of the libdir, either dnl "lib" or "lib64" or "lib/64", dnl - a variable acl_libdirstem2, as a secondary possible value for dnl acl_libdirstem, either the same as acl_libdirstem or "lib/sparcv9" or dnl "lib/amd64". AC_DEFUN([AC_LIB_PREPARE_MULTILIB], [ dnl There is no formal standard regarding lib and lib64. dnl On glibc systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib64 and 32-bit libraries go under $prefix/lib. We determine dnl the compiler's default mode by looking at the compiler's library search dnl path. If at least one of its elements ends in /lib64 or points to a dnl directory whose absolute pathname ends in /lib64, we assume a 64-bit ABI. dnl Otherwise we use the default, namely "lib". dnl On Solaris systems, the current practice is that on a system supporting dnl 32-bit and 64-bit instruction sets or ABIs, 64-bit libraries go under dnl $prefix/lib/64 (which is a symlink to either $prefix/lib/sparcv9 or dnl $prefix/lib/amd64) and 32-bit libraries go under $prefix/lib. AC_REQUIRE([AC_CANONICAL_HOST]) acl_libdirstem=lib acl_libdirstem2= case "$host_os" in solaris*) dnl See Solaris 10 Software Developer Collection > Solaris 64-bit Developer's Guide > The Development Environment dnl . dnl "Portable Makefiles should refer to any library directories using the 64 symbolic link." dnl But we want to recognize the sparcv9 or amd64 subdirectory also if the dnl symlink is missing, so we set acl_libdirstem2 too. AC_CACHE_CHECK([for 64-bit host], [gl_cv_solaris_64bit], [AC_EGREP_CPP([sixtyfour bits], [ #ifdef _LP64 sixtyfour bits #endif ], [gl_cv_solaris_64bit=yes], [gl_cv_solaris_64bit=no]) ]) if test $gl_cv_solaris_64bit = yes; then acl_libdirstem=lib/64 case "$host_cpu" in sparc*) acl_libdirstem2=lib/sparcv9 ;; i*86 | x86_64) acl_libdirstem2=lib/amd64 ;; esac fi ;; *) searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib64/ | */lib64 ) acl_libdirstem=lib64 ;; */../ | */.. ) # Better ignore directories of this form. They are misleading. ;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib64 ) acl_libdirstem=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" fi ;; esac test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" ]) # Copyright (C) 2002-2014 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_AUTOMAKE_VERSION(VERSION) # ---------------------------- # Automake X.Y traces this macro to ensure aclocal.m4 has been # generated from the m4 files accompanying Automake X.Y. # (This private macro should not be called outside this file.) AC_DEFUN([AM_AUTOMAKE_VERSION], [am__api_version='1.15' dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to dnl require some minimum version. Point them to the right macro. m4_if([$1], [1.15], [], [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl ]) # _AM_AUTOCONF_VERSION(VERSION) # ----------------------------- # aclocal traces this macro to find the Autoconf version. # This is a private macro too. Using m4_define simplifies # the logic in aclocal, which can simply ignore this definition. m4_define([_AM_AUTOCONF_VERSION], []) # AM_SET_CURRENT_AUTOMAKE_VERSION # ------------------------------- # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. # This function is AC_REQUIREd by AM_INIT_AUTOMAKE. AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], [AM_AUTOMAKE_VERSION([1.15])dnl m4_ifndef([AC_AUTOCONF_VERSION], [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) # AM_AUX_DIR_EXPAND -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets # $ac_aux_dir to '$srcdir/foo'. In other projects, it is set to # '$srcdir', '$srcdir/..', or '$srcdir/../..'. # # Of course, Automake must honor this variable whenever it calls a # tool from the auxiliary directory. The problem is that $srcdir (and # therefore $ac_aux_dir as well) can be either absolute or relative, # depending on how configure is run. This is pretty annoying, since # it makes $ac_aux_dir quite unusable in subdirectories: in the top # source directory, any form will work fine, but in subdirectories a # relative path needs to be adjusted first. # # $ac_aux_dir/missing # fails when called from a subdirectory if $ac_aux_dir is relative # $top_srcdir/$ac_aux_dir/missing # fails if $ac_aux_dir is absolute, # fails when called from a subdirectory in a VPATH build with # a relative $ac_aux_dir # # The reason of the latter failure is that $top_srcdir and $ac_aux_dir # are both prefixed by $srcdir. In an in-source build this is usually # harmless because $srcdir is '.', but things will broke when you # start a VPATH build or use an absolute $srcdir. # # So we could use something similar to $top_srcdir/$ac_aux_dir/missing, # iff we strip the leading $srcdir from $ac_aux_dir. That would be: # am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` # and then we would define $MISSING as # MISSING="\${SHELL} $am_aux_dir/missing" # This will work as long as MISSING is not called from configure, because # unfortunately $(top_srcdir) has no meaning in configure. # However there are other variables, like CC, which are often used in # configure, and could therefore not use this "fixed" $ac_aux_dir. # # Another solution, used here, is to always expand $ac_aux_dir to an # absolute PATH. 
The drawback is that using absolute paths prevent a # configured tree to be moved without reconfiguration. AC_DEFUN([AM_AUX_DIR_EXPAND], [AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` ]) # AM_CONDITIONAL -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_CONDITIONAL(NAME, SHELL-CONDITION) # ------------------------------------- # Define a conditional. AC_DEFUN([AM_CONDITIONAL], [AC_PREREQ([2.52])dnl m4_if([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl AC_SUBST([$1_TRUE])dnl AC_SUBST([$1_FALSE])dnl _AM_SUBST_NOTMAKE([$1_TRUE])dnl _AM_SUBST_NOTMAKE([$1_FALSE])dnl m4_define([_AM_COND_VALUE_$1], [$2])dnl if $2; then $1_TRUE= $1_FALSE='#' else $1_TRUE='#' $1_FALSE= fi AC_CONFIG_COMMANDS_PRE( [if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then AC_MSG_ERROR([[conditional "$1" was never defined. Usually this means the macro was only invoked conditionally.]]) fi])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be # written in clear, in which case automake, when reading aclocal.m4, # will think it sees a *use*, and therefore will trigger all it's # C support machinery. Also note that it means that autoscan, seeing # CC etc. in the Makefile, will ask for an AC_PROG_CC use... # _AM_DEPENDENCIES(NAME) # ---------------------- # See how the compiler implements dependency checking. # NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC". # We try a few techniques and use that to set a single cache variable. # # We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was # modified to invoke _AM_DEPENDENCIES(CC); we would have a circular # dependency, and given that the user is not expected to run this macro, # just rely on AC_PROG_CC. AC_DEFUN([_AM_DEPENDENCIES], [AC_REQUIRE([AM_SET_DEPDIR])dnl AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl AC_REQUIRE([AM_MAKE_INCLUDE])dnl AC_REQUIRE([AM_DEP_TRACK])dnl m4_if([$1], [CC], [depcc="$CC" am_compiler_list=], [$1], [CXX], [depcc="$CXX" am_compiler_list=], [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'], [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'], [$1], [UPC], [depcc="$UPC" am_compiler_list=], [$1], [GCJ], [depcc="$GCJ" am_compiler_list='gcc3 gcc'], [depcc="$$1" am_compiler_list=]) AC_CACHE_CHECK([dependency style of $depcc], [am_cv_$1_dependencies_compiler_type], [if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. 
For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_$1_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` fi am__universal=false m4_case([$1], [CC], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac], [CXX], [case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac]) for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_$1_dependencies_compiler_type=$depmode break fi fi done cd .. 
rm -rf conftest.dir else am_cv_$1_dependencies_compiler_type=none fi ]) AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) AM_CONDITIONAL([am__fastdep$1], [ test "x$enable_dependency_tracking" != xno \ && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) ]) # AM_SET_DEPDIR # ------------- # Choose a directory name for dependency files. # This macro is AC_REQUIREd in _AM_DEPENDENCIES. AC_DEFUN([AM_SET_DEPDIR], [AC_REQUIRE([AM_SET_LEADING_DOT])dnl AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl ]) # AM_DEP_TRACK # ------------ AC_DEFUN([AM_DEP_TRACK], [AC_ARG_ENABLE([dependency-tracking], [dnl AS_HELP_STRING( [--enable-dependency-tracking], [do not reject slow dependency extractors]) AS_HELP_STRING( [--disable-dependency-tracking], [speeds up one-time build])]) if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) AC_SUBST([AMDEPBACKSLASH])dnl _AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl AC_SUBST([am__nodep])dnl _AM_SUBST_NOTMAKE([am__nodep])dnl ]) # Generate code to set up dependency tracking. -*- Autoconf -*- # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_OUTPUT_DEPENDENCY_COMMANDS # ------------------------------ AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], [{ # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. # We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`AS_DIRNAME("$mf")` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`AS_DIRNAME(["$file"])` AS_MKDIR_P([$dirpart/$fdir]) # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ])# _AM_OUTPUT_DEPENDENCY_COMMANDS # AM_OUTPUT_DEPENDENCY_COMMANDS # ----------------------------- # This macro should only be invoked once -- use via AC_REQUIRE. 
# # This code is only required when automatic dependency tracking # is enabled. FIXME. This creates each '.P' file that we will # need in order to bootstrap the dependency handling code. AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], [AC_CONFIG_COMMANDS([depfiles], [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) ]) # Do all the work for Automake. -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # This macro actually does too much. Some checks are only needed if # your package does certain things. But this isn't really a big deal. dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O. m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC]) [_AM_PROG_CC_C_O ]) # AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) # AM_INIT_AUTOMAKE([OPTIONS]) # ----------------------------------------------- # The call with PACKAGE and VERSION arguments is the old style # call (pre autoconf-2.50), which is being phased out. PACKAGE # and VERSION should now be passed to AC_INIT and removed from # the call to AM_INIT_AUTOMAKE. # We support both call styles for the transition. After # the next Automake release, Autoconf can make the AC_INIT # arguments mandatory, and then we can depend on a new Autoconf # release and drop the old call support. AC_DEFUN([AM_INIT_AUTOMAKE], [AC_PREREQ([2.65])dnl dnl Autoconf wants to disallow AM_ names. We explicitly allow dnl the ones we care about. m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl AC_REQUIRE([AC_PROG_INSTALL])dnl if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl # test to see if srcdir already configured if test -f $srcdir/config.status; then AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi AC_SUBST([CYGPATH_W]) # Define the identity of the package. dnl Distinguish between old-style and new-style calls. m4_ifval([$2], [AC_DIAGNOSE([obsolete], [$0: two- and three-arguments forms are deprecated.]) m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl AC_SUBST([PACKAGE], [$1])dnl AC_SUBST([VERSION], [$2])], [_AM_SET_OPTIONS([$1])dnl dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. m4_if( m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]), [ok:ok],, [m4_fatal([AC_INIT should be called with package and version arguments])])dnl AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl _AM_IF_OPTION([no-define],, [AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package]) AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl # Some tools Automake needs. 
AC_REQUIRE([AM_SANITY_CHECK])dnl AC_REQUIRE([AC_ARG_PROGRAM])dnl AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}]) AM_MISSING_PROG([AUTOCONF], [autoconf]) AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}]) AM_MISSING_PROG([AUTOHEADER], [autoheader]) AM_MISSING_PROG([MAKEINFO], [makeinfo]) AC_REQUIRE([AM_PROG_INSTALL_SH])dnl AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl AC_REQUIRE([AC_PROG_MKDIR_P])dnl # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # AC_SUBST([mkdir_p], ['$(MKDIR_P)']) # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. AC_REQUIRE([AC_PROG_AWK])dnl AC_REQUIRE([AC_PROG_MAKE_SET])dnl AC_REQUIRE([AM_SET_LEADING_DOT])dnl _AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], [_AM_PROG_TAR([v7])])]) _AM_IF_OPTION([no-dependencies],, [AC_PROVIDE_IFELSE([AC_PROG_CC], [_AM_DEPENDENCIES([CC])], [m4_define([AC_PROG_CC], m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_CXX], [_AM_DEPENDENCIES([CXX])], [m4_define([AC_PROG_CXX], m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJC], [_AM_DEPENDENCIES([OBJC])], [m4_define([AC_PROG_OBJC], m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl AC_PROVIDE_IFELSE([AC_PROG_OBJCXX], [_AM_DEPENDENCIES([OBJCXX])], [m4_define([AC_PROG_OBJCXX], m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl ]) AC_REQUIRE([AM_SILENT_RULES])dnl dnl The testsuite driver may need to know about EXEEXT, so add the dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below. AC_CONFIG_COMMANDS_PRE(dnl [m4_provide_if([_AM_COMPILER_EXEEXT], [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. 
END AC_MSG_ERROR([Your 'rm' program is bad, sorry.]) fi fi dnl The trailing newline in this macro's definition is deliberate, for dnl backward compatibility and to allow trailing 'dnl'-style comments dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841. ]) dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion. Do not dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further dnl mangled by Autoconf and run in a shell conditional statement. m4_define([_AC_COMPILER_EXEEXT], m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) # When config.status generates a header, we must update the stamp-h file. # This file resides in the same directory as the config header # that is generated. The stamp files are numbered to have different names. # Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the # loop where config.status creates the headers, so we can generate # our stamp files there. AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], [# Compute $1's index in $config_headers. _am_arg=$1 _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_SH # ------------------ # Define $install_sh. AC_DEFUN([AM_PROG_INSTALL_SH], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi AC_SUBST([install_sh])]) # Copyright (C) 2003-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # Check whether the underlying file-system supports filenames # with a leading dot. For instance MS-DOS doesn't. AC_DEFUN([AM_SET_LEADING_DOT], [rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null AC_SUBST([am__leading_dot])]) # Check to see how 'make' treats includes. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MAKE_INCLUDE() # ----------------- # Check to see how make treats includes. AC_DEFUN([AM_MAKE_INCLUDE], [am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. AC_MSG_CHECKING([for style of include used by $am_make]) am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. 
if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi AC_SUBST([am__include]) AC_SUBST([am__quote]) AC_MSG_RESULT([$_am_result]) rm -f confinc confmf ]) # Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- # Copyright (C) 1997-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_MISSING_PROG(NAME, PROGRAM) # ------------------------------ AC_DEFUN([AM_MISSING_PROG], [AC_REQUIRE([AM_MISSING_HAS_RUN]) $1=${$1-"${am_missing_run}$2"} AC_SUBST($1)]) # AM_MISSING_HAS_RUN # ------------------ # Define MISSING if not defined so far and test if it is modern enough. # If it is, set am_missing_run to use it, otherwise, to nothing. AC_DEFUN([AM_MISSING_HAS_RUN], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([missing])dnl if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= AC_MSG_WARN(['missing' script is too old or missing]) fi ]) # Helper functions for option handling. -*- Autoconf -*- # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_MANGLE_OPTION(NAME) # ----------------------- AC_DEFUN([_AM_MANGLE_OPTION], [[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) # _AM_SET_OPTION(NAME) # -------------------- # Set option NAME. Presently that only means defining a flag for this option. AC_DEFUN([_AM_SET_OPTION], [m4_define(_AM_MANGLE_OPTION([$1]), [1])]) # _AM_SET_OPTIONS(OPTIONS) # ------------------------ # OPTIONS is a space-separated list of Automake options. AC_DEFUN([_AM_SET_OPTIONS], [m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) # _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) # ------------------------------------------- # Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. AC_DEFUN([_AM_IF_OPTION], [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) # Copyright (C) 1999-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_CC_C_O # --------------- # Like AC_PROG_CC_C_O, but changed for automake. We rewrite AC_PROG_CC # to automatically call this. AC_DEFUN([_AM_PROG_CC_C_O], [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl AC_REQUIRE_AUX_FILE([compile])dnl AC_LANG_PUSH([C])dnl AC_CACHE_CHECK( [whether $CC understands -c and -o together], [am_cv_prog_cc_c_o], [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])]) # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. 
am_cv_prog_cc_c_o=yes for am_i in 1 2; do if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i]) if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi AC_LANG_POP([C])]) # For backward compatibility. AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_RUN_LOG(COMMAND) # ------------------- # Run COMMAND, save the exit status in ac_status, and log it. # (This has been adapted from Autoconf's _AC_RUN_LOG macro.) AC_DEFUN([AM_RUN_LOG], [{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD ac_status=$? echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD (exit $ac_status); }]) # Check to make sure that the build environment is sane. -*- Autoconf -*- # Copyright (C) 1996-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SANITY_CHECK # --------------- AC_DEFUN([AM_SANITY_CHECK], [AC_MSG_CHECKING([whether build environment is sane]) # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[[\\\"\#\$\&\'\`$am_lf]]*) AC_MSG_ERROR([unsafe absolute working directory name]);; esac case $srcdir in *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$[*]" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$[*]" != "X $srcdir/configure conftest.file" \ && test "$[*]" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". AC_MSG_ERROR([ls -t appears to fail. Make sure there is not a broken alias in your environment]) fi if test "$[2]" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$[2]" = conftest.file ) then # Ok. : else AC_MSG_ERROR([newly created file is older than distributed files! Check your system clock]) fi AC_MSG_RESULT([yes]) # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. 
am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi AC_CONFIG_COMMANDS_PRE( [AC_MSG_CHECKING([that generated files are newer than configure]) if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi AC_MSG_RESULT([done])]) rm -f conftest.file ]) # Copyright (C) 2009-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_SILENT_RULES([DEFAULT]) # -------------------------- # Enable less verbose build rules; with the default set to DEFAULT # ("yes" being less verbose, "no" or empty being verbose). AC_DEFUN([AM_SILENT_RULES], [AC_ARG_ENABLE([silent-rules], [dnl AS_HELP_STRING( [--enable-silent-rules], [less verbose build output (undo: "make V=1")]) AS_HELP_STRING( [--disable-silent-rules], [verbose build output (undo: "make V=0")])dnl ]) case $enable_silent_rules in @%:@ ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);; esac dnl dnl A few 'make' implementations (e.g., NonStop OS and NextStep) dnl do not support nested variable expansions. dnl See automake bug#9928 and bug#10237. am_make=${MAKE-make} AC_CACHE_CHECK([whether $am_make supports nested variables], [am_cv_make_support_nested_variables], [if AS_ECHO([['TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi]) if test $am_cv_make_support_nested_variables = yes; then dnl Using '$V' instead of '$(V)' breaks IRIX make. AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AC_SUBST([AM_V])dnl AM_SUBST_NOTMAKE([AM_V])dnl AC_SUBST([AM_DEFAULT_V])dnl AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl AC_SUBST([AM_DEFAULT_VERBOSITY])dnl AM_BACKSLASH='\' AC_SUBST([AM_BACKSLASH])dnl _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl ]) # Copyright (C) 2001-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # AM_PROG_INSTALL_STRIP # --------------------- # One issue with vendor 'install' (even GNU) is that you can't # specify the program used to strip binaries. This is especially # annoying in cross-compiling environments, where the build's strip # is unlikely to handle the host's binaries. # Fortunately install-sh will honor a STRIPPROG variable, so we # always use install-sh in "make install-strip", and initialize # STRIPPROG with the value of the STRIP variable (set by the user). AC_DEFUN([AM_PROG_INSTALL_STRIP], [AC_REQUIRE([AM_PROG_INSTALL_SH])dnl # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. dnl Don't test for $cross_compiling = yes, because it might be 'maybe'. if test "$cross_compiling" != no; then AC_CHECK_TOOL([STRIP], [strip], :) fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" AC_SUBST([INSTALL_STRIP_PROGRAM])]) # Copyright (C) 2006-2014 Free Software Foundation, Inc. 
# # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_SUBST_NOTMAKE(VARIABLE) # --------------------------- # Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. # This macro is traced by Automake. AC_DEFUN([_AM_SUBST_NOTMAKE]) # AM_SUBST_NOTMAKE(VARIABLE) # -------------------------- # Public sister of _AM_SUBST_NOTMAKE. AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) # Check how to create a tarball. -*- Autoconf -*- # Copyright (C) 2004-2014 Free Software Foundation, Inc. # # This file is free software; the Free Software Foundation # gives unlimited permission to copy and/or distribute it, # with or without modifications, as long as this notice is preserved. # _AM_PROG_TAR(FORMAT) # -------------------- # Check how to create a tarball in format FORMAT. # FORMAT should be one of 'v7', 'ustar', or 'pax'. # # Substitute a variable $(am__tar) that is a command # writing to stdout a FORMAT-tarball containing the directory # $tardir. # tardir=directory && $(am__tar) > result.tar # # Substitute a variable $(am__untar) that extract such # a tarball read from stdin. # $(am__untar) < result.tar # AC_DEFUN([_AM_PROG_TAR], [# Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AC_SUBST([AMTAR], ['$${TAR-tar}']) # We'll loop over all known methods to create a tar archive until one works. _am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none' m4_if([$1], [v7], [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], [m4_case([$1], [ustar], [# The POSIX 1988 'ustar' format is defined with fixed-size fields. # There is notably a 21 bits limit for the UID and the GID. In fact, # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343 # and bug#13588). am_max_uid=2097151 # 2^21 - 1 am_max_gid=$am_max_uid # The $UID and $GID variables are not portable, so we need to resort # to the POSIX-mandated id(1) utility. Errors in the 'id' calls # below are definitely unexpected, so allow the users to see them # (that is, avoid stderr redirection). am_uid=`id -u || echo unknown` am_gid=`id -g || echo unknown` AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format]) if test $am_uid -le $am_max_uid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format]) if test $am_gid -le $am_max_gid; then AC_MSG_RESULT([yes]) else AC_MSG_RESULT([no]) _am_tools=none fi], [pax], [], [m4_fatal([Unknown tar format])]) AC_MSG_CHECKING([how to create a $1 tar archive]) # Go ahead even if we have the value already cached. We do so because we # need to set the values for the 'am__tar' and 'am__untar' variables. _am_tools=${am_cv_prog_tar_$1-$_am_tools} for _am_tool in $_am_tools; do case $_am_tool in gnutar) for _am_tar in tar gnutar gtar; do AM_RUN_LOG([$_am_tar --version]) && break done am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"' am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"' am__untar="$_am_tar -xf -" ;; plaintar) # Must skip GNU tar: if it does not support --format= it doesn't create # ustar tarball either. 
(tar --version) >/dev/null 2>&1 && continue am__tar='tar chf - "$$tardir"' am__tar_='tar chf - "$tardir"' am__untar='tar xf -' ;; pax) am__tar='pax -L -x $1 -w "$$tardir"' am__tar_='pax -L -x $1 -w "$tardir"' am__untar='pax -r' ;; cpio) am__tar='find "$$tardir" -print | cpio -o -H $1 -L' am__tar_='find "$tardir" -print | cpio -o -H $1 -L' am__untar='cpio -i -H $1 -d' ;; none) am__tar=false am__tar_=false am__untar=false ;; esac # If the value was cached, stop now. We just wanted to have am__tar # and am__untar set. test -n "${am_cv_prog_tar_$1}" && break # tar/untar a dummy directory, and stop if the command works. rm -rf conftest.dir mkdir conftest.dir echo GrepMe > conftest.dir/file AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar]) rm -rf conftest.dir if test -s conftest.tar; then AM_RUN_LOG([$am__untar /dev/null 2>&1 && break fi done rm -rf conftest.dir AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool]) AC_MSG_RESULT([$am_cv_prog_tar_$1])]) AC_SUBST([am__tar]) AC_SUBST([am__untar]) ]) # _AM_PROG_TAR m4_include([m4/libtool.m4]) m4_include([m4/ltoptions.m4]) m4_include([m4/ltsugar.m4]) m4_include([m4/ltversion.m4]) m4_include([m4/lt~obsolete.m4]) recoll-1.26.3/index/0000755000175000017500000000000013570165410011175 500000000000000recoll-1.26.3/index/mimetype.h0000644000175000017500000000264213533651561013131 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _MIMETYPE_H_INCLUDED_ #define _MIMETYPE_H_INCLUDED_ #include "safesysstat.h" #include class RclConfig; /** * Try to determine a mime type for file. * * If stp is not null, this may imply more than matching the suffix, * the name must be usable to actually access file data. * @param filename file/path name to use * @param stp if not null use st_mode bits for directories etc. * @param cfg recoll config * @param usfc Use system's 'file' command as last resort (or not) */ std::string mimetype(const std::string &filename, const struct stat *stp, RclConfig *cfg, bool usfc); #endif /* _MIMETYPE_H_INCLUDED_ */ recoll-1.26.3/index/indexer.h0000644000175000017500000001043513533651561012735 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _INDEXER_H_INCLUDED_ #define _INDEXER_H_INCLUDED_ #include "rclconfig.h" #include #include #include #include #include #include "rcldb.h" #include "rcldoc.h" #include "idxstatus.h" class FsIndexer; class WebQueueIndexer; /** Callback to say what we're doing. If the update func returns false, we * stop as soon as possible without corrupting state */ class DbIxStatusUpdater { public: #ifdef IDX_THREADS std::mutex m_mutex; #endif DbIxStatus status; virtual ~DbIxStatusUpdater(){} // Convenience: change phase/fn and update virtual bool update(DbIxStatus::Phase phase, const string& fn) { #ifdef IDX_THREADS std::unique_lock lock(m_mutex); #endif status.phase = phase; status.fn = fn; return update(); } // To be implemented by user for sending info somewhere virtual bool update() = 0; }; /** * The top level batch indexing object. Processes the configuration, * then invokes file system walking or other to populate/update the * database(s). */ class ConfIndexer { public: enum runStatus {IndexerOk, IndexerError}; ConfIndexer(RclConfig *cnf, DbIxStatusUpdater *updfunc = 0); virtual ~ConfIndexer(); // Indexer types. Maybe we'll have something more dynamic one day enum ixType {IxTNone, IxTFs=1, IxTWebQueue=2, IxTAll = IxTFs | IxTWebQueue}; // Misc indexing flags enum IxFlag {IxFNone = 0, IxFIgnoreSkip = 1, // Ignore skipped lists IxFNoWeb = 2, // Do not process the web queue. // First pass: just do the top files so that the user can // try searching asap. IxFQuickShallow = 4, // Do not retry files which previously failed ('+' sigs) IxFNoRetryFailed = 8, // Do perform purge pass even if we can't be sure we saw // all files IxFDoPurge = 16, }; /** Run indexers */ bool index(bool resetbefore, ixType typestorun, int f = IxFNone); const string &getReason() {return m_reason;} /** Stemming reset to config: create needed, delete unconfigured */ bool createStemmingDatabases(); /** Create stem database for given language */ bool createStemDb(const string &lang); /** Create misspelling expansion dictionary if aspell i/f is available */ bool createAspellDict(); /** List possible stemmer names */ static vector getStemmerNames(); /** Index a list of files. No db cleaning or stemdb updating */ bool indexFiles(std::list &files, int f = IxFNone); /** Update index for list of documents given as list of docs (out of query) */ bool updateDocs(vector &docs, IxFlag f = IxFNone); /** Purge a list of files. */ bool purgeFiles(std::list &files, int f = IxFNone); /** Set in place reset mode */ void setInPlaceReset() {m_db.setInPlaceReset();} private: RclConfig *m_config; Rcl::Db m_db; FsIndexer *m_fsindexer; bool m_doweb; WebQueueIndexer *m_webindexer; DbIxStatusUpdater *m_updater; string m_reason; // The first time we index, we do things a bit differently to // avoid user frustration (make at least some results available // fast by using several passes, the first ones to index common // interesting locations). 
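// (Aside: illustrative sketch only, not part of the original header. A
// minimal DbIxStatusUpdater subclass as the interfaces declared above
// expect one, and how a caller might drive ConfIndexer with it. The class
// and variable names are hypothetical; only the pure virtual update() has
// to be supplied, and returning false from it asks the indexer to stop as
// soon as possible without corrupting state.
//
//     class LoggingUpdater : public DbIxStatusUpdater {
//     public:
//         virtual bool update() {
//             std::cerr << "phase " << int(status.phase)
//                       << " file [" << status.fn << "]\n";
//             return true; // false requests an early, clean stop
//         }
//     };
//
//     // ConfIndexer indexer(config, new LoggingUpdater);
//     // indexer.index(false, ConfIndexer::IxTAll);
// )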
bool runFirstIndexing(); bool firstFsIndexingSequence(); }; #endif /* _INDEXER_H_INCLUDED_ */ recoll-1.26.3/index/rclmon.h0000644000175000017500000000660613533651561012576 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RCLMON_H_INCLUDED_ #define _RCLMON_H_INCLUDED_ #include "autoconfig.h" #ifdef RCL_MONITOR /** * Definitions for the real-time monitoring recoll. * We're interested in file modifications, deletions and renaming. * We use two threads, one to receive events from the source, the * other to perform adequate processing. * * The two threads communicate through an event buffer which is * actually a hash map indexed by file path for easy coalescing of * multiple events to the same file. */ #include #include #include #include #include "rclconfig.h" #ifndef NO_NAMESPACES using std::string; using std::multimap; #endif /** * Monitoring event: something changed in the filesystem */ class RclMonEvent { public: enum EvType {RCLEVT_NONE= 0, RCLEVT_MODIFY=1, RCLEVT_DELETE=2, RCLEVT_DIRCREATE=3, RCLEVT_ISDIR=0x10}; string m_path; // Type and flags int m_etyp; ///// For fast changing files: minimum time interval before reindex // Minimum interval (from config) int m_itvsecs; // Don't process this entry before: time_t m_minclock; // Changed since put in purgatory after reindex bool m_needidx; RclMonEvent() : m_etyp(RCLEVT_NONE), m_itvsecs(0), m_minclock(0), m_needidx(false) {} EvType evtype() {return EvType(m_etyp & 0xf);} int evflags() {return m_etyp & 0xf0;} }; enum RclMonitorOption {RCLMON_NONE=0, RCLMON_NOFORK=1, RCLMON_NOX11=2, RCLMON_NOCONFCHECK=4}; /** * Monitoring event queue. This is the shared object between the main thread * (which does the actual indexing work), and the monitoring thread which * receives events from FAM / inotify / etc. */ class RclEQData; class RclMonEventQueue { public: RclMonEventQueue(); ~RclMonEventQueue(); /** Wait for event or timeout. Returns with the queue locked */ std::unique_lock wait(int secs = -1, bool *timedout = 0); /** Add event. 
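 * Illustrative usage sketch (not from the original source): the receiver
 * thread would typically fill an event and queue it roughly like this,
 * with "queue" standing for the shared RclMonEventQueue instance:
 *
 *     RclMonEvent ev;
 *     ev.m_path = "/some/indexed/file";        // hypothetical path
 *     ev.m_etyp = RclMonEvent::RCLEVT_MODIFY;  // file content changed
 *     queue.pushEvent(ev);
 *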
*/ bool pushEvent(const RclMonEvent &ev); /** To all threads: end processing */ void setTerminate(); bool ok(); bool empty(); RclMonEvent pop(); void setopts(int opts); // Convenience function for initially communicating config to mon thr void setConfig(RclConfig *conf); RclConfig *getConfig(); private: RclEQData *m_data; }; /** Start monitoring on the topdirs described in conf */ extern bool startMonitor(RclConfig *conf, int flags); /** Main routine for the event receiving thread */ extern void *rclMonRcvRun(void *); // Specific debug macro for monitor synchronization events #define MONDEB LOGDEB2 #endif // RCL_MONITOR #endif /* _RCLMON_H_INCLUDED_ */ recoll-1.26.3/index/fsfetcher.cpp0000644000175000017500000000524013533651561013601 00000000000000/* Copyright (C) 2012-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include "safesysstat.h" #include "log.h" #include "cstr.h" #include "fetcher.h" #include "fsfetcher.h" #include "fsindexer.h" #include "pathut.h" using std::string; static DocFetcher::Reason urltopath(RclConfig* cnf, const Rcl::Doc& idoc, string& fn, struct stat& st) { // The url has to be like file:// fn = fileurltolocalpath(idoc.url); if (fn.empty()) { LOGERR("FSDocFetcher::fetch/sig: non fs url: [" << idoc.url << "]\n"); return DocFetcher::FetchOther; } cnf->setKeyDir(path_getfather(fn)); bool follow = false; cnf->getConfParam("followLinks", &follow); if (path_fileprops(fn, &st, follow) < 0) { LOGERR("FSDocFetcher::fetch: stat errno " << errno << " for [" << fn << "]\n"); return DocFetcher::FetchNotExist; } return DocFetcher::FetchOk; } bool FSDocFetcher::fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out) { string fn; if (urltopath(cnf, idoc, fn, out.st) != DocFetcher::FetchOk) return false; out.kind = RawDoc::RDK_FILENAME; out.data = fn; return true; } bool FSDocFetcher::makesig(RclConfig* cnf, const Rcl::Doc& idoc, string& sig) { string fn; struct stat st; if (urltopath(cnf, idoc, fn, st) != DocFetcher::FetchOk) return false; FsIndexer::makesig(&st, sig); return true; } DocFetcher::Reason FSDocFetcher::testAccess(RclConfig* cnf, const Rcl::Doc& idoc) { string fn; struct stat st; DocFetcher::Reason reason = urltopath(cnf, idoc, fn, st); if (reason != DocFetcher::FetchOk) { return reason; } if (!path_readable(fn)) { return DocFetcher::FetchNoPerm; } // We have no way to know if the file is fully readable without // trying (local Windows locks), which would take too much time. 
return DocFetcher::FetchOther; } recoll-1.26.3/index/mimetype.cpp0000644000175000017500000001674313533651561013473 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef TEST_MIMETYPE #include "autoconfig.h" #include "safesysstat.h" #include #include #include #include "mimetype.h" #include "log.h" #include "execmd.h" #include "rclconfig.h" #include "smallut.h" #include "idfile.h" #include "pxattr.h" using namespace std; /// Identification of file from contents. This is called for files with /// unrecognized extensions. /// /// The system 'file' utility does not always work for us. For exemple /// it will mistake mail folders for simple text files if there is no /// 'Received' header, which would be the case, for exemple in a /// 'Sent' folder. Also "file -i" does not exist on all systems, and /// is quite costly to execute. /// So we first call the internal file identifier, which currently /// only knows about mail, but in which we can add the more /// current/interesting file types. /// As a last resort we execute 'file' or its configured replacement /// (except if forbidden by config) static string mimetypefromdata(RclConfig *cfg, const string &fn, bool usfc) { LOGDEB1("mimetypefromdata: fn [" << fn << "]\n"); // First try the internal identifying routine string mime = idFile(fn.c_str()); #ifdef USE_SYSTEM_FILE_COMMAND if (usfc && mime.empty()) { // Last resort: use "file -i", or its configured replacement. // 'file' fallback if the configured command (default: // xdg-mime) is not found static const vector tradfilecmd = {{FILE_PROG}, {"-i"}}; vector cmd; string scommand; if (cfg->getConfParam("systemfilecommand", scommand)) { LOGDEB2("mimetype: syscmd from config: " << scommand << "\n"); stringToStrings(scommand, cmd); string exe; if (cmd.empty()) { cmd = tradfilecmd; } else if (!ExecCmd::which(cmd[0], exe)) { cmd = tradfilecmd; } else { cmd[0] = exe; } cmd.push_back(fn); } else { LOGDEB("mimetype:systemfilecommand not found, using " << stringsToString(tradfilecmd) << "\n"); cmd = tradfilecmd; } string result; LOGDEB2("mimetype: executing: [" << stringsToString(cmd) << "]\n"); if (!ExecCmd::backtick(cmd, result)) { LOGERR("mimetypefromdata: exec " << stringsToString(cmd) << " failed\n"); return string(); } trimstring(result, " \t\n\r"); LOGDEB2("mimetype: systemfilecommand output [" << result << "]\n"); // The normal output from "file -i" looks like the following: // thefilename.xxx: text/plain; charset=us-ascii // Sometimes the semi-colon is missing like in: // mimetype.cpp: text/x-c charset=us-ascii // And sometimes we only get the mime type. This apparently happens // when 'file' believes that the file name is binary // xdg-mime only outputs the MIME type. 
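// (For instance, an xdg-mime based command would print just something like
// "text/plain" for this file, with no leading file name and no colon; an
// illustrative value, the exact output depends on the local MIME database.)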
// If there is no colon and there is a slash, this is hopefully // the mime type if (result.find_first_of(":") == string::npos && result.find_first_of("/") != string::npos) { return result; } // Else the result should begin with the file name. Get rid of it: if (result.find(fn) != 0) { // Garbage "file" output. Maybe the result of a charset // conversion attempt? LOGERR("mimetype: can't interpret output from [" << stringsToString(cmd) << "] : [" << result << "]\n"); return string(); } result = result.substr(fn.size()); // Now should look like ": text/plain; charset=us-ascii" // Split it, and take second field list<string> res; stringToStrings(result, res); if (res.size() <= 1) return string(); list<string>::iterator it = res.begin(); mime = *++it; // Remove possible semi-colon at the end trimstring(mime, " \t;"); // File -i will sometimes return strange stuff (ie: "very small file") if(mime.find("/") == string::npos) mime.clear(); } #endif //USE_SYSTEM_FILE_COMMAND return mime; } /// Guess mime type, first from suffix, then from file data. We also /// have a list of suffixes that we don't touch at all. string mimetype(const string &fn, const struct stat *stp, RclConfig *cfg, bool usfc) { // Use stat data if available to check for non-regular files if (stp) { // Note: the value used for directories is different from what // file -i would print on Linux (inode/directory). Probably // comes from bsd. This may surprise a user trying to use a // 'mime:' filter with the query language, but it's not worth // changing (would force a reindex). if (S_ISDIR(stp->st_mode)) return "inode/directory"; if (S_ISLNK(stp->st_mode)) return "inode/symlink"; if (!S_ISREG(stp->st_mode)) return "inode/x-fsspecial"; // Empty files are just this: avoid further errors with actual filters. if (stp->st_size == 0) return "inode/x-empty"; } string mtype; #ifndef _WIN32 // Extended attribute has priority on everything, as per: // http://freedesktop.org/wiki/CommonExtendedAttributes if (pxattr::get(fn, "mime_type", &mtype)) { LOGDEB0("Mimetype: 'mime_type' xattr : [" << mtype << "]\n"); if (mtype.empty()) { LOGDEB0("Mimetype: getxattr() returned empty mime type !\n"); } else { return mtype; } } #endif if (cfg == 0) { LOGERR("Mimetype: null config ??\n"); return mtype; } if (cfg->inStopSuffixes(fn)) { LOGDEB("mimetype: fn [" << fn << "] in stopsuffixes\n"); return mtype; } // Compute file name suffix and search the mimetype map string::size_type dot = fn.find_first_of("."); while (dot != string::npos) { string suff = stringtolower(fn.substr(dot)); mtype = cfg->getMimeTypeFromSuffix(suff); if (!mtype.empty() || dot >= fn.size() - 1) break; dot = fn.find_first_of(".", dot + 1); } // If type was not determined from suffix, examine file data. Can // only do this if we have an actual file (as opposed to a pure // name).
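// Worked example of the suffix search above (values are illustrative, not
// from any real configuration): for a name like "report.tar.gz" the loop
// first tries ".tar.gz", then ".gz"; if neither is in the suffix map and a
// stat structure was provided, the content-based identification below runs.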
if (mtype.empty() && stp) mtype = mimetypefromdata(cfg, fn, usfc); return mtype; } #else // TEST-> #include #include "safesysstat.h" #include #include #include "log.h" #include "rclconfig.h" #include "rclinit.h" #include "mimetype.h" using namespace std; int main(int argc, const char **argv) { string reason; RclConfig *config = recollinit(0, 0, 0, reason); if (config == 0 || !config->ok()) { string str = "Configuration problem: "; str += reason; fprintf(stderr, "%s\n", str.c_str()); exit(1); } while (--argc > 0) { string filename = *++argv; struct stat st; if (lstat(filename.c_str(), &st)) { fprintf(stderr, "Can't stat %s\n", filename.c_str()); continue; } cout << filename << " -> " << mimetype(filename, &st, config, true) << endl; } return 0; } #endif // TEST recoll-1.26.3/index/exefetcher.cpp0000644000175000017500000001021513533651561013750 00000000000000/* Copyright (C) 2016 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include "exefetcher.h" #include "log.h" #include "pathut.h" #include "rclconfig.h" #include "execmd.h" #include "rcldoc.h" using namespace std; class EXEDocFetcher::Internal { public: string bckid; vector sfetch; vector smkid; bool docmd(const vector& cmd, const Rcl::Doc& idoc, string& out) { ExecCmd ecmd; // We're always called for preview (or Open) ecmd.putenv("RECOLL_FILTER_FORPREVIEW=yes"); string udi; idoc.getmeta(Rcl::Doc::keyudi, &udi); vector args(cmd); args.push_back(udi); args.push_back(idoc.url); args.push_back(idoc.ipath); int status = ecmd.doexec1(args, 0, &out); if (status == 0) { LOGDEB("EXEDocFetcher::Internal: got [" << out << "]\n"); return true; } else { LOGERR("EXEDOcFetcher::fetch: " << bckid << ": " << stringsToString(cmd) << " failed for " << udi << " " << idoc.url << " " << idoc.ipath << "\n"); return false; } } }; EXEDocFetcher::EXEDocFetcher(const EXEDocFetcher::Internal& _m) { m = new Internal(_m); LOGDEB("EXEDocFetcher::EXEDocFetcher: fetch is " << stringsToString(m->sfetch) << "\n"); } bool EXEDocFetcher::fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out) { out.kind = RawDoc::RDK_DATADIRECT; return m->docmd(m->sfetch, idoc, out.data); } bool EXEDocFetcher::makesig(RclConfig* cnf, const Rcl::Doc& idoc, string& sig) { return m->docmd(m->smkid, idoc, sig); } // Lookup bckid in the config and create an appropriate fetcher. std::unique_ptr exeDocFetcherMake(RclConfig *config, const string& bckid) { // The config we only read once, not gonna change. 
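// Illustrative sketch of the kind of "backends" configuration entry this
// code expects; the backend name and command names are hypothetical:
//
//     [MYBACKEND]
//     fetch = rclmybck-fetch
//     makesig = rclmybck-makesig
//
// "fetch" and "makesig" are looked up below in the section named after
// bckid, and their first word must resolve to an executable found in the
// PATH or in the filters directory.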
static ConfSimple *bconf; if (!bconf) { string bconfname = path_cat(config->getConfDir(), "backends"); LOGDEB("exeDocFetcherMake: using config in " << bconfname << "\n"); bconf = new ConfSimple(bconfname.c_str(), true); if (!bconf->ok()) { delete bconf; bconf = 0; LOGDEB("exeDocFetcherMake: bad/no config: " << bconfname << "\n"); return 0; } } EXEDocFetcher::Internal m; m.bckid = bckid; string sfetch; if (!bconf->get("fetch", sfetch, bckid) || sfetch.empty()) { LOGERR("exeDocFetcherMake: no 'fetch' for [" << bckid << "]\n"); return 0; } stringToStrings(sfetch, m.sfetch); // We look up the command as we do for filters for now m.sfetch[0] = config->findFilter(m.sfetch[0]); if (!path_isabsolute(m.sfetch[0])) { LOGERR("exeDocFetcherMake: " << m.sfetch[0] << " not found in exec path or filters dir\n"); return 0; } string smkid; if (!bconf->get("makesig", smkid, bckid) || smkid.empty()) { LOGDEB("exeDocFetcherMake: no 'makesig' for [" << bckid << "]\n"); return 0; } stringToStrings(smkid, m.smkid); m.smkid[0] = config->findFilter(m.smkid[0]); if (!path_isabsolute(m.smkid[0])) { LOGERR("exeDocFetcherMake: " << m.smkid[0] << " not found in exec path or filters dir\n"); return 0; } return std::unique_ptr(new EXEDocFetcher(m)); } recoll-1.26.3/index/fsindexer.cpp0000644000175000017500000007437613533651561013637 00000000000000/* Copyright (C) 2009 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "fsindexer.h" #include #include #include #include "safesysstat.h" #include #include #include #include #include "cstr.h" #include "pathut.h" #include "rclutil.h" #include "conftree.h" #include "rclconfig.h" #include "fstreewalk.h" #include "rcldb.h" #include "readfile.h" #include "indexer.h" #include "transcode.h" #include "log.h" #include "internfile.h" #include "smallut.h" #include "chrono.h" #include "wipedir.h" #include "fileudi.h" #include "cancelcheck.h" #include "rclinit.h" #include "extrameta.h" #include "utf8fn.h" using namespace std; #ifdef IDX_THREADS class DbUpdTask { public: // Take some care to avoid sharing string data (if string impl is cow) DbUpdTask(const string& u, const string& p, const Rcl::Doc& d) : udi(u.begin(), u.end()), parent_udi(p.begin(), p.end()) { d.copyto(&doc); } string udi; string parent_udi; Rcl::Doc doc; }; extern void *FsIndexerDbUpdWorker(void*); class InternfileTask { public: // Take some care to avoid sharing string data (if string impl is cow) InternfileTask(const std::string &f, const struct stat *i_stp, map lfields) : fn(f.begin(), f.end()), statbuf(*i_stp) { map_ss_cp_noshr(lfields, &localfields); } string fn; struct stat statbuf; map localfields; }; extern void *FsIndexerInternfileWorker(void*); #endif // IDX_THREADS // Thread safe variation of the "missing helpers" storage. 
Only the // addMissing method needs protection, the rest are called from the // main thread either before or after the exciting part class FSIFIMissingStore : public FIMissingStore { #ifdef IDX_THREADS std::mutex m_mutex; #endif public: virtual void addMissing(const string& prog, const string& mt) { #ifdef IDX_THREADS std::unique_lock locker(m_mutex); #endif FIMissingStore::addMissing(prog, mt); } }; FsIndexer::FsIndexer(RclConfig *cnf, Rcl::Db *db, DbIxStatusUpdater *updfunc) : m_config(cnf), m_db(db), m_updater(updfunc), m_missing(new FSIFIMissingStore), m_detectxattronly(false), m_noretryfailed(false) #ifdef IDX_THREADS , m_iwqueue("Internfile", cnf->getThrConf(RclConfig::ThrIntern).first), m_dwqueue("Split", cnf->getThrConf(RclConfig::ThrSplit).first) #endif // IDX_THREADS { LOGDEB1("FsIndexer::FsIndexer\n"); m_havelocalfields = m_config->hasNameAnywhere("localfields"); m_config->getConfParam("detectxattronly", &m_detectxattronly); #ifdef IDX_THREADS m_stableconfig = new RclConfig(*m_config); m_haveInternQ = m_haveSplitQ = false; int internqlen = cnf->getThrConf(RclConfig::ThrIntern).first; int internthreads = cnf->getThrConf(RclConfig::ThrIntern).second; if (internqlen >= 0) { if (!m_iwqueue.start(internthreads, FsIndexerInternfileWorker, this)) { LOGERR("FsIndexer::FsIndexer: intern worker start failed\n"); return; } m_haveInternQ = true; } int splitqlen = cnf->getThrConf(RclConfig::ThrSplit).first; int splitthreads = cnf->getThrConf(RclConfig::ThrSplit).second; if (splitqlen >= 0) { if (!m_dwqueue.start(splitthreads, FsIndexerDbUpdWorker, this)) { LOGERR("FsIndexer::FsIndexer: split worker start failed\n"); return; } m_haveSplitQ = true; } LOGDEB("FsIndexer: threads: haveIQ " << m_haveInternQ << " iql " << internqlen << " iqts " << internthreads << " haveSQ " << m_haveSplitQ << " sql " << splitqlen << " sqts " << splitthreads << "\n"); #endif // IDX_THREADS } FsIndexer::~FsIndexer() { LOGDEB1("FsIndexer::~FsIndexer()\n"); #ifdef IDX_THREADS void *status; if (m_haveInternQ) { status = m_iwqueue.setTerminateAndWait(); LOGDEB0("FsIndexer: internfile wrkr status: "<< status << " (1->ok)\n"); } if (m_haveSplitQ) { status = m_dwqueue.setTerminateAndWait(); LOGDEB0("FsIndexer: dbupd worker status: " << status << " (1->ok)\n"); } delete m_stableconfig; #endif // IDX_THREADS delete m_missing; } bool FsIndexer::init() { if (m_tdl.empty()) { m_tdl = m_config->getTopdirs(); if (m_tdl.empty()) { LOGERR("FsIndexers: no topdirs list defined\n"); return false; } } return true; } // Recursively index each directory in the topdirs: bool FsIndexer::index(int flags) { bool quickshallow = (flags & ConfIndexer::IxFQuickShallow) != 0; m_noretryfailed = (flags & ConfIndexer::IxFNoRetryFailed) != 0; Chrono chron; if (!init()) return false; if (m_updater) { #ifdef IDX_THREADS std::unique_lock locker(m_updater->m_mutex); #endif m_updater->status.dbtotdocs = m_db->docCnt(); } m_walker.setSkippedPaths(m_config->getSkippedPaths()); if (quickshallow) { m_walker.setOpts(m_walker.getOpts() | FsTreeWalker::FtwSkipDotFiles); m_walker.setMaxDepth(2); } for (const auto& topdir : m_tdl) { LOGDEB("FsIndexer::index: Indexing " << topdir << " into " << getDbDir() << "\n"); // If a topdirs member appears to be not here or not mounted // (empty), avoid deleting all the related index content by // marking the current docs as existing. 
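// A minimal sketch of the producer/consumer idea behind the
// Internfile and Split queues used below, with only standard thread
// primitives. Recoll's WorkQueue adds bounded depth, idle waiting and
// worker status collection; this just shows the put()/take()/
// setTerminate() skeleton.
#if 0
#include <condition_variable>
#include <mutex>
#include <queue>

template <class T> class TinyQueueSketch {
public:
    // Producer side: push a task and wake one worker.
    void put(T t) {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_tasks.push(std::move(t));
        m_cond.notify_one();
    }
    // Worker side: block until a task arrives or termination is
    // requested. Returning false is how workers know they should exit.
    bool take(T& t) {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_cond.wait(lock, [this] { return m_done || !m_tasks.empty(); });
        if (m_tasks.empty())
            return false;
        t = std::move(m_tasks.front());
        m_tasks.pop();
        return true;
    }
    // Ask the workers to exit once the queue has drained.
    void setTerminate() {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_done = true;
        m_cond.notify_all();
    }
private:
    std::queue<T> m_tasks;
    std::mutex m_mutex;
    std::condition_variable m_cond;
    bool m_done{false};
};
#endif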
if (path_empty(topdir)) { m_db->udiTreeMarkExisting(topdir); continue; } // Set the current directory in config so that subsequent // getConfParams() will get local values m_config->setKeyDir(topdir); // Adjust the "follow symlinks" option bool follow; int opts = m_walker.getOpts(); if (m_config->getConfParam("followLinks", &follow) && follow) { opts |= FsTreeWalker::FtwFollow; } else { opts &= ~FsTreeWalker::FtwFollow; } m_walker.setOpts(opts); int abslen; if (m_config->getConfParam("idxabsmlen", &abslen)) m_db->setAbstractParams(abslen, -1, -1); // Walk the directory tree if (m_walker.walk(topdir, *this) != FsTreeWalker::FtwOk) { LOGERR("FsIndexer::index: error while indexing " << topdir << ": " << m_walker.getReason() << "\n"); return false; } } #ifdef IDX_THREADS if (m_haveInternQ) m_iwqueue.waitIdle(); if (m_haveSplitQ) m_dwqueue.waitIdle(); m_db->waitUpdIdle(); #endif // IDX_THREADS if (m_missing) { string missing; m_missing->getMissingDescription(missing); if (!missing.empty()) { LOGINFO("FsIndexer::index missing helper program(s):\n" << missing << "\n"); } m_config->storeMissingHelperDesc(missing); } LOGINFO("fsindexer index time: " << chron.millis() << " mS\n"); return true; } static bool matchesSkipped(const vector& tdl, FsTreeWalker& walker, const string& path) { // Check path against topdirs and skippedPaths. We go up the // ancestors until we find either a topdirs or a skippedPaths // match. If topdirs is found first-> ok to index (it's possible // and useful to configure a topdir under a skippedPath in the // config). This matches what happens during the normal fs tree // walk. string canonpath = path_canon(path); string mpath = canonpath; string topdir; while (!path_isroot(mpath)) { // we assume root not in skipped paths. for (vector::const_iterator it = tdl.begin(); it != tdl.end(); it++) { // the topdirs members are already canonized. LOGDEB2("matchesSkipped: comparing ancestor [" << mpath << "] to topdir [" << it << "]\n"); if (!mpath.compare(*it)) { topdir = *it; goto goodpath; } } if (walker.inSkippedPaths(mpath, false)) { LOGDEB("FsIndexer::indexFiles: skipping [" << path << "] (skpp)\n"); return true; } string::size_type len = mpath.length(); mpath = path_getfather(mpath); // getfather normally returns a path ending with /, canonic // paths don't (except for '/' itself). if (!path_isroot(mpath) && mpath[mpath.size()-1] == '/') mpath.erase(mpath.size()-1); // should not be necessary, but lets be prudent. If the // path did not shorten, something is seriously amiss // (could be an assert actually) if (mpath.length() >= len) { LOGERR("FsIndexer::indexFile: internal Error: path [" << mpath << "] did not shorten\n"); return true; } } // We get there if neither topdirs nor skippedPaths tests matched LOGDEB("FsIndexer::indexFiles: skipping [" << path << "] (ntd)\n"); return true; goodpath: // Then check all path components up to the topdir against skippedNames mpath = canonpath; while (mpath.length() >= topdir.length() && mpath.length() > 1) { string fn = path_getsimple(mpath); if (walker.inSkippedNames(fn)) { LOGDEB("FsIndexer::indexFiles: skipping [" << path << "] (skpn)\n"); return true; } string::size_type len = mpath.length(); mpath = path_getfather(mpath); // getfather normally returns a path ending with /, getsimple // would then return '' if (!mpath.empty() && mpath[mpath.size()-1] == '/') mpath.erase(mpath.size()-1); // should not be necessary, but lets be prudent. 
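// A minimal sketch of the ancestor walk performed by matchesSkipped()
// above, assuming plain POSIX-style paths and ignoring the
// skippedNames pass: climb towards the root and stop at whichever of
// "topdir" or "skipped" is met first. The real code uses path_canon()
// and path_getfather() plus the walker's pattern matching instead of
// exact set lookups.
#if 0
#include <set>
#include <string>

static bool indexableSketch(std::string path,
                            const std::set<std::string>& topdirs,
                            const std::set<std::string>& skipped)
{
    while (!path.empty() && path != "/") {
        if (topdirs.count(path))
            return true;   // reached a topdir first: ok to index
        if (skipped.count(path))
            return false;  // reached a skippedPaths entry first
        std::string::size_type slash = path.find_last_of('/');
        if (slash == std::string::npos)
            break;
        // Go to the parent directory; keep "/" itself when we get there.
        path = (slash == 0) ? "/" : path.substr(0, slash);
    }
    return false; // never met a topdir: not inside an indexed area
}
#endif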
If the // path did not shorten, something is seriously amiss // (could be an assert actually) if (mpath.length() >= len) return true; } return false; } /** * Index individual files, out of a full tree run. No database purging */ bool FsIndexer::indexFiles(list& files, int flags) { LOGDEB("FsIndexer::indexFiles\n"); m_noretryfailed = (flags & ConfIndexer::IxFNoRetryFailed) != 0; bool ret = false; if (!init()) return false; int abslen; if (m_config->getConfParam("idxabsmlen", &abslen)) m_db->setAbstractParams(abslen, -1, -1); m_purgeCandidates.setRecord(true); // We use an FsTreeWalker just for handling the skipped path/name lists FsTreeWalker walker; walker.setSkippedPaths(m_config->getSkippedPaths()); for (list::iterator it = files.begin(); it != files.end(); ) { LOGDEB2("FsIndexer::indexFiles: [" << it << "]\n"); m_config->setKeyDir(path_getfather(*it)); if (m_havelocalfields) localfieldsfromconf(); bool follow = false; m_config->getConfParam("followLinks", &follow); walker.setOnlyNames(m_config->getOnlyNames()); walker.setSkippedNames(m_config->getSkippedNames()); // Check path against indexed areas and skipped names/paths if (!(flags & ConfIndexer::IxFIgnoreSkip) && matchesSkipped(m_tdl, walker, *it)) { it++; continue; } struct stat stb; int ststat = path_fileprops(*it, &stb, follow); if (ststat != 0) { LOGERR("FsIndexer::indexFiles: (l)stat " << *it << ": " << strerror(errno) << "\n"); it++; continue; } if (!(flags & ConfIndexer::IxFIgnoreSkip) && (S_ISREG(stb.st_mode) || S_ISLNK(stb.st_mode))) { if (!walker.inOnlyNames(path_getsimple(*it))) { it++; continue; } } if (processone(*it, &stb, FsTreeWalker::FtwRegular) != FsTreeWalker::FtwOk) { LOGERR("FsIndexer::indexFiles: processone failed\n"); goto out; } it = files.erase(it); } ret = true; out: #ifdef IDX_THREADS if (m_haveInternQ) m_iwqueue.waitIdle(); if (m_haveSplitQ) m_dwqueue.waitIdle(); m_db->waitUpdIdle(); #endif // IDX_THREADS // Purge possible orphan documents if (ret == true) { LOGDEB("Indexfiles: purging orphans\n"); const vector& purgecandidates = m_purgeCandidates.getCandidates(); for (vector::const_iterator it = purgecandidates.begin(); it != purgecandidates.end(); it++) { LOGDEB("Indexfiles: purging orphans for " << *it << "\n"); m_db->purgeOrphans(*it); } #ifdef IDX_THREADS m_db->waitUpdIdle(); #endif // IDX_THREADS } LOGDEB("FsIndexer::indexFiles: done\n"); return ret; } /** Purge docs for given files out of the database */ bool FsIndexer::purgeFiles(list& files) { LOGDEB("FsIndexer::purgeFiles\n"); bool ret = false; if (!init()) return false; for (list::iterator it = files.begin(); it != files.end(); ) { string udi; make_udi(*it, cstr_null, udi); // rcldb::purgefile returns true if the udi was either not // found or deleted, false only in case of actual error bool existed; if (!m_db->purgeFile(udi, &existed)) { LOGERR("FsIndexer::purgeFiles: Database error\n"); goto out; } // If we actually deleted something, take it off the list if (existed) { it = files.erase(it); } else { it++; } } ret = true; out: #ifdef IDX_THREADS if (m_haveInternQ) m_iwqueue.waitIdle(); if (m_haveSplitQ) m_dwqueue.waitIdle(); m_db->waitUpdIdle(); #endif // IDX_THREADS LOGDEB("FsIndexer::purgeFiles: done\n"); return ret; } // Local fields can be set for fs subtrees in the configuration file void FsIndexer::localfieldsfromconf() { LOGDEB1("FsIndexer::localfieldsfromconf\n"); string sfields; m_config->getConfParam("localfields", sfields); if (!sfields.compare(m_slocalfields)) return; m_slocalfields = sfields; m_localfields.clear(); if 
(sfields.empty()) return; string value; ConfSimple attrs; m_config->valueSplitAttributes(sfields, value, attrs); vector nmlst = attrs.getNames(cstr_null); for (vector::const_iterator it = nmlst.begin(); it != nmlst.end(); it++) { string nm = m_config->fieldCanon(*it); attrs.get(*it, m_localfields[nm]); LOGDEB2("FsIndexer::localfieldsfromconf: [" << nm << "]->[" << m_localfields[nm] << "]\n"); } } void FsIndexer::setlocalfields(const map& fields, Rcl::Doc& doc) { for (map::const_iterator it = fields.begin(); it != fields.end(); it++) { // Being chosen by the user, localfields override values from // the filter. The key is already canonic (see // localfieldsfromconf()) doc.meta[it->first] = it->second; } } void FsIndexer::makesig(const struct stat *stp, string& out) { out = lltodecstr(stp->st_size) + lltodecstr(o_uptodate_test_use_mtime ? stp->st_mtime : stp->st_ctime); } #ifdef IDX_THREADS // Called updworker as seen from here, but the first step (and only in // most meaningful configurations) is doing the word-splitting, which // is why the task is referred as "Split" in the grand scheme of // things. An other stage usually deals with the actual index update. void *FsIndexerDbUpdWorker(void * fsp) { recoll_threadinit(); FsIndexer *fip = (FsIndexer*)fsp; WorkQueue *tqp = &fip->m_dwqueue; DbUpdTask *tsk; for (;;) { size_t qsz; if (!tqp->take(&tsk, &qsz)) { tqp->workerExit(); return (void*)1; } LOGDEB0("FsIndexerDbUpdWorker: task ql " << qsz << "\n"); if (!fip->m_db->addOrUpdate(tsk->udi, tsk->parent_udi, tsk->doc)) { LOGERR("FsIndexerDbUpdWorker: addOrUpdate failed\n"); tqp->workerExit(); return (void*)0; } delete tsk; } } void *FsIndexerInternfileWorker(void * fsp) { recoll_threadinit(); FsIndexer *fip = (FsIndexer*)fsp; WorkQueue *tqp = &fip->m_iwqueue; RclConfig myconf(*(fip->m_stableconfig)); InternfileTask *tsk = 0; for (;;) { if (!tqp->take(&tsk)) { tqp->workerExit(); return (void*)1; } LOGDEB0("FsIndexerInternfileWorker: task fn " << tsk->fn << "\n"); if (fip->processonefile(&myconf, tsk->fn, &tsk->statbuf, tsk->localfields) != FsTreeWalker::FtwOk) { LOGERR("FsIndexerInternfileWorker: processone failed\n"); tqp->workerExit(); return (void*)0; } LOGDEB1("FsIndexerInternfileWorker: done fn " << tsk->fn << "\n"); delete tsk; } } #endif // IDX_THREADS /// This method gets called for every file and directory found by the /// tree walker. /// /// It checks with the db if the file has changed and needs to be /// reindexed. If so, it calls internfile() which will identify the /// file type and call an appropriate handler to convert the document into /// internal format, which we then add to the database. /// /// Accent and majuscule handling are performed by the db module when doing /// the actual indexing work. The Rcl::Doc created by internfile() /// mostly contains pretty raw utf8 data. FsTreeWalker::Status FsIndexer::processone(const std::string &fn, const struct stat *stp, FsTreeWalker::CbFlag flg) { if (m_updater) { #ifdef IDX_THREADS std::unique_lock locker(m_updater->m_mutex); #endif if (!m_updater->update()) { return FsTreeWalker::FtwStop; } } // If we're changing directories, possibly adjust parameters (set // the current directory in configuration object) if (flg == FsTreeWalker::FtwDirEnter || flg == FsTreeWalker::FtwDirReturn) { m_config->setKeyDir(fn); // Set up filter/skipped patterns for this subtree. 
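// A minimal sketch of the up-to-date test built on makesig() above:
// the stored signature and the freshly computed one are compared as
// plain strings, so any change in size or in m/ctime forces a
// reindex. std::to_string stands in for lltodecstr here.
#if 0
#include <string>
#include <sys/stat.h>

static std::string sigSketch(const struct stat& st, bool use_mtime)
{
    return std::to_string((long long)st.st_size) +
        std::to_string((long long)(use_mtime ? st.st_mtime : st.st_ctime));
}

static bool needsReindexSketch(const struct stat& st, bool use_mtime,
                               const std::string& storedsig)
{
    // An empty stored sig means the document is not in the index yet.
    return storedsig.empty() || sigSketch(st, use_mtime) != storedsig;
}
#endif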
m_walker.setOnlyNames(m_config->getOnlyNames()); m_walker.setSkippedNames(m_config->getSkippedNames()); // Adjust local fields from config for this subtree if (m_havelocalfields) localfieldsfromconf(); if (flg == FsTreeWalker::FtwDirReturn) return FsTreeWalker::FtwOk; } #ifdef IDX_THREADS if (m_haveInternQ) { InternfileTask *tp = new InternfileTask(fn, stp, m_localfields); if (m_iwqueue.put(tp)) { return FsTreeWalker::FtwOk; } else { return FsTreeWalker::FtwError; } } #endif return processonefile(m_config, fn, stp, m_localfields); } // Start db update, either by queueing or by direct call bool FsIndexer::launchAddOrUpdate(const string& udi, const string& parent_udi, Rcl::Doc& doc) { #ifdef IDX_THREADS if (m_haveSplitQ) { DbUpdTask *tp = new DbUpdTask(udi, parent_udi, doc); if (!m_dwqueue.put(tp)) { LOGERR("processonefile: wqueue.put failed\n"); return false; } else { return true; } } #endif return m_db->addOrUpdate(udi, parent_udi, doc); } FsTreeWalker::Status FsIndexer::processonefile(RclConfig *config, const std::string &fn, const struct stat *stp, const map& localfields) { //////////////////// // Check db up to date ? Doing this before file type // identification means that, if usesystemfilecommand is switched // from on to off it may happen that some files which are now // without mime type will not be purged from the db, resulting // in possible 'cannot intern file' messages at query time... // This is needed if we are in a separate thread than processone() // (mostly always when multithreading). Needed esp. for // excludedmimetypes, etc. config->setKeyDir(path_getfather(fn)); // File signature and up to date check. The sig is based on // m/ctime and size and the possibly new value is checked against // the stored one. string sig; makesig(stp, sig); string udi; make_udi(fn, cstr_null, udi); unsigned int existingDoc; string oldsig; bool needupdate; if (m_noretryfailed) { needupdate = m_db->needUpdate(udi, sig, &existingDoc, &oldsig); } else { needupdate = m_db->needUpdate(udi, sig, &existingDoc, 0); } // If ctime (which we use for the sig) differs from mtime, then at most // the extended attributes were changed, no need to index content. // This unfortunately leaves open the case where the data was // modified, then the extended attributes, in which case we will // miss the data update. We would have to store both the mtime and // the ctime to avoid this bool xattronly = m_detectxattronly && !m_db->inFullReset() && existingDoc && needupdate && (stp->st_mtime < stp->st_ctime); LOGDEB("processone: needupdate " << needupdate << " noretry " << m_noretryfailed << " existing " << existingDoc << " oldsig [" << oldsig << "]\n"); // If noretryfailed is set, check for a file which previously // failed to index, and avoid re-processing it if (needupdate && m_noretryfailed && existingDoc && !oldsig.empty() && *oldsig.rbegin() == '+') { // Check that the sigs are the same except for the '+'. If the file // actually changed, we always retry (maybe it was fixed) string nold = oldsig.substr(0, oldsig.size()-1); if (!nold.compare(sig)) { LOGDEB("processone: not retrying previously failed file\n"); m_db->setExistingFlags(udi, existingDoc); needupdate = false; } } if (!needupdate) { LOGDEB0("processone: up to date: " << fn << "\n"); if (m_updater) { #ifdef IDX_THREADS std::unique_lock locker(m_updater->m_mutex); #endif // Status bar update, abort request etc. 
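// A minimal sketch of the "don't retry failed files" test used above:
// a document which failed to index was stored with its signature
// suffixed by '+', so it is skipped only while the file itself is
// unchanged; as soon as the file is modified the signatures differ
// and it is retried.
#if 0
#include <string>

static bool skipPreviouslyFailedSketch(const std::string& oldsig,
                                       const std::string& newsig)
{
    if (oldsig.empty() || oldsig.back() != '+')
        return false; // last attempt did not fail
    // Compare without the trailing failure marker.
    return oldsig.compare(0, oldsig.size() - 1, newsig) == 0;
}
#endif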
m_updater->status.fn = fn; ++(m_updater->status.filesdone); if (!m_updater->update()) { return FsTreeWalker::FtwStop; } } return FsTreeWalker::FtwOk; } LOGDEB0("processone: processing: [" << displayableBytes(stp->st_size) << "] " << fn << "\n"); // Note that we used to do the full path here, but I ended up // believing that it made more sense to use only the file name string utf8fn = compute_utf8fn(config, fn, true); // parent_udi is initially the same as udi, it will be used if there // are subdocs. string parent_udi = udi; Rcl::Doc doc; char ascdate[30]; sprintf(ascdate, "%ld", long(stp->st_mtime)); bool hadNullIpath = false; string mimetype; if (!xattronly) { FileInterner interner(fn, stp, config, FileInterner::FIF_none); if (!interner.ok()) { // no indexing whatsoever in this case. This typically means that // indexallfilenames is not set return FsTreeWalker::FtwOk; } mimetype = interner.getMimetype(); interner.setMissingStore(m_missing); FileInterner::Status fis = FileInterner::FIAgain; bool hadNonNullIpath = false; while (fis == FileInterner::FIAgain) { doc.erase(); try { fis = interner.internfile(doc); } catch (CancelExcept) { LOGERR("fsIndexer::processone: interrupted\n"); return FsTreeWalker::FtwStop; } // We index at least the file name even if there was an error. // We'll change the signature to ensure that the indexing will // be retried every time. // If there is an error and the base doc was already seen, // we're done if (fis == FileInterner::FIError && hadNullIpath) { return FsTreeWalker::FtwOk; } // Internal access path for multi-document files. If empty, this is // for the main file. if (doc.ipath.empty()) { hadNullIpath = true; if (hadNonNullIpath) { // Note that only the filters can reliably compute // this. What we do is dependant of the doc order (if // we see the top doc first, we won't set the flag) doc.haschildren = true; } } else { hadNonNullIpath = true; } make_udi(fn, doc.ipath, udi); // Set file name, mod time and url if not done by // filter. We used to set the top-level container file // name for all subdocs without a proper file name, but // this did not make sense (resulted in multiple not // useful hits on the subdocs when searching for the // file name). if (doc.fmtime.empty()) doc.fmtime = ascdate; if (doc.url.empty()) doc.url = path_pathtofileurl(fn); const string *fnp = 0; if (doc.ipath.empty()) { if (!doc.peekmeta(Rcl::Doc::keyfn, &fnp) || fnp->empty()) doc.meta[Rcl::Doc::keyfn] = utf8fn; } // Set container file name for all docs, top or subdoc doc.meta[Rcl::Doc::keytcfn] = utf8fn; doc.pcbytes = lltodecstr(stp->st_size); // Document signature for up to date checks. All subdocs inherit the // file's. doc.sig = sig; // If there was an error, ensure indexing will be // retried. This is for the once missing, later installed // filter case. It can make indexing much slower (if there are // myriads of such files, the ext script is executed for them // and fails every time) if (fis == FileInterner::FIError) { doc.sig += cstr_plus; } // Possibly add fields from local config if (m_havelocalfields) setlocalfields(localfields, doc); // Add document to database. If there is an ipath, add it // as a child of the file document. if (!launchAddOrUpdate(udi, doc.ipath.empty() ? 
cstr_null : parent_udi, doc)) { return FsTreeWalker::FtwError; } // Tell what we are doing and check for interrupt request if (m_updater) { #ifdef IDX_THREADS std::unique_lock locker(m_updater->m_mutex); #endif ++(m_updater->status.docsdone); if (m_updater->status.dbtotdocs < m_updater->status.docsdone) m_updater->status.dbtotdocs = m_updater->status.docsdone; m_updater->status.fn = fn; if (!doc.ipath.empty()) { m_updater->status.fn += "|" + doc.ipath; } else { if (fis == FileInterner::FIError) { ++(m_updater->status.fileerrors); } ++(m_updater->status.filesdone); } if (!m_updater->update()) { return FsTreeWalker::FtwStop; } } } if (fis == FileInterner::FIError) { // In case of error, avoid purging any existing // subdoc. For example on windows, this will avoid erasing // all the emails from a .ost because it is currently // locked by Outlook. LOGDEB("processonefile: internfile error, marking " "subdocs as existing\n"); m_db->udiTreeMarkExisting(parent_udi); } else { // If this doc existed and it's a container, recording for // possible subdoc purge (this will be used only if we don't do a // db-wide purge, e.g. if we're called from indexfiles()). LOGDEB2("processOnefile: existingDoc " << existingDoc << " hadNonNullIpath " << hadNonNullIpath << "\n"); if (existingDoc && hadNonNullIpath) { m_purgeCandidates.record(parent_udi); } } } // If we had no instance with a null ipath, we create an empty // document to stand for the file itself, to be used mainly for up // to date checks. Typically this happens for an mbox file. // // If xattronly is set, ONLY the extattr metadata is valid and will be used // by the following step. if (xattronly || hadNullIpath == false) { LOGDEB("Creating empty doc for file or pure xattr update\n"); Rcl::Doc fileDoc; if (xattronly) { map xfields; reapXAttrs(config, fn, xfields); docFieldsFromXattrs(config, xfields, fileDoc); fileDoc.onlyxattr = true; } else { fileDoc.fmtime = ascdate; fileDoc.meta[Rcl::Doc::keyfn] = fileDoc.meta[Rcl::Doc::keytcfn] = utf8fn; fileDoc.haschildren = true; fileDoc.mimetype = mimetype; fileDoc.url = path_pathtofileurl(fn); if (m_havelocalfields) setlocalfields(localfields, fileDoc); fileDoc.pcbytes = lltodecstr(stp->st_size); } fileDoc.sig = sig; if (!launchAddOrUpdate(parent_udi, cstr_null, fileDoc)) { return FsTreeWalker::FtwError; } } return FsTreeWalker::FtwOk; } recoll-1.26.3/index/subtreelist.h0000644000175000017500000000243413533651561013644 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SUBTREELIST_H_INCLUDED_ #define _SUBTREELIST_H_INCLUDED_ #include #include class RclConfig; // This queries the database with a pure directory-filter query, to // retrieve all the entries below the specified path. This is used by // the real time indexer to purge entries when a top directory is // renamed. 
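// A minimal sketch of how the monitor uses this, for a hypothetical
// deleted directory path: expand the single directory event into one
// purge entry per indexed document below it, then hand the list to
// the regular purge code. (subtreelist() is the function declared
// just below.)
#if 0
#include <list>
#include <string>
#include <vector>

static void expandDirDeleteSketch(RclConfig *config, const std::string& dir,
                                  std::list<std::string>& deleted)
{
    deleted.push_back(dir);
    std::vector<std::string> paths;
    if (subtreelist(config, dir, paths)) {
        deleted.insert(deleted.end(), paths.begin(), paths.end());
    }
}
#endif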
This is really convoluted, I'd like a better way. extern bool subtreelist(RclConfig *config, const string& top, std::vector& paths); #endif /* _SUBTREELIST_H_INCLUDED_ */ recoll-1.26.3/index/fetcher.h0000644000175000017500000000673413533651561012726 00000000000000/* Copyright (C) 2012-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _FETCHER_H_INCLUDED_ #define _FETCHER_H_INCLUDED_ #include "safesysstat.h" #include #include #include "rcldoc.h" class RclConfig; /** * Generic interface to retrieve the data for a document designated by * its index data (udi/ipath/url). This is used to retrieve the data * for previewing. The actual implementation is specific to the kind * of backend (file system, web cache, others?...), and the * implementation may of course share code with the indexing-time * functions from the specific backend. * * This normally gives access the raw document container (either as a * file or as a memory block). The Internfile code will then further * process it to get to the actual document, especially if * de-embedding is involved. * * The DATADIRECT document kind, which holds final extracted data, is only * returned when using an external indexer (only the python demo sample at * this point), in which case the whole extraction is performed by the * external code. */ class DocFetcher { public: /** A RawDoc is the data for a document-holding entity either as a memory block, or pointed to by a file name */ struct RawDoc { enum RawDocKind {RDK_FILENAME, RDK_DATA, RDK_DATADIRECT}; RawDocKind kind; std::string data; // Doc data or file name struct stat st; // Only used if RDK_FILENAME }; /** * Return the data for the requested document, either as a * file-system file or as a memory object (maybe stream too in the * future?) * @param cnf the global config * @param idoc the data gathered from the index for this doc (udi/ipath) * @param out we may return either a file name or the document data. */ virtual bool fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out) = 0; /** * Return the signature for the requested document. This is used for * up-to-date tests performed when not indexing (e.g.: verifying that a * document is not stale before previewing it). * @param cnf the global config * @param idoc the data gathered from the index for this doc (udi/ipath) * @param sig output. 
*/ virtual bool makesig(RclConfig* cnf, const Rcl::Doc& idoc, std::string& sig) = 0; enum Reason{FetchOk, FetchNotExist, FetchNoPerm, FetchOther}; virtual Reason testAccess(RclConfig* cnf, const Rcl::Doc& idoc) { return FetchOther; } virtual ~DocFetcher() {} }; /** Return an appropriate fetcher object given the backend string * identifier inside idoc*/ std::unique_ptr docFetcherMake(RclConfig *config, const Rcl::Doc& idoc); #endif /* _FETCHER_H_INCLUDED_ */ recoll-1.26.3/index/fetcher.cpp0000644000175000017500000000332513533651561013252 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include "log.h" #include "rclconfig.h" #include "fetcher.h" #include "fsfetcher.h" #include "webqueuefetcher.h" #include "exefetcher.h" std::unique_ptr docFetcherMake(RclConfig *config, const Rcl::Doc& idoc) { if (idoc.url.empty()) { LOGERR("docFetcherMakeg:: no url in doc!\n" ); return std::unique_ptr(); } string backend; idoc.getmeta(Rcl::Doc::keybcknd, &backend); if (backend.empty() || !backend.compare("FS")) { return std::unique_ptr(new FSDocFetcher); #ifndef DISABLE_WEB_INDEXER } else if (!backend.compare("BGL")) { return std::unique_ptr(new WQDocFetcher); #endif } else { std::unique_ptr f(exeDocFetcherMake(config, backend)); if (!f) { LOGERR("DocFetcherFactory: unknown backend [" << backend << "]\n"); } return f; } } recoll-1.26.3/index/rclmonprc.cpp0000644000175000017500000004606713533651561013643 00000000000000#include "autoconfig.h" #ifdef RCL_MONITOR /* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** * Recoll real time monitor processing. This file has the code to retrieve * event from the event queue and do the database-side processing. Also the * initialization function. 
*/ #include #include #include "safeunistd.h" #include #include #include #include #include #include #include #include #include using std::list; using std::vector; #include "log.h" #include "rclmon.h" #include "log.h" #include "execmd.h" #include "recollindex.h" #include "pathut.h" #ifndef _WIN32 #include "x11mon.h" #endif #include "subtreelist.h" typedef unsigned long mttcast; // Seconds between auxiliary db (stem, spell) updates: static const int dfltauxinterval = 60 *60; static int auxinterval = dfltauxinterval; // Seconds between indexing queue processing: for merging events to // fast changing files and saving some of the indexing overhead. static const int dfltixinterval = 30; static int ixinterval = dfltixinterval; static RclMonEventQueue rclEQ; // // Delayed events: this is a special feature for fast changing files. // A list of pattern/delays can be specified in the configuration so // that they don't get re-indexed before some timeout is elapsed. Such // events are kept on a separate queue (m_dqueue) with an auxiliary // list in time-to-reindex order, while the normal events are on // m_iqueue. // Queue management performance: on a typical recoll system there will // be only a few entries on the event queues and no significant time // will be needed to manage them. Even on a busy system, the time used // would most probably be negligible compared to the actual processing // of the indexing events. So this is just for reference. Let I be the // number of immediate events and D the number of delayed ones, N // stands for either. // // Periodic timeout polling: the recollindex process periodically (2S) // wakes up to check for exit requests. At this time it also checks // the queues for new entries (should not happen because the producer // would normally wake up the consumer threads), or ready entries // among the delayed ones. At this time it calls the "empty()" // routine. This has constant time behaviour (checks for stl container // emptiness and the top entry of the delays list). // // Adding a new event (pushEvent()): this performs a search for an // existing event with the same path (O(log(N)), then an insert on the // appropriate queue (O(log(N))) and an insert on the times list (O(D)). // // Popping an event: this is constant time as it just looks at the // tops of the normal and delayed queues. // Indexing event container: a map indexed by file path for fast // insertion of duplicate events to the same file typedef map queue_type; // Entries for delayed events are duplicated (as iterators) on an // auxiliary, sorted by time-to-reindex list. We could get rid of // this, the price would be that the RclEQ.empty() call would have to // walk the whole queue instead of only looking at the first delays // entry. typedef list delays_type; // DelayPat stores a path wildcard pattern and a minimum time between // reindexes, it is read from the recoll configuration struct DelayPat { string pattern; int seconds; DelayPat() : seconds(0) {} }; /** Private part of RclEQ: things that we don't wish to exist in the interface * include file. */ class RclEQData { public: int m_opts; // Queue for normal files (unlimited reindex) queue_type m_iqueue; // Queue for delayed reindex files queue_type m_dqueue; // The delays list stores pointers (iterators) to elements on // m_dqueue. The list is kept in time-to-index order. Elements of // m_dqueue which are also in m_delays can only be deleted while // walking m_delays, so we are certain that the m_dqueue iterators // stored in m_delays remain valid. 
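// A minimal sketch of the two structures described above, assuming a
// bare-bones event with just a path and a time-to-reindex: the map
// merges repeated events on the same path, and the list keeps the
// delayed ones in minclock order. std::map iterators are not
// invalidated by inserting or erasing other elements, which is what
// makes storing them in the list safe.
#if 0
#include <ctime>
#include <list>
#include <map>
#include <string>

struct EvSketch {
    std::string path;
    time_t minclock{0};
};

class DelayQueueSketch {
public:
    void push(const EvSketch& ev) {
        auto res = m_byPath.insert({ev.path, ev});
        if (!res.second) {
            // Already queued: keep the earlier reindex time, just
            // refresh the event data.
            time_t saved = res.first->second.minclock;
            res.first->second = ev;
            res.first->second.minclock = saved;
            return;
        }
        // New delayed entry: insert its map iterator in time order.
        auto dit = m_delays.begin();
        while (dit != m_delays.end() &&
               (*dit)->second.minclock <= ev.minclock)
            ++dit;
        m_delays.insert(dit, res.first);
    }
private:
    std::map<std::string, EvSketch> m_byPath;
    std::list<std::map<std::string, EvSketch>::iterator> m_delays;
};
#endif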
delays_type m_delays; // Configured intervals for path patterns, read from the configuration. vector m_delaypats; RclConfig *m_config; bool m_ok; std::mutex m_mutex; std::condition_variable m_cond; RclEQData() : m_config(0), m_ok(true) { } void readDelayPats(int dfltsecs); DelayPat searchDelayPats(const string& path) { for (vector::iterator it = m_delaypats.begin(); it != m_delaypats.end(); it++) { if (fnmatch(it->pattern.c_str(), path.c_str(), 0) == 0) { return *it; } } return DelayPat(); } void delayInsert(const queue_type::iterator &qit); }; void RclEQData::readDelayPats(int dfltsecs) { if (m_config == 0) return; string patstring; if (!m_config->getConfParam("mondelaypatterns", patstring) || patstring.empty()) return; vector dplist; if (!stringToStrings(patstring, dplist)) { LOGERR("rclEQData: bad pattern list: [" << (patstring) << "]\n" ); return; } for (vector::iterator it = dplist.begin(); it != dplist.end(); it++) { string::size_type pos = it->find_last_of(":"); DelayPat dp; dp.pattern = it->substr(0, pos); if (pos != string::npos && pos != it->size()-1) { dp.seconds = atoi(it->substr(pos+1).c_str()); } else { dp.seconds = dfltsecs; } m_delaypats.push_back(dp); LOGDEB2("rclmon::readDelayPats: add [" << (dp.pattern) << "] " << (dp.seconds) << "\n" ); } } // Insert event (as queue iterator) into delays list, in time order, // We DO NOT take care of duplicate qits. erase should be called first // when necessary. void RclEQData::delayInsert(const queue_type::iterator &qit) { MONDEB("RclEQData::delayInsert: minclock " << qit->second.m_minclock << std::endl); for (delays_type::iterator dit = m_delays.begin(); dit != m_delays.end(); dit++) { queue_type::iterator qit1 = *dit; if ((*qit1).second.m_minclock > qit->second.m_minclock) { m_delays.insert(dit, qit); return; } } m_delays.push_back(qit); } RclMonEventQueue::RclMonEventQueue() { m_data = new RclEQData; } RclMonEventQueue::~RclMonEventQueue() { delete m_data; } void RclMonEventQueue::setopts(int opts) { if (m_data) m_data->m_opts = opts; } /** Wait until there is something to process on the queue, or timeout. * returns a queue lock */ std::unique_lock RclMonEventQueue::wait(int seconds, bool *top) { std::unique_lock lock(m_data->m_mutex); MONDEB("RclMonEventQueue::wait, seconds: " << seconds << std::endl); if (!empty()) { MONDEB("RclMonEventQueue:: immediate return\n"); return lock; } int err; if (seconds > 0) { if (top) *top = false; if (m_data->m_cond.wait_for(lock, std::chrono::seconds(seconds)) == std::cv_status::timeout) { *top = true; MONDEB("RclMonEventQueue:: timeout\n"); return lock; } } else { m_data->m_cond.wait(lock); } MONDEB("RclMonEventQueue:: non-timeout return\n"); return lock; } void RclMonEventQueue::setConfig(RclConfig *cnf) { m_data->m_config = cnf; // Don't use ixinterval here, could be 0 ! 
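// For reference, a sketch of what such a configuration entry could
// look like (hypothetical values): each space-separated element is a
// shell wildcard pattern, optionally followed by ':' and a minimum
// number of seconds between reindexes; without the suffix the default
// delay is used.
//
//   mondelaypatterns = *.log:20 *.tmp:60
//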
Base the default // delayed reindex delay on the default ixinterval delay m_data->readDelayPats(10 * dfltixinterval); } RclConfig *RclMonEventQueue::getConfig() { return m_data->m_config; } bool RclMonEventQueue::ok() { if (m_data == 0) { LOGINFO("RclMonEventQueue: not ok: bad state\n" ); return false; } if (stopindexing) { LOGINFO("RclMonEventQueue: not ok: stop request\n" ); return false; } if (!m_data->m_ok) { LOGINFO("RclMonEventQueue: not ok: queue terminated\n" ); return false; } return true; } void RclMonEventQueue::setTerminate() { MONDEB("RclMonEventQueue:: setTerminate\n"); std::unique_lock lock(m_data->m_mutex); m_data->m_ok = false; m_data->m_cond.notify_all(); } // Must be called with the queue locked bool RclMonEventQueue::empty() { if (m_data == 0) { MONDEB("RclMonEventQueue::empty(): true (m_data==0)\n"); return true; } if (!m_data->m_iqueue.empty()) { MONDEB("RclMonEventQueue::empty(): false (m_iqueue not empty)\n"); return true; } if (m_data->m_dqueue.empty()) { MONDEB("RclMonEventQueue::empty(): true (m_Xqueue both empty)\n"); return true; } // Only dqueue has events. Have to check the delays (only the // first, earliest one): queue_type::iterator qit = *(m_data->m_delays.begin()); if (qit->second.m_minclock > time(0)) { MONDEB("RclMonEventQueue::empty(): true (no delay ready " << qit->second.m_minclock << ")\n"); return true; } MONDEB("RclMonEventQueue::empty(): returning false (delay expired)\n"); return false; } // Retrieve indexing event for processing. Returns empty event if // nothing interesting is found // Must be called with the queue locked RclMonEvent RclMonEventQueue::pop() { time_t now = time(0); MONDEB("RclMonEventQueue::pop(), now " << now << std::endl); // Look at the delayed events, get rid of the expired/unactive // ones, possibly return an expired/needidx one. while (!m_data->m_delays.empty()) { delays_type::iterator dit = m_data->m_delays.begin(); queue_type::iterator qit = *dit; MONDEB("RclMonEventQueue::pop(): in delays: evt minclock " << qit->second.m_minclock << std::endl); if (qit->second.m_minclock <= now) { if (qit->second.m_needidx) { RclMonEvent ev = qit->second; qit->second.m_minclock = time(0) + qit->second.m_itvsecs; qit->second.m_needidx = false; m_data->m_delays.erase(dit); m_data->delayInsert(qit); return ev; } else { // Delay elapsed without new update, get rid of event. m_data->m_dqueue.erase(qit); m_data->m_delays.erase(dit); } } else { // This and following events are for later processing, we // are done with the delayed event list. break; } } // Look for non-delayed event if (!m_data->m_iqueue.empty()) { queue_type::iterator qit = m_data->m_iqueue.begin(); RclMonEvent ev = qit->second; m_data->m_iqueue.erase(qit); return ev; } return RclMonEvent(); } // Add new event (update or delete) to the processing queue. // It seems that a newer event is always correct to override any // older. TBVerified ? // Some conf-designated files, supposedly updated at a high rate get // special processing to limit their reindexing rate. bool RclMonEventQueue::pushEvent(const RclMonEvent &ev) { MONDEB("RclMonEventQueue::pushEvent for " << ev.m_path << std::endl); std::unique_lock lock(m_data->m_mutex); DelayPat pat = m_data->searchDelayPats(ev.m_path); if (pat.seconds != 0) { // Using delayed reindex queue. 
Need to take care of minclock and also // insert into the in-minclock-order list queue_type::iterator qit = m_data->m_dqueue.find(ev.m_path); if (qit == m_data->m_dqueue.end()) { // Not there yet, insert new qit = m_data->m_dqueue.insert(queue_type::value_type(ev.m_path, ev)).first; // Set the time to next index to "now" as it has not been // indexed recently (otherwise it would still be in the // queue), and add the iterator to the delay queue. qit->second.m_minclock = time(0); qit->second.m_needidx = true; qit->second.m_itvsecs = pat.seconds; m_data->delayInsert(qit); } else { // Already in queue. Possibly update type but save minclock // (so no need to touch m_delays). Flag as needing indexing time_t saved_clock = qit->second.m_minclock; qit->second = ev; qit->second.m_minclock = saved_clock; qit->second.m_needidx = true; } } else { // Immediate event: just insert it, erasing any previously // existing entry m_data->m_iqueue[ev.m_path] = ev; } m_data->m_cond.notify_all(); return true; } static bool checkfileanddelete(const string& fname) { bool ret; ret = path_exists(fname); unlink(fname.c_str()); return ret; } // It's possible to override the normal indexing delay by creating a // file in the config directory (which we then remove). And yes there // is definitely a race condition (we can suppress the delay and file // before the target doc is queued), and we can't be sure that the // delay suppression will be used for the doc the user intended it // for. But this is used for non-critical function and the race // condition should happen reasonably seldom. // We check for the request file in all possible user config dirs // (usually, there is only the main one) static bool expeditedIndexingRequested(RclConfig *conf) { static vector rqfiles; if (rqfiles.empty()) { rqfiles.push_back(path_cat(conf->getConfDir(), "rclmonixnow")); const char *cp; if ((cp = getenv("RECOLL_CONFTOP"))) { rqfiles.push_back(path_cat(cp, "rclmonixnow")); } if ((cp = getenv("RECOLL_CONFMID"))) { rqfiles.push_back(path_cat(cp, "rclmonixnow")); } } bool found = false; for (vector::const_iterator it = rqfiles.begin(); it != rqfiles.end(); it++) { found = found || checkfileanddelete(*it); } return found; } bool startMonitor(RclConfig *conf, int opts) { if (!conf->getConfParam("monauxinterval", &auxinterval)) auxinterval = dfltauxinterval; if (!conf->getConfParam("monixinterval", &ixinterval)) ixinterval = dfltixinterval; rclEQ.setConfig(conf); rclEQ.setopts(opts); std::thread treceive(rclMonRcvRun, &rclEQ); treceive.detach(); LOGDEB("start_monitoring: entering main loop\n" ); bool timedout; time_t lastauxtime = time(0); time_t lastixtime = lastauxtime; time_t lastmovetime = 0; bool didsomething = false; list modified; list deleted; while (true) { time_t now = time(0); if (now - lastmovetime > ixinterval) { lastmovetime = now; runWebFilesMoverScript(conf); } { // Wait for event or timeout. // Set a relatively short timeout for better monitoring of // exit requests. std::unique_lock lock = rclEQ.wait(2, &timedout); // x11IsAlive() can't be called from ok() because both // threads call it and Xlib is not multithreaded. 
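// A minimal sketch of the flag-file convention used by
// expeditedIndexingRequested() above, with access()/unlink() standing
// in for path_exists(): the mere existence of the file is the
// request, and it is removed as soon as it has been seen.
#if 0
#include <unistd.h>
#include <string>
#include <vector>

static bool flagFileRequestSketch(const std::vector<std::string>& candidates)
{
    bool found = false;
    for (const auto& fn : candidates) {
        if (access(fn.c_str(), F_OK) == 0)
            found = true;
        unlink(fn.c_str()); // harmless if the file is not there
    }
    return found;
}
#endif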
#ifndef _WIN32 bool x11dead = !(opts & RCLMON_NOX11) && !x11IsAlive(); if (x11dead) LOGDEB("RclMonprc: x11 is dead\n" ); #else bool x11dead = false; #endif if (!rclEQ.ok() || x11dead) { break; } // Process event queue for (;;) { // Retrieve event RclMonEvent ev = rclEQ.pop(); if (ev.m_path.empty()) break; switch (ev.evtype()) { case RclMonEvent::RCLEVT_MODIFY: case RclMonEvent::RCLEVT_DIRCREATE: LOGDEB0("Monitor: Modify/Check on " << ev.m_path << "\n"); modified.push_back(ev.m_path); break; case RclMonEvent::RCLEVT_DELETE: LOGDEB0("Monitor: Delete on " << (ev.m_path) << "\n" ); // If this is for a directory (which the caller should // tell us because he knows), we should purge the db // of all the subtree, because on a directory rename, // inotify will only generate one event for the // renamed top, not the subentries. This is relatively // complicated to do though, and we currently do not // do it, and just wait for a restart to do a full run and // purge. deleted.push_back(ev.m_path); if (ev.evflags() & RclMonEvent::RCLEVT_ISDIR) { vector paths; if (subtreelist(conf, ev.m_path, paths)) { deleted.insert(deleted.end(), paths.begin(), paths.end()); } } break; default: LOGDEB("Monitor: got Other on [" << (ev.m_path) << "]\n" ); } } } now = time(0); // Process. We don't do this every time but let the lists accumulate // a little, this saves processing. Start at once if list is big. if (expeditedIndexingRequested(conf) || (now - lastixtime > ixinterval) || (deleted.size() + modified.size() > 20)) { lastixtime = now; // Used to do the modified list first, but it does seem // smarter to make room first... if (!deleted.empty()) { deleted.sort(); deleted.unique(); if (!purgefiles(conf, deleted)) break; deleted.clear(); didsomething = true; } if (!modified.empty()) { modified.sort(); modified.unique(); if (!indexfiles(conf, modified)) break; modified.clear(); didsomething = true; } } // Recreate the auxiliary dbs every hour at most. now = time(0); if (didsomething && now - lastauxtime > auxinterval) { lastauxtime = now; didsomething = false; if (!createAuxDbs(conf)) { // We used to bail out on error here. Not anymore, // because this is most of the time due to a failure // of aspell dictionary generation, which is not // critical. } } // Check for a config change if (!(opts & RCLMON_NOCONFCHECK) && o_reexec && conf->sourceChanged()) { LOGDEB("Rclmonprc: config changed, reexecuting myself\n" ); // We never want to have a -n option after a config // change. -n was added by the reexec after the initial // pass even if it was not given on the command line o_reexec->removeArg("-n"); o_reexec->reexec(); } } LOGDEB("Rclmonprc: calling queue setTerminate\n" ); rclEQ.setTerminate(); // We used to wait for the receiver thread here before returning, // but this is not useful and may waste time / risk problems // during our limited time window for exiting. To be reviewed if // we ever need several monitor invocations in the same process // (can't foresee any reason why we'd want to do this). LOGDEB("Monitor: returning\n" ); return true; } #endif // RCL_MONITOR recoll-1.26.3/index/recollindex.cpp0000644000175000017500000006636713566450615014164 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #ifndef _WIN32 #include #include #else #include #endif #include "safesysstat.h" #include "safefcntl.h" #include "safeunistd.h" #include #include #include #include using namespace std; #include "log.h" #include "rclinit.h" #include "indexer.h" #include "smallut.h" #include "chrono.h" #include "pathut.h" #include "rclutil.h" #include "rclmon.h" #include "x11mon.h" #include "cancelcheck.h" #include "rcldb.h" #ifndef DISABLE_WEB_INDEXER #include "webqueue.h" #endif #include "recollindex.h" #include "fsindexer.h" #ifndef _WIN32 #include "rclionice.h" #endif #include "execmd.h" #include "checkretryfailed.h" #include "idxstatus.h" // Command line options static int op_flags; #define OPT_MOINS 0x1 #define OPT_C 0x1 #define OPT_D 0x2 #define OPT_E 0x4 #define OPT_K 0x8 #define OPT_P 0x10 #define OPT_R 0x20 #define OPT_S 0x40 #define OPT_Z 0x80 #define OPT_b 0x100 #define OPT_c 0x200 #define OPT_e 0x400 #define OPT_f 0x800 #define OPT_h 0x1000 #define OPT_i 0x2000 #define OPT_k 0x4000 #define OPT_l 0x8000 #define OPT_m 0x10000 #define OPT_n 0x20000 #define OPT_p 0x40000 #define OPT_r 0x80000 #define OPT_s 0x100000 #define OPT_w 0x200000 #define OPT_x 0x400000 #define OPT_z 0x800000 ReExec *o_reexec; // Globals for atexit cleanup static ConfIndexer *confindexer; // This is set as an atexit routine, static void cleanup() { deleteZ(confindexer); recoll_exitready(); } // Receive status updates from the ongoing indexing operation // Also check for an interrupt request and return the info to caller which // should subsequently orderly terminate what it is doing. class MyUpdater : public DbIxStatusUpdater { public: MyUpdater(const RclConfig *config) : m_file(config->getIdxStatusFile().c_str()), m_stopfilename(config->getIdxStopFile()), m_prevphase(DbIxStatus::DBIXS_NONE) { // The total number of files included in the index is actually // difficult to compute from the index itself. For display // purposes, we save it in the status file from indexing to // indexing (mostly...) string stf; if (m_file.get("totfiles", stf)) { status.totfiles = atoi(stf.c_str()); } } virtual bool update() { // Update the status file. Avoid doing it too often. 
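// A minimal sketch of the throttling rule used by the status updater
// just below, with std::chrono standing in for the Chrono helper: the
// status file is rewritten when the phase changes, when indexing is
// done, or at most every 300 milliseconds otherwise.
#if 0
#include <chrono>

class StatusThrottleSketch {
public:
    bool shouldWrite(int phase, bool done) {
        auto now = std::chrono::steady_clock::now();
        bool elapsed = (now - m_last) > std::chrono::milliseconds(300);
        if (done || phase != m_prevphase || elapsed) {
            m_prevphase = phase;
            m_last = now;
            return true;
        }
        return false;
    }
private:
    int m_prevphase{-1};
    std::chrono::steady_clock::time_point m_last{};
};
#endif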
Always do // it at the end (status DONE) if (status.phase == DbIxStatus::DBIXS_DONE || status.phase != m_prevphase || m_chron.millis() > 300) { if (status.totfiles < status.filesdone || status.phase == DbIxStatus::DBIXS_DONE) { status.totfiles = status.filesdone; } m_prevphase = status.phase; m_chron.restart(); m_file.holdWrites(true); m_file.set("phase", int(status.phase)); m_file.set("docsdone", status.docsdone); m_file.set("filesdone", status.filesdone); m_file.set("fileerrors", status.fileerrors); m_file.set("dbtotdocs", status.dbtotdocs); m_file.set("totfiles", status.totfiles); m_file.set("fn", status.fn); m_file.set("hasmonitor", status.hasmonitor); m_file.holdWrites(false); } if (path_exists(m_stopfilename)) { LOGINF("recollindex: asking indexer to stop because " << m_stopfilename << " exists\n"); unlink(m_stopfilename.c_str()); stopindexing = true; } if (stopindexing) { return false; } #ifndef DISABLE_X11MON // If we are in the monitor, we also need to check X11 status // during the initial indexing pass (else the user could log // out and the indexing would go on, not good (ie: if the user // logs in again, the new recollindex will fail). if ((op_flags & OPT_m) && !(op_flags & OPT_x) && !x11IsAlive()) { LOGDEB("X11 session went away during initial indexing pass\n"); stopindexing = true; return false; } #endif return true; } private: ConfSimple m_file; string m_stopfilename; Chrono m_chron; DbIxStatus::Phase m_prevphase; }; static MyUpdater *updater; // This holds the state of topdirs (exist+nonempty) on indexing // startup. If it changes after a resume from sleep we interrupt the // indexing (the assumption being that a volume has been mounted or // unmounted while we slept). This is not foolproof as the user can // always pull out a removable volume while we work. It just avoids a // harmful purge in a common case. static vector o_topdirs; static vector o_topdirs_emptiness; bool topdirs_state(vector tdlstate) { tdlstate.clear(); for (const auto& dir : o_topdirs) { tdlstate.push_back(path_empty(dir)); } return true; } static void sigcleanup(int sig) { if (sig == RCLSIG_RESUME) { vector emptiness; topdirs_state(emptiness); if (emptiness != o_topdirs_emptiness) { string msg = "Recollindex: resume: topdirs state changed while " "we were sleeping\n"; cerr << msg; LOGDEB(msg); CancelCheck::instance().setCancel(); stopindexing = 1; } } else { cerr << "Recollindex: got signal " << sig << ", registering stop request\n"; LOGDEB("Got signal " << sig << ", registering stop request\n"); CancelCheck::instance().setCancel(); stopindexing = 1; } } static void makeIndexerOrExit(RclConfig *config, bool inPlaceReset) { if (!confindexer) { confindexer = new ConfIndexer(config, updater); if (inPlaceReset) confindexer->setInPlaceReset(); } if (!confindexer) { cerr << "Cannot create indexer" << endl; exit(1); } } void rclIxIonice(const RclConfig *config) { #ifndef _WIN32 string clss, classdata; if (!config->getConfParam("monioniceclass", clss) || clss.empty()) clss = "3"; config->getConfParam("monioniceclassdata", classdata); rclionice(clss, classdata); #endif } static void setMyPriority(const RclConfig *config) { #ifndef _WIN32 if (setpriority(PRIO_PROCESS, 0, 20) != 0) { LOGINFO("recollindex: can't setpriority(), errno " << errno << "\n"); } // Try to ionice. 
This does not work on all platforms rclIxIonice(config); #endif } class MakeListWalkerCB : public FsTreeWalkerCB { public: MakeListWalkerCB(list& files, const vector& selpats) : m_files(files), m_pats(selpats) { } virtual FsTreeWalker::Status processone(const string& fn, const struct stat *, FsTreeWalker::CbFlag flg) { if (flg== FsTreeWalker::FtwDirEnter || flg == FsTreeWalker::FtwRegular){ if (m_pats.empty()) { cerr << "Selecting " << fn << endl; m_files.push_back(fn); } else { for (vector::const_iterator it = m_pats.begin(); it != m_pats.end(); it++) { if (fnmatch(it->c_str(), fn.c_str(), 0) == 0) { m_files.push_back(fn); break; } } } } return FsTreeWalker::FtwOk; } list& m_files; const vector& m_pats; }; // Build a list of things to index, then call purgefiles and/or // indexfiles. This is basically the same as find xxx | recollindex // -i [-e] without the find (so, simpler but less powerfull) bool recursive_index(RclConfig *config, const string& top, const vector& selpats) { list files; MakeListWalkerCB cb(files, selpats); FsTreeWalker walker; walker.walk(top, cb); bool ret = false; if (op_flags & OPT_e) { if (!(ret = purgefiles(config, files))) { return ret; } } if (!(op_flags & OPT_e) || ((op_flags & OPT_e) &&(op_flags & OPT_i))) { ret = indexfiles(config, files); } return ret; } // Index a list of files. We just call the top indexer method, which // will sort out what belongs to the indexed trees and call the // appropriate indexers. // // This is called either from the command line or from the monitor. In // this case we're called repeatedly in the same process, and the // confindexer is only created once by makeIndexerOrExit (but the db closed and // flushed every time) bool indexfiles(RclConfig *config, list &filenames) { if (filenames.empty()) return true; makeIndexerOrExit(config, (op_flags & OPT_Z) != 0); // The default is to retry failed files int indexerFlags = ConfIndexer::IxFNone; if (op_flags & OPT_K) indexerFlags |= ConfIndexer::IxFNoRetryFailed; if (op_flags & OPT_f) indexerFlags |= ConfIndexer::IxFIgnoreSkip; if (op_flags & OPT_P) { indexerFlags |= ConfIndexer::IxFDoPurge; } return confindexer->indexFiles(filenames, indexerFlags); } // Delete a list of files. Same comments about call contexts as indexfiles. bool purgefiles(RclConfig *config, list &filenames) { if (filenames.empty()) return true; makeIndexerOrExit(config, (op_flags & OPT_Z) != 0); return confindexer->purgeFiles(filenames, ConfIndexer::IxFNone); } // Create stemming and spelling databases bool createAuxDbs(RclConfig *config) { makeIndexerOrExit(config, false); if (!confindexer->createStemmingDatabases()) return false; if (!confindexer->createAspellDict()) return false; return true; } // Create additional stem database static bool createstemdb(RclConfig *config, const string &lang) { makeIndexerOrExit(config, false); return confindexer->createStemDb(lang); } // Check that topdir entries are valid (successfull tilde exp + abs // path) or fail. // In addition, topdirs, skippedPaths, daemSkippedPaths entries should // match existing files or directories. 
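// A minimal sketch of what the walker callback above accumulates,
// assuming C++17 std::filesystem instead of FsTreeWalker and ignoring
// the skipped lists and directory entries: collect every regular file
// under 'top' whose full path matches one of the -p wildcard patterns
// (or everything when no pattern was given).
#if 0
#include <filesystem>
#include <fnmatch.h>
#include <list>
#include <string>
#include <vector>

static void collectFilesSketch(const std::string& top,
                               const std::vector<std::string>& pats,
                               std::list<std::string>& files)
{
    namespace fs = std::filesystem;
    for (const auto& ent : fs::recursive_directory_iterator(top)) {
        if (!ent.is_regular_file())
            continue;
        const std::string fn = ent.path().string();
        if (pats.empty()) {
            files.push_back(fn);
            continue;
        }
        for (const auto& pat : pats) {
            if (fnmatch(pat.c_str(), fn.c_str(), 0) == 0) {
                files.push_back(fn);
                break;
            }
        }
    }
}
#endif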
Warn if they don't static bool checktopdirs(RclConfig *config, vector& nonexist) { if (!config->getConfParam("topdirs", &o_topdirs)) { cerr << "No 'topdirs' parameter in configuration\n"; LOGERR("recollindex:No 'topdirs' parameter in configuration\n"); return false; } // If a restricted list for real-time monitoring exists check that // all entries are descendants from a topdir vector mondirs; if (config->getConfParam("monitordirs", &mondirs)) { for (const auto& sub : mondirs) { bool found{false}; for (const auto& top : o_topdirs) { if (path_isdesc(top, sub)) { found = true; break; } } if (!found) { string s("Real time monitoring directory entry " + sub + " is not part of the topdirs tree\n"); cerr << s; LOGERR(s); return false; } } } for (auto& dir : o_topdirs) { dir = path_tildexpand(dir); if (!dir.size() || !path_isabsolute(dir)) { if (dir[0] == '~') { cerr << "Tilde expansion failed: " << dir << endl; LOGERR("recollindex: tilde expansion failed: " << dir << "\n"); } else { cerr << "Not an absolute path: " << dir << endl; LOGERR("recollindex: not an absolute path: " << dir << "\n"); } return false; } if (!path_exists(dir)) { nonexist.push_back(dir); } } topdirs_state(o_topdirs_emptiness); // We'd like to check skippedPaths too, but these are wildcard // exprs, so reasonably can't return true; } string thisprog; static const char usage [] = "\n" "recollindex [-h] \n" " Print help\n" "recollindex [-z|-Z] [-k]\n" " Index everything according to configuration file\n" " -z : reset database before starting indexing\n" " -Z : in place reset: consider all documents as changed. Can also\n" " be combined with -i or -r but not -m\n" " -k : retry files on which we previously failed\n" #ifdef RCL_MONITOR "recollindex -m [-w ] -x [-D] [-C]\n" " Perform real time indexing. Don't become a daemon if -D is set.\n" " -w sets number of seconds to wait before starting.\n" " -C disables monitoring config for changes/reexecuting.\n" " -n disables initial incremental indexing (!and purge!).\n" #ifndef DISABLE_X11MON " -x disables exit on end of x11 session\n" #endif /* DISABLE_X11MON */ #endif /* RCL_MONITOR */ "recollindex -e []\n" " Purge data for individual files. No stem database updates.\n" " Reads paths on stdin if none is given as argument.\n" "recollindex -i [-f] [-Z] []\n" " Index individual files. No database purge or stem database updates\n" " Will read paths on stdin if none is given as argument\n" " -f : ignore skippedPaths and skippedNames while doing this\n" "recollindex -r [-K] [-f] [-Z] [-p pattern] \n" " Recursive partial reindex. \n" " -p : filter file names, multiple instances are allowed, e.g.: \n" " -p *.odt -p *.pdf\n" " -K : skip previously failed files (they are retried by default)\n" "recollindex -l\n" " List available stemming languages\n" "recollindex -s \n" " Build stem database for additional language \n" "recollindex -E\n" " Check configuration file for topdirs and other paths existence\n" #ifdef FUTURE_IMPROVEMENT "recollindex -W\n" " Process the Web queue\n" #endif #ifdef RCL_USE_ASPELL "recollindex -S\n" " Build aspell spelling dictionary.>\n" #endif "Common options:\n" " -c : specify config directory, overriding $RECOLL_CONFDIR\n" ; static void Usage(FILE *where = stderr) { FILE *fp = (op_flags & OPT_h) ? 
stdout : stderr; fprintf(fp, "%s: Usage: %s", path_getsimple(thisprog).c_str(), usage); fprintf(fp, "Recoll version: %s\n", Rcl::version_string().c_str()); exit((op_flags & OPT_h)==0); } static RclConfig *config; static void lockorexit(Pidfile *pidfile, RclConfig *config) { pid_t pid; if ((pid = pidfile->open()) != 0) { if (pid > 0) { cerr << "Can't become exclusive indexer: " << pidfile->getreason() << ". Return (other pid?): " << pid << endl; #ifndef _WIN32 // Have a look at the status file. If the other process is // a monitor we can tell it to start an incremental pass // by touching the configuration file DbIxStatus status; readIdxStatus(config, status); if (status.hasmonitor) { string cmd("touch "); string path = path_cat(config->getConfDir(), "recoll.conf"); cmd += path; int status; if ((status = system(cmd.c_str()))) { cerr << cmd << " failed with status " << status << endl; } else { cerr << "Monitoring indexer process was notified of " "indexing request\n"; } } #endif } else { cerr << "Can't become exclusive indexer: " << pidfile->getreason() << endl; } exit(1); } if (pidfile->write_pid() != 0) { cerr << "Can't become exclusive indexer: " << pidfile->getreason() << endl; exit(1); } } static string reasonsfile; extern ConfSimple idxreasons; static void flushIdxReasons() { if (reasonsfile.empty()) return; if (reasonsfile == "stdout") { idxreasons.write(cout); } else if (reasonsfile == "stderr") { idxreasons.write(std::cerr); } else { ofstream out; try { out.open(reasonsfile, ofstream::out|ofstream::trunc); idxreasons.write(out); } catch (...) { cerr << "Could not write reasons file " << reasonsfile << endl; idxreasons.write(cerr); } } } int main(int argc, char **argv) { string a_config; int sleepsecs = 60; vector selpatterns; // The reexec struct is used by the daemon to shed memory after // the initial indexing pass and to restart when the configuration // changes #ifndef _WIN32 o_reexec = new ReExec; o_reexec->init(argc, argv); #endif thisprog = path_absolute(argv[0]); argc--; argv++; while (argc > 0 && **argv == '-') { (*argv)++; if (!(**argv)) Usage(); while (**argv) switch (*(*argv)++) { case 'b': op_flags |= OPT_b; break; case 'c': op_flags |= OPT_c; if (argc < 2) Usage(); a_config = *(++argv); argc--; goto b1; #ifdef RCL_MONITOR case 'C': op_flags |= OPT_C; break; case 'D': op_flags |= OPT_D; break; #endif case 'E': op_flags |= OPT_E; break; case 'e': op_flags |= OPT_e; break; case 'f': op_flags |= OPT_f; break; case 'h': op_flags |= OPT_h; break; case 'i': op_flags |= OPT_i; break; case 'k': op_flags |= OPT_k; break; case 'K': op_flags |= OPT_K; break; case 'l': op_flags |= OPT_l; break; case 'm': op_flags |= OPT_m; break; case 'n': op_flags |= OPT_n; break; case 'P': op_flags |= OPT_P; break; case 'p': op_flags |= OPT_p; if (argc < 2) Usage(); selpatterns.push_back(*(++argv)); argc--; goto b1; case 'r': op_flags |= OPT_r; break; case 'R': op_flags |= OPT_R; if (argc < 2) Usage(); reasonsfile = *(++argv); argc--; goto b1; case 's': op_flags |= OPT_s; break; #ifdef RCL_USE_ASPELL case 'S': op_flags |= OPT_S; break; #endif case 'w': op_flags |= OPT_w; if (argc < 2) Usage(); if ((sscanf(*(++argv), "%d", &sleepsecs)) != 1) Usage(); argc--; goto b1; case 'x': op_flags |= OPT_x; break; case 'Z': op_flags |= OPT_Z; break; case 'z': op_flags |= OPT_z; break; default: Usage(); break; } b1: argc--; argv++; } if (op_flags & OPT_h) Usage(stdout); #ifndef RCL_MONITOR if (op_flags & (OPT_m | OPT_w|OPT_x)) { cerr << "Sorry, -m not available: real-time monitoring was not " "configured in 
this build\n"; exit(1); } #endif if ((op_flags & OPT_z) && (op_flags & (OPT_i|OPT_e|OPT_r))) Usage(); if ((op_flags & OPT_Z) && (op_flags & (OPT_m))) Usage(); if ((op_flags & OPT_E) && (op_flags & ~(OPT_E|OPT_c))) { Usage(); } string reason; int flags = RCLINIT_IDX; if ((op_flags & OPT_m) && !(op_flags&OPT_D)) { flags |= RCLINIT_DAEMON; } config = recollinit(flags, cleanup, sigcleanup, reason, &a_config); if (config == 0 || !config->ok()) { addIdxReason("init", reason); flushIdxReasons(); cerr << "Configuration problem: " << reason << endl; exit(1); } #ifndef _WIN32 o_reexec->atexit(cleanup); #endif vector nonexist; if (!checktopdirs(config, nonexist)) { addIdxReason("init", "topdirs not set"); flushIdxReasons(); exit(1); } if (nonexist.size()) { ostream& out = (op_flags & OPT_E) ? cout : cerr; if (!(op_flags & OPT_E)) { cerr << "Warning: invalid paths in topdirs, skippedPaths or " "daemSkippedPaths:\n"; } for (vector::const_iterator it = nonexist.begin(); it != nonexist.end(); it++) { out << *it << endl; } } if ((op_flags & OPT_E)) { exit(0); } string rundir; config->getConfParam("idxrundir", rundir); if (!rundir.compare("tmp")) { LOGINFO("recollindex: changing current directory to [" << tmplocation() << "]\n"); if (chdir(tmplocation().c_str()) < 0) { LOGERR("chdir(" << tmplocation() << ") failed, errno " << errno << "\n"); } } else if (!rundir.empty()) { LOGINFO("recollindex: changing current directory to [" << rundir << "]\n"); if (chdir(rundir.c_str()) < 0) { LOGERR("chdir(" << rundir << ") failed, errno " << errno << "\n"); } } bool rezero((op_flags & OPT_z) != 0); bool inPlaceReset((op_flags & OPT_Z) != 0); // The default is not to retry previously failed files by default. // If -k is set, we do. // If the checker script says so, we do too, except if -K is set. int indexerFlags = ConfIndexer::IxFNoRetryFailed; if (op_flags & OPT_k) { indexerFlags &= ~ConfIndexer::IxFNoRetryFailed; } else { if (op_flags & OPT_K) { indexerFlags |= ConfIndexer::IxFNoRetryFailed; } else { if (checkRetryFailed(config, false)) { indexerFlags &= ~ConfIndexer::IxFNoRetryFailed; } else { indexerFlags |= ConfIndexer::IxFNoRetryFailed; } } } if (indexerFlags & ConfIndexer::IxFNoRetryFailed) { LOGDEB("recollindex: files in error will not be retried\n"); } else { LOGDEB("recollindex: files in error will be retried\n"); } Pidfile pidfile(config->getPidfile()); updater = new MyUpdater(config); // Log something at LOGINFO to reset the trace file. Else at level // 3 it's not even truncated if all docs are up to date. LOGINFO("recollindex: starting up\n"); setMyPriority(config); if (op_flags & OPT_r) { if (argc != 1) Usage(); string top = *argv++; argc--; bool status = recursive_index(config, top, selpatterns); if (confindexer && !confindexer->getReason().empty()) { addIdxReason("indexer", confindexer->getReason()); cerr << confindexer->getReason() << endl; } flushIdxReasons(); exit(status ? 0 : 1); } else if (op_flags & (OPT_i|OPT_e)) { lockorexit(&pidfile, config); list filenames; if (argc == 0) { // Read from stdin char line[1024]; while (fgets(line, 1023, stdin)) { string sl(line); trimstring(sl, "\n\r"); filenames.push_back(sl); } } else { while (argc--) { filenames.push_back(*argv++); } } // Note that -e and -i may be both set. In this case we first erase, // then index. This is a slightly different from -Z -i because we // warranty that all subdocs are purged. 
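    // Illustrative sketch of the combined case (hypothetical path):
    //   recollindex -e -i /home/me/docs/report.odt
    // first purges the document and all of its subdocuments (attachments,
    // archive members...) from the index, then reindexes the file from
    // scratch.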
bool status = true; if (op_flags & OPT_e) { status = purgefiles(config, filenames); } if (status && (op_flags & OPT_i)) { status = indexfiles(config, filenames); } if (confindexer && !confindexer->getReason().empty()) { addIdxReason("indexer", confindexer->getReason()); cerr << confindexer->getReason() << endl; } flushIdxReasons(); exit(status ? 0 : 1); } else if (op_flags & OPT_l) { if (argc != 0) Usage(); vector stemmers = ConfIndexer::getStemmerNames(); for (vector::const_iterator it = stemmers.begin(); it != stemmers.end(); it++) { cout << *it << endl; } exit(0); } else if (op_flags & OPT_s) { if (argc != 1) Usage(); string lang = *argv++; argc--; exit(!createstemdb(config, lang)); #ifdef RCL_USE_ASPELL } else if (op_flags & OPT_S) { makeIndexerOrExit(config, false); exit(!confindexer->createAspellDict()); #endif // ASPELL #ifdef RCL_MONITOR } else if (op_flags & OPT_m) { if (argc != 0) Usage(); lockorexit(&pidfile, config); if (updater) { updater->status.hasmonitor = true; } if (!(op_flags&OPT_D)) { LOGDEB("recollindex: daemonizing\n"); #ifndef _WIN32 if (daemon(0,0) != 0) { addIdxReason("monitor", "daemon() failed"); cerr << "daemon() failed, errno " << errno << endl; LOGERR("daemon() failed, errno " << errno << "\n"); flushIdxReasons(); exit(1); } #endif } // Need to rewrite pid, it changed pidfile.write_pid(); // Not too sure if I have to redo the nice thing after daemon(), // can't hurt anyway (easier than testing on all platforms...) setMyPriority(config); if (sleepsecs > 0) { LOGDEB("recollindex: sleeping " << sleepsecs << "\n"); for (int i = 0; i < sleepsecs; i++) { sleep(1); // Check that x11 did not go away while we were sleeping. if (!(op_flags & OPT_x) && !x11IsAlive()) { LOGDEB("X11 session went away during initial sleep period\n"); exit(0); } } } if (!(op_flags & OPT_n)) { makeIndexerOrExit(config, inPlaceReset); LOGDEB("Recollindex: initial indexing pass before monitoring\n"); if (!confindexer->index(rezero, ConfIndexer::IxTAll, indexerFlags) || stopindexing) { LOGERR("recollindex, initial indexing pass failed, " "not going into monitor mode\n"); flushIdxReasons(); exit(1); } else { // Record success of indexing pass with failed files retries. if (!(indexerFlags & ConfIndexer::IxFNoRetryFailed)) { checkRetryFailed(config, true); } } deleteZ(confindexer); #ifndef _WIN32 o_reexec->insertArgs(vector(1, "-n")); LOGINFO("recollindex: reexecuting with -n after initial full " "pass\n"); // Note that -n will be inside the reexec when we come // back, but the monitor will explicitly strip it before // starting a config change exec to ensure that we do a // purging pass in this latter case (full restart). o_reexec->reexec(); #endif } if (updater) { updater->status.phase = DbIxStatus::DBIXS_MONITOR; updater->status.fn.clear(); updater->update(); } int opts = RCLMON_NONE; if (op_flags & OPT_D) opts |= RCLMON_NOFORK; if (op_flags & OPT_C) opts |= RCLMON_NOCONFCHECK; if (op_flags & OPT_x) opts |= RCLMON_NOX11; bool monret = startMonitor(config, opts); MONDEB(("Monitor returned %d, exiting\n", monret)); exit(monret == false); #endif // MONITOR } else if (op_flags & OPT_b) { cerr << "Not yet" << endl; return 1; } else { lockorexit(&pidfile, config); makeIndexerOrExit(config, inPlaceReset); bool status = confindexer->index(rezero, ConfIndexer::IxTAll, indexerFlags); // Record success of indexing pass with failed files retries. 
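    // checkRetryFailed(config, true) records the current system state (the
    // default helper script looks at the common bin directories), so that a
    // later run only turns retries back on after a relevant change.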
if (status && !(indexerFlags & ConfIndexer::IxFNoRetryFailed)) { checkRetryFailed(config, true); } if (!status) cerr << "Indexing failed" << endl; if (!confindexer->getReason().empty()) { addIdxReason("indexer", confindexer->getReason()); cerr << confindexer->getReason() << endl; } if (updater) { updater->status.phase = DbIxStatus::DBIXS_DONE; updater->status.fn.clear(); updater->update(); } flushIdxReasons(); return !status; } } recoll-1.26.3/index/checkretryfailed.h0000644000175000017500000000237613533651561014614 00000000000000#ifndef _CHECKRETRYFAILED_H_INCLUDED_ #define _CHECKRETRYFAILED_H_INCLUDED_ /* Copyright (C) 2015 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /** Check if retrying failed files may be needed. We execute a shell-script for this. The default one checks if any of the common bin directories changed. @param conf the config @param record if true, record the state instead of testing @return true if retrying should be performed */ class RclConfig; bool checkRetryFailed(RclConfig *conf, bool record); #endif /* _CHECKRETRYFAILED_H_INCLUDED_ */ recoll-1.26.3/index/rclmon.sh0000755000175000017500000000357713533651561012770 00000000000000#!/bin/sh # Copyright (C) 2006 J.F.Dockes ####################################################### # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. ###################################################### ################### # Start/stop a recollindex program running as permanent real time indexer. # The running program writes its pid in $RECOLL_CONFDIR/index.pid # The portability of this script has not been fully tested. # fatal() { echo $* exit 1 } usage() { fatal "Usage: rclmon.sh " } test $# -eq 1 || usage export LANG=C RECOLL_CONFDIR=${RECOLL_CONFDIR:-$HOME/.recoll} #echo RECOLL_CONFDIR = ${RECOLL_CONFDIR} pidfile="${RECOLL_CONFDIR}/index.pid" opid=0 if test -f $pidfile ; then read opid junk < $pidfile fi if test $opid -gt 0; then out=`kill -0 ${opid} 2>&1` if test $? 
-ne 0 ; then if test `expr "$out" : '.*such *process.*'` -ne 0 ; then opid=0 else fatal cant test existence of running process fi fi fi #echo "Existing pid $opid" case $1 in start) if test "$opid" -ne 0 ; then fatal "Already running process: $opid" fi recollindex -m ;; stop) if test "$opid" -eq 0 ; then fatal "No process running" fi kill $opid ;; *) usage esac recoll-1.26.3/index/fsfetcher.h0000644000175000017500000000252713533651561013253 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _FSFETCHER_H_INCLUDED_ #define _FSFETCHER_H_INCLUDED_ #include "fetcher.h" /** * The file-system fetcher: */ class FSDocFetcher : public DocFetcher{ /** FSDocFetcher::fetch always returns a file name */ virtual bool fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out); /** Calls stat to retrieve file signature data */ virtual bool makesig(RclConfig* cnf,const Rcl::Doc& idoc, std::string& sig); virtual DocFetcher::Reason testAccess(RclConfig* cnf, const Rcl::Doc& idoc); virtual ~FSDocFetcher() {} }; #endif /* _FSFETCHER_H_INCLUDED_ */ recoll-1.26.3/index/idxstatus.h0000644000175000017500000000370613533651561013332 00000000000000/* Copyright (C) 2017-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _IDXSTATUS_H_INCLUDED_ #define _IDXSTATUS_H_INCLUDED_ #include // Current status of an indexing operation. This is updated in // $RECOLL_CONFDIR/idxstatus.txt class DbIxStatus { public: enum Phase {DBIXS_NONE, DBIXS_FILES, DBIXS_PURGE, DBIXS_STEMDB, DBIXS_CLOSING, DBIXS_MONITOR, DBIXS_DONE}; Phase phase; std::string fn; // Last file processed int docsdone; // Documents actually updated int filesdone; // Files tested (updated or not) int fileerrors; // Failed files (e.g.: missing input handler). int dbtotdocs; // Doc count in index at start // Total files in index.This is actually difficult to compute from // the index so it's preserved from last indexing int totfiles; // Is this indexer a monitoring one? 
This is a permanent value // telling if option -m was set, not about what we are currently // doing bool hasmonitor{false}; void reset() { phase = DBIXS_FILES; fn.erase(); docsdone = filesdone = fileerrors = dbtotdocs = totfiles = 0; } DbIxStatus() {reset();} }; class RclConfig; extern void readIdxStatus(RclConfig *config, DbIxStatus &status); #endif /* _IDXSTATUS_H_INCLUDED_ */ recoll-1.26.3/index/rclmonrcv.cpp0000644000175000017500000006174613533651561013652 00000000000000#include "autoconfig.h" #ifdef RCL_MONITOR /* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include "safesysstat.h" #include "safeunistd.h" #include "log.h" #include "rclmon.h" #include "rclinit.h" #include "fstreewalk.h" #include "pathut.h" /** * Recoll real time monitor event receiver. This file has code to interface * to FAM or inotify and place events on the event queue. */ /** A small virtual interface for monitors. Lets * either fam/gamin or raw imonitor hide behind */ class RclMonitor { public: RclMonitor() {} virtual ~RclMonitor() {} virtual bool addWatch(const string& path, bool isDir) = 0; virtual bool getEvent(RclMonEvent& ev, int msecs = -1) = 0; virtual bool ok() const = 0; // Does this monitor generate 'exist' events at startup? virtual bool generatesExist() const = 0; // Save significant errno after monitor calls int saved_errno{0}; }; // Monitor factory. We only have one compiled-in kind at a time, no // need for a 'kind' parameter static RclMonitor *makeMonitor(); /** * Create directory watches during the initial file system tree walk. * * This class is a callback for the file system tree walker * class. The callback method alternatively creates the directory * watches and flushes the event queue (to avoid a possible overflow * while we create the watches) */ class WalkCB : public FsTreeWalkerCB { public: WalkCB(RclConfig *conf, RclMonitor *mon, RclMonEventQueue *queue, FsTreeWalker& walker) : m_config(conf), m_mon(mon), m_queue(queue), m_walker(walker) {} virtual ~WalkCB() {} virtual FsTreeWalker::Status processone(const string &fn, const struct stat *st, FsTreeWalker::CbFlag flg) { MONDEB("rclMonRcvRun: processone " << fn << " m_mon " << m_mon << " m_mon->ok " << (m_mon ? m_mon->ok() : false) << std::endl); if (flg == FsTreeWalker::FtwDirEnter || flg == FsTreeWalker::FtwDirReturn) { m_config->setKeyDir(fn); // Set up skipped patterns for this subtree. 
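            // skippedNames can be set per subtree in the configuration:
            // setKeyDir() above selects the appropriate section, so the
            // walker has to be refreshed on every directory change.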
m_walker.setSkippedNames(m_config->getSkippedNames()); } if (flg == FsTreeWalker::FtwDirEnter) { // Create watch when entering directory, but first empty // whatever events we may already have on queue while (m_queue->ok() && m_mon->ok()) { RclMonEvent ev; if (m_mon->getEvent(ev, 0)) { if (ev.m_etyp != RclMonEvent::RCLEVT_NONE) m_queue->pushEvent(ev); } else { MONDEB("rclMonRcvRun: no event pending\n"); break; } } if (!m_mon || !m_mon->ok()) return FsTreeWalker::FtwError; // We do nothing special if addWatch fails for a reasonable reason if (!m_mon->addWatch(fn, true)) { if (m_mon->saved_errno != EACCES && m_mon->saved_errno != ENOENT) return FsTreeWalker::FtwError; } } else if (!m_mon->generatesExist() && flg == FsTreeWalker::FtwRegular) { // Have to synthetize events for regular files existence // at startup because the monitor does not do it // Note 2011-09-29: no sure this is actually needed. We just ran // an incremental indexing pass (before starting the // monitor). Why go over the files once more ? The only // reason I can see would be to catch modifications that // happen between the incremental and the start of // monitoring ? There should be another way: maybe start // monitoring without actually handling events (just // queue), then run incremental then start handling // events ? But we also have to do it on a directory // move! So keep it RclMonEvent ev; ev.m_path = fn; ev.m_etyp = RclMonEvent::RCLEVT_MODIFY; m_queue->pushEvent(ev); } return FsTreeWalker::FtwOk; } private: RclConfig *m_config; RclMonitor *m_mon; RclMonEventQueue *m_queue; FsTreeWalker& m_walker; }; // Main thread routine: create watches, then forever wait for and queue events void *rclMonRcvRun(void *q) { RclMonEventQueue *queue = (RclMonEventQueue *)q; LOGDEB("rclMonRcvRun: running\n"); recoll_threadinit(); // Make a local copy of the configuration as it doesn't like // concurrent accesses. It's ok to copy it here as the other // thread will not work before we have sent events. RclConfig lconfig(*queue->getConfig()); // Create the fam/whatever interface object RclMonitor *mon; if ((mon = makeMonitor()) == 0) { LOGERR("rclMonRcvRun: makeMonitor failed\n"); queue->setTerminate(); return 0; } // Get top directories from config. Special monitor sublist if // set, else full list. vector tdl = lconfig.getTopdirs(true); if (tdl.empty()) { LOGERR("rclMonRcvRun:: top directory list (topdirs param.) 
not found " "in configuration or topdirs list parse error"); queue->setTerminate(); return 0; } // Walk the directory trees to add watches FsTreeWalker walker; walker.setSkippedPaths(lconfig.getDaemSkippedPaths()); WalkCB walkcb(&lconfig, mon, queue, walker); for (auto it = tdl.begin(); it != tdl.end(); it++) { lconfig.setKeyDir(*it); // Adjust the follow symlinks options bool follow; if (lconfig.getConfParam("followLinks", &follow) && follow) { walker.setOpts(FsTreeWalker::FtwFollow); } else { walker.setOpts(FsTreeWalker::FtwOptNone); } // We have to special-case regular files which are part of the topdirs // list because we the tree walker only adds watches for directories struct stat st; if (path_fileprops(*it, &st, follow) != 0) { LOGERR("rclMonRcvRun: stat failed for " << *it << "\n"); continue; } if (S_ISDIR(st.st_mode)) { LOGDEB("rclMonRcvRun: walking " << *it << "\n"); if (walker.walk(*it, walkcb) != FsTreeWalker::FtwOk) { LOGERR("rclMonRcvRun: tree walk failed\n"); goto terminate; } if (walker.getErrCnt() > 0) { LOGINFO("rclMonRcvRun: fs walker errors: " << walker.getReason() << "\n"); } } else { if (!mon->addWatch(*it, false)) { LOGERR("rclMonRcvRun: addWatch failed for " << *it << " errno " << mon->saved_errno << std::endl); } } } { bool doweb = false; lconfig.getConfParam("processwebqueue", &doweb); if (doweb) { string webqueuedir = lconfig.getWebQueueDir(); if (!mon->addWatch(webqueuedir, true)) { LOGERR("rclMonRcvRun: addwatch (webqueuedir) failed\n"); if (mon->saved_errno != EACCES && mon->saved_errno != ENOENT) goto terminate; } } } // Forever wait for monitoring events and add them to queue: MONDEB("rclMonRcvRun: waiting for events. q->ok(): " << queue->ok() << std::endl); while (queue->ok() && mon->ok()) { RclMonEvent ev; // Note: I could find no way to get the select // call to return when a signal is delivered to the process // (it goes to the main thread, from which I tried to close or // write to the select fd, with no effect). So set a // timeout so that an intr will be detected if (mon->getEvent(ev, 2000)) { // Don't push events for skipped files. This would get // filtered on the processing side anyway, but causes // unnecessary wakeups and messages. Do not test // skippedPaths here, this would be incorrect (because a // topdir can be under a skippedPath and this was handled // while adding the watches). // Also we let the other side process onlyNames. lconfig.setKeyDir(path_getfather(ev.m_path)); walker.setSkippedNames(lconfig.getSkippedNames()); if (walker.inSkippedNames(path_getsimple(ev.m_path))) continue; if (ev.m_etyp == RclMonEvent::RCLEVT_DIRCREATE) { // Recursive addwatch: there may already be stuff // inside this directory. Ie: files were quickly // created, or this is actually the target of a // directory move. This is necessary for inotify, but // it seems that fam/gamin is doing the job for us so // that we are generating double events here (no big // deal as prc will sort/merge). 
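                // Typical case (hypothetical path): "mv /tmp/photos
                // ~/topdir/photos" shows up as a single directory-create
                // event for the target; walking it is the only way to add
                // watches for, and queue events about, the files already
                // inside.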
LOGDEB("rclMonRcvRun: walking new dir " << ev.m_path << "\n"); if (walker.walk(ev.m_path, walkcb) != FsTreeWalker::FtwOk) { LOGERR("rclMonRcvRun: walking new dir " << ev.m_path << " : " << walker.getReason() << "\n"); goto terminate; } if (walker.getErrCnt() > 0) { LOGINFO("rclMonRcvRun: fs walker errors: " << walker.getReason() << "\n"); } } if (ev.m_etyp != RclMonEvent::RCLEVT_NONE) queue->pushEvent(ev); } } terminate: queue->setTerminate(); LOGINFO("rclMonRcvRun: monrcv thread routine returning\n"); return 0; } // Utility routine used by both the fam/gamin and inotify versions to get // rid of the id-path translation for a moved dir bool eraseWatchSubTree(map& idtopath, const string& top) { bool found = false; MONDEB("Clearing map for [" << top << "]\n"); map::iterator it = idtopath.begin(); while (it != idtopath.end()) { if (it->second.find(top) == 0) { found = true; idtopath.erase(it++); } else { it++; } } return found; } // We dont compile both the inotify and the fam interface and inotify // has preference #ifndef RCL_USE_INOTIFY #ifdef RCL_USE_FAM ////////////////////////////////////////////////////////////////////////// /** Fam/gamin -based monitor class */ #include #include #include #include /** FAM based monitor class. We have to keep a record of FAM watch request numbers to directory names as the event only contain the request number and file name, not the full path */ class RclFAM : public RclMonitor { public: RclFAM(); virtual ~RclFAM(); virtual bool addWatch(const string& path, bool isdir); virtual bool getEvent(RclMonEvent& ev, int msecs = -1); bool ok() const {return m_ok;} virtual bool generatesExist() const {return true;} private: bool m_ok; FAMConnection m_conn; void close() { FAMClose(&m_conn); m_ok = false; } map m_idtopath; const char *event_name(int code); }; // Translate event code to string (debug) const char *RclFAM::event_name(int code) { static const char *famevent[] = { "", "FAMChanged", "FAMDeleted", "FAMStartExecuting", "FAMStopExecuting", "FAMCreated", "FAMMoved", "FAMAcknowledge", "FAMExists", "FAMEndExist" }; static char unknown_event[30]; if (code < FAMChanged || code > FAMEndExist) { sprintf(unknown_event, "unknown (%d)", code); return unknown_event; } return famevent[code]; } RclFAM::RclFAM() : m_ok(false) { if (FAMOpen2(&m_conn, "Recoll")) { LOGERR("RclFAM::RclFAM: FAMOpen2 failed, errno " << errno << "\n"); return; } m_ok = true; } RclFAM::~RclFAM() { if (ok()) FAMClose(&m_conn); } static jmp_buf jbuf; static void onalrm(int sig) { longjmp(jbuf, 1); } bool RclFAM::addWatch(const string& path, bool isdir) { if (!ok()) return false; bool ret = false; MONDEB("RclFAM::addWatch: adding " << path << std::endl); // It happens that the following call block forever. // We'd like to be able to at least terminate on a signal here, but // gamin forever retries its write call on EINTR, so it's not even useful // to unblock signals. SIGALRM is not used by the main thread, so at least // ensure that we exit after gamin gets stuck. 
if (setjmp(jbuf)) { LOGERR("RclFAM::addWatch: timeout talking to FAM\n"); return false; } signal(SIGALRM, onalrm); alarm(20); FAMRequest req; if (isdir) { if (FAMMonitorDirectory(&m_conn, path.c_str(), &req, 0) != 0) { LOGERR("RclFAM::addWatch: FAMMonitorDirectory failed\n"); goto out; } } else { if (FAMMonitorFile(&m_conn, path.c_str(), &req, 0) != 0) { LOGERR("RclFAM::addWatch: FAMMonitorFile failed\n"); goto out; } } m_idtopath[req.reqnum] = path; ret = true; out: alarm(0); return ret; } // Note: return false only for queue empty or error // Return EVT_NONE for bad event to keep queue processing going bool RclFAM::getEvent(RclMonEvent& ev, int msecs) { if (!ok()) return false; MONDEB("RclFAM::getEvent:\n"); fd_set readfds; int fam_fd = FAMCONNECTION_GETFD(&m_conn); FD_ZERO(&readfds); FD_SET(fam_fd, &readfds); MONDEB("RclFAM::getEvent: select. fam_fd is " << fam_fd << std::endl); // Fam / gamin is sometimes a bit slow to send events. Always add // a little timeout, because if we fail to retrieve enough events, // we risk deadlocking in addwatch() if (msecs == 0) msecs = 2; struct timeval timeout; if (msecs >= 0) { timeout.tv_sec = msecs / 1000; timeout.tv_usec = (msecs % 1000) * 1000; } int ret; if ((ret=select(fam_fd+1, &readfds, 0, 0, msecs >= 0 ? &timeout : 0)) < 0) { LOGERR("RclFAM::getEvent: select failed, errno " << errno << "\n"); close(); return false; } else if (ret == 0) { // timeout MONDEB("RclFAM::getEvent: select timeout\n"); return false; } MONDEB("RclFAM::getEvent: select returned " << ret << std::endl); if (!FD_ISSET(fam_fd, &readfds)) return false; // ?? 2011/03/15 gamin v0.1.10. There is initially a single null // byte on the connection so the first select always succeeds. If // we then call FAMNextEvent we stall. Using FAMPending works // around the issue, but we did not need this in the past and this // is most weird. if (FAMPending(&m_conn) <= 0) { MONDEB("RclFAM::getEvent: FAMPending says no events\n"); return false; } MONDEB("RclFAM::getEvent: call FAMNextEvent\n"); FAMEvent fe; if (FAMNextEvent(&m_conn, &fe) < 0) { LOGERR("RclFAM::getEvent: FAMNextEvent: errno " << errno << "\n"); close(); return false; } MONDEB("RclFAM::getEvent: FAMNextEvent returned\n"); map::const_iterator it; if ((!path_isabsolute(fe.filename)) && (it = m_idtopath.find(fe.fr.reqnum)) != m_idtopath.end()) { ev.m_path = path_cat(it->second, fe.filename); } else { ev.m_path = fe.filename; } MONDEB("RclFAM::getEvent: " << event_name(fe.code) < " " << ev.m_path << std::endl); switch (fe.code) { case FAMCreated: if (path_isdir(ev.m_path)) { ev.m_etyp = RclMonEvent::RCLEVT_DIRCREATE; break; } /* FALLTHROUGH */ case FAMChanged: case FAMExists: // Let the other side sort out the status of this file vs the db ev.m_etyp = RclMonEvent::RCLEVT_MODIFY; break; case FAMMoved: case FAMDeleted: ev.m_etyp = RclMonEvent::RCLEVT_DELETE; // We would like to signal a directory here to enable cleaning // the subtree (on a dir move), but can't test the actual file // which is gone, and fam doesn't tell us if it's a dir or reg. // Let's rely on the fact that a directory should be watched if (eraseWatchSubTree(m_idtopath, ev.m_path)) ev.m_etyp |= RclMonEvent::RCLEVT_ISDIR; break; case FAMStartExecuting: case FAMStopExecuting: case FAMAcknowledge: case FAMEndExist: default: // Have to return something, this is different from an empty queue, // esp if we are trying to empty it... 
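        // Returning true with RCLEVT_NONE lets the caller keep draining the
        // FAM queue; returning false here would be mistaken for "queue
        // empty" by loops such as the one in WalkCB::processone().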
if (fe.code != FAMEndExist) LOGDEB("RclFAM::getEvent: got other event " << fe.code << "!\n"); ev.m_etyp = RclMonEvent::RCLEVT_NONE; break; } return true; } #endif // RCL_USE_FAM #endif // ! INOTIFY #ifdef RCL_USE_INOTIFY ////////////////////////////////////////////////////////////////////////// /** Inotify-based monitor class */ #include #include class RclIntf : public RclMonitor { public: RclIntf() : m_ok(false), m_fd(-1), m_evp(0), m_ep(0) { if ((m_fd = inotify_init()) < 0) { LOGERR("RclIntf:: inotify_init failed, errno " << errno << "\n"); return; } m_ok = true; } virtual ~RclIntf() { close(); } virtual bool addWatch(const string& path, bool isdir); virtual bool getEvent(RclMonEvent& ev, int msecs = -1); bool ok() const {return m_ok;} virtual bool generatesExist() const {return false;} private: bool m_ok; int m_fd; map m_idtopath; // Watch descriptor to name #define EVBUFSIZE (32*1024) char m_evbuf[EVBUFSIZE]; // Event buffer char *m_evp; // Pointer to next event or 0 char *m_ep; // Pointer to end of events const char *event_name(int code); void close() { if (m_fd >= 0) { ::close(m_fd); m_fd = -1; } m_ok = false; } }; const char *RclIntf::event_name(int code) { code &= ~(IN_ISDIR|IN_ONESHOT); switch (code) { case IN_ACCESS: return "IN_ACCESS"; case IN_MODIFY: return "IN_MODIFY"; case IN_ATTRIB: return "IN_ATTRIB"; case IN_CLOSE_WRITE: return "IN_CLOSE_WRITE"; case IN_CLOSE_NOWRITE: return "IN_CLOSE_NOWRITE"; case IN_CLOSE: return "IN_CLOSE"; case IN_OPEN: return "IN_OPEN"; case IN_MOVED_FROM: return "IN_MOVED_FROM"; case IN_MOVED_TO: return "IN_MOVED_TO"; case IN_MOVE: return "IN_MOVE"; case IN_CREATE: return "IN_CREATE"; case IN_DELETE: return "IN_DELETE"; case IN_DELETE_SELF: return "IN_DELETE_SELF"; case IN_MOVE_SELF: return "IN_MOVE_SELF"; case IN_UNMOUNT: return "IN_UNMOUNT"; case IN_Q_OVERFLOW: return "IN_Q_OVERFLOW"; case IN_IGNORED: return "IN_IGNORED"; default: { static char msg[50]; sprintf(msg, "Unknown event 0x%x", code); return msg; } }; } bool RclIntf::addWatch(const string& path, bool) { if (!ok()) return false; MONDEB("RclIntf::addWatch: adding " << path << std::endl); // CLOSE_WRITE is covered through MODIFY. CREATE is needed for mkdirs uint32_t mask = IN_MODIFY | IN_CREATE | IN_MOVED_FROM | IN_MOVED_TO | IN_DELETE // IN_ATTRIB used to be not needed to receive extattr // modification events, which was a bit weird because only ctime is // set, and now it is... | IN_ATTRIB #ifdef IN_DONT_FOLLOW | IN_DONT_FOLLOW #endif #ifdef IN_EXCL_UNLINK | IN_EXCL_UNLINK #endif ; int wd; if ((wd = inotify_add_watch(m_fd, path.c_str(), mask)) < 0) { saved_errno = errno; LOGERR("RclIntf::addWatch: inotify_add_watch failed. errno " << saved_errno << "\n"); if (errno == ENOSPC) { LOGERR("RclIntf::addWatch: ENOSPC error may mean that you should " "increase the inotify kernel constants. See inotify(7)\n"); } return false; } m_idtopath[wd] = path; return true; } // Note: return false only for queue empty or error // Return EVT_NONE for bad event to keep queue processing going bool RclIntf::getEvent(RclMonEvent& ev, int msecs) { if (!ok()) return false; ev.m_etyp = RclMonEvent::RCLEVT_NONE; MONDEB("RclIntf::getEvent:\n"); if (m_evp == 0) { fd_set readfds; FD_ZERO(&readfds); FD_SET(m_fd, &readfds); struct timeval timeout; if (msecs >= 0) { timeout.tv_sec = msecs / 1000; timeout.tv_usec = (msecs % 1000) * 1000; } int ret; MONDEB("RclIntf::getEvent: select\n"); if ((ret = select(m_fd + 1, &readfds, 0, 0, msecs >= 0 ? 
&timeout : 0)) < 0) { LOGERR("RclIntf::getEvent: select failed, errno " << errno << "\n"); close(); return false; } else if (ret == 0) { MONDEB("RclIntf::getEvent: select timeout\n"); // timeout return false; } MONDEB("RclIntf::getEvent: select returned\n"); if (!FD_ISSET(m_fd, &readfds)) return false; int rret; if ((rret=read(m_fd, m_evbuf, sizeof(m_evbuf))) <= 0) { LOGERR("RclIntf::getEvent: read failed, " << sizeof(m_evbuf) << "->" << rret << " errno " << errno << "\n"); close(); return false; } m_evp = m_evbuf; m_ep = m_evbuf + rret; } struct inotify_event *evp = (struct inotify_event *)m_evp; m_evp += sizeof(struct inotify_event); if (evp->len > 0) m_evp += evp->len; if (m_evp >= m_ep) m_evp = m_ep = 0; map::const_iterator it; if ((it = m_idtopath.find(evp->wd)) == m_idtopath.end()) { LOGERR("RclIntf::getEvent: unknown wd " << evp->wd << "\n"); return true; } ev.m_path = it->second; if (evp->len > 0) { ev.m_path = path_cat(ev.m_path, evp->name); } MONDEB("RclIntf::getEvent: " << event_name(evp->mask) << " " << ev.m_path << std::endl); if ((evp->mask & IN_MOVED_FROM) && (evp->mask & IN_ISDIR)) { // We get this when a directory is renamed. Erase the subtree // entries in the map. The subsequent MOVED_TO will recreate // them. This is probably not needed because the watches // actually still exist in the kernel, so that the wds // returned by future addwatches will be the old ones, and the // map will be updated in place. But still, this feels safer eraseWatchSubTree(m_idtopath, ev.m_path); } // IN_ATTRIB used to be not needed, but now it is if (evp->mask & (IN_MODIFY|IN_ATTRIB)) { ev.m_etyp = RclMonEvent::RCLEVT_MODIFY; } else if (evp->mask & (IN_DELETE | IN_MOVED_FROM)) { ev.m_etyp = RclMonEvent::RCLEVT_DELETE; if (evp->mask & IN_ISDIR) ev.m_etyp |= RclMonEvent::RCLEVT_ISDIR; } else if (evp->mask & (IN_CREATE | IN_MOVED_TO)) { if (evp->mask & IN_ISDIR) { ev.m_etyp = RclMonEvent::RCLEVT_DIRCREATE; } else { // We used to return null event because we would get a // modify event later, but it seems not to be the case any // more (10-2011). So generate MODIFY event ev.m_etyp = RclMonEvent::RCLEVT_MODIFY; } } else if (evp->mask & (IN_IGNORED)) { if (!m_idtopath.erase(evp->wd)) { LOGDEB0("Got IGNORE event for unknown watch\n"); } else { eraseWatchSubTree(m_idtopath, ev.m_path); } } else { LOGDEB("RclIntf::getEvent: unhandled event " << event_name(evp->mask) << " " << evp->mask << " " << ev.m_path << "\n"); return true; } return true; } #endif // RCL_USE_INOTIFY /////////////////////////////////////////////////////////////////////// // The monitor 'factory' static RclMonitor *makeMonitor() { #ifdef RCL_USE_INOTIFY return new RclIntf; #endif #ifndef RCL_USE_INOTIFY #ifdef RCL_USE_FAM return new RclFAM; #endif #endif LOGINFO("RclMonitor: neither Inotify nor Fam was compiled as file system " "change notification interface\n"); return 0; } #endif // RCL_MONITOR recoll-1.26.3/index/webqueuefetcher.cpp0000644000175000017500000000414713533651561015020 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "webqueuefetcher.h" #include #include "rcldoc.h" #include "fetcher.h" #include "log.h" #include "webstore.h" using std::string; // We use a single WebStore object to access the data. We protect it // against multiple thread access. static std::mutex o_beagler_mutex; bool WQDocFetcher::fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out) { string udi; if (!idoc.getmeta(Rcl::Doc::keyudi, &udi) || udi.empty()) { LOGERR("WQDocFetcher:: no udi in idoc\n" ); return false; } Rcl::Doc dotdoc; { std::unique_lock locker(o_beagler_mutex); // Retrieve from our webcache (beagle data). The beagler // object is created at the first call of this routine and // deleted when the program exits. static WebStore o_beagler(cnf); if (!o_beagler.getFromCache(udi, dotdoc, out.data)) { LOGINFO("WQDocFetcher::fetch: failed for [" << udi << "]\n"); return false; } } if (dotdoc.mimetype.compare(idoc.mimetype)) { LOGINFO("WQDocFetcher:: udi [" << udi << "], mimetp mismatch: in: [" << idoc.mimetype << "], bgl [" << dotdoc.mimetype << "]\n"); } out.kind = RawDoc::RDK_DATA; return true; } bool WQDocFetcher::makesig(RclConfig* cnf, const Rcl::Doc& idoc, string& sig) { // Web queue sigs are empty sig.clear(); return true; } recoll-1.26.3/index/webqueue.h0000644000175000017500000000510713533651561013121 00000000000000/* Copyright (C) 2009 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _webqueue_h_included_ #define _webqueue_h_included_ #include /** * Process the WEB indexing queue. * * This was originally written to reuse the Beagle Firefox plug-in (which * copied visited pages and bookmarks to the queue), long dead and replaced by a * recoll-specific plugin. */ #include "fstreewalk.h" #include "rcldoc.h" class DbIxStatusUpdater; class CirCache; class RclConfig; class WebStore; namespace Rcl { class Db; } class WebQueueIndexer : public FsTreeWalkerCB { public: WebQueueIndexer(RclConfig *cnf, Rcl::Db *db, DbIxStatusUpdater *updfunc = 0); ~WebQueueIndexer(); /** This is called by the top indexer in recollindex. * Does the walking and the talking */ bool index(); /** Called when we fstreewalk the queue dir */ FsTreeWalker::Status processone(const string &, const struct stat *, FsTreeWalker::CbFlag); /** Index a list of files. No db cleaning or stemdb updating. * Used by the real time monitor */ bool indexFiles(std::list& files); /** Purge a list of files. 
No way to do this currently and dont want * to do anything as this is mostly called by the monitor when *I* delete * files inside the queue dir */ bool purgeFiles(std::list& files) {return true;} /** Called when indexing data from the cache, and from internfile for * search result preview */ bool getFromCache(const string& udi, Rcl::Doc &doc, string& data, string *hittype = 0); private: RclConfig *m_config; Rcl::Db *m_db; WebStore *m_cache; string m_queuedir; DbIxStatusUpdater *m_updater; bool m_nocacheindex; bool indexFromCache(const string& udi); void updstatus(const string& udi); }; #endif /* _webqueue_h_included_ */ recoll-1.26.3/index/indexer.cpp0000644000175000017500000003223713566451250013273 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include "cstr.h" #include "log.h" #include "recollindex.h" #include "indexer.h" #include "fsindexer.h" #ifndef DISABLE_WEB_INDEXER #include "webqueue.h" #endif #include "mimehandler.h" #include "pathut.h" #include "idxstatus.h" #include "execmd.h" #include "safesysstat.h" #ifdef RCL_USE_ASPELL #include "rclaspell.h" #endif using std::list; using std::string; using std::vector; // Global stop request flag. This is checked in a number of place in the // indexing routines. int stopindexing; // This would more logically live in recollindex.cpp, but then librecoll would // have an undefined symbol ConfSimple idxreasons; void addIdxReason(string who, string reason) { reason = neutchars(reason, "\r\n"); if (!idxreasons.set(who, reason)) { std::cerr << "addIdxReason: confsimple set failed\n"; } } #ifndef DISABLE_WEB_INDEXER bool runWebFilesMoverScript(RclConfig *config) { static string downloadsdir; if (downloadsdir.empty()) { if (!config->getConfParam("webdownloadsdir", downloadsdir)) { downloadsdir = path_tildexpand("~/Downloads"); } } static string cmdpath; vector args; #ifdef _WIN32 const static string cmdnm{"python"}; args.push_back(config->findFilter("recoll-we-move-files.py")); #else const static string cmdnm{"recoll-we-move-files.py"}; #endif if (cmdpath.empty()) { cmdpath = config->findFilter(cmdnm); if (cmdpath.empty()) { LOGERR("runWFMoverScript: recoll-we-move-files.py not found\n"); return false; } } /* Arrange to not actually run the script if the directory did not change */ static time_t dirmtime; time_t ndirmtime = 0; struct stat st; if (path_fileprops(downloadsdir.c_str(), &st) == 0) { ndirmtime = st.st_mtime; } /* If stat fails, presumably Downloads does not exist or is not accessible, dirmtime and mdirmtime stay at 0, and we never execute the script, which is the right thing. 
*/ if (dirmtime != ndirmtime) { /* The script is going to change the directory, so updating dirmtime before it runs means that we are going to execute it one time too many (it will run without doing anything), but we can't set the mtime to after the run in case files are created during the run. */ dirmtime = ndirmtime; ExecCmd cmd; int status = cmd.doexec(cmdpath, args); return status == 0; } return true; } #endif ConfIndexer::ConfIndexer(RclConfig *cnf, DbIxStatusUpdater *updfunc) : m_config(cnf), m_db(cnf), m_fsindexer(0), m_doweb(false), m_webindexer(0), m_updater(updfunc) { m_config->getConfParam("processwebqueue", &m_doweb); } ConfIndexer::~ConfIndexer() { deleteZ(m_fsindexer); #ifndef DISABLE_WEB_INDEXER deleteZ(m_webindexer); #endif } // Determine if this is likely the first time that the user runs // indexing. We don't look at the xapiandb as this may have been // explicitly removed for valid reasons, but at the indexing status // file, which should be unexistant-or-empty only before any indexing // has ever run bool ConfIndexer::runFirstIndexing() { // Indexing status file existing and not empty ? if (path_filesize(m_config->getIdxStatusFile()) > 0) { LOGDEB0("ConfIndexer::runFirstIndexing: no: status file not empty\n"); return false; } // And only do this if the user has kept the default topdirs (~). vector tdl = m_config->getTopdirs(); if (tdl.size() != 1 || tdl[0].compare(path_canon(path_tildexpand("~")))) { LOGDEB0("ConfIndexer::runFirstIndexing: no: not home only\n"); return false; } return true; } bool ConfIndexer::firstFsIndexingSequence() { LOGDEB("ConfIndexer::firstFsIndexingSequence\n"); deleteZ(m_fsindexer); m_fsindexer = new FsIndexer(m_config, &m_db, m_updater); if (!m_fsindexer) { return false; } int flushmb = m_db.getFlushMb(); m_db.setFlushMb(2); m_fsindexer->index(IxFQuickShallow); m_db.doFlush(); m_db.setFlushMb(flushmb); return true; } bool ConfIndexer::index(bool resetbefore, ixType typestorun, int flags) { Rcl::Db::OpenMode mode = resetbefore ? Rcl::Db::DbTrunc : Rcl::Db::DbUpd; if (!m_db.open(mode)) { LOGERR("ConfIndexer: error opening database " << m_config->getDbDir() << " : " << m_db.getReason() << "\n"); addIdxReason("indexer", m_db.getReason()); return false; } m_config->setKeyDir(cstr_null); if (typestorun & IxTFs) { if (runFirstIndexing()) { firstFsIndexingSequence(); } deleteZ(m_fsindexer); m_fsindexer = new FsIndexer(m_config, &m_db, m_updater); if (!m_fsindexer || !m_fsindexer->index(flags)) { if (stopindexing) { addIdxReason("indexer", "Indexing was interrupted."); } else { addIdxReason("indexer", "Index creation failed. See log."); } m_db.close(); return false; } } #ifndef DISABLE_WEB_INDEXER if (m_doweb && (typestorun & IxTWebQueue)) { runWebFilesMoverScript(m_config); deleteZ(m_webindexer); m_webindexer = new WebQueueIndexer(m_config, &m_db, m_updater); if (!m_webindexer || !m_webindexer->index()) { m_db.close(); addIdxReason("indexer", "Web index creation failed. See log"); return false; } } #endif if (typestorun == IxTAll) { // Get rid of all database entries that don't exist in the // filesystem anymore. Only if all *configured* indexers ran. if (m_updater && !m_updater->update(DbIxStatus::DBIXS_PURGE, "")) { m_db.close(); addIdxReason("indexer", "Index purge failed. See log"); return false; } m_db.purge(); } // The close would be done in our destructor, but we want status // here. 
Makes no sense to check for cancel, we'll have to close // anyway if (m_updater) m_updater->update(DbIxStatus::DBIXS_CLOSING, string()); if (!m_db.close()) { LOGERR("ConfIndexer::index: error closing database in " << m_config->getDbDir() << "\n"); addIdxReason("indexer", "Index close/flush failed. See log"); return false; } if (m_updater && !m_updater->update(DbIxStatus::DBIXS_CLOSING, string())) return false; bool ret = true; if (!createStemmingDatabases()) { ret = false; } if (m_updater && !m_updater->update(DbIxStatus::DBIXS_CLOSING, string())) return false; // Don't fail indexing because of an aspell issue: we ignore the status. // Messages were written to the reasons output (void)createAspellDict(); clearMimeHandlerCache(); if (m_updater) m_updater->update(DbIxStatus::DBIXS_DONE, string()); return ret; } bool ConfIndexer::indexFiles(list& ifiles, int flag) { list myfiles; string origcwd = m_config->getOrigCwd(); for (const auto& entry : ifiles) { myfiles.push_back(path_canon(entry, &origcwd)); } myfiles.sort(); if (!m_db.open(Rcl::Db::DbUpd)) { LOGERR("ConfIndexer: indexFiles error opening database " << m_config->getDbDir() << "\n"); return false; } m_config->setKeyDir(cstr_null); bool ret = false; if (!m_fsindexer) m_fsindexer = new FsIndexer(m_config, &m_db, m_updater); if (m_fsindexer) ret = m_fsindexer->indexFiles(myfiles, flag); LOGDEB2("ConfIndexer::indexFiles: fsindexer returned " << ret << ", " << myfiles.size() << " files remainining\n"); #ifndef DISABLE_WEB_INDEXER if (m_doweb && !myfiles.empty() && !(flag & IxFNoWeb)) { if (!m_webindexer) m_webindexer = new WebQueueIndexer(m_config, &m_db, m_updater); if (m_webindexer) { ret = ret && m_webindexer->indexFiles(myfiles); } else { ret = false; } } #endif if (flag & IxFDoPurge) { m_db.purge(); } // The close would be done in our destructor, but we want status here if (!m_db.close()) { LOGERR("ConfIndexer::index: error closing database in " << m_config->getDbDir() << "\n"); return false; } ifiles = myfiles; clearMimeHandlerCache(); return ret; } // Update index for specific documents. The docs come from an index // query, so the udi, backend etc. fields are filled. bool ConfIndexer::updateDocs(vector &docs, IxFlag flag) { vector paths; docsToPaths(docs, paths); list files(paths.begin(), paths.end()); if (!files.empty()) { return indexFiles(files, flag); } return true; } bool ConfIndexer::purgeFiles(list &files, int flag) { list myfiles; string origcwd = m_config->getOrigCwd(); for (const auto& entry : files) { myfiles.push_back(path_canon(entry, &origcwd)); } myfiles.sort(); if (!m_db.open(Rcl::Db::DbUpd)) { LOGERR("ConfIndexer: purgeFiles error opening database " << m_config->getDbDir() << "\n"); return false; } bool ret = false; m_config->setKeyDir(cstr_null); if (!m_fsindexer) m_fsindexer = new FsIndexer(m_config, &m_db, m_updater); if (m_fsindexer) ret = m_fsindexer->purgeFiles(myfiles); #ifndef DISABLE_WEB_INDEXER if (m_doweb && !myfiles.empty() && !(flag & IxFNoWeb)) { if (!m_webindexer) m_webindexer = new WebQueueIndexer(m_config, &m_db, m_updater); if (m_webindexer) { ret = ret && m_webindexer->purgeFiles(myfiles); } else { ret = false; } } #endif // The close would be done in our destructor, but we want status here if (!m_db.close()) { LOGERR("ConfIndexer::purgefiles: error closing database in " << m_config->getDbDir() << "\n"); return false; } return ret; } // Create stemming databases. We also remove those which are not // configured. 
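// For example, with this in the configuration file:
//   indexstemminglanguages = english french
// stem databases are (re)built for english and french, and any other stem
// database present in the index (e.g. one created by hand) is deleted.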
bool ConfIndexer::createStemmingDatabases() { string slangs; bool ret = true; if (m_config->getConfParam("indexstemminglanguages", slangs)) { if (!m_db.open(Rcl::Db::DbUpd)) { LOGERR("ConfIndexer::createStemmingDb: could not open db\n"); addIdxReason("stemming", "could not open db"); return false; } vector langs; stringToStrings(slangs, langs); // Get the list of existing stem dbs from the database (some may have // been manually created, we just keep those from the config vector dblangs = m_db.getStemLangs(); vector::const_iterator it; for (it = dblangs.begin(); it != dblangs.end(); it++) { if (find(langs.begin(), langs.end(), *it) == langs.end()) m_db.deleteStemDb(*it); } ret = ret && m_db.createStemDbs(langs); if (!ret) { addIdxReason("stemming", "stem db creation failed"); } } m_db.close(); return ret; } bool ConfIndexer::createStemDb(const string &lang) { if (!m_db.open(Rcl::Db::DbUpd)) return false; vector langs; stringToStrings(lang, langs); return m_db.createStemDbs(langs); } // The language for the aspell dictionary is handled internally by the aspell // module, either from a configuration variable or the NLS environment. bool ConfIndexer::createAspellDict() { LOGDEB2("ConfIndexer::createAspellDict()\n"); #ifdef RCL_USE_ASPELL // For the benefit of the real-time indexer, we only initialize // noaspell from the configuration once. It can then be set to // true if dictionary generation fails, which avoids retrying // it forever. static int noaspell = -12345; if (noaspell == -12345) { noaspell = false; m_config->getConfParam("noaspell", &noaspell); } if (noaspell) return true; if (!m_db.open(Rcl::Db::DbRO)) { LOGERR("ConfIndexer::createAspellDict: could not open db\n"); return false; } Aspell aspell(m_config); string reason; if (!aspell.init(reason)) { LOGERR("ConfIndexer::createAspellDict: aspell init failed: " << reason << "\n"); noaspell = true; return false; } LOGDEB("ConfIndexer::createAspellDict: creating dictionary\n"); if (!aspell.buildDict(m_db, reason)) { LOGERR("ConfIndexer::createAspellDict: aspell buildDict failed: " << reason << "\n"); addIdxReason("aspell", reason); noaspell = true; return false; } #endif return true; } vector ConfIndexer::getStemmerNames() { return Rcl::Db::getStemmerNames(); } recoll-1.26.3/index/subtreelist.cpp0000644000175000017500000000612713533651561014202 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef TEST_SUBTREELIST #include "autoconfig.h" #include #include "cstr.h" #include "rcldb.h" #include "searchdata.h" #include "rclquery.h" #include "subtreelist.h" #include "log.h" bool subtreelist(RclConfig *config, const string& top, vector& paths) { LOGDEB("subtreelist: top: [" << (top) << "]\n" ); Rcl::Db rcldb(config); if (!rcldb.open(Rcl::Db::DbRO)) { LOGERR("subtreelist: can't open database in [" << (config->getDbDir()) << "]: " << (rcldb.getReason()) << "\n" ); return false; } Rcl::SearchData *sd = new Rcl::SearchData(Rcl::SCLT_OR, cstr_null); std::shared_ptr rq(sd); sd->addClause(new Rcl::SearchDataClausePath(top, false)); Rcl::Query query(&rcldb); query.setQuery(rq); int cnt = query.getResCnt(); for (int i = 0; i < cnt; i++) { Rcl::Doc doc; if (!query.getDoc(i, doc)) break; string path = fileurltolocalpath(doc.url); if (!path.empty()) paths.push_back(path); } return true; } #else // TEST #include #include #include #include #include #include #include using namespace std; #include "subtreelist.h" #include "rclconfig.h" #include "rclinit.h" static char *thisprog; static char usage [] = " : list document paths in this tree\n" ; static void Usage(void) { cerr << thisprog << ": usage:" << endl << usage; exit(1); } static int op_flags; #define OPT_o 0x2 int main(int argc, char **argv) { string top; thisprog = argv[0]; argc--; argv++; while (argc > 0 && **argv == '-') { (*argv)++; if (!(**argv)) /* Cas du "adb - core" */ Usage(); while (**argv) switch (*(*argv)++) { default: Usage(); break; } argc--; argv++; } if (argc < 1) Usage(); top = *argv++;argc--; string reason; RclConfig *config = recollinit(0, 0, 0, reason, 0); if (!config || !config->ok()) { fprintf(stderr, "Recoll init failed: %s\n", reason.c_str()); exit(1); } vector paths; if (!subtreelist(config, top, paths)) { cerr << "subtreelist failed" << endl; exit(1); } for (vector::const_iterator it = paths.begin(); it != paths.end(); it++) { cout << *it << endl; } exit(0); } #endif recoll-1.26.3/index/checkretryfailed.cpp0000644000175000017500000000467413533651561015152 00000000000000/* Copyright (C) 2014 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "checkretryfailed.h" #include "safesysstat.h" #include #include #include "rclconfig.h" #include "execmd.h" #include "log.h" #include "pathut.h" #include "recollindex.h" using namespace std; bool checkRetryFailed(RclConfig *conf, bool record) { #ifdef _WIN32 // Under Windows we only retry if the recollindex program is newer // than the index struct stat st; string path(thisprog); if (path_suffix(path).empty()) { path = path + ".exe"; } if (path_fileprops(path, &st) != 0) { LOGERR("checkRetryFailed: can't stat the program file: " << thisprog << endl); return false; } time_t exetime = st.st_mtime; if (path_fileprops(conf->getDbDir(), &st) != 0) { // Maybe it just does not exist. 
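        // A missing index directory presumably means a first-time run (or a
        // full rebuild), so there are no previously failed files to retry
        // and returning false is harmless.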
LOGDEB("checkRetryFailed: can't stat the index directory: " << conf->getDbDir() << endl); return false; } time_t dbtime = st.st_mtime; return exetime > dbtime; #else string cmd; if (!conf->getConfParam("checkneedretryindexscript", cmd)) { LOGDEB("checkRetryFailed: 'checkneedretryindexscript' " "not set in config\n"); // We could toss a dice ? Say no retry in this case. return false; } // Look in the filters directory (ies). If not found execpath will // be the same as cmd, and we'll let execvp do its thing. string execpath = conf->findFilter(cmd); vector args; if (record) { args.push_back("1"); } ExecCmd ecmd; int status = ecmd.doexec(execpath, args); if (status == 0) { return true; } return false; #endif } recoll-1.26.3/index/webqueue.cpp0000644000175000017500000003751613566744535013477 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "webqueue.h" #include #include #include "safesysstat.h" #include "safeunistd.h" #include "cstr.h" #include "pathut.h" #include "rclutil.h" #include "log.h" #include "fstreewalk.h" #include "webstore.h" #include "circache.h" #include "smallut.h" #include "fileudi.h" #include "internfile.h" #include "wipedir.h" #include "indexer.h" #include "readfile.h" #include "conftree.h" #include "transcode.h" #include "cancelcheck.h" #include #include using namespace std; #define DOTFILEPREFIX "_" // The browser plugin creates a file named .xxx (where xxx is the name // for the main file in the queue), to hold external metadata (http or // created by the plugin). This class reads the .xxx, dotfile, and turns // it into an Rcl::Doc holder class WebQueueDotFile { public: WebQueueDotFile(RclConfig *conf, const string& fn) : m_conf(conf), m_fn(fn) {} // Read input line, strip it of eol and return as c++ string bool readLine(ifstream& input, string& line) { static const int LL = 2048; char cline[LL]; cline[0] = 0; input.getline(cline, LL-1); if (!input.good()) { if (input.bad()) { LOGERR("WebQueueDotFileRead: input.bad()\n"); } return false; } int ll = strlen(cline); while (ll > 0 && (cline[ll-1] == '\n' || cline[ll-1] == '\r')) { cline[ll-1] = 0; ll--; } line.assign(cline, ll); LOGDEB2("WebQueueDotFile:readLine: [" << line << "]\n"); return true; } // Process a Web queue dot file and set interesting stuff in the doc bool toDoc(Rcl::Doc& doc) { string line; ifstream input; input.open(m_fn.c_str(), ios::in); if (!input.good()) { LOGERR("WebQueueDotFile: open failed for [" << m_fn << "]\n"); return false; } // Read the 3 first lines: // - url // - hit type: we only know about Bookmark and WebHistory for now // - content-type. 
if (!readLine(input, line)) return false; doc.url = line; if (!readLine(input, line)) return false; doc.meta[Rcl::Doc::keybght] = line; if (!readLine(input, line)) return false; doc.mimetype = line; // We set the bookmarks mtype as html (the text is empty // anyway), so that the html viewer will be called on 'Open' bool isbookmark = false; if (!stringlowercmp("bookmark", doc.meta[Rcl::Doc::keybght])) { isbookmark = true; doc.mimetype = "text/html"; } string confstr; string ss(" "); // Read the rest: fields and keywords. We do a little // massaging of the input lines, then use a ConfSimple to // parse, and finally insert the key/value pairs into the doc // meta[] array for (;;) { if (!readLine(input, line)) { // Eof hopefully break; } if (line.find("t:") != 0) continue; line = line.substr(2); confstr += line + "\n"; } ConfSimple fields(confstr, 1); vector names = fields.getNames(cstr_null); for (vector::iterator it = names.begin(); it != names.end(); it++) { string value; fields.get(*it, value, cstr_null); if (!value.compare("undefined") || !value.compare("null")) continue; string *valuep = &value; string cvalue; if (isbookmark) { // It appears that bookmarks are stored in the users' // locale charset (not too sure). No idea what to do // for other types, would have to check the plugin. string charset = m_conf->getDefCharset(true); transcode(value, cvalue, charset, "UTF-8"); valuep = &cvalue; } string caname = m_conf->fieldCanon(*it); doc.meta[caname].append(ss + *valuep); } // Finally build the confsimple that we will save to the // cache, from the doc fields. This could also be done in // parallel with the doc.meta build above, but simpler this // way. We need it because not all interesting doc fields are // in the meta array (ie: mimetype, url), and we want // something homogenous and easy to save. for (const auto& entry : doc.meta) { m_fields.set(entry.first, entry.second, cstr_null); } m_fields.set(cstr_url, doc.url, cstr_null); m_fields.set(cstr_bgc_mimetype, doc.mimetype, cstr_null); return true; } RclConfig *m_conf; ConfSimple m_fields; string m_fn; }; // Initialize. Compute paths and create a temporary directory that will be // used by internfile() WebQueueIndexer::WebQueueIndexer(RclConfig *cnf, Rcl::Db *db, DbIxStatusUpdater *updfunc) : m_config(cnf), m_db(db), m_cache(0), m_updater(updfunc), m_nocacheindex(false) { m_queuedir = m_config->getWebQueueDir(); path_catslash(m_queuedir); m_cache = new WebStore(cnf); } WebQueueIndexer::~WebQueueIndexer() { LOGDEB("WebQueueIndexer::~\n"); deleteZ(m_cache); } // Index document stored in the cache. 
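// For a bookmark entry, the metadata document rebuilt from the cached dot
// file data is indexed directly. For other hit types, the cached page data
// is run through FileInterner and the resulting document inherits the
// cached metadata (mimetype, fmtime, url, pcbytes). Rough usage sketch,
// assuming the WebStore holds an entry for the given udi:
//   Rcl::Doc dotdoc; string data, hittype;
//   if (m_cache->getFromCache(udi, dotdoc, data, &hittype)) {
//       // either addOrUpdate(udi, "", dotdoc) or intern 'data' first
//   }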
bool WebQueueIndexer::indexFromCache(const string& udi) { if (!m_db) return false; CancelCheck::instance().checkCancel(); Rcl::Doc dotdoc; string data; string hittype; if (!m_cache || !m_cache->getFromCache(udi, dotdoc, data, &hittype)) { LOGERR("WebQueueIndexer::indexFromCache: cache failed\n"); return false; } if (hittype.empty()) { LOGERR("WebQueueIndexer::index: cc entry has no hit type\n"); return false; } if (!stringlowercmp("bookmark", hittype)) { // Just index the dotdoc dotdoc.meta[Rcl::Doc::keybcknd] = "BGL"; return m_db->addOrUpdate(udi, cstr_null, dotdoc); } else { Rcl::Doc doc; FileInterner interner(data, m_config, FileInterner::FIF_doUseInputMimetype, dotdoc.mimetype); FileInterner::Status fis; try { fis = interner.internfile(doc); } catch (CancelExcept) { LOGERR("WebQueueIndexer: interrupted\n"); return false; } if (fis != FileInterner::FIDone) { LOGERR("WebQueueIndexer: bad status from internfile\n"); return false; } doc.mimetype = dotdoc.mimetype; doc.fmtime = dotdoc.fmtime; doc.url = dotdoc.url; doc.pcbytes = dotdoc.pcbytes; doc.sig.clear(); doc.meta[Rcl::Doc::keybcknd] = "BGL"; return m_db->addOrUpdate(udi, cstr_null, doc); } } void WebQueueIndexer::updstatus(const string& udi) { if (m_updater) { ++(m_updater->status.docsdone); if (m_updater->status.dbtotdocs < m_updater->status.docsdone) m_updater->status.dbtotdocs = m_updater->status.docsdone; m_updater->status.fn = udi; m_updater->update(); } } bool WebQueueIndexer::index() { if (!m_db) return false; LOGDEB("WebQueueIndexer::processqueue: [" << m_queuedir << "]\n"); m_config->setKeyDir(m_queuedir); if (!path_makepath(m_queuedir, 0700)) { LOGERR("WebQueueIndexer:: can't create queuedir [" << m_queuedir << "] errno " << errno << "\n"); return false; } if (!m_cache || !m_cache->cc()) { LOGERR("WebQueueIndexer: cache initialization failed\n"); return false; } CirCache *cc = m_cache->cc(); // First check/index files found in the cache. If the index was reset, // this actually does work, else it sets the existence flags (avoid // purging). We don't do this when called from indexFiles if (!m_nocacheindex) { bool eof; if (!cc->rewind(eof)) { // rewind can return eof if the cache is empty if (!eof) return false; } int nentries = 0; do { string udi; if (!cc->getCurrentUdi(udi)) { LOGERR("WebQueueIndexer:: cache file damaged\n"); break; } if (udi.empty()) continue; if (m_db->needUpdate(udi, cstr_null)) { try { // indexFromCache does a CirCache::get(). We could // arrange to use a getCurrent() instead, would be more // efficient indexFromCache(udi); updstatus(udi); } catch (CancelExcept) { LOGERR("WebQueueIndexer: interrupted\n"); return false; } } nentries++; } while (cc->next(eof)); } // Finally index the queue FsTreeWalker walker(FsTreeWalker::FtwNoRecurse); walker.addSkippedName(DOTFILEPREFIX "*"); FsTreeWalker::Status status = walker.walk(m_queuedir, *this); LOGDEB("WebQueueIndexer::processqueue: done: status " << status << "\n"); return true; } // Index a list of files (sent by the real time monitor) bool WebQueueIndexer::indexFiles(list& files) { LOGDEB("WebQueueIndexer::indexFiles\n"); if (!m_db) { LOGERR("WebQueueIndexer::indexfiles no db??\n"); return false; } for (list::iterator it = files.begin(); it != files.end();) { if (it->empty()) {//?? 
it++; continue; } string father = path_getfather(*it); if (father.compare(m_queuedir)) { LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nq)\n"); it++; continue; } // Pb: we are often called with the dot file, before the // normal file exists, and sometimes never called for the // normal file afterwards (ie for bookmarks where the normal // file is empty). So we perform a normal queue run at the end // of the function to catch older stuff. Still this is not // perfect, sometimes some files will not be indexed before // the next run. string fn = path_getsimple(*it); if (fn.empty() || fn.at(0) == '.') { it++; continue; } struct stat st; if (path_fileprops(*it, &st) != 0) { LOGERR("WebQueueIndexer::indexfiles: cant stat [" << *it << "]\n"); it++; continue; } if (!S_ISREG(st.st_mode)) { LOGDEB("WebQueueIndexer::indexfiles: skipping [" << *it << "] (nr)\n"); it++; continue; } processone(*it, &st, FsTreeWalker::FtwRegular); it = files.erase(it); } m_nocacheindex = true; index(); // Note: no need to reset nocacheindex, we're in the monitor now return true; } FsTreeWalker::Status WebQueueIndexer::processone(const string &path, const struct stat *stp, FsTreeWalker::CbFlag flg) { if (!m_db) //?? return FsTreeWalker::FtwError; bool dounlink = false; if (flg != FsTreeWalker::FtwRegular) return FsTreeWalker::FtwOk; string dotpath = path_cat(path_getfather(path), string(DOTFILEPREFIX) + path_getsimple(path)); LOGDEB("WebQueueIndexer: prc1: [" << path << "]\n"); WebQueueDotFile dotfile(m_config, dotpath); Rcl::Doc dotdoc; string udi, udipath; if (!dotfile.toDoc(dotdoc)) goto out; //dotdoc.dump(1); // Have to use the hit type for the udi, because the same url can exist // as a bookmark or a page. udipath = path_cat(dotdoc.meta[Rcl::Doc::keybght], url_gpath(dotdoc.url)); make_udi(udipath, cstr_null, udi); LOGDEB("WebQueueIndexer: prc1: udi [" << udi << "]\n"); char ascdate[30]; sprintf(ascdate, "%ld", long(stp->st_mtime)); if (!stringlowercmp("bookmark", dotdoc.meta[Rcl::Doc::keybght])) { // For bookmarks, we just index the doc that was built from the // metadata. if (dotdoc.fmtime.empty()) dotdoc.fmtime = ascdate; dotdoc.pcbytes = lltodecstr(stp->st_size); // Document signature for up to date checks: none. dotdoc.sig.clear(); dotdoc.meta[Rcl::Doc::keybcknd] = "BGL"; if (!m_db->addOrUpdate(udi, cstr_null, dotdoc)) return FsTreeWalker::FtwError; } else { Rcl::Doc doc; // Store the dotdoc fields in the future doc. In case someone wants // to use fields generated by the browser plugin like inurl doc.meta = dotdoc.meta; FileInterner interner(path, stp, m_config, FileInterner::FIF_doUseInputMimetype, &dotdoc.mimetype); FileInterner::Status fis; try { fis = interner.internfile(doc); } catch (CancelExcept) { LOGERR("WebQueueIndexer: interrupted\n"); goto out; } if (fis != FileInterner::FIDone && fis != FileInterner::FIAgain) { LOGERR("WebQueueIndexer: bad status from internfile\n"); // TOBEDONE: internfile can return FIAgain here if it is // paging a big text file, we should loop. Means we're // only indexing the first page for text/plain files // bigger than the page size (dlft: 1MB) for now. goto out; } if (doc.fmtime.empty()) doc.fmtime = ascdate; dotdoc.fmtime = doc.fmtime; doc.pcbytes = lltodecstr(stp->st_size); // Document signature for up to date checks: none. 
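        // The queue file is deleted once processed (see the unlink calls
        // at the end of this function), so there is no stable original to
        // compare against for up-to-date checks.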
doc.sig.clear(); doc.url = dotdoc.url; doc.meta[Rcl::Doc::keybcknd] = "BGL"; if (!m_db->addOrUpdate(udi, cstr_null, doc)) return FsTreeWalker::FtwError; } // Copy to cache { // doc fields not in meta, needing saving to the cache dotfile.m_fields.set("fmtime", dotdoc.fmtime, cstr_null); // fbytes is used for historical reasons, should be pcbytes, but makes // no sense to change. dotfile.m_fields.set(cstr_fbytes, dotdoc.pcbytes, cstr_null); dotfile.m_fields.set("udi", udi, cstr_null); string fdata; file_to_string(path, fdata); if (!m_cache || !m_cache->cc()) { LOGERR("WebQueueIndexer: cache initialization failed\n"); goto out; } if (!m_cache->cc()->put(udi, &dotfile.m_fields, fdata, 0)) { LOGERR("WebQueueIndexer::prc1: cache_put failed; " << m_cache->cc()->getReason() << "\n"); goto out; } } updstatus(udi); dounlink = true; out: if (dounlink) { if (unlink(path.c_str())) { LOGSYSERR("WebQueueIndexer::processone", "unlink", path); } if (unlink(dotpath.c_str())) { LOGSYSERR("WebQueueIndexer::processone", "unlink", dotpath); } } return FsTreeWalker::FtwOk; } recoll-1.26.3/index/exefetcher.h0000644000175000017500000000435313533651561013423 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _EXEFETCHER_H_INCLUDED_ #define _EXEFETCHER_H_INCLUDED_ #include #include "fetcher.h" class RclConfig; /** * A fetcher which works by executing external programs, defined in a * configuration file. * At this point this is only used with the sample python mbox indexer, * to show how recoll can work with completely external data extraction code. * * Configuration: The external indexer sets the 'rclbes' recoll field * (backend definition, can be FS or BGL -web- in standard recoll) to * a unique value (e.g. MBOX for the python sample). A 'backends' file * in the configuration directory then links the 'rclbes' value with * commands to execute for fetching the data, which recoll uses at * query time for previewing and opening the document. */ class EXEDocFetcher : public DocFetcher { public: class Internal; EXEDocFetcher(const Internal&); virtual ~EXEDocFetcher() {} virtual bool fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out); /** Calls stat to retrieve file signature data */ virtual bool makesig(RclConfig* cnf, const Rcl::Doc& idoc,std::string& sig); friend std::unique_ptr exeDocFetcherMake(RclConfig *, const std::string&); private: Internal *m; }; // Lookup bckid in the config and create an appropriate fetcher. 
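// Illustrative query-time usage sketch ('MBOX' is the backend id used by
// the sample external indexer mentioned in the class comment; adjust to
// whatever your external indexer stores in the 'rclbes' field):
//   auto fetcher = exeDocFetcherMake(config, "MBOX");
//   RawDoc raw;
//   if (fetcher && fetcher->fetch(config, idoc, raw)) {
//       // raw now holds the data retrieved by the external command
//   }
// The commands actually executed are read from the 'backends' file in the
// configuration directory, as described in the class comment above.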
std::unique_ptr<EXEDocFetcher> exeDocFetcherMake(RclConfig *config, const std::string& bckid); #endif /* _EXEFETCHER_H_INCLUDED_ */ recoll-1.26.3/index/recollindex.h0000644000175000017500000000362113533651561013606 00000000000000/* Copyright (C) 2009 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _recollindex_h_included_ #define _recollindex_h_included_ #include <list> #include <string> /** Helper methods in recollindex.cpp for initial checks/setup to index * a list of files (either from the monitor or the command line) */ class RclConfig; extern bool indexfiles(RclConfig *config, std::list<std::string> &filenames); extern bool purgefiles(RclConfig *config, std::list<std::string> &filenames); extern bool createAuxDbs(RclConfig *config); /** * Helper method for executing the recoll-we (new WebExtensions plugin) helper * script. This moves files from the browser download directory (only * place where the browser accepts to create them), to the web queue * dir. This keeps the c++ code compatible with old and new addon. * The script is executed before a batch pass, or from time to time in * the monitor, if web processing is enabled. */ extern bool runWebFilesMoverScript(RclConfig *); extern int stopindexing; // Try to explain what went wrong... extern void addIdxReason(std::string who, std::string reason); class ReExec; extern ReExec *o_reexec; extern std::string thisprog; #endif /* _recollindex_h_included_ */ recoll-1.26.3/index/idxstatus.cpp0000644000175000017500000000261113533651561013657 00000000000000/* Copyright (C) 2017-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/ #include "idxstatus.h" #include "rclconfig.h" #include "conftree.h" void readIdxStatus(RclConfig *config, DbIxStatus &status) { ConfSimple cs(config->getIdxStatusFile().c_str(), 1); string val; cs.get("phase", val); status.phase = DbIxStatus::Phase(atoi(val.c_str())); cs.get("fn", status.fn); cs.get("docsdone", &status.docsdone); cs.get("filesdone", &status.filesdone); cs.get("fileerrors", &status.fileerrors); cs.get("dbtotdocs", &status.dbtotdocs); cs.get("totfiles", &status.totfiles); string shm("0"); cs.get("hasmonitor", shm); status.hasmonitor = stringToBool(shm); } recoll-1.26.3/index/fsindexer.h0000644000175000017500000001245213533651561013267 00000000000000/* Copyright (C) 2009-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _fsindexer_h_included_ #define _fsindexer_h_included_ #include #include #include "indexer.h" #include "fstreewalk.h" #ifdef IDX_THREADS #include "workqueue.h" #endif // IDX_THREADS class DbIxStatusUpdater; class FIMissingStore; struct stat; class DbUpdTask; class InternfileTask; namespace Rcl { class Doc; } /** Index selected parts of the file system Tree indexing: we inherits FsTreeWalkerCB so that, the processone() method is called by the file-system tree walk code for each file and directory. We keep all state needed while indexing, and finally call the methods to purge the db of stale entries and create the stemming databases. Single file(s) indexing: there are also calls to index or purge lists of files. No database purging or stem db updating in this case. */ class FsIndexer : public FsTreeWalkerCB { public: /** Constructor does nothing but store parameters * * @param cnf Configuration data * @param updfunc Status updater callback */ FsIndexer(RclConfig *cnf, Rcl::Db *db, DbIxStatusUpdater *updfunc = 0); virtual ~FsIndexer(); /** * Top level file system tree index method for updating a given database. * * We open the database, * then call a file system walk for each top-level directory. */ bool index(int flags); /** Index a list of files. No db cleaning or stemdb updating */ bool indexFiles(std::list &files, int f = ConfIndexer::IxFNone); /** Purge a list of files. */ bool purgeFiles(std::list &files); /** Tree walker callback method */ FsTreeWalker::Status processone(const string &fn, const struct stat *, FsTreeWalker::CbFlag); /** Make signature for file up to date checks */ static void makesig(const struct stat *stp, string& out); private: class PurgeCandidateRecorder { public: PurgeCandidateRecorder() : dorecord(false) {} void setRecord(bool onoff) { dorecord = onoff; } void record(const string& udi) { // This test does not need to be protected: the value is set at // init and never changed. 
if (!dorecord) return; #ifdef IDX_THREADS std::unique_lock locker(mutex); #endif udis.push_back(udi); } const vector& getCandidates() { return udis; } private: #ifdef IDX_THREADS std::mutex mutex; #endif bool dorecord; std::vector udis; }; bool launchAddOrUpdate(const std::string& udi, const std::string& parent_udi, Rcl::Doc& doc); FsTreeWalker m_walker; RclConfig *m_config; Rcl::Db *m_db; string m_reason; DbIxStatusUpdater *m_updater; // Top/start directories list std::vector m_tdl; // Store for missing filters and associated mime types FIMissingStore *m_missing; // Recorder for files that may need subdoc purging. PurgeCandidateRecorder m_purgeCandidates; // The configuration can set attribute fields to be inherited by // all files in a file system area. Ie: set "rclaptg = thunderbird" // inside ~/.thunderbird. The boolean is set at init to avoid // further wasteful processing if no local fields are set. // This should probably moved to internfile so that the // localfields get exactly the same processing as those generated by the // filters (as was done for metadatacmds fields) bool m_havelocalfields; string m_slocalfields; map m_localfields; // Activate detection of xattr-only document updates. Experimental, so // needs a config option bool m_detectxattronly; // No retry of previously failed files bool m_noretryfailed; #ifdef IDX_THREADS friend void *FsIndexerDbUpdWorker(void*); friend void *FsIndexerInternfileWorker(void*); WorkQueue m_iwqueue; WorkQueue m_dwqueue; bool m_haveInternQ; bool m_haveSplitQ; RclConfig *m_stableconfig; #endif // IDX_THREADS bool init(); void localfieldsfromconf(); void setlocalfields(const map& flds, Rcl::Doc& doc); string getDbDir() {return m_config->getDbDir();} FsTreeWalker::Status processonefile(RclConfig *config, const string &fn, const struct stat *, const map& localfields); }; #endif /* _fsindexer_h_included_ */ recoll-1.26.3/index/webqueuefetcher.h0000644000175000017500000000230113533651561014453 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _WEBQUEUEFETCHER_H_INCLUDED_ #define _WEBQUEUEFETCHER_H_INCLUDED_ #include "fetcher.h" /** * The WEB queue cache fetcher: */ class WQDocFetcher : public DocFetcher{ virtual bool fetch(RclConfig* cnf, const Rcl::Doc& idoc, RawDoc& out); virtual bool makesig(RclConfig* cnf, const Rcl::Doc& idoc, std::string& sig); virtual ~WQDocFetcher() {} }; #endif /* _WEBQUEUEFETCHER_H_INCLUDED_ */ recoll-1.26.3/sampleconf/0000755000175000017500000000000013570165410012215 500000000000000recoll-1.26.3/sampleconf/recoll.qss0000644000175000017500000000360113533651561014153 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* This is an embryonic example Qt style sheet for Recoll. It is not really supposed to be less ugly than the default look, it's just provided in hope that someone with better taste will want to step in, or you can use it to indulge your preference for bright red backgrounds... If you do want to use it, and encounter a difficulty due to (lack of) widget object names or whatever, please get in touch, I'll fix what needs to be. Please check http://developer.qt.nokia.com/doc/qt-4.8/stylesheet.html for Qt style sheet information. */ /* Use a light grey background by default (lighter than the usual qt default): */ * { background: #eeeeee; } /* Use a light yellow background for all text display areas: */ QComboBox[editable="true"], QTextEdit, QLineEdit, QTextBrowser, QTableView, QWebView, QPlainTextEdit { background: #ffffee; color: black; } /* Keep a white background for the QTextEdit inside the Preview window This is rather arbitrary, just to show how to do it: */ Preview QTextEdit { background: white; color: black; font-family: Serif; font-size: 12pt; } recoll-1.26.3/sampleconf/recoll.conf0000644000175000017500000012346513566424763014315 00000000000000# Recoll main configuration file, recoll.conf # The XML tags in the comments are used to help produce the documentation # from the sample/reference file, and not at all at run time, where # comments are just comments. Edit at will. # This typically lives in $prefix/share/recoll/examples and provides # default values. You can override selected parameters by adding assigments # to ~/.recoll/recoll.conf (or $RECOLL_CONFDIR/recoll.conf) # # Most of the important values in this file can be set from the GUI # configuration menus, which may be an easier approach than direct editing. # Parameters affecting what documents we # index # Space-separated list of files or # directories to recursively index.Default to ~ (indexes # $HOME). You can use symbolic links in the list, they will be followed, # independantly of the value of the followLinks variable. topdirs = ~ # # Space-separated list of files or directories to monitor for # updates. # When running the real-time indexer, this allows monitoring only a # subset of the whole indexed area. 
The elements must be included in the # tree defined by the 'topdirs' members. #monitordirs= # # # Files and directories which should be ignored. # White space separated list of wildcard patterns (simple ones, not paths, # must contain no / ), which will be tested against file and directory # names. The list in the default configuration does not exclude hidden # directories (names beginning with a dot), which means that it may index # quite a few things that you do not want. On the other hand, email user # agents like Thunderbird usually store messages in hidden directories, and # you probably want this indexed. One possible solution is to have ".*" in # "skippedNames", and add things like "~/.thunderbird" "~/.evolution" to # "topdirs". Not even the file names are indexed for patterns in this # list, see the "noContentSuffixes" variable for an alternative approach # which indexes the file names. Can be redefined for any # subtree. skippedNames = #* CVS Cache cache* .cache caughtspam tmp \ .thumbnails .svn \ *~ .beagle .git .hg .bzr loop.ps .xsession-errors \ .recoll* xapiandb recollrc recoll.conf # # # List of name endings to remove from the default skippedNames # list. skippedNames- = # # # List of name endings to add to the default skippedNames # list. skippedNames+ = # # Regular file name filter patterns # If this is set, only the file names not in skippedNames and # matching one of the patterns will be considered for indexing. Can be # redefined per subtree. Does not apply to directories. onlyNames = # # # List of name endings (not necessarily dot-separated suffixes) for # which we don't try MIME type identification, and don't uncompress or # index content.Only the names will be indexed. This # complements the now obsoleted recoll_noindex list from the mimemap file, # which will go away in a future release (the move from mimemap to # recoll.conf allows editing the list through the GUI). This is different # from skippedNames because these are name ending matches only (not # wildcard patterns), and the file name itself gets indexed normally. This # can be redefined for subdirectories. noContentSuffixes = .md5 .map \ .o .lib .dll .a .sys .exe .com \ .mpp .mpt .vsd \ .img .img.gz .img.bz2 .img.xz .image .image.gz .image.bz2 .image.xz \ .dat .bak .rdf .log.gz .log .db .msf .pid \ ,v ~ # # # # List of name endings to remove from the default noContentSuffixes # list. noContentSuffixes- = # # # List of name endings to add to the default noContentSuffixes # list. noContentSuffixes+ = # # # Absolute paths we should not go into. # Space-separated list of wildcard expressions for absolute # filesystem paths. Must be defined at the top level of the configuration # file, not in a subsection. Can contain files and directories. The database and # configuration directories will automatically be added. The expressions # are matched using 'fnmatch(3)' with the FNM_PATHNAME flag set by # default. This means that '/' characters must be matched explicitely. You # can set 'skippedPathsFnmPathname' to 0 to disable the use of FNM_PATHNAME # (meaning that '/*/dir3' will match '/dir1/dir2/dir3'). The default value # contains the usual mount point for removable media to remind you that it # is a bad idea to have Recoll work on these (esp. with the monitor: media # gets indexed on mount, all data gets erased on unmount). Explicitely # adding '/media/xxx' to the 'topdirs' variable will override # this. skippedPaths = /media # Set to 0 to # override use of FNM_PATHNAME for matching skipped # paths. 
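# Example (illustrative): with the default FNM_PATHNAME behaviour,
# "/home/*/tmp" only matches one path level (e.g. /home/me/tmp). Setting
# skippedPathsFnmPathname = 0 would let "/*/tmp" also match deeper paths
# such as /home/me/build/tmp.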
#skippedPathsFnmPathname = 1 # # # File name which will cause its parent directory to be skipped. # Any directory containing a file with this name will be skipped as # if it was part of the skippedPaths list. Ex: .recoll-noindex #nowalkfn = .recoll-noindex # # # skippedPaths equivalent specific to # real time indexing.This enables having parts of the tree # which are initially indexed but not monitored. If daemSkippedPaths is # not set, the daemon uses skippedPaths. #daemSkippedPaths = # # # Use skippedNames inside Zip archives.Fetched # directly by the rclzip handler. Skip the patterns defined by skippedNames # inside Zip archives. Can be redefined for subdirectories. # See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html # #zipUseSkippedNames = 0 # # # Space-separated list of wildcard expressions for names that should # be ignored inside zip archives.This is used directly by # the zip handler. If zipUseSkippedNames is not set, zipSkippedNames # defines the patterns to be skipped inside archives. If zipUseSkippedNames # is set, the two lists are concatenated and used. Can be redefined for # subdirectories. # See https://www.lesbonscomptes.com/recoll/faqsandhowtos/FilteringOutZipArchiveMembers.html # #zipSkippedNames = # Follow symbolic links during # indexing.The default is to ignore symbolic links to avoid # multiple indexing of linked files. No effort is made to avoid duplication # when this option is set to true. This option can be set individually for # each of the 'topdirs' members by using sections. It can not be changed # below the 'topdirs' level. Links in the 'topdirs' list itself are always # followed. #followLinks = 0 # Restrictive list of # indexed mime types.Normally not set (in which case all # supported types are indexed). If it is set, only the types from the list # will have their contents indexed. The names will be indexed anyway if # indexallfilenames is set (default). MIME type names should be taken from # the mimemap file (the values may be different from xdg-mime or file -i # output in some cases). Can be redefined for subtrees. #indexedmimetypes = # List of excluded MIME # types.Lets you exclude some types from indexing. MIME type # names should be taken from the mimemap file (the values may be different # from xdg-mime or file -i output in some cases) Can be redefined for # subtrees. #excludedmimetypes = # # Don't compute md5 for these types. # md5 checksums are used only for deduplicating results, and can be # very expensive to compute on multimedia or other big files. This list # lets you turn off md5 computation for selected types. It is global (no # redefinition for subtrees). At the moment, it only has an effect for # external handlers (exec and execm). The file types can be specified by # listing either MIME types (e.g. audio/mpeg) or handler names # (e.g. rclaudio). # nomd5types = rclaudio # Size limit for compressed # files.We need to decompress these in a # temporary directory for identification, which can be wasteful in some # cases. Limit the waste. Negative means no limit. 0 results in no # processing of any compressed file. Default 50 MB. compressedfilemaxkbs = 100000 # Size limit for text # files.Mostly for skipping monster # logs. Default 20 MB. textfilemaxmbs = 20 # Index the file names of # unprocessed filesIndex the names of files the contents of # which we don't index because of an excluded or unsupported MIME # type. 
indexallfilenames = 1 # Use a system command # for file MIME type guessing as a final step in file type # identificationThis is generally useful, but will usually # cause the indexing of many bogus 'text' files. See 'systemfilecommand' # for the command used. usesystemfilecommand = 1 # Command used to guess # MIME types if the internal methods failsThis should be a # "file -i" workalike. The file path will be added as a last parameter to # the command line. "xdg-mime" works better than the traditional "file" # command, and is now the configured default (with a hard-coded fallback to # "file") systemfilecommand = xdg-mime query filetype # Decide if we process the # Web queue.The queue is a directory where the Recoll Web # browser plugins create the copies of visited pages. processwebqueue = 0 # Page size for text # files.If this is set, text/plain files will be divided # into documents of approximately this size. Will reduce memory usage at # index time and help with loading data in the preview window at query # time. Particularly useful with very big files, such as application or # system logs. Also see textfilemaxmbs and # compressedfilemaxkbs. textfilepagekbs = 1000 # Size limit for archive # members.This is passed to the filters in the environment # as RECOLL_FILTER_MAXMEMBERKB. membermaxkbs = 50000 # Parameters affecting how we generate # terms and organize the index # Changing some of these parameters will imply a full # reindex. Also, when using multiple indexes, it may not make sense # to search indexes that don't share the values for these parameters, # because they usually affect both search and index operations. # Decide if we store # character case and diacritics in the index.If we do, # searches sensitive to case and diacritics can be performed, but the index # will be bigger, and some marginal weirdness may sometimes occur. The # default is a stripped index. When using multiple indexes for a search, # this parameter must be defined identically for all. Changing the value # implies an index reset. indexStripChars = 1 # Decide if we store the # documents' text content in the index.Storing the text # allows extracting snippets from it at query time, instead of building # them from index position data. # # Newer Xapian index formats have rendered our use of positions list # unacceptably slow in some cases. The last Xapian index format with good # performance for the old method is Chert, which is default for 1.2, still # supported but not default in 1.4 and will be dropped in 1.6. # # The stored document text is translated from its original format to UTF-8 # plain text, but not stripped of upper-case, diacritics, or punctuation # signs. Storing it increases the index size by 10-20% typically, but also # allows for nicer snippets, so it may be worth enabling it even if not # strictly needed for performance if you can afford the space. # # The variable only has an effect when creating an index, meaning that the # xapiandb directory must not exist yet. Its exact effect depends on the # Xapian version. # # For Xapian 1.4, if the variable is set to 0, the Chert format will be # used, and the text will not be stored. If the variable is 1, Glass will # be used, and the text stored. # # For Xapian 1.2, and for versions after 1.5 and newer, the index format is # always the default, but the variable controls if the text is stored or # not, and the abstract generation method. 
With Xapian 1.5 and later, and # the variable set to 0, abstract generation may be very slow, but this # setting may still be useful to save space if you do not use abstract # generation at all. # indexStoreDocText = 1 # Decides if terms will be # generated for numbers.For example "123", "1.5e6", # 192.168.1.4, would not be indexed if nonumbers is set ("value123" would # still be). Numbers are often quite interesting to search for, and this # should probably not be set except for special situations, ie, scientific # documents with huge amounts of numbers in them, where setting nonumbers # will reduce the index size. This can only be set for a whole index, not # for a subtree. #nonumbers = 0 # Determines if we index # 'coworker' also when the input is 'co-worker'.This is new # in version 1.22, and on by default. Setting the variable to off allows # restoring the previous behaviour. #dehyphenate = 1 # # Process backslash as normal letter # This may make sense for people wanting to index TeX commands as # such but is not of much general use. # #backslashasletter = 0 # # Maximum term length. # Words longer than this will be discarded. # The default is 40 and used to be hard-coded, but it can now be # adjusted. You need an index reset if you change the value. # #maxtermlength = 40 # Decides if specific East Asian # (Chinese Korean Japanese) characters/word splitting is turned # off.This will save a small amount of CPU if you have no CJK # documents. If your document base does include such text but you are not # interested in searching it, setting nocjk may be a # significant time and space saver. #nocjk = 0 # This lets you adjust the size of # n-grams used for indexing CJK text.The default value of 2 is # probably appropriate in most cases. A value of 3 would allow more precision # and efficiency on longer words, but the index will be approximately twice # as large. #cjkngramlen = 2 # # # Languages for which to create stemming expansion # data.Stemmer names can be found by executing 'recollindex # -l', or this can also be set from a list in the GUI. indexstemminglanguages = english # Default character # set.This is used for files which do not contain a # character set definition (e.g.: text/plain). Values found inside files, # e.g. a 'charset' tag in HTML documents, will override it. If this is not # set, the default character set is the one defined by the NLS environment # ($LC_ALL, $LC_CTYPE, $LANG), or ultimately iso-8859-1 (cp-1252 in fact). # If for some reason you want a general default which does not match your # LANG and is not 8859-1, use this variable. This can be redefined for any # sub-directory. #defaultcharset = iso-8859-1 # A list of characters, # encoded in UTF-8, which should be handled specially # when converting text to unaccented lowercase.For # example, in Swedish, the letter a with diaeresis has full alphabet # citizenship and should not be turned into an a. # Each element in the space-separated list has the special character as # first element and the translation following. The handling of both the # lowercase and upper-case versions of a character should be specified, as # appartenance to the list will turn-off both standard accent and case # processing. The value is global and affects both indexing and querying. # Examples: # Swedish: # unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl åå Åå # . 
German: # unac_except_trans = ää Ää öö Öö üü Üü ßss œoe Œoe æae Æae ffff fifi flfl # In French, you probably want to decompose oe and ae and nobody would type # a German ß # unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl # . The default for all until someone protests follows. These decompositions # are not performed by unac, but it is unlikely that someone would type the # composed forms in a search. # unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl unac_except_trans = ßss œoe Œoe æae Æae ffff fifi flfl # Overrides the default # character set for email messages which don't specify # one.This is mainly useful for readpst (libpst) dumps, # which are utf-8 but do not say so. #maildefcharset= # Set fields on all files # (usually of a specific fs area).Syntax is the usual: # name = value ; attr1 = val1 ; [...] # value is empty so this needs an initial semi-colon. This is useful, e.g., # for setting the rclaptg field for application selection inside # mimeview. #[/some/app/directory] #localfields = ; rclaptg = someapp; otherfield = somevalue # Use mtime instead of # ctime to test if a file has been modified.The time is used # in addition to the size, which is always used. # Setting this can reduce re-indexing on systems where extended attributes # are used (by some other application), but not indexed, because changing # extended attributes only affects ctime. # Notes: # - This may prevent detection of change in some marginal file rename cases # (the target would need to have the same size and mtime). # - You should probably also set noxattrfields to 1 in this case, except if # you still prefer to perform xattr indexing, for example if the local # file update pattern makes it of value (as in general, there is a risk # for pure extended attributes updates without file modification to go # undetected). Perform a full index reset after changing this. # testmodifusemtime = 0 # Disable extended attributes # conversion to metadata fields.This probably needs to be # set if testmodifusemtime is set. noxattrfields = 0 # Define commands to # gather external metadata, e.g. tmsu tags. # There can be several entries, separated by semi-colons, each defining # which field name the data goes into and the command to use. Don't forget the # initial semi-colon. All the field names must be different. You can use # aliases in the "field" file if necessary. # As a not too pretty hack conceded to convenience, any field name # beginning with "rclmulti" will be taken as an indication that the command # returns multiple field values inside a text blob formatted as a recoll # configuration file ("fieldname = fieldvalue" lines). The rclmultixx name # will be ignored, and field names and values will be parsed from the data. # Example: metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf %f # #[/some/area/of/the/fs] #metadatacmds = ; tags = tmsu tags %f; rclmulti1 = cmdOutputsConf %f # Parameters affecting where and how we store # things # # # Top directory for Recoll data.Recoll data # directories are normally located relative to the configuration directory # (e.g. ~/.recoll/xapiandb, ~/.recoll/mboxcache). If 'cachedir' is set, the # directories are stored under the specified value instead (e.g. if # cachedir is ~/.cache/recoll, the default dbdir would be # ~/.cache/recoll/xapiandb). This affects dbdir, webcachedir, # mboxcachedir, aspellDicDir, which can still be individually specified to # override cachedir. 
Note that if you have multiple configurations, each # must have a different cachedir, there is no automatic computation of a # subpath under cachedir. #cachedir = ~/.cache/recoll # Maximum file system occupation # over which we stop indexing.The value is a percentage, # corresponding to what the "Capacity" df output column shows. The default # value is 0, meaning no checking. maxfsoccuppc = 0 # Xapian database directory # location.This will be created on first indexing. If the # value is not an absolute path, it will be interpreted as relative to # cachedir if set, or the configuration directory (-c argument or # $RECOLL_CONFDIR). If nothing is specified, the default is then # ~/.recoll/xapiandb/ dbdir = xapiandb # # # Name of the scratch file where the indexer process updates its # status.Default: idxstatus.txt inside the configuration # directory. #idxstatusfile = idxstatus.txt # # # Directory location for storing mbox message offsets cache # files.This is normally 'mboxcache' under cachedir if set, # or else under the configuration directory, but it may be useful to share # a directory between different configurations. #mboxcachedir = mboxcache # # # Minimum mbox file size over which we cache the offsets. # There is really no sense in caching offsets for small files. The # default is 5 MB. #mboxcacheminmbs = 5 # # # Maximum mbox member message size in megabytes. # Size over which we assume that the mbox format is bad or we # misinterpreted it, at which point we just stop processing the file. # #mboxmaxmsgmbs = 100 # # # Directory where we store the archived web pages. # This is only used by the web history indexing code # Default: cachedir/webcache if cachedir is set, else # $RECOLL_CONFDIR/webcache webcachedir = webcache # # Maximum size in MB of the Web archive. # This is only used by the web history indexing code. # Default: 40 MB. # Reducing the size will not physically truncate the file. webcachemaxmbs = 40 # # # The path to the Web indexing queue.This used to be # hard-coded in the old plugin as ~/.recollweb/ToIndex so there would be no # need or possibility to change it, but the WebExtensions plugin now downloads # the files to the user Downloads directory, and a script moves them to # webqueuedir. The script reads this value from the config so it has become # possible to change it. #webqueuedir = ~/.recollweb/ToIndex # # # The path to browser downloads directory.This is # where the new browser add-on extension has to create the files. They are # then moved by a script to webqueuedir. #webdownloadsdir = ~/Downloads # # # Aspell dictionary storage directory location. The # aspell dictionary (aspdict.(lang).rws) is normally stored in the # directory specified by cachedir if set, or under the configuration # directory. #aspellDicDir = # # # Directory location for executable input handlers.If # RECOLL_FILTERSDIR is set in the environment, we use it instead. Defaults # to $prefix/share/recoll/filters. Can be redefined for # subdirectories. #filtersdir = /path/to/my/filters # # # Directory location for icons.The only reason to # change this would be if you want to change the icons displayed in the # result list. Defaults to $prefix/share/recoll/images #iconsdir = /path/to/my/icons # Parameters affecting indexing performance and # resource usage # # # Threshold (megabytes of new data) where we flush from memory to # disk index. Setting this allows some control over memory # usage by the indexer process. 
A value of 0 means no explicit flushing, # which lets Xapian perform its own thing, meaning flushing every # $XAPIAN_FLUSH_THRESHOLD documents created, modified or deleted: as memory # usage depends on average document size, not only document count, the # Xapian approach is is not very useful, and you should let Recoll manage # the flushes. The program compiled value is 0. The configured default # value (from this file) is now 50 MB, and should be ok in many cases. # You can set it as low as 10 to conserve memory, but if you are looking # for maximum speed, you may want to experiment with values between 20 and # 200. In my experience, values beyond this are always counterproductive. If # you find otherwise, please drop me a note. idxflushmb = 50 # # # Maximum external filter execution time in # seconds.Default 1200 (20mn). Set to 0 for no limit. This # is mainly to avoid infinite loops in postscript files # (loop.ps) filtermaxseconds = 1200 # # # Maximum virtual memory space for filter processes # (setrlimit(RLIMIT_AS)), in megabytes. Note that this # includes any mapped libs (there is no reliable Linux way to limit the # data space only), so we need to be a bit generous here. Anything over # 2000 will be ignored on 32 bits machines. filtermaxmbytes = 2000 # # # Stage input queues configuration. There are three # internal queues in the indexing pipeline stages (file data extraction, # terms generation, index update). This parameter defines the queue depths # for each stage (three integer values). If a value of -1 is given for a # given stage, no queue is used, and the thread will go on performing the # next stage. In practise, deep queues have not been shown to increase # performance. Default: a value of 0 for the first queue tells Recoll to # perform autoconfiguration based on the detected number of CPUs (no need # for the two other values in this case). Use thrQSizes = -1 -1 -1 to # disable multithreading entirely. thrQSizes = 0 # # # Number of threads used for each indexing stage. The # three stages are: file data extraction, terms generation, index # update). The use of the counts is also controlled by some special values # in thrQSizes: if the first queue depth is 0, all counts are ignored # (autoconfigured); if a value of -1 is used for a queue depth, the # corresponding thread count is ignored. It makes no sense to use a value # other than 1 for the last stage because updating the Xapian index is # necessarily single-threaded (and protected by a mutex). #thrTCounts = 4 2 1 # Miscellaneous parameters # # # Log file verbosity 1-6. A value of 2 will print # only errors and warnings. 3 will print information like document updates, # 4 is quite verbose and 6 very verbose. loglevel = 3 # # # Log file destination. Use 'stderr' (default) to write to the # console. logfilename = stderr # # # Override loglevel for the indexer. #idxloglevel = 3 # # # Override logfilename for the indexer. #idxlogfilename = stderr # # # Override loglevel for the indexer in real time # mode.The default is to use the idx... values if set, else # the log... values. #daemloglevel = 3 # # # Override logfilename for the indexer in real time # mode.The default is to use the idx... values if set, else # the log... values. #daemlogfilename = /dev/null # # # Override loglevel for the python module. #pyloglevel = 3 # # # Override logfilename for the python module. #pylogfilename = stderr # # # Original location of the configuration directory. # This is used exclusively for movable datasets. 
Locating the # configuration directory inside the directory tree makes it possible to # provide automatic query time path translations once the data set has # moved (for example, because it has been mounted on another # location). #orgidxconfdir = # # # Current location of the configuration directory. # Complement orgidxconfdir for movable datasets. This should be used # if the configuration directory has been copied from the dataset to # another location, either because the dataset is readonly and an r/w copy # is desired, or for performance reasons. This records the original moved # location before copy, to allow path translation computations. For # example if a dataset originally indexed as '/home/me/mydata/config' has # been mounted to '/media/me/mydata', and the GUI is running from a copied # configuration, orgidxconfdir would be '/home/me/mydata/config', and # curidxconfdir (as set in the copied configuration) would be # '/media/me/mydata/config'. #curidxconfdir = # # # Indexing process current directory. The input # handlers sometimes leave temporary files in the current directory, so it # makes sense to have recollindex chdir to some temporary directory. If the # value is empty, the current directory is not changed. If the # value is (literal) tmp, we use the temporary directory as set by the # environment (RECOLL_TMPDIR else TMPDIR else /tmp). If the value is an # absolute path to a directory, we go there. idxrundir = tmp # # # Script used to heuristically check if we need to retry indexing # files which previously failed. The default script checks # the modified dates on /usr/bin and /usr/local/bin. A relative path will # be looked up in the filters dirs, then in the path. Use an absolute path # to do otherwise. checkneedretryindexscript = rclcheckneedretry.sh # # # Additional places to search for helper executables. # This is only used on Windows for now. #recollhelperpath = c:/someprog/bin;c:/someotherprog/bin # # # Length of abstracts we store while indexing. # Recoll stores an abstract for each indexed file. # The text can come from an actual 'abstract' section in the # document or will just be the beginning of the document. It is stored in # the index so that it can be displayed inside the result lists without # decoding the original file. The idxabsmlen parameter # defines the size of the stored abstract. The default value is 250 # bytes. The search interface gives you the choice to display this stored # text or a synthetic abstract built by extracting text around the search # terms. If you always prefer the synthetic abstract, you can reduce this # value and save a little space. #idxabsmlen = 250 # # # Truncation length of stored metadata fields.This # does not affect indexing (the whole field is processed anyway), just the # amount of data stored in the index for the purpose of displaying fields # inside result lists or previews. The default value is 150 bytes which # may be too low if you have custom fields. #idxmetastoredlen = 150 # # # Truncation length for all document texts.Only index # the beginning of documents. This is not recommended except if you are # sure that the interesting keywords are at the top and have severe disk # space issues. #idxtexttruncatelen = 0 # # # Language definitions to use when creating the aspell # dictionary.The value must match a set of aspell language # definition files. You can type "aspell dicts" to see a list The default # if this is not set is to use the NLS environment to guess the # value. 
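# For example (illustrative), to force a French spelling dictionary
# regardless of the NLS environment:
# aspellLanguage = fr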
#aspellLanguage = en # # # Additional option and parameter to aspell dictionary creation # command.Some aspell packages may need an additional option # (e.g. on Debian Jessie: --local-data-dir=/usr/lib/aspell). See Debian bug # 772415. #aspellAddCreateParam = --local-data-dir=/usr/lib/aspell # # # Set this to have a look at aspell dictionary creation # errors.There are always many, so this is mostly for # debugging. #aspellKeepStderr = 1 # # # Disable aspell use.The aspell dictionary generation # takes time, and some combinations of aspell version, language, and local # terms, result in aspell crashing, so it sometimes makes sense to just # disable the thing. #noaspell = 1 # # # Auxiliary database update interval.The real time # indexer only updates the auxiliary databases (stemdb, aspell) # periodically, because it would be too costly to do it for every document # change. The default period is one hour. #monauxinterval = 3600 # # # Minimum interval (seconds) between processings of the indexing # queue.The real time indexer does not process each event # when it comes in, but lets the queue accumulate, to diminish overhead and # to aggregate multiple events affecting the same file. Default 30 # S. #monixinterval = 30 # # # Timing parameters for the real time indexing. # Definitions for files which get a longer delay before reindexing # is allowed. This is for fast-changing files, that should only be # reindexed once in a while. A list of wildcardPattern:seconds pairs. The # patterns are matched with fnmatch(pattern, path, 0) You can quote entries # containing white space with double quotes (quote the whole entry, not the # pattern). The default is empty. # Example: mondelaypatterns = *.log:20 "*with spaces.*:30" #mondelaypatterns = *.log:20 "*with spaces.*:30" # # # ionice class for the real time indexing process # On platforms where this is supported. The default value is # 3. # monioniceclass = 3 # # # ionice class parameter for the real time indexing process. # On platforms where this is supported. The default is # empty. #monioniceclassdata = # Query-time parameters (no impact on the # index) # # # auto-trigger diacritics sensitivity (raw index only). # IF the index is not stripped, decide if we automatically trigger # diacritics sensitivity if the search term has accented characters (not in # unac_except_trans). Else you need to use the query language and the "D" # modifier to specify diacritics sensitivity. Default is no. autodiacsens = 0 # # # auto-trigger case sensitivity (raw index only).IF # the index is not stripped (see indexStripChars), decide if we # automatically trigger character case sensitivity if the search term has # upper-case characters in any but the first position. Else you need to use # the query language and the "C" modifier to specify character-case # sensitivity. Default is yes. autocasesens = 1 # Maximum query expansion count # for a single term (e.g.: when using wildcards).This only # affects queries, not indexing. We used to not limit this at all (except # for filenames where the limit was too low at 1000), but it is # unreasonable with a big index. Default 10000. maxTermExpand = 10000 # Maximum number of clauses # we add to a single Xapian query.This only affects queries, # not indexing. In some cases, the result of term expansion can be # multiplicative, and we want to avoid eating all the memory. Default # 50000. 
maxXapianClauses = 50000 # # # Maximum number of positions we walk while populating a snippet for # the result list.The default of 1,000,000 may be # insufficient for very big documents, the consequence would be snippets # with possibly meaning-altering missing words. snippetMaxPosWalk = 1000000 # Parameters for the PDF input script # # # Attempt OCR of PDF files with no text content if both tesseract and # pdftoppm are installed. # This can be defined in subdirectories. The default is off because # OCR is so very slow. #pdfocr = 0 # # Language to assume for PDF OCR. # This is very important for having a reasonable rate of errors # with tesseract. This can also be set through a configuration variable # or directory-local parameters. See the rclpdf.py script. # #pdfocrlang = eng # # # Enable PDF attachment extraction by executing pdftk (if # available).This is # normally disabled, because it does slow down PDF indexing a bit even if # not one attachment is ever found. #pdfattach = 0 # # # Extract text from selected XMP metadata tags.This # is a space-separated list of qualified XMP tag names. Each element can also # include a translation to a Recoll field name, separated by a '|' # character. If the second element is absent, the tag name is used as the # Recoll field names. You will also need to add specifications to the # "fields" file to direct processing of the extracted data. #pdfextrameta = bibtex:location|location bibtex:booktitle bibtex:pages # # # Define name of XMP field editing script.This # defines the name of a script to be loaded for editing XMP field # values. The script should define a 'MetaFixer' class with a metafix() # method which will be called with the qualified tag name and value of each # selected field, for editing or erasing. A new instance is created for # each document, so that the object can keep state for, e.g. eliminating # duplicate values. #pdfextrametafix = /path/to/fixerscript.py # Parameters set for specific # locations # You could specify different parameters for a subdirectory like this: #[~/hungariandocs/plain] #defaultcharset = iso-8859-2 [/usr/share/man] followLinks = 1 # # # Enable thunderbird/mozilla-seamonkey mbox format quirks # Set this for the directory where the email mbox files are # stored. [~/.thunderbird] mhmboxquirks = tbird [~/.mozilla] mhmboxquirks = tbird # pidgin / purple directories for irc chats have names beginning with # [~/.purple] skippedNames = [~/AppData/Local/Microsoft/Outlook] onlyNames = *.ost *.pst recoll-1.26.3/sampleconf/mimeview.mac0000644000175000017500000001547213566424763014470 00000000000000# @(#$Id: mimeview,v 1.16 2008-09-15 08:03:37 dockes Exp $ (C) 2004 J.F.Dockes ## ########################################## # External viewers, launched by the recoll GUI when you click on a result # 'Open' link - MAC version # On the MAC, we use "open" for everything... # Mime types which we should not uncompress if they are found gzipped or # bzipped because the native viewer knows how to handle. These would be # exceptions and the list is normally empty #nouncompforviewmts = # For releases 1.18 and later: exceptions when using the x-all entry: these # types will use their local definition. This is useful, e.g.: # # - for pdf, where we can pass additional parameters like page to open and # search string # - For pages of CHM and EPUB documents where we can choose to open the # parent document instead of a temporary html file. 
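# Example sketch for the pdfextrametafix parameter described in recoll.conf
# above: a hypothetical MetaFixer class. As documented there, a new instance
# is created per document and metafix() is called with each selected
# qualified tag name and value; returning the edited value (or an empty
# string to erase it) is this sketch's assumption about the expected return.
# class MetaFixer(object):
#     def __init__(self):
#         # per-document state, e.g. for eliminating duplicate values
#         self.seen = set()
#     def metafix(self, name, value):
#         if name == 'bibtex:pages':
#             value = value.replace('--', '-')   # illustrative edit only
#         if value in self.seen:
#             return ''                          # drop duplicate values
#         self.seen.add(value)
#         return value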
#xallexcepts = application/pdf application/postscript application/x-dvi \ # text/html|gnuinfo text/html|chm text/html|epub [view] # Pseudo entry used if the 'use desktop' preference is set in the GUI application/x-all = open %f application/epub+zip = ebook-viewer %f # If you want to open the parent epub document for epub parts instead of # opening them as html documents: #text/html|epub = ebook-viewer %F;ignoreipath=1 application/x-gnote = gnote %f application/x-mobipocket-ebook = ebook-viewer %f application/x-kword = kword %f application/x-abiword = abiword %f # Note: the Linux Mint evince clones, atril and xread, have the same options application/pdf = evince --page-index=%p --find=%s %f # Or: #application/pdf = qpdfview --search %s %f#%p application/postscript = evince --page-index=%p --find=%s %f application/x-dvi = evince --page-index=%p --find=%s %f application/x-lyx = lyx %f application/x-scribus = scribus %f application/msword = libreoffice %f application/vnd.ms-excel = libreoffice %f application/vnd.ms-powerpoint = libreoffice %f application/vnd.oasis.opendocument.text = libreoffice %f application/vnd.oasis.opendocument.presentation = libreoffice %f application/vnd.oasis.opendocument.spreadsheet = libreoffice %f application/vnd.openxmlformats-officedocument.wordprocessingml.document = \ libreoffice %f application/vnd.openxmlformats-officedocument.wordprocessingml.template = \ libreoffice %f application/vnd.openxmlformats-officedocument.presentationml.template = \ libreoffice %f application/vnd.openxmlformats-officedocument.presentationml.presentation = \ libreoffice %f application/vnd.openxmlformats-officedocument.spreadsheetml.sheet = \ libreoffice %f application/vnd.openxmlformats-officedocument.spreadsheetml.template =\ libreoffice %f application/vnd.sun.xml.calc = libreoffice %f application/vnd.sun.xml.calc.template = libreoffice %f application/vnd.sun.xml.draw = libreoffice %f application/vnd.sun.xml.draw.template = libreoffice %f application/vnd.sun.xml.impress = libreoffice %f application/vnd.sun.xml.impress.template = libreoffice %f application/vnd.sun.xml.math = libreoffice %f application/vnd.sun.xml.writer = libreoffice %f application/vnd.sun.xml.writer.global = libreoffice %f application/vnd.sun.xml.writer.template = libreoffice %f application/vnd.wordperfect = libreoffice %f text/rtf = libreoffice %f application/x-dia-diagram = dia %f application/x-fsdirectory = dolphin %f inode/directory = dolphin %f # Both dolphin and nautilus can pre-select a file inside a # directory. Thunar can't afaik. xdg-open cant pass an additional # parameters so these are to be xallexcepts. 
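# The entries in this file rely on % substitutions (assuming %f stands for
# the local file path, %u for the url, %p for a page number, %s for a
# search term, %i for the ipath and %(name) for named values, as the
# entries and comments suggest). Below is a rough Python sketch of how such
# a command template could be expanded; it is a simplified illustration,
# not Recoll's implementation, and the sample values are made up.
# import re
# def expand_arg(arg, subs):
#     def repl(m):
#         key = m.group(1) or m.group(2)
#         return subs.get(key, m.group(0))   # leave unknown % codes as-is
#     return re.sub(r'%\((\w+)\)|%(\w)', repl, arg)
# # [expand_arg(a, {'f': '/tmp/x.pdf', 'p': '3', 's': 'query'})
# #  for a in 'evince --page-index=%p --find=%s %f'.split()]
# # -> ['evince', '--page-index=3', '--find=query', '/tmp/x.pdf']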
application/x-fsdirectory|parentopen = dolphin --select %(childurl) %f inode/directory|parentopen = dolphin --select %(childurl) %f #application/x-fsdirectory|parentopen = nautilus %(childurl) #inode/directory|parentopen = nautilus %(childurl) application/x-gnuinfo = xterm -e "info -f %f" application/x-gnumeric = gnumeric %f application/x-flac = rhythmbox %f audio/mpeg = rhythmbox %f application/ogg = rhythmbox %f audio/x-karaoke = kmid %f image/jpeg = gwenview %f image/png = gwenview %f image/tiff = gwenview %f image/gif = gwenview %f image/svg+xml = inkview %f image/vnd.djvu = djview %f image/x-xcf = gimp %f image/bmp = gwenview %f image/x-ms-bmp = gwenview %f image/x-xpmi = gwenview %f image/x-nikon-nef = ufraw %f # Opening mail messages: # - Thunderbird will only open a single-message file if it has an .eml # extension # - "sylpheed %f" seems to work ok as of version 3.3 # - "kmail --view %u" works # - claws-mail: works using a small intermediary shell-script, which you # set as the viewer here. You need to have at least one account inside # claws-mail, so that it creates ~/Mail/inbox. Script contents example # follows. Using 1 is probably not a good idea if this is a real account # (here I am using a bogus one, so that I can overwrite anything inside # inbox at will): # #!/bin/bash # cp $1 ~/Mail/inbox/1 # claws-mail --select ~/Mail/inbox/1 # rm ~/Mail/inbox/1 message/rfc822 = thunderbird -file %f text/x-mail = thunderbird -file %f application/x-mimehtml = thunderbird -file %f text/calendar = evolution %f application/x-okular-notes = okular %f application/x-rar = ark %f application/x-tar = ark %f application/zip = ark %f application/x-7z-compressed = ark %f application/x-awk = emacsclient %f application/x-perl = emacsclient %f text/x-perl = emacsclient %f application/x-shellscript = emacsclient %f text/x-shellscript = emacsclient %f # Or firefox -remote "openFile(%u)" text/html = firefox %u # gnu info nodes are translated to html with a "gnuinfo" # rclaptg. rclshowinfo knows how to start the info command on the right # node text/html|gnuinfo = rclshowinfo %F %(title);ignoreipath=1 application/x-webarchive = konqueror %f text/x-fictionbook = ebook-viewer %f application/x-tex = emacsclient %f application/xml = emacsclient %f text/xml = emacsclient %f text/x-tex = emacsclient %f text/plain = emacsclient %f text/x-awk = emacsclient %f text/x-c = emacsclient %f text/x-c+ = emacsclient %f text/x-c++ = emacsclient %f text/x-csv = libreoffice %f text/x-html-sidux-man = konqueror %f text/x-html-aptosid-man = iceweasel %f application/x-chm = kchmviewer %f # Html pages inside a chm have a chm rclaptg set by the filter. 
Kchmviewer # knows how to use the ipath (which is the internal chm path) to open the # file at the right place text/html|chm = kchmviewer --url %i %F text/x-ini = emacsclient %f text/x-man = xterm -u8 -e "groff -T ascii -man %f | more" text/x-python = idle %f text/x-gaim-log = emacsclient %f text/x-purple-html-log = emacsclient %f text/x-purple-log = emacsclient %f # The video types will usually be handled by the desktop default, but they # need entries here to get an "Open" link video/3gpp = open %f video/mp2p = open %f video/mp2t = open %f video/mp4 = open %f video/mpeg = open %f video/quicktime = open %f video/x-matroska = open %f video/x-ms-asf = open %f video/x-msvideo = open %f recoll-1.26.3/sampleconf/mimeconf0000644000175000017500000004501513533651561013670 00000000000000# (C) 2004 J.F.Dockes # This file contains most of the data which determines how we # handle the different mime types (also see the "mimeview" file). # Sections: # top-level: Decompression parameters. Should not be at top-level, historical. # [index] : Associations of mime types to the filters that translate them # to plain text or html. # [icons] : Associations of mime types to result list icons (GUI) # [categories] : groupings of mime types (media, text, message etc.) # [guifilters] : defines the filtering checkboxes in the GUI. Uses the # above categories by default ## ####################################### # Decompression: these types need a first pass to create a temp file to # work with. We use a script because uncompress utilities usually work in # place, which is not suitable. # # Obviously this should be in a [decompress] section or such, but it was # once forgotten and remained global for compatibility. The first word # 'uncompress' should have been the section name and has no other meaning. # # The second word is the name of a script or program to execute to # produce an uncompressed copy (e.g.: rcluncomp). It must output the # uncompressed file name on stdout, and produce no data out of the # temporary directory given as parameter. The uncompressed file name should # preserve the original file extension (i.e. use gunzip myfile.doc.gz not # gunzip < myfile.doc.gz > myfile.whateverwrongextension) # # The %t parameter will be substituted to the name of a temporary directory # by recoll. This directory is guaranteed empty when calling the filter. # # The %f parameter will be substituted with the input file. # # Note that it should be possible to improve performance a lot by writing a # compressor-specific script instead of the generic rcluncomp which will # copy the compressed file into the temporary directory as a first step in # all cases. # application/gzip = uncompress rcluncomp gunzip %f %t application/x-bzip2 = uncompress rcluncomp bunzip2 %f %t application/x-compress = uncompress rcluncomp gunzip %f %t application/x-gzip = uncompress rcluncomp gunzip %f %t application/x-lzma = uncompress rcluncomp unxz %f %t application/x-xz = uncompress rcluncomp unxz %f %t application/x-zstd = uncompress rcluncomp "unzstd --rm -q" %f %t ## ################################### # Filters for indexing and internal preview. # The "internal" filters are hardwired in the c++ code. # The external "exec" filters are typically scripts. By default, they output the # document in simple html format, have a look at the scripts. 
# A different format (ie text/plain), and a character set can be defined for # each filter, see the exemples below (ie: msword) [index] application/epub+zip = execm rclepub # Returned by xdg-mime for .js. Future-proofing application/javascript = internal text/plain # MSWORD: the rcldoc script handles a number of marginal case that raw # antiword won't: # - with wvWare: "text stream of this file is too small to handle" # - with unrtf: rtf files disguising as doc files. # The default is now again to use rcldoc. Use raw antiword if speed is more # important for you than catching all data, application/msword = execm rcldoc.py #application/msword = exec antiword -t -i 1 -m UTF-8;mimetype=text/plain # You can also use wvware directly but it's much slower. # application/msword = exec wvWare --charset=utf-8 --nographics # Also Handle the mime type returned by "file -i" for a suffix-less word # file. This could probably just as well be an excel file, but we have to # chose one. application/vnd.ms-office = execm rcldoc.py application/ogg = execm rclaudio application/pdf = execm rclpdf.py application/postscript = exec rclps application/sql = internal text/plain application/vnd.ms-excel = execm rclxls.py application/vnd.ms-outlook = execm rclpst.py application/vnd.ms-powerpoint = execm rclppt.py application/vnd.oasis.opendocument.text = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.oasis.opendocument.text-template = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.oasis.opendocument.presentation = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.oasis.opendocument.spreadsheet = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.oasis.opendocument.graphics = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.oasis.opendocument.presentation-flat-xml = internal xsltproc opendoc-flat.xsl application/vnd.oasis.opendocument.text-flat-xml = internal xsltproc opendoc-flat.xsl application/vnd.oasis.opendocument.spreadsheet-flat-xml = internal xsltproc opendoc-flat.xsl application/vnd.openxmlformats-officedocument.wordprocessingml.document = \ internal xsltproc docProps/core.xml openxml-meta.xsl word/document.xml openxml-word-body.xsl application/vnd.openxmlformats-officedocument.wordprocessingml.template = \ internal xsltproc docProps/core.xml openxml-meta.xsl word/document.xml openxml-word-body.xsl application/vnd.openxmlformats-officedocument.presentationml.template = \ execm rclopxml.py application/vnd.openxmlformats-officedocument.presentationml.presentation = \ execm rclopxml.py application/vnd.openxmlformats-officedocument.spreadsheetml.sheet = \ internal xsltproc docProps/core.xml openxml-meta.xsl xl/sharedStrings.xml openxml-xls-body.xsl application/vnd.openxmlformats-officedocument.spreadsheetml.template =\ internal xsltproc docProps/core.xml openxml-meta.xsl xl/sharedStrings.xml openxml-xls-body.xsl application/vnd.sun.xml.calc = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.calc.template = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.draw = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.draw.template = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.impress = internal xsltproc meta.xml opendoc-meta.xsl 
content.xml opendoc-body.xsl application/vnd.sun.xml.impress.template = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.math = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.writer = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.writer.global = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.sun.xml.writer.template = internal xsltproc meta.xml opendoc-meta.xsl content.xml opendoc-body.xsl application/vnd.wordperfect = exec wpd2html;mimetype=text/html application/x-abiword = internal xsltproc abiword.xsl application/x-awk = internal text/plain application/x-chm = execm rclchm application/x-dia-diagram = execm rcldia;mimetype=text/plain application/x-dvi = exec rcldvi application/x-flac = execm rclaudio application/x-gnote = execm rclxml.py application/x-gnuinfo = execm rclinfo application/x-gnumeric = internal xsltproc gnumeric.xsl application/x-kword = exec rclkwd application/x-lyx = exec rcllyx application/x-mimehtml = internal message/rfc822 #application/x-mobipocket-ebook = execm rclmobi application/x-okular-notes = internal xsltproc okular-note.xsl application/x-perl = internal text/plain # Returned by xdg-mime for .php. Future-proofing application/x-php = internal text/plain application/x-rar = execm rclrar;charset=default application/x-scribus = exec rclscribus application/x-shellscript = internal text/plain #application/x-tar = execm rcltar application/x-tex = exec rcltex application/x-webarchive = execm rclwar application/zip = execm rclzip;charset=default application/x-7z-compressed = execm rcl7z audio/ape = execm rclaudio audio/mpeg = execm rclaudio audio/mp4 = execm rclaudio video/mp4 = execm rclaudio video/x-msvideo = execm rclimg audio/aac = execm rclaudio audio/x-karaoke = execm rclkar audio/x-wavpack = execm rclaudio audio/x-musepack = execm rclaudio image/gif = execm rclimg image/jp2 = execm rclimg image/jpeg = execm rclimg image/png = execm rclimg image/tiff = execm rclimg image/vnd.djvu = execm rcldjvu.py image/svg+xml = internal xsltproc svg.xsl image/x-xcf = execm rclimg image/x-nikon-nef = execm rclimg inode/symlink = internal application/x-zerosize = internal inode/x-empty = internal application/x-zerosize message/rfc822 = internal text/calendar = execm rclics;mimetype=text/plain text/html = internal text/plain = internal text/rtf = exec unrtf --nopict --html;mimetype=text/html text/x-c = internal text/x-c++ = internal text/x-c+ = internal text/x-csharp = internal text/plain text/css = internal text/plain application/javascript = internal text/plain text/x-bibtex = exec rclbibtex.sh ; mimetype = text/plain text/x-csv = internal text/plain text/x-fictionbook = internal xsltproc fb2.xsl text/x-gaim-log = exec rclgaim text/x-html-aptosid-man = exec rclaptosidman text/x-lua = internal text/x-chm-html = internal text/html text/x-ini = internal text/plain text/x-java = internal text/plain text/x-mail = internal text/x-man = exec rclman text/x-perl = internal text/plain text/x-purple-log = exec rclpurple text/x-purple-html-log = internal text/html text/x-python = exec rclpython text/x-shellscript = internal text/plain text/x-srt = internal text/plain text/x-tex = exec rcltex # Generic XML is best indexed as text, else it generates too many errors # All parameter and tag names, attribute values etc, are indexed as # text. rclxml.py tries to just index the text content. 
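# Illustration for the decompression entries at the top of this file: a
# simplified Python stand-in for a helper like rcluncomp. Per the contract
# described there, it is called with the uncompress command, the input file
# (%f) and an empty temporary directory (%t), writes nothing outside that
# directory, and prints the uncompressed file name on stdout. This is a
# sketch only, not the real rcluncomp script.
# import os, shlex, shutil, subprocess, sys
# def uncompress(cmd, infile, tmpdir):
#     copy = os.path.join(tmpdir, os.path.basename(infile))
#     shutil.copyfile(infile, copy)              # work only inside %t
#     subprocess.check_call(shlex.split(cmd) + [copy])
#     # gunzip & co. replace the copy in place, keeping the original
#     # extension (myfile.doc.gz -> myfile.doc); %t held nothing else
#     print(os.path.join(tmpdir, os.listdir(tmpdir)[0]))
# if __name__ == '__main__':
#     uncompress(sys.argv[1], sys.argv[2], sys.argv[3])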
#application/xml = execm rclxml.py #text/xml = internal xsltproc xml.xsl application/xml = internal text/plain text/xml = internal text/plain ## ############################################# # Icons to be used in the result list if required by gui config [icons] application/epub+zip = book application/javascript = source application/javascript = source application/msword = wordprocessing application/ogg = sownd application/pdf = pdf application/postscript = postscript application/sql = source application/vnd.ms-excel = spreadsheet application/vnd.ms-office = document application/vnd.ms-outlook = document application/vnd.ms-powerpoint = presentation application/vnd.oasis.opendocument.presentation = presentation application/vnd.oasis.opendocument.spreadsheet = spreadsheet application/vnd.oasis.opendocument.text = wordprocessing application/vnd.oasis.opendocument.graphics = presentation application/vnd.oasis.opendocument.presentation-flat-xml = presentation application/vnd.oasis.opendocument.spreadsheet-flat-xml = spreadsheet application/vnd.oasis.opendocument.text-flat-xml = wordprocessing application/vnd.oasis.opendocument.text-template = wordprocessing application/vnd.openxmlformats-officedocument.presentationml.presentation = presentation application/vnd.openxmlformats-officedocument.presentationml.template = presentation application/vnd.openxmlformats-officedocument.spreadsheetml.sheet = spreadsheet application/vnd.openxmlformats-officedocument.spreadsheetml.template = spreadsheet application/vnd.openxmlformats-officedocument.wordprocessingml.document = wordprocessing application/vnd.openxmlformats-officedocument.wordprocessingml.template = wordprocessing application/vnd.sun.xml.calc = spreadsheet application/vnd.sun.xml.calc.template = spreadsheet application/vnd.sun.xml.draw = drawing application/vnd.sun.xml.draw.template = drawing application/vnd.sun.xml.impress = presentation application/vnd.sun.xml.impress.template = presentation application/vnd.sun.xml.math = wordprocessing application/vnd.sun.xml.writer = wordprocessing application/vnd.sun.xml.writer.global = wordprocessing application/vnd.sun.xml.writer.template = wordprocessing application/vnd.wordperfect = wordprocessing application/x-7z-compressed = archive application/x-abiword = wordprocessing application/x-awk = source application/x-chm = book application/x-dia-diagram = drawing application/x-dvi = document application/x-flac = sownd application/x-fsdirectory = folder application/x-gnote = document application/x-gnuinfo = book application/x-gnumeric = spreadsheet application/x-kword = wordprocessing application/x-lyx = wordprocessing application/x-mimehtml = message application/x-mobipocket-ebook = document application/x-okular-notes = document application/x-perl = source application/x-php = source application/x-rar = archive application/x-scribus = document application/x-scribus = wordprocessing application/x-shellscript = source application/x-tar = archive application/x-tex = wordprocessing application/x-webarchive = archive application/xml = document application/zip = archive audio/aac = sownd audio/ape = sownd audio/mp4 = sownd audio/mpeg = sownd audio/x-karaoke = sownd audio/x-musepack = sownd audio/x-wavpack = sownd image/bmp = image image/gif = image image/jp2 = image image/jpeg = image image/png = image image/svg+xml = drawing image/tiff = image image/vnd.djvu = document image/x-nikon-nef = image image/x-xcf = image image/x-xpmi = image inode/directory = folder inode/symlink = emblem-symbolic-link message/rfc822 = 
message text/css = html text/html = html text/html|chm = bookchap text/html|epub = bookchap text/html|gnuinfo = bookchap text/plain = txt text/rtf = wordprocessing text/x-bibtex = txt text/x-c = source text/x-c+ = source text/x-c++ = source text/x-chm-html = html text/x-csharp = source text/x-csv = txt text/x-fictionbook = book text/x-html-aptosid-man = aptosid-book text/x-ini = txt text/x-java = source text/x-lua = source text/x-mail = message text/x-man = document text/x-perl = source text/x-php = source text/x-purple-html-log = pidgin text/x-purple-log = pidgin text/x-python = text-x-python text/x-shellscript = source text/x-srt = source text/x-tex = wordprocessing text/xml = document video/3gpp = video video/mp2p = video video/mp2t = video video/mp4 = video video/mpeg = video video/quicktime = video video/x-matroska = video video/x-ms-asf = video video/x-msvideo = video [categories] # Categories group mime types by "kind". They can be used from the query # language as an "rclcat" clause. This is fully dynamic, you can change the # names and groups as you wish, only the mime types are stored in the index. # # If you add/remove categories, you may also want to change the # "guifilters" section below. text = \ application/epub+zip \ application/javascript \ application/msword \ application/pdf \ application/postscript \ application/sql \ application/vnd.oasis.opendocument.text \ application/vnd.oasis.opendocument.text-flat-xml \ application/vnd.oasis.opendocument.text-template \ application/vnd.openxmlformats-officedocument.wordprocessingml.document \ application/vnd.openxmlformats-officedocument.wordprocessingml.template \ application/vnd.sun.xml.writer \ application/vnd.sun.xml.writer.global \ application/vnd.sun.xml.writer.template \ application/vnd.wordperfect \ application/x-abiword \ application/vnd.ms-office \ application/x-awk \ application/x-chm \ application/x-dvi \ application/x-gnote \ application/x-gnuinfo \ application/x-kword \ application/x-lyx \ application/x-mobipocket-ebook \ application/x-okular-notes \ application/x-perl \ application/x-php \ application/x-scribus \ application/x-shellscript \ application/x-tex \ application/xml \ text/xml \ text/x-csv \ text/x-tex \ image/vnd.djvu \ text/calendar \ text/css \ text/html \ text/plain \ text/rtf \ text/x-bibtex \ text/x-c \ text/x-c++ \ text/x-c+ \ text/x-chm-html \ text/x-csharp \ text/x-lua \ text/x-fictionbook \ text/x-html-aptosid-man \ text/x-html-sidux-man \ text/x-ini \ text/x-java \ text/x-man \ text/x-perl \ text/x-php \ text/x-python \ text/x-shellscript \ text/x-srt \ spreadsheet = \ application/vnd.ms-excel \ application/vnd.oasis.opendocument.spreadsheet \ application/vnd.oasis.opendocument.spreadsheet-flat-xml \ application/vnd.openxmlformats-officedocument.spreadsheetml.sheet \ application/vnd.openxmlformats-officedocument.spreadsheetml.template \ application/vnd.sun.xml.calc \ application/vnd.sun.xml.calc.template \ application/x-gnumeric \ presentation = \ application/vnd.ms-powerpoint \ application/vnd.oasis.opendocument.graphics \ application/vnd.oasis.opendocument.presentation \ application/vnd.oasis.opendocument.presentation-flat-xml \ application/vnd.openxmlformats-officedocument.presentationml.presentation \ application/vnd.openxmlformats-officedocument.presentationml.template \ application/vnd.sun.xml.impress \ application/vnd.sun.xml.impress.template \ media = \ application/ogg \ application/x-flac \ audio/* \ image/* \ video/* \ message = \ message/rfc822 \ text/x-gaim-log \ text/x-mail \ 
text/x-purple-html-log \ text/x-purple-log \ other = \ application/vnd.ms-outlook \ application/vnd.sun.xml.draw \ application/vnd.sun.xml.draw.template \ application/vnd.sun.xml.math \ application/x-7z-compressed \ application/x-dia-diagram \ application/x-fsdirectory \ application/x-mimehtml \ application/x-rar \ application/x-tar \ application/x-webarchive \ application/x-zerosize \ application/zip \ inode/directory \ inode/symlink \ inode/x-empty \ [guifilters] # This defines the top level filters in the GUI (accessed by the the # radiobuttons above the results area, or a toolbar combobox). # Each entry defines a label and a query language fragment that will be # applied to filter the current query if the option is activated. # # This does not really belong in mimeconf, but it does belong in the index # config (not the GUI one), because it's not necessarily the same in all # configs, it has to go somewhere, and it's not worth a separate config # file... # # By default this filters by document category (see above), but any # language fragment should be ok. Be aware though that the "document # history" queries only know about simple "rclcat" filtering. # # If you don't want the filter names to be displayed in alphabetic order, # you can define them with a colon. The part before the colon is not # displayed but used for ordering, ie: a:zzbutshouldbefirst b:aacomeslast # text = rclcat:text spreadsheet = rclcat:spreadsheet presentation = rclcat:presentation media = rclcat:media message = rclcat:message other = rclcat:other recoll-1.26.3/sampleconf/fields0000644000175000017500000001413613347664027013345 00000000000000# (C) 2007-2011 J.F.Dockes # License: GPL V2 # # Field names configuration. This defines how one may search ie for: # author:Hemingway # # Important: # - the field names MUST be all lowercase alphabetic ascii here. They can # be anycased in the documents. [prefixes] ##################################################### # This section defines what prefix the terms inside named fields will be # indexed with (in addition to prefix-less indexing for general search) # ALL prefixes MUST be all ASCII UPPERCASE (NO DIGITS) # # The field names should be the canonic ones, not the aliases defined in # the following section. Don't change those which are predefined here, # quite a few are hard-coded in the c++. But you can add more (for new # fields emitted by filters). # # Fields can have two relevance boost factors defined, such as in: # caption = S ; wdfinc=10 # and/or # caption = S ; boost = 10 # The first line would boost the xapian "within document frequency" of # caption terms by a factor of 10 at indexing time. The second one # (NOT CURRENTLY IMPLEMENTED) would automatically boost the weight of a # caption-based field query (ie: caption:mytitle or title:mytitle) at query # time. # # The pfxonly attribute can also be set on entries to express that terms # from the field should be indexed only with a prefix (in general, field # terms are indexed both with and without a prefix). # The following ones are probably hard-coded in the c code, can't change at # all. # Also reserved: F(parentid), Q(uniqueid) author = A xapdate = D keywords= K xapyearmon = M title = S ; wdfinc = 10 mtype = T ext = XE; noterms = 1 rclmd5 = XM dir = XP ; noterms = 1 abstract = XS filename = XSFN ; noterms = 1 containerfilename = XCFN ; pfxonly = 1 ; noterms = 1 rclUnsplitFN = XSFS xapyear = Y recipient = XTO # Extension examples. 
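# Illustration for the wdfinc attribute documented in the [prefixes]
# section above, using the python-xapian bindings. Recoll's real term
# generation does much more (positions, splitting, case and diacritics
# handling), so this only shows the within-document-frequency boost idea.
# import xapian
# doc = xapian.Document()
# doc.add_term('mytitle')        # unprefixed term, for general searches
# doc.add_term('Smytitle', 10)   # 'S'-prefixed title term, wdf raised by 10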
These are actually used by default by Recoll, you can # add your own to search for fields produced by the filters and not handled # by default. # Some values are internally reserved by recoll: # XE (file ext), XP (for path elements), XSFN, XSFS, XXST, XXND, XXPG rclbes = XB ; noterms = 1 # Using XX was not a good idea. # # I hereby commit to not using XY for Recoll: # *** USE XY for beginning your local prefixes *** ie: # myfield = XYMYPREF [values] ########### ## Fields which will be stored in Xapian values, authorizing range query ## processing. # Entries are specified as 'fieldname = valueslot;[px=val1;py=val2...]'. # Xapian value slots are 32 bits numbers. Numbers below 1000 are reserved # by Recoll or Xapian. Numbers above are available for user configuration # Values have types, which can be 'int' or 'string' at the moment. ints have # an additional 'len' attributes, which specifies the padding size used for # sorting (leading zeroes: all xapian sorting is text-based). 10 is fine # for an unsigned 32 bits integer. # myfield = 1001; type=int; len = 10 # mystrfield = 1002; type = string [stored] ############################ # Some fields are stored in the document data record inside the index and # can be displayed in result lists. There is no necessity that stored fields # should be indexed (have a prefix in the preceding section). Example: "url" # # Some fields are stored by default, don't add them here: # caption, mimetype, url # Only canonical names should be used here, not aliases. # "rclaptg" is used for viewer specialization (depending on local config) # "rclbes" defines the backend type (ie normal fs, firefox cache). Should # probably be hardcoded, don't remove it abstract= author= filename= keywords= rclaptg= rclbes= recipient= [aliases] ########################## # This section defines field names aliases or synonyms. Any right hand side # value will be turned into the lhs canonic name before further treatment # # The left-hand values in the recoll distribution file are well known and # must match names used in the c++ code, or even the index data # record. They can't change! But you can add others. # # Filters should only add canonic names to the meta array when indexing, # not aliases. abstract = summary dc:summary description xesam:description author = creator dc:creator xesam:author xesam:creator from title = caption title dc:title subject # catg = dc:type contentCategory dbytes = size xesam:size dmtime = date dc:date dc:datemodified datemodified contentmodified \ xesam:contentmodified ext = fileextension xesam:fileextension # Don't add subject to keywords aliases, it's better to keep it for email keywords = keyword xesam:keyword tag tags dc:subject xesam:subject \ dc:description mtype = mime mimetype xesam:mimetype contenttype xesam:contenttype dc:format recipient = to xesam:recipient url = dc:identifier xesam:url ################## # The queryaliases section defines aliases which are used exclusively at # query time: there is no risk to pick up a random field from a document # (e.g. an HTML meta field) and index it. [queryaliases] filename = fn containerfilename = cfn [xattrtofields] ###################### # Processing for extended file attributes. # By default, attributes are processed as document fields (after removing # the 'user' prefix from the name on Linux). # You can enter name translations as "xattrname = fieldname". Case matters. # Entering an empty translation will disable use of the attribute. 
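# Small Python sketch of the extended-attribute mapping just described
# (strip the 'user' prefix on Linux, apply the translations, drop the
# attribute if the translation is empty). Function and variable names are
# illustrative, not Recoll's.
# def xattr_to_field(xattrname, translations):
#     name = xattrname[5:] if xattrname.startswith('user.') else xattrname
#     if name in translations:
#         return translations[name] or None   # empty translation disables
#     return name
# # xattr_to_field('user.xdg.tags', {'xdg.tags': 'keywords', 'mime_type': ''})
# # -> 'keywords'
# # xattr_to_field('user.mime_type', {'xdg.tags': 'keywords', 'mime_type': ''})
# # -> None (attribute not indexed)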
# The values from the extended attributes will extend, not replace, the # data found from equivalent fields inside the document. # As an example, the following would map a quite plausible "tags" extended # attribute into the "keywords" field. tags = keywords # Proposed or standard freedesktop.org extended attributes xdg.tags = keywords xdg.comment = abstract # Some standard fields are not to be indexed mime_type = charset = ######################## # Sections reserved for specific filters follow # ########################## # Mail filter section. You can specify mail headers to be indexed # in addition to the standard ones: (To:, Cc:, From:, Subject:, Date, # Message-Id), along with the field name to be used. For this to be useful, # the field name should also be listed in the [prefixes] and possibly the # [stored] sections. # # [mail] # x-my-tag = mymailtag recoll-1.26.3/sampleconf/mimeview0000644000175000017500000002062113545152160013703 00000000000000# @(#$Id: mimeview,v 1.16 2008-09-15 08:03:37 dockes Exp $ (C) 2004 J.F.Dockes ## ########################################## # External viewers, launched by the recoll GUI when you click on a result # 'edit' link # Mime types which we should not uncompress if they are found gzipped or # bzipped because the native viewer knows how to handle. These would be # exceptions and the list is normally empty #nouncompforviewmts = # For releases 1.18 and later: exceptions when using the x-all entry: these # types will use their local definition. This is useful, e.g.: # # - for pdf, where we can pass additional parameters like page to open and # search string # - For pages of CHM and EPUB documents where we can choose to open the # parent document instead of a temporary html file. # Use xallexcepts- and xallexcepts+ in a user file to add or remove from # the default xallexcepts list xallexcepts = application/pdf application/postscript application/x-dvi \ text/html|gnuinfo text/html|chm text/html|epub text/html|rclman \ application/x-fsdirectory|parentopen inode/directory|parentopen [view] # Pseudo entry used if the 'use desktop' preference is set in the GUI. # Note that we use %U to url-encode the parameter application/x-all = xdg-open %U # But do use the uncoded url for help (F1) because the fragment in there must # actually be processed. text/html|rclman = xdg-open %u application/epub+zip = ebook-viewer %f # Open the parent epub document for epub parts instead of opening them as # html documents. This is almost always what we want. 
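# Rough sketch of how the 'use desktop' preference and the xallexcepts list
# above combine, as the comments describe: when the preference is set, the
# application/x-all entry is used for every type except those listed in
# xallexcepts, which keep their own definition. Function and variable names
# are illustrative, not Recoll's.
# def viewer_for(mimetype, apptag, entries, xallexcepts, use_desktop):
#     key = mimetype + ('|' + apptag if apptag else '')
#     if use_desktop and key not in xallexcepts:
#         return entries.get('application/x-all')
#     return entries.get(key) or entries.get(mimetype)
# # viewer_for('application/pdf', '', entries, xallexcepts, True)
# # -> the local pdf entry (pdf is in xallexcepts), not 'xdg-open %U'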
text/html|epub = ebook-viewer %F;ignoreipath=1 application/x-gnote = gnote %f application/x-mobipocket-ebook = ebook-viewer %f application/x-kword = kword %f application/x-abiword = abiword %f # Note: the Linux Mint evince clones, atril and xread, have the same options application/pdf = evince --page-index=%p --find=%s %f # Or: #application/pdf = qpdfview --search %s %f#%p application/postscript = evince --page-index=%p --find=%s %f application/x-dvi = evince --page-index=%p --find=%s %f application/x-lyx = lyx %f application/x-scribus = scribus %f application/msword = libreoffice %f application/vnd.ms-excel = libreoffice %f application/vnd.ms-powerpoint = libreoffice %f application/vnd.oasis.opendocument.text = libreoffice %f application/vnd.oasis.opendocument.presentation = libreoffice %f application/vnd.oasis.opendocument.spreadsheet = libreoffice %f application/vnd.oasis.opendocument.graphics = libreoffice %f application/vnd.oasis.opendocument.presentation-flat-xml = libreoffice %f application/vnd.oasis.opendocument.spreadsheet-flat-xml = libreoffice %f application/vnd.oasis.opendocument.text-flat-xml = libreoffice %f application/vnd.oasis.opendocument.text-template = libreoffice %f application/vnd.openxmlformats-officedocument.wordprocessingml.document = \ libreoffice %f application/vnd.openxmlformats-officedocument.wordprocessingml.template = \ libreoffice %f application/vnd.openxmlformats-officedocument.presentationml.template = \ libreoffice %f application/vnd.openxmlformats-officedocument.presentationml.presentation = \ libreoffice %f application/vnd.openxmlformats-officedocument.spreadsheetml.sheet = \ libreoffice %f application/vnd.openxmlformats-officedocument.spreadsheetml.template =\ libreoffice %f application/vnd.sun.xml.calc = libreoffice %f application/vnd.sun.xml.calc.template = libreoffice %f application/vnd.sun.xml.draw = libreoffice %f application/vnd.sun.xml.draw.template = libreoffice %f application/vnd.sun.xml.impress = libreoffice %f application/vnd.sun.xml.impress.template = libreoffice %f application/vnd.sun.xml.math = libreoffice %f application/vnd.sun.xml.writer = libreoffice %f application/vnd.sun.xml.writer.global = libreoffice %f application/vnd.sun.xml.writer.template = libreoffice %f application/vnd.wordperfect = libreoffice %f text/rtf = libreoffice %f application/x-dia-diagram = dia %f application/x-fsdirectory = dolphin %f inode/directory = dolphin %f # Both dolphin and nautilus can pre-select a file inside a # directory. Thunar can't afaik. xdg-open cant pass an additional # parameters so these are to be xallexcepts. 
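# Several entries in this file append attributes after a semicolon, e.g.
# 'ebook-viewer %F;ignoreipath=1'. Below, a simplified Python sketch of
# separating the command from its attributes; Recoll's own parsing is more
# general, so treat this as an illustration of the syntax only.
# def split_attrs(entry):
#     parts = entry.split(';')
#     attrs = {}
#     for p in parts[1:]:
#         if '=' in p:
#             name, value = p.split('=', 1)
#             attrs[name.strip()] = value.strip()
#     return parts[0].strip(), attrs
# # split_attrs('ebook-viewer %F;ignoreipath=1')
# # -> ('ebook-viewer %F', {'ignoreipath': '1'})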
application/x-fsdirectory|parentopen = dolphin --select %(childurl) %f inode/directory|parentopen = dolphin --select %(childurl) %f #application/x-fsdirectory|parentopen = nautilus %(childurl) #inode/directory|parentopen = nautilus %(childurl) application/x-gnuinfo = xterm -e "info -f %f" application/x-gnumeric = gnumeric %f application/x-flac = rhythmbox %f audio/mpeg = rhythmbox %f audio/aac = rhythmbox %f audio/ape = rhythmbox %f audio/mp4 = rhythmbox %f audio/x-musepack = rhythmbox %f audio/x-wavpack = rhythmbox %f application/ogg = rhythmbox %f audio/x-karaoke = kmid %f image/jpeg = ristretto %f image/jp2 = ristretto %f image/png = ristretto %f image/tiff = ristretto %f image/gif = ristretto %f image/vnd.djvu = djview %f image/bmp = ristretto %f image/x-ms-bmp = ristretto %f image/x-xpmi = ristretto %f image/x-xcf = gimp %f image/x-nikon-nef = ufraw %f image/svg+xml = inkview %f # Opening mail messages: # - Thunderbird will only open a single-message file if it has an .eml # extension # - "sylpheed %f" seems to work ok as of version 3.3 # - "kmail --view %u" works # - claws-mail: works using a small intermediary shell-script, which you # set as the viewer here. You need to have at least one account inside # claws-mail, so that it creates ~/Mail/inbox. Script contents example # follows. Using 1 is probably not a good idea if this is a real account # (here I am using a bogus one, so that I can overwrite anything inside # inbox at will): # #!/bin/bash # cp $1 ~/Mail/inbox/1 # claws-mail --select ~/Mail/inbox/1 # rm ~/Mail/inbox/1 message/rfc822 = thunderbird -file %f text/x-mail = thunderbird -file %f application/x-mimehtml = thunderbird -file %f text/calendar = evolution %f application/x-okular-notes = okular %f application/x-rar = ark %f application/x-tar = ark %f application/zip = ark %f application/x-7z-compressed = ark %f application/javascript = emacsclient --no-wait %f application/x-awk = emacsclient --no-wait %f application/x-bibtex = emacsclient --no-wait %f application/x-csharp = emacsclient --no-wait %f application/x-java = emacsclient --no-wait %f application/x-perl = emacsclient --no-wait %f application/x-php = emacsclient --no-wait %f application/x-shellscript = emacsclient --no-wait %f text/x-bibtex = emacsclient --no-wait %f text/css = emacsclient --no-wait %f text/x-csharp = emacsclient --no-wait %f text/x-java = emacsclient --no-wait %f text/x-perl = emacsclient --no-wait %f text/x-shellscript = emacsclient --no-wait %f text/x-srt = emacsclient --no-wait %f # Or firefox -remote "openFile(%u)" text/html = firefox %u application/x-chm-html = %u text/x-chm-html = %u # gnu info nodes are translated to html with a "gnuinfo" # rclaptg. 
rclshowinfo knows how to start the info command on the right # node text/html|gnuinfo = rclshowinfo %F %(title);ignoreipath=1 application/x-webarchive = konqueror %f text/x-fictionbook = ebook-viewer %f application/x-javascript = emacsclient --no-wait %f application/sql = emacsclient --no-wait %f application/x-tex = emacsclient --no-wait %f application/xml = emacsclient --no-wait %f text/xml = emacsclient --no-wait %f text/x-tex = emacsclient --no-wait %f text/plain = emacsclient --no-wait %f text/x-awk = emacsclient --no-wait %f text/x-c = emacsclient --no-wait %f text/x-lua = emacsclient --no-wait %f text/x-c+ = emacsclient --no-wait %f text/x-c++ = emacsclient --no-wait %f text/x-csv = libreoffice %f text/x-html-sidux-man = konqueror %f text/x-html-aptosid-man = iceweasel %f application/x-chm = kchmviewer %f # Html pages inside a chm have a chm rclaptg set by the filter. Kchmviewer # knows how to use the ipath (which is the internal chm path) to open the # file at the right place text/html|chm = kchmviewer --url %i %F text/x-ini = emacsclient --no-wait %f text/x-man = xterm -u8 -e "groff -T ascii -man %f | more" text/x-python = idle %f text/x-gaim-log = emacsclient --no-wait %f text/x-purple-html-log = emacsclient --no-wait %f text/x-purple-log = emacsclient --no-wait %f # The video types will usually be handled by the desktop default, but they # need entries here to get an "Open" link video/3gpp = vlc %f video/mp2p = vlc %f video/mp2t = vlc %f video/mp4 = vlc %f video/mpeg = vlc %f video/quicktime = vlc %f video/x-matroska = vlc %f video/x-ms-asf = vlc %f video/x-msvideo = vlc %f recoll-1.26.3/sampleconf/fragbuts.xml0000644000175000017500000000234113303776057014505 00000000000000 -rclbes:BGL rclbes:BGL date:2010-01-01/2010-12-31 ext:cpp OR ext:cxx dir:/my/great/directory recoll-1.26.3/sampleconf/mimemap0000644000175000017500000001672513533651561013526 00000000000000# (C) 2004 J.F.Dockes # Associations of file name extensions to mime types # # All entries must be in lower case characters. File name extensions are # lower-cased for comparison during indexing, meaning that an upper or # mixed case entry will never be matched. # # When creating a temporary file for a MIME type (e.g. temp file for # display), the FIRST entry for the MIME type will be used to determine the # file suffix. This may be important if the app used does not grok all the # possible suffixes. .txt = text/plain .text = text/plain .rst = text/plain .md = text/plain .gv = text/plain .srt = text/x-srt .ini = text/x-ini .csv = text/x-csv .bib = text/x-bibtex # Source files. # Defining them with specific types allows using a specific ext viewer (in # mimeview). You can in general use rcltext to wrap them in html for # indexing the contents (and rough preview). You could also just set them # as text/plain (index as text, use text viewer) .cpp = text/x-c .h = text/x-c .c = text/x-c .cc = text/x-c .cxx = text/x-c .hxx = text/x-c .cs = text/x-csharp .css = text/css .java = text/x-java .js = application/javascript .lua = text/x-lua .f = text/x-fortran .py = text/x-python .awk = application/x-awk .pl = application/x-perl .sh = application/x-shellscript .sql = application/sql .tcl = text/x-tcl .xml = text/xml .note = application/x-gnote .rtf = text/rtf .html = text/html .htm = text/html .shtml = text/html .php = text/html .ics = text/calendar # .eml is used as an extension by several mail apps for a single message # saved in raw MIME format. Mainly used here to get Thunderbird to open an # extracted message. 
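# Small Python sketch of the extension lookup described in the comments at
# the top of this mimemap file: the file name is lower-cased before
# comparison, and the longest matching suffix is tried first so that an
# entry like '.tar.gz' can win over '.gz' (the longest-first order is an
# assumption of this sketch, suggested by the entries below).
# import os
# def mime_for(path, mimemap):
#     name = os.path.basename(path).lower()
#     parts = name.split('.')
#     for i in range(1, len(parts)):
#         suffix = '.' + '.'.join(parts[i:])
#         if suffix in mimemap:
#             return mimemap[suffix]
#     return None
# # mime_for('/home/me/Notes.TXT', {'.txt': 'text/plain'}) -> 'text/plain'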
Also used by Windows Live Mail .eml = message/rfc822 .pst = application/vnd.ms-outlook .ost = application/vnd.ms-outlook .pdf = application/pdf .ps = application/postscript .eps = application/postscript .ai = application/postscript .tex = application/x-tex .dvi = application/x-dvi .djvu = image/vnd.djvu .svg = image/svg+xml .dia = application/x-dia-diagram .gz = application/x-gzip .Z = application/x-gzip .bz2 = application/x-bzip2 .rar = application/x-rar #.Z = application/x-compress .zip = application/zip .7z = application/x-7z-compressed .maff = application/zip .zst = application/x-zstd # The rcltar module can handle compressed tar formats internally so we # use application/x-tar for all tar files compressed or not. Note that tar # file indexing is disabled by default, you'll need to copy and uncomment # the application/x-tar commented line from mimeconf into your personal config .tar = application/x-tar .tar.gz = application/x-tar .tgz = application/x-tar .tbz = application/x-tar .tar.bz2 = application/x-tar .doc = application/msword .dot = application/msword .ppt = application/vnd.ms-powerpoint .pps = application/vnd.ms-powerpoint .pot = application/vnd.ms-powerpoint .xls = application/vnd.ms-excel .xla = application/vnd.ms-excel .chm = application/x-chm .epub = application/epub+zip .mobi = application/x-mobipocket-ebook # OpenOffice / opendocument. We handle opendocument as old openoffice files # for now .ods = application/vnd.sun.xml.calc .sxc = application/vnd.sun.xml.calc .stc = application/vnd.sun.xml.calc.template .odg = application/vnd.sun.xml.draw .sxd = application/vnd.sun.xml.draw .std = application/vnd.sun.xml.draw.template .odp = application/vnd.sun.xml.impress .sxi = application/vnd.sun.xml.impress .sti = application/vnd.sun.xml.impress.template .sxm = application/vnd.sun.xml.math .odt = application/vnd.sun.xml.writer .sxw = application/vnd.sun.xml.writer .sxg = application/vnd.sun.xml.writer.global .stw = application/vnd.sun.xml.writer.template .fodp = application/vnd.oasis.opendocument.presentation-flat-xml .fodt = application/vnd.oasis.opendocument.text-flat-xml .fods = application/vnd.oasis.opendocument.spreadsheet-flat-xml # ms openxml .docm = application/vnd.ms-word.document.macroEnabled.12 .docx = application/vnd.openxmlformats-officedocument.wordprocessingml.document .dotm = application/vnd.ms-word.template.macroEnabled.12 .dotx = application/vnd.openxmlformats-officedocument.wordprocessingml.template .potm = application/vnd.ms-powerpoint.template.macroEnabled.12 .potx = application/vnd.openxmlformats-officedocument.presentationml.template .ppam = application/vnd.ms-powerpoint.addin.macroEnabled.12 .ppsm = application/vnd.ms-powerpoint.slideshow.macroEnabled.12 .ppsx = application/vnd.openxmlformats-officedocument.presentationml.slideshow .pptm = application/vnd.ms-powerpoint.presentation.macroEnabled.12 .pptx = application/vnd.openxmlformats-officedocument.presentationml.presentation .xlam = application/vnd.ms-excel.addin.macroEnabled.12 .xlsb = application/vnd.ms-excel.sheet.binary.macroEnabled.12 .xlsm = application/vnd.ms-excel.sheet.macroEnabled.12 .xlsx = application/vnd.openxmlformats-officedocument.spreadsheetml.sheet .xltm = application/vnd.ms-excel.template.macroEnabled.12 .xltx = application/vnd.openxmlformats-officedocument.spreadsheetml.template .abw = application/x-abiword .lyx = application/x-lyx .sla = application/x-scribus .scd = application/x-scribus .info = application/x-gnuinfo .kwd = application/x-kword .gnumeric = application/x-gnumeric .gnm = 
application/x-gnumeric .wpd = application/vnd.wordperfect .rtf = text/rtf # Note: file -i says audio/x-midi, but soft karaoke files are special. .aac = audio/aac .ape = audio/ape .flac = application/x-flac .kar = audio/x-karaoke .m4a = audio/mp4 .mid = audio/x-karaoke .mp3 = audio/mpeg .mpc = audio/x-musepack .oga = application/ogg .ogg = application/ogg .wv = audio/x-wavpack .mkv = video/x-matroska .ogv = video/ogg .flv = video/x-flv .mp4 = video/mp4 .avi = video/x-msvideo .ts = video/MP2T .png = image/png .jp2 = image/jp2 .jpg = image/jpeg .jpeg = image/jpeg .gif = image/gif .tiff = image/tiff .tif = image/tiff .xcf = image/x-xcf .bmp = image/bmp .xpm = image/x-xpmi .nef = image/x-nikon-nef .nrw = image/x-nikon-nef .fb2 = text/x-fictionbook .war = application/x-webarchive .mht = application/x-mimehtml .mhtml = application/x-mimehtml # Note: recoll_noindex has been obsoleted and moved to recoll.conf as # noContentSuffixes. recoll_noindex from your personal mimemap file is # still taken into account for now, but you should move its contents to the # new recoll.conf variable. # Special handling of .txt files inside ~/.gaim and ~/.purple directories [~/.gaim] .txt = text/x-gaim-log [~/.purple] .txt = text/x-purple-log .html = text/x-purple-html-log # Special handling of aptosid manual menu system [/usr/share/aptosid-manual] .htm = text/x-html-aptosid-man .html = text/x-html-aptosid-man # Special handling of sidux manual menu system [/usr/share/sidux-manual] .htm = text/x-html-sidux-man .html = text/x-html-sidux-man # Manual files. You may want to adjust the location for your system # We can't use the default text/troff type because this doesn't say # what macro set to use (groff -man) [/usr/share/man] .0p = text/x-man .1 = text/x-man .1m = text/x-man .1p = text/x-man .1ssl = text/x-man .1x = text/x-man .2 = text/x-man .3 = text/x-man .3curses = text/x-man .3form = text/x-man .3gl = text/x-man .3menu = text/x-man .3ncurses = text/x-man .3p = text/x-man .3pm = text/x-man .3ssl = text/x-man .3tiff = text/x-man .3x = text/x-man .4 = text/x-man .5 = text/x-man .5ssl = text/x-man .6 = text/x-man .6x = text/x-man .7 = text/x-man .7ssl = text/x-man .8 = text/x-man .9 = text/x-man .n = text/x-man # Special handling for okular notes [~/.kde4/share/apps/okular/docdata] .xml = application/x-okular-notes [~/.kde/share/apps/okular/docdata] .xml = application/x-okular-notes recoll-1.26.3/ltmain.sh0000644000175000017500000117147413305725273011652 00000000000000#! /bin/sh ## DO NOT EDIT - This file generated from ./build-aux/ltmain.in ## by inline-source v2014-01-03.01 # libtool (GNU libtool) 2.4.6 # Provide generalized library-building support services. # Written by Gordon Matzigkeit , 1996 # Copyright (C) 1996-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, # if you distribute this file as part of a program or library that # is built using GNU Libtool, you may include this file under the # same distribution terms that you use for the rest of that program. 
# # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see . PROGRAM=libtool PACKAGE=libtool VERSION="2.4.6 Debian-2.4.6-2" package_revision=2.4.6 ## ------ ## ## Usage. ## ## ------ ## # Run './libtool --help' for help with using this script from the # command line. ## ------------------------------- ## ## User overridable command paths. ## ## ------------------------------- ## # After configure completes, it has a better idea of some of the # shell tools we need than the defaults used by the functions shared # with bootstrap, so set those here where they can still be over- # ridden by the user, but otherwise take precedence. : ${AUTOCONF="autoconf"} : ${AUTOMAKE="automake"} ## -------------------------- ## ## Source external libraries. ## ## -------------------------- ## # Much of our low-level functionality needs to be sourced from external # libraries, which are installed to $pkgauxdir. # Set a version string for this script. scriptversion=2015-01-20.17; # UTC # General shell script boiler plate, and helper functions. # Written by Gary V. Vaughan, 2004 # Copyright (C) 2004-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # As a special exception to the GNU General Public License, if you distribute # this file as part of a program or library that is built using GNU Libtool, # you may include this file under the same distribution terms that you use # for the rest of that program. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNES FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Please report bugs or propose patches to gary@gnu.org. ## ------ ## ## Usage. ## ## ------ ## # Evaluate this file near the top of your script to gain access to # the functions and variables defined here: # # . `echo "$0" | ${SED-sed} 's|[^/]*$||'`/build-aux/funclib.sh # # If you need to override any of the default environment variable # settings, do that before evaluating this file. ## -------------------- ## ## Shell normalisation. ## ## -------------------- ## # Some shells need a little help to be as Bourne compatible as possible. # Before doing anything else, make sure all that help has been provided! DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in *posix*) set -o posix ;; esac fi # NLS nuisances: We save the old values in case they are required later. 
_G_user_locale= _G_safe_locale= for _G_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test set = \"\${$_G_var+set}\"; then save_$_G_var=\$$_G_var $_G_var=C export $_G_var _G_user_locale=\"$_G_var=\\\$save_\$_G_var; \$_G_user_locale\" _G_safe_locale=\"$_G_var=C; \$_G_safe_locale\" fi" done # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Make sure IFS has a sensible default sp=' ' nl=' ' IFS="$sp $nl" # There are apparently some retarded systems that use ';' as a PATH separator! if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi ## ------------------------- ## ## Locate command utilities. ## ## ------------------------- ## # func_executable_p FILE # ---------------------- # Check that FILE is an executable regular file. func_executable_p () { test -f "$1" && test -x "$1" } # func_path_progs PROGS_LIST CHECK_FUNC [PATH] # -------------------------------------------- # Search for either a program that responds to --version with output # containing "GNU", or else returned by CHECK_FUNC otherwise, by # trying all the directories in PATH with each of the elements of # PROGS_LIST. # # CHECK_FUNC should accept the path to a candidate program, and # set $func_check_prog_result if it truncates its output less than # $_G_path_prog_max characters. func_path_progs () { _G_progs_list=$1 _G_check_func=$2 _G_PATH=${3-"$PATH"} _G_path_prog_max=0 _G_path_prog_found=false _G_save_IFS=$IFS; IFS=${PATH_SEPARATOR-:} for _G_dir in $_G_PATH; do IFS=$_G_save_IFS test -z "$_G_dir" && _G_dir=. for _G_prog_name in $_G_progs_list; do for _exeext in '' .EXE; do _G_path_prog=$_G_dir/$_G_prog_name$_exeext func_executable_p "$_G_path_prog" || continue case `"$_G_path_prog" --version 2>&1` in *GNU*) func_path_progs_result=$_G_path_prog _G_path_prog_found=: ;; *) $_G_check_func $_G_path_prog func_path_progs_result=$func_check_prog_result ;; esac $_G_path_prog_found && break 3 done done done IFS=$_G_save_IFS test -z "$func_path_progs_result" && { echo "no acceptable sed could be found in \$PATH" >&2 exit 1 } } # We want to be able to use the functions in this file before configure # has figured out where the best binaries are kept, which means we have # to search for them ourselves - except when the results are already set # where we skip the searches. # Unless the user overrides by setting SED, search the path for either GNU # sed, or the sed that truncates its output the least. 
test -z "$SED" && { _G_sed_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for _G_i in 1 2 3 4 5 6 7; do _G_sed_script=$_G_sed_script$nl$_G_sed_script done echo "$_G_sed_script" 2>/dev/null | sed 99q >conftest.sed _G_sed_script= func_check_prog_sed () { _G_path_prog=$1 _G_count=0 printf 0123456789 >conftest.in while : do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo '' >> conftest.nl "$_G_path_prog" -f conftest.sed conftest.out 2>/dev/null || break diff conftest.out conftest.nl >/dev/null 2>&1 || break _G_count=`expr $_G_count + 1` if test "$_G_count" -gt "$_G_path_prog_max"; then # Best one so far, save it but keep looking for a better one func_check_prog_result=$_G_path_prog _G_path_prog_max=$_G_count fi # 10*(2^10) chars as input seems more than enough test 10 -lt "$_G_count" && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out } func_path_progs "sed gsed" func_check_prog_sed $PATH:/usr/xpg4/bin rm -f conftest.sed SED=$func_path_progs_result } # Unless the user overrides by setting GREP, search the path for either GNU # grep, or the grep that truncates its output the least. test -z "$GREP" && { func_check_prog_grep () { _G_path_prog=$1 _G_count=0 _G_path_prog_max=0 printf 0123456789 >conftest.in while : do cat conftest.in conftest.in >conftest.tmp mv conftest.tmp conftest.in cp conftest.in conftest.nl echo 'GREP' >> conftest.nl "$_G_path_prog" -e 'GREP$' -e '-(cannot match)-' conftest.out 2>/dev/null || break diff conftest.out conftest.nl >/dev/null 2>&1 || break _G_count=`expr $_G_count + 1` if test "$_G_count" -gt "$_G_path_prog_max"; then # Best one so far, save it but keep looking for a better one func_check_prog_result=$_G_path_prog _G_path_prog_max=$_G_count fi # 10*(2^10) chars as input seems more than enough test 10 -lt "$_G_count" && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out } func_path_progs "grep ggrep" func_check_prog_grep $PATH:/usr/xpg4/bin GREP=$func_path_progs_result } ## ------------------------------- ## ## User overridable command paths. ## ## ------------------------------- ## # All uppercase variable names are used for environment variables. These # variables can be overridden by the user before calling a script that # uses them if a suitable command of that name is not already available # in the command search PATH. : ${CP="cp -f"} : ${ECHO="printf %s\n"} : ${EGREP="$GREP -E"} : ${FGREP="$GREP -F"} : ${LN_S="ln -s"} : ${MAKE="make"} : ${MKDIR="mkdir"} : ${MV="mv -f"} : ${RM="rm -f"} : ${SHELL="${CONFIG_SHELL-/bin/sh}"} ## -------------------- ## ## Useful sed snippets. ## ## -------------------- ## sed_dirname='s|/[^/]*$||' sed_basename='s|^.*/||' # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='s|\([`"$\\]\)|\\\1|g' # Same as above, but do not quote variable references. sed_double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution that turns a string into a regex matching for the # string literally. sed_make_literal_regex='s|[].[^$\\*\/]|\\&|g' # Sed substitution that converts a w32 file name or path # that contains forward slashes, into one that contains # (escaped) backslashes. A very naive implementation. sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' # Re-'\' parameter expansions in output of sed_double_quote_subst that # were '\'-ed in input to the same. 
If an odd number of '\' preceded a # '$' in input to sed_double_quote_subst, that '$' was protected from # expansion. Since each input '\' is now two '\'s, look for any number # of runs of four '\'s followed by two '\'s and then a '$'. '\' that '$'. _G_bs='\\' _G_bs2='\\\\' _G_bs4='\\\\\\\\' _G_dollar='\$' sed_double_backslash="\ s/$_G_bs4/&\\ /g s/^$_G_bs2$_G_dollar/$_G_bs&/ s/\\([^$_G_bs]\\)$_G_bs2$_G_dollar/\\1$_G_bs2$_G_bs$_G_dollar/g s/\n//g" ## ----------------- ## ## Global variables. ## ## ----------------- ## # Except for the global variables explicitly listed below, the following # functions in the '^func_' namespace, and the '^require_' namespace # variables initialised in the 'Resource management' section, sourcing # this file will not pollute your global namespace with anything # else. There's no portable way to scope variables in Bourne shell # though, so actually running these functions will sometimes place # results into a variable named after the function, and often use # temporary variables in the '^_G_' namespace. If you are careful to # avoid using those namespaces casually in your sourcing script, things # should continue to work as you expect. And, of course, you can freely # overwrite any of the functions or variables defined here before # calling anything to customize them. EXIT_SUCCESS=0 EXIT_FAILURE=1 EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. # Allow overriding, eg assuming that you follow the convention of # putting '$debug_cmd' at the start of all your functions, you can get # bash to show function call trace with: # # debug_cmd='eval echo "${FUNCNAME[0]} $*" >&2' bash your-script-name debug_cmd=${debug_cmd-":"} exit_cmd=: # By convention, finish your script with: # # exit $exit_status # # so that you can set exit_status to non-zero if you want to indicate # something went wrong during execution without actually bailing out at # the point of failure. exit_status=$EXIT_SUCCESS # Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh # is ksh but when the shell is invoked as "sh" and the current value of # the _XPG environment variable is not equal to 1 (one), the special # positional parameter $0, within a function call, is the name of the # function. progpath=$0 # The name of this program. progname=`$ECHO "$progpath" |$SED "$sed_basename"` # Make sure we have an absolute progpath for reexecution: case $progpath in [\\/]*|[A-Za-z]:\\*) ;; *[\\/]*) progdir=`$ECHO "$progpath" |$SED "$sed_dirname"` progdir=`cd "$progdir" && pwd` progpath=$progdir/$progname ;; *) _G_IFS=$IFS IFS=${PATH_SEPARATOR-:} for progdir in $PATH; do IFS=$_G_IFS test -x "$progdir/$progname" && break done IFS=$_G_IFS test -n "$progdir" || progdir=`pwd` progpath=$progdir/$progname ;; esac ## ----------------- ## ## Standard options. ## ## ----------------- ## # The following options affect the operation of the functions defined # below, and should be set appropriately depending on run-time para- # meters passed on the command line. opt_dry_run=false opt_quiet=false opt_verbose=false # Categories 'all' and 'none' are always available. Append any others # you will pass as the first argument to func_warning from your own # code. warning_categories= # By default, display warnings according to 'opt_warning_types'. Set # 'warning_func' to ':' to elide all warnings, or func_fatal_error to # treat the next displayed warning as a fatal error. 
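# For example (an illustrative sketch, not upstream text), a sourcing script
# that wants every warning to abort could set
#   warning_func=func_fatal_error
# before emitting warnings with func_warning, instead of relying on the
# default assigned below.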
warning_func=func_warn_and_continue # Set to 'all' to display all warnings, 'none' to suppress all # warnings, or a space delimited list of some subset of # 'warning_categories' to display only the listed warnings. opt_warning_types=all ## -------------------- ## ## Resource management. ## ## -------------------- ## # This section contains definitions for functions that each ensure a # particular resource (a file, or a non-empty configuration variable for # example) is available, and if appropriate to extract default values # from pertinent package files. Call them using their associated # 'require_*' variable to ensure that they are executed, at most, once. # # It's entirely deliberate that calling these functions can set # variables that don't obey the namespace limitations obeyed by the rest # of this file, in order that that they be as useful as possible to # callers. # require_term_colors # ------------------- # Allow display of bold text on terminals that support it. require_term_colors=func_require_term_colors func_require_term_colors () { $debug_cmd test -t 1 && { # COLORTERM and USE_ANSI_COLORS environment variables take # precedence, because most terminfo databases neglect to describe # whether color sequences are supported. test -n "${COLORTERM+set}" && : ${USE_ANSI_COLORS="1"} if test 1 = "$USE_ANSI_COLORS"; then # Standard ANSI escape sequences tc_reset='' tc_bold=''; tc_standout='' tc_red=''; tc_green='' tc_blue=''; tc_cyan='' else # Otherwise trust the terminfo database after all. test -n "`tput sgr0 2>/dev/null`" && { tc_reset=`tput sgr0` test -n "`tput bold 2>/dev/null`" && tc_bold=`tput bold` tc_standout=$tc_bold test -n "`tput smso 2>/dev/null`" && tc_standout=`tput smso` test -n "`tput setaf 1 2>/dev/null`" && tc_red=`tput setaf 1` test -n "`tput setaf 2 2>/dev/null`" && tc_green=`tput setaf 2` test -n "`tput setaf 4 2>/dev/null`" && tc_blue=`tput setaf 4` test -n "`tput setaf 5 2>/dev/null`" && tc_cyan=`tput setaf 5` } fi } require_term_colors=: } ## ----------------- ## ## Function library. ## ## ----------------- ## # This section contains a variety of useful functions to call in your # scripts. Take note of the portable wrappers for features provided by # some modern shells, which will fall back to slower equivalents on # less featureful shells. # func_append VAR VALUE # --------------------- # Append VALUE onto the existing contents of VAR. # We should try to minimise forks, especially on Windows where they are # unreasonably slow, so skip the feature probes when bash or zsh are # being used: if test set = "${BASH_VERSION+set}${ZSH_VERSION+set}"; then : ${_G_HAVE_ARITH_OP="yes"} : ${_G_HAVE_XSI_OPS="yes"} # The += operator was introduced in bash 3.1 case $BASH_VERSION in [12].* | 3.0 | 3.0*) ;; *) : ${_G_HAVE_PLUSEQ_OP="yes"} ;; esac fi # _G_HAVE_PLUSEQ_OP # Can be empty, in which case the shell is probed, "yes" if += is # useable or anything else if it does not work. test -z "$_G_HAVE_PLUSEQ_OP" \ && (eval 'x=a; x+=" b"; test "a b" = "$x"') 2>/dev/null \ && _G_HAVE_PLUSEQ_OP=yes if test yes = "$_G_HAVE_PLUSEQ_OP" then # This is an XSI compatible shell, allowing a faster implementation... eval 'func_append () { $debug_cmd eval "$1+=\$2" }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_append () { $debug_cmd eval "$1=\$$1\$2" } fi # func_append_quoted VAR VALUE # ---------------------------- # Quote VALUE and append to the end of shell variable VAR, separated # by a space. 
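# For example (illustrative only):
#   cmd=rm
#   func_append_quoted cmd "file with spaces"
#   eval "$cmd"          # runs: rm "file with spaces"
# i.e. VALUE is passed through func_quote_for_eval first, so the result
# stays safe to hand to a later eval.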
if test yes = "$_G_HAVE_PLUSEQ_OP"; then eval 'func_append_quoted () { $debug_cmd func_quote_for_eval "$2" eval "$1+=\\ \$func_quote_for_eval_result" }' else func_append_quoted () { $debug_cmd func_quote_for_eval "$2" eval "$1=\$$1\\ \$func_quote_for_eval_result" } fi # func_append_uniq VAR VALUE # -------------------------- # Append unique VALUE onto the existing contents of VAR, assuming # entries are delimited by the first character of VALUE. For example: # # func_append_uniq options " --another-option option-argument" # # will only append to $options if " --another-option option-argument " # is not already present somewhere in $options already (note spaces at # each end implied by leading space in second argument). func_append_uniq () { $debug_cmd eval _G_current_value='`$ECHO $'$1'`' _G_delim=`expr "$2" : '\(.\)'` case $_G_delim$_G_current_value$_G_delim in *"$2$_G_delim"*) ;; *) func_append "$@" ;; esac } # func_arith TERM... # ------------------ # Set func_arith_result to the result of evaluating TERMs. test -z "$_G_HAVE_ARITH_OP" \ && (eval 'test 2 = $(( 1 + 1 ))') 2>/dev/null \ && _G_HAVE_ARITH_OP=yes if test yes = "$_G_HAVE_ARITH_OP"; then eval 'func_arith () { $debug_cmd func_arith_result=$(( $* )) }' else func_arith () { $debug_cmd func_arith_result=`expr "$@"` } fi # func_basename FILE # ------------------ # Set func_basename_result to FILE with everything up to and including # the last / stripped. if test yes = "$_G_HAVE_XSI_OPS"; then # If this shell supports suffix pattern removal, then use it to avoid # forking. Hide the definitions single quotes in case the shell chokes # on unsupported syntax... _b='func_basename_result=${1##*/}' _d='case $1 in */*) func_dirname_result=${1%/*}$2 ;; * ) func_dirname_result=$3 ;; esac' else # ...otherwise fall back to using sed. _b='func_basename_result=`$ECHO "$1" |$SED "$sed_basename"`' _d='func_dirname_result=`$ECHO "$1" |$SED "$sed_dirname"` if test "X$func_dirname_result" = "X$1"; then func_dirname_result=$3 else func_append func_dirname_result "$2" fi' fi eval 'func_basename () { $debug_cmd '"$_b"' }' # func_dirname FILE APPEND NONDIR_REPLACEMENT # ------------------------------------------- # Compute the dirname of FILE. If nonempty, add APPEND to the result, # otherwise set result to NONDIR_REPLACEMENT. eval 'func_dirname () { $debug_cmd '"$_d"' }' # func_dirname_and_basename FILE APPEND NONDIR_REPLACEMENT # -------------------------------------------------------- # Perform func_basename and func_dirname in a single function # call: # dirname: Compute the dirname of FILE. If nonempty, # add APPEND to the result, otherwise set result # to NONDIR_REPLACEMENT. # value returned in "$func_dirname_result" # basename: Compute filename of FILE. # value retuned in "$func_basename_result" # For efficiency, we do not delegate to the functions above but instead # duplicate the functionality here. eval 'func_dirname_and_basename () { $debug_cmd '"$_b"' '"$_d"' }' # func_echo ARG... # ---------------- # Echo program name prefixed message. func_echo () { $debug_cmd _G_message=$* func_echo_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_IFS $ECHO "$progname: $_G_line" done IFS=$func_echo_IFS } # func_echo_all ARG... # -------------------- # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "$*" } # func_echo_infix_1 INFIX ARG... # ------------------------------ # Echo program name, followed by INFIX on the first line, with any # additional lines not showing INFIX. 
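# For example (illustrative only), with progname=libtool and no terminal
# colours:
#   func_echo_infix_1 "warning" "could not find foo"
# writes "libtool: warning: could not find foo" to standard error; further
# lines of a multi-line message are indented under the first.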
func_echo_infix_1 () { $debug_cmd $require_term_colors _G_infix=$1; shift _G_indent=$_G_infix _G_prefix="$progname: $_G_infix: " _G_message=$* # Strip color escape sequences before counting printable length for _G_tc in "$tc_reset" "$tc_bold" "$tc_standout" "$tc_red" "$tc_green" "$tc_blue" "$tc_cyan" do test -n "$_G_tc" && { _G_esc_tc=`$ECHO "$_G_tc" | $SED "$sed_make_literal_regex"` _G_indent=`$ECHO "$_G_indent" | $SED "s|$_G_esc_tc||g"` } done _G_indent="$progname: "`echo "$_G_indent" | $SED 's|.| |g'`" " ## exclude from sc_prohibit_nested_quotes func_echo_infix_1_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_infix_1_IFS $ECHO "$_G_prefix$tc_bold$_G_line$tc_reset" >&2 _G_prefix=$_G_indent done IFS=$func_echo_infix_1_IFS } # func_error ARG... # ----------------- # Echo program name prefixed message to standard error. func_error () { $debug_cmd $require_term_colors func_echo_infix_1 " $tc_standout${tc_red}error$tc_reset" "$*" >&2 } # func_fatal_error ARG... # ----------------------- # Echo program name prefixed message to standard error, and exit. func_fatal_error () { $debug_cmd func_error "$*" exit $EXIT_FAILURE } # func_grep EXPRESSION FILENAME # ----------------------------- # Check whether EXPRESSION matches any line of FILENAME, without output. func_grep () { $debug_cmd $GREP "$1" "$2" >/dev/null 2>&1 } # func_len STRING # --------------- # Set func_len_result to the length of STRING. STRING may not # start with a hyphen. test -z "$_G_HAVE_XSI_OPS" \ && (eval 'x=a/b/c; test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ && _G_HAVE_XSI_OPS=yes if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_len () { $debug_cmd func_len_result=${#1} }' else func_len () { $debug_cmd func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len` } fi # func_mkdir_p DIRECTORY-PATH # --------------------------- # Make sure the entire path to DIRECTORY-PATH is available. func_mkdir_p () { $debug_cmd _G_directory_path=$1 _G_dir_list= if test -n "$_G_directory_path" && test : != "$opt_dry_run"; then # Protect directory names starting with '-' case $_G_directory_path in -*) _G_directory_path=./$_G_directory_path ;; esac # While some portion of DIR does not yet exist... while test ! -d "$_G_directory_path"; do # ...make a list in topmost first order. Use a colon delimited # list incase some portion of path contains whitespace. _G_dir_list=$_G_directory_path:$_G_dir_list # If the last portion added has no slash in it, the list is done case $_G_directory_path in */*) ;; *) break ;; esac # ...otherwise throw away the child directory and loop _G_directory_path=`$ECHO "$_G_directory_path" | $SED -e "$sed_dirname"` done _G_dir_list=`$ECHO "$_G_dir_list" | $SED 's|:*$||'` func_mkdir_p_IFS=$IFS; IFS=: for _G_dir in $_G_dir_list; do IFS=$func_mkdir_p_IFS # mkdir can fail with a 'File exist' error if two processes # try to create one of the directories concurrently. Don't # stop in that case! $MKDIR "$_G_dir" 2>/dev/null || : done IFS=$func_mkdir_p_IFS # Bail out if we (or some other process) failed to create a directory. test -d "$_G_directory_path" || \ func_fatal_error "Failed to create '$1'" fi } # func_mktempdir [BASENAME] # ------------------------- # Make a temporary directory that won't clash with other running # libtool processes, and avoids race conditions if possible. If # given, BASENAME is the basename for that directory. 
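# For example (illustrative only):
#   my_tmpdir=`func_mktempdir`   # e.g. /tmp/libtool-XXXXXXXX, or
#                                # /tmp/libtool-<random><pid> without mktemp
#   ... work inside "$my_tmpdir" ...
#   $RM -r "$my_tmpdir"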
func_mktempdir () { $debug_cmd _G_template=${TMPDIR-/tmp}/${1-$progname} if test : = "$opt_dry_run"; then # Return a directory name, but don't create it in dry-run mode _G_tmpdir=$_G_template-$$ else # If mktemp works, use that first and foremost _G_tmpdir=`mktemp -d "$_G_template-XXXXXXXX" 2>/dev/null` if test ! -d "$_G_tmpdir"; then # Failing that, at least try and use $RANDOM to avoid a race _G_tmpdir=$_G_template-${RANDOM-0}$$ func_mktempdir_umask=`umask` umask 0077 $MKDIR "$_G_tmpdir" umask $func_mktempdir_umask fi # If we're not in dry-run mode, bomb out on failure test -d "$_G_tmpdir" || \ func_fatal_error "cannot create temporary directory '$_G_tmpdir'" fi $ECHO "$_G_tmpdir" } # func_normal_abspath PATH # ------------------------ # Remove doubled-up and trailing slashes, "." path components, # and cancel out any ".." path components in PATH after making # it an absolute path. func_normal_abspath () { $debug_cmd # These SED scripts presuppose an absolute path with a trailing slash. _G_pathcar='s|^/\([^/]*\).*$|\1|' _G_pathcdr='s|^/[^/]*||' _G_removedotparts=':dotsl s|/\./|/|g t dotsl s|/\.$|/|' _G_collapseslashes='s|/\{1,\}|/|g' _G_finalslash='s|/*$|/|' # Start from root dir and reassemble the path. func_normal_abspath_result= func_normal_abspath_tpath=$1 func_normal_abspath_altnamespace= case $func_normal_abspath_tpath in "") # Empty path, that just means $cwd. func_stripname '' '/' "`pwd`" func_normal_abspath_result=$func_stripname_result return ;; # The next three entries are used to spot a run of precisely # two leading slashes without using negated character classes; # we take advantage of case's first-match behaviour. ///*) # Unusual form of absolute path, do nothing. ;; //*) # Not necessarily an ordinary path; POSIX reserves leading '//' # and for example Cygwin uses it to access remote file shares # over CIFS/SMB, so we conserve a leading double slash if found. func_normal_abspath_altnamespace=/ ;; /*) # Absolute path, do nothing. ;; *) # Relative path, prepend $cwd. func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath ;; esac # Cancel out all the simple stuff to save iterations. We also want # the path to end with a slash for ease of parsing, so make sure # there is one (and only one) here. func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_removedotparts" -e "$_G_collapseslashes" -e "$_G_finalslash"` while :; do # Processed it all yet? if test / = "$func_normal_abspath_tpath"; then # If we ascended to the root using ".." the result may be empty now. if test -z "$func_normal_abspath_result"; then func_normal_abspath_result=/ fi break fi func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_pathcar"` func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ -e "$_G_pathcdr"` # Figure out what to do with it case $func_normal_abspath_tcomponent in "") # Trailing empty path component, ignore it. ;; ..) # Parent dir; strip last assembled component from result. func_dirname "$func_normal_abspath_result" func_normal_abspath_result=$func_dirname_result ;; *) # Actual path component, append it. func_append func_normal_abspath_result "/$func_normal_abspath_tcomponent" ;; esac done # Restore leading double-slash if one was found on entry. func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result } # func_notquiet ARG... # -------------------- # Echo program name prefixed message only when not in quiet mode. 
func_notquiet () { $debug_cmd $opt_quiet || func_echo ${1+"$@"} # A bug in bash halts the script if the last line of a function # fails when set -e is in force, so we need another command to # work around that: : } # func_relative_path SRCDIR DSTDIR # -------------------------------- # Set func_relative_path_result to the relative path from SRCDIR to DSTDIR. func_relative_path () { $debug_cmd func_relative_path_result= func_normal_abspath "$1" func_relative_path_tlibdir=$func_normal_abspath_result func_normal_abspath "$2" func_relative_path_tbindir=$func_normal_abspath_result # Ascend the tree starting from libdir while :; do # check if we have found a prefix of bindir case $func_relative_path_tbindir in $func_relative_path_tlibdir) # found an exact match func_relative_path_tcancelled= break ;; $func_relative_path_tlibdir*) # found a matching prefix func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" func_relative_path_tcancelled=$func_stripname_result if test -z "$func_relative_path_result"; then func_relative_path_result=. fi break ;; *) func_dirname $func_relative_path_tlibdir func_relative_path_tlibdir=$func_dirname_result if test -z "$func_relative_path_tlibdir"; then # Have to descend all the way to the root! func_relative_path_result=../$func_relative_path_result func_relative_path_tcancelled=$func_relative_path_tbindir break fi func_relative_path_result=../$func_relative_path_result ;; esac done # Now calculate path; take care to avoid doubling-up slashes. func_stripname '' '/' "$func_relative_path_result" func_relative_path_result=$func_stripname_result func_stripname '/' '/' "$func_relative_path_tcancelled" if test -n "$func_stripname_result"; then func_append func_relative_path_result "/$func_stripname_result" fi # Normalisation. If bindir is libdir, return '.' else relative path. if test -n "$func_relative_path_result"; then func_stripname './' '' "$func_relative_path_result" func_relative_path_result=$func_stripname_result fi test -n "$func_relative_path_result" || func_relative_path_result=. : } # func_quote_for_eval ARG... # -------------------------- # Aesthetically quote ARGs to be evaled later. # This function returns two values: # i) func_quote_for_eval_result # double-quoted, suitable for a subsequent eval # ii) func_quote_for_eval_unquoted_result # has all characters that are still active within double # quotes backslashified. func_quote_for_eval () { $debug_cmd func_quote_for_eval_unquoted_result= func_quote_for_eval_result= while test 0 -lt $#; do case $1 in *[\\\`\"\$]*) _G_unquoted_arg=`printf '%s\n' "$1" |$SED "$sed_quote_subst"` ;; *) _G_unquoted_arg=$1 ;; esac if test -n "$func_quote_for_eval_unquoted_result"; then func_append func_quote_for_eval_unquoted_result " $_G_unquoted_arg" else func_append func_quote_for_eval_unquoted_result "$_G_unquoted_arg" fi case $_G_unquoted_arg in # Double-quote args containing shell metacharacters to delay # word splitting, command substitution and variable expansion # for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. 
*[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") _G_quoted_arg=\"$_G_unquoted_arg\" ;; *) _G_quoted_arg=$_G_unquoted_arg ;; esac if test -n "$func_quote_for_eval_result"; then func_append func_quote_for_eval_result " $_G_quoted_arg" else func_append func_quote_for_eval_result "$_G_quoted_arg" fi shift done } # func_quote_for_expand ARG # ------------------------- # Aesthetically quote ARG to be evaled later; same as above, # but do not quote variable references. func_quote_for_expand () { $debug_cmd case $1 in *[\\\`\"]*) _G_arg=`$ECHO "$1" | $SED \ -e "$sed_double_quote_subst" -e "$sed_double_backslash"` ;; *) _G_arg=$1 ;; esac case $_G_arg in # Double-quote args containing shell metacharacters to delay # word splitting and command substitution for a subsequent eval. # Many Bourne shells cannot handle close brackets correctly # in scan sets, so we specify it separately. *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") _G_arg=\"$_G_arg\" ;; esac func_quote_for_expand_result=$_G_arg } # func_stripname PREFIX SUFFIX NAME # --------------------------------- # strip PREFIX and SUFFIX from NAME, and store in func_stripname_result. # PREFIX and SUFFIX must not contain globbing or regex special # characters, hashes, percent signs, but SUFFIX may contain a leading # dot (in which case that matches only a dot). if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_stripname () { $debug_cmd # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are # positional parameters, so assign one to ordinary variable first. func_stripname_result=$3 func_stripname_result=${func_stripname_result#"$1"} func_stripname_result=${func_stripname_result%"$2"} }' else func_stripname () { $debug_cmd case $2 in .*) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%\\\\$2\$%%"`;; *) func_stripname_result=`$ECHO "$3" | $SED -e "s%^$1%%" -e "s%$2\$%%"`;; esac } fi # func_show_eval CMD [FAIL_EXP] # ----------------------------- # Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. func_show_eval () { $debug_cmd _G_cmd=$1 _G_fail_exp=${2-':'} func_quote_for_expand "$_G_cmd" eval "func_notquiet $func_quote_for_expand_result" $opt_dry_run || { eval "$_G_cmd" _G_status=$? if test 0 -ne "$_G_status"; then eval "(exit $_G_status); $_G_fail_exp" fi } } # func_show_eval_locale CMD [FAIL_EXP] # ------------------------------------ # Unless opt_quiet is true, then output CMD. Then, if opt_dryrun is # not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP # is given, then evaluate it. Use the saved locale for evaluation. func_show_eval_locale () { $debug_cmd _G_cmd=$1 _G_fail_exp=${2-':'} $opt_quiet || { func_quote_for_expand "$_G_cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || { eval "$_G_user_locale $_G_cmd" _G_status=$? eval "$_G_safe_locale" if test 0 -ne "$_G_status"; then eval "(exit $_G_status); $_G_fail_exp" fi } } # func_tr_sh # ---------- # Turn $1 into a string suitable for a shell variable name. # Result is stored in $func_tr_sh_result. All characters # not in the set a-zA-Z0-9_ are replaced with '_'. Further, # if $1 begins with a digit, a '_' is prepended as well. func_tr_sh () { $debug_cmd case $1 in [0-9]* | *[!a-zA-Z0-9_]*) func_tr_sh_result=`$ECHO "$1" | $SED -e 's/^\([0-9]\)/_\1/' -e 's/[^a-zA-Z0-9_]/_/g'` ;; * ) func_tr_sh_result=$1 ;; esac } # func_verbose ARG... # ------------------- # Echo program name prefixed message in verbose mode only. 
func_verbose () { $debug_cmd $opt_verbose && func_echo "$*" : } # func_warn_and_continue ARG... # ----------------------------- # Echo program name prefixed warning message to standard error. func_warn_and_continue () { $debug_cmd $require_term_colors func_echo_infix_1 "${tc_red}warning$tc_reset" "$*" >&2 } # func_warning CATEGORY ARG... # ---------------------------- # Echo program name prefixed warning message to standard error. Warning # messages can be filtered according to CATEGORY, where this function # elides messages where CATEGORY is not listed in the global variable # 'opt_warning_types'. func_warning () { $debug_cmd # CATEGORY must be in the warning_categories list! case " $warning_categories " in *" $1 "*) ;; *) func_internal_error "invalid warning category '$1'" ;; esac _G_category=$1 shift case " $opt_warning_types " in *" $_G_category "*) $warning_func ${1+"$@"} ;; esac } # func_sort_ver VER1 VER2 # ----------------------- # 'sort -V' is not generally available. # Note this deviates from the version comparison in automake # in that it treats 1.5 < 1.5.0, and treats 1.4.4a < 1.4-p3a # but this should suffice as we won't be specifying old # version formats or redundant trailing .0 in bootstrap.conf. # If we did want full compatibility then we should probably # use m4_version_compare from autoconf. func_sort_ver () { $debug_cmd printf '%s\n%s\n' "$1" "$2" \ | sort -t. -k 1,1n -k 2,2n -k 3,3n -k 4,4n -k 5,5n -k 6,6n -k 7,7n -k 8,8n -k 9,9n } # func_lt_ver PREV CURR # --------------------- # Return true if PREV and CURR are in the correct order according to # func_sort_ver, otherwise false. Use it like this: # # func_lt_ver "$prev_ver" "$proposed_ver" || func_fatal_error "..." func_lt_ver () { $debug_cmd test "x$1" = x`func_sort_ver "$1" "$2" | $SED 1q` } # Local variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" # time-stamp-time-zone: "UTC" # End: #! /bin/sh # Set a version string for this script. scriptversion=2014-01-07.03; # UTC # A portable, pluggable option parser for Bourne shell. # Written by Gary V. Vaughan, 2010 # Copyright (C) 2010-2015 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . # Please report bugs or propose patches to gary@gnu.org. ## ------ ## ## Usage. ## ## ------ ## # This file is a library for parsing options in your shell scripts along # with assorted other useful supporting features that you can make use # of too. # # For the simplest scripts you might need only: # # #!/bin/sh # . relative/path/to/funclib.sh # . relative/path/to/options-parser # scriptversion=1.0 # func_options ${1+"$@"} # eval set dummy "$func_options_result"; shift # ...rest of your script... 
# # In order for the '--version' option to work, you will need to have a # suitably formatted comment like the one at the top of this file # starting with '# Written by ' and ending with '# warranty; '. # # For '-h' and '--help' to work, you will also need a one line # description of your script's purpose in a comment directly above the # '# Written by ' line, like the one at the top of this file. # # The default options also support '--debug', which will turn on shell # execution tracing (see the comment above debug_cmd below for another # use), and '--verbose' and the func_verbose function to allow your script # to display verbose messages only when your user has specified # '--verbose'. # # After sourcing this file, you can plug processing for additional # options by amending the variables from the 'Configuration' section # below, and following the instructions in the 'Option parsing' # section further down. ## -------------- ## ## Configuration. ## ## -------------- ## # You should override these variables in your script after sourcing this # file so that they reflect the customisations you have added to the # option parser. # The usage line for option parsing errors and the start of '-h' and # '--help' output messages. You can embed shell variables for delayed # expansion at the time the message is displayed, but you will need to # quote other shell meta-characters carefully to prevent them being # expanded when the contents are evaled. usage='$progpath [OPTION]...' # Short help message in response to '-h' and '--help'. Add to this or # override it after sourcing this library to reflect the full set of # options your script accepts. usage_message="\ --debug enable verbose shell tracing -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] -v, --verbose verbosely report processing --version print version information and exit -h, --help print short or long help message and exit " # Additional text appended to 'usage_message' in response to '--help'. long_help_message=" Warning categories include: 'all' show all warnings 'none' turn off all the warnings 'error' warnings are treated as fatal errors" # Help message printed before fatal option parsing errors. fatal_help="Try '\$progname --help' for more information." ## ------------------------- ## ## Hook function management. ## ## ------------------------- ## # This section contains functions for adding, removing, and running hooks # to the main code. A hook is just a named list of of function, that can # be run in order later on. # func_hookable FUNC_NAME # ----------------------- # Declare that FUNC_NAME will run hooks added with # 'func_add_hook FUNC_NAME ...'. func_hookable () { $debug_cmd func_append hookable_fns " $1" } # func_add_hook FUNC_NAME HOOK_FUNC # --------------------------------- # Request that FUNC_NAME call HOOK_FUNC before it returns. FUNC_NAME must # first have been declared "hookable" by a call to 'func_hookable'. func_add_hook () { $debug_cmd case " $hookable_fns " in *" $1 "*) ;; *) func_fatal_error "'$1' does not accept hook functions." ;; esac eval func_append ${1}_hooks '" $2"' } # func_remove_hook FUNC_NAME HOOK_FUNC # ------------------------------------ # Remove HOOK_FUNC from the list of functions called by FUNC_NAME. func_remove_hook () { $debug_cmd eval ${1}_hooks='`$ECHO "\$'$1'_hooks" |$SED "s| '$2'||"`' } # func_run_hooks FUNC_NAME [ARG]... # --------------------------------- # Run all hook functions registered to FUNC_NAME. 
# It is assumed that the list of hook functions contains nothing more # than a whitespace-delimited list of legal shell function names, and # no effort is wasted trying to catch shell meta-characters or preserve # whitespace. func_run_hooks () { $debug_cmd case " $hookable_fns " in *" $1 "*) ;; *) func_fatal_error "'$1' does not support hook funcions.n" ;; esac eval _G_hook_fns=\$$1_hooks; shift for _G_hook in $_G_hook_fns; do eval $_G_hook '"$@"' # store returned options list back into positional # parameters for next 'cmd' execution. eval _G_hook_result=\$${_G_hook}_result eval set dummy "$_G_hook_result"; shift done func_quote_for_eval ${1+"$@"} func_run_hooks_result=$func_quote_for_eval_result } ## --------------- ## ## Option parsing. ## ## --------------- ## # In order to add your own option parsing hooks, you must accept the # full positional parameter list in your hook function, remove any # options that you action, and then pass back the remaining unprocessed # options in '_result', escaped suitably for # 'eval'. Like this: # # my_options_prep () # { # $debug_cmd # # # Extend the existing usage message. # usage_message=$usage_message' # -s, --silent don'\''t print informational messages # ' # # func_quote_for_eval ${1+"$@"} # my_options_prep_result=$func_quote_for_eval_result # } # func_add_hook func_options_prep my_options_prep # # # my_silent_option () # { # $debug_cmd # # # Note that for efficiency, we parse as many options as we can # # recognise in a loop before passing the remainder back to the # # caller on the first unrecognised argument we encounter. # while test $# -gt 0; do # opt=$1; shift # case $opt in # --silent|-s) opt_silent=: ;; # # Separate non-argument short options: # -s*) func_split_short_opt "$_G_opt" # set dummy "$func_split_short_opt_name" \ # "-$func_split_short_opt_arg" ${1+"$@"} # shift # ;; # *) set dummy "$_G_opt" "$*"; shift; break ;; # esac # done # # func_quote_for_eval ${1+"$@"} # my_silent_option_result=$func_quote_for_eval_result # } # func_add_hook func_parse_options my_silent_option # # # my_option_validation () # { # $debug_cmd # # $opt_silent && $opt_verbose && func_fatal_help "\ # '--silent' and '--verbose' options are mutually exclusive." # # func_quote_for_eval ${1+"$@"} # my_option_validation_result=$func_quote_for_eval_result # } # func_add_hook func_validate_options my_option_validation # # You'll alse need to manually amend $usage_message to reflect the extra # options you parse. It's preferable to append if you can, so that # multiple option parsing hooks can be added safely. # func_options [ARG]... # --------------------- # All the functions called inside func_options are hookable. See the # individual implementations for details. func_hookable func_options func_options () { $debug_cmd func_options_prep ${1+"$@"} eval func_parse_options \ ${func_options_prep_result+"$func_options_prep_result"} eval func_validate_options \ ${func_parse_options_result+"$func_parse_options_result"} eval func_run_hooks func_options \ ${func_validate_options_result+"$func_validate_options_result"} # save modified positional parameters for caller func_options_result=$func_run_hooks_result } # func_options_prep [ARG]... # -------------------------- # All initialisations required before starting the option parse loop. # Note that when calling hook functions, we pass through the list of # positional parameters. 
If a hook function modifies that list, and # needs to propogate that back to rest of this script, then the complete # modified list must be put in 'func_run_hooks_result' before # returning. func_hookable func_options_prep func_options_prep () { $debug_cmd # Option defaults: opt_verbose=false opt_warning_types= func_run_hooks func_options_prep ${1+"$@"} # save modified positional parameters for caller func_options_prep_result=$func_run_hooks_result } # func_parse_options [ARG]... # --------------------------- # The main option parsing loop. func_hookable func_parse_options func_parse_options () { $debug_cmd func_parse_options_result= # this just eases exit handling while test $# -gt 0; do # Defer to hook functions for initial option parsing, so they # get priority in the event of reusing an option name. func_run_hooks func_parse_options ${1+"$@"} # Adjust func_parse_options positional parameters to match eval set dummy "$func_run_hooks_result"; shift # Break out of the loop if we already parsed every option. test $# -gt 0 || break _G_opt=$1 shift case $_G_opt in --debug|-x) debug_cmd='set -x' func_echo "enabling shell trace mode" $debug_cmd ;; --no-warnings|--no-warning|--no-warn) set dummy --warnings none ${1+"$@"} shift ;; --warnings|--warning|-W) test $# = 0 && func_missing_arg $_G_opt && break case " $warning_categories $1" in *" $1 "*) # trailing space prevents matching last $1 above func_append_uniq opt_warning_types " $1" ;; *all) opt_warning_types=$warning_categories ;; *none) opt_warning_types=none warning_func=: ;; *error) opt_warning_types=$warning_categories warning_func=func_fatal_error ;; *) func_fatal_error \ "unsupported warning category: '$1'" ;; esac shift ;; --verbose|-v) opt_verbose=: ;; --version) func_version ;; -\?|-h) func_usage ;; --help) func_help ;; # Separate optargs to long options (plugins may need this): --*=*) func_split_equals "$_G_opt" set dummy "$func_split_equals_lhs" \ "$func_split_equals_rhs" ${1+"$@"} shift ;; # Separate optargs to short options: -W*) func_split_short_opt "$_G_opt" set dummy "$func_split_short_opt_name" \ "$func_split_short_opt_arg" ${1+"$@"} shift ;; # Separate non-argument short options: -\?*|-h*|-v*|-x*) func_split_short_opt "$_G_opt" set dummy "$func_split_short_opt_name" \ "-$func_split_short_opt_arg" ${1+"$@"} shift ;; --) break ;; -*) func_fatal_help "unrecognised option: '$_G_opt'" ;; *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; esac done # save modified positional parameters for caller func_quote_for_eval ${1+"$@"} func_parse_options_result=$func_quote_for_eval_result } # func_validate_options [ARG]... # ------------------------------ # Perform any sanity checks on option settings and/or unconsumed # arguments. func_hookable func_validate_options func_validate_options () { $debug_cmd # Display all warnings if -W was not given. test -n "$opt_warning_types" || opt_warning_types=" $warning_categories" func_run_hooks func_validate_options ${1+"$@"} # Bail if the options were screwed! $exit_cmd $EXIT_FAILURE # save modified positional parameters for caller func_validate_options_result=$func_run_hooks_result } ## ----------------- ## ## Helper functions. ## ## ----------------- ## # This section contains the helper functions used by the rest of the # hookable option parser framework in ascii-betical order. # func_fatal_help ARG... # ---------------------- # Echo program name prefixed message to standard error, followed by # a help hint, and exit. 
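# For example (illustrative only):
#   test -n "$opt_mode" || func_fatal_help "you must specify a MODE"
# prints the usage line and the help hint, reports the error, and exits
# with $EXIT_FAILURE.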
func_fatal_help () { $debug_cmd eval \$ECHO \""Usage: $usage"\" eval \$ECHO \""$fatal_help"\" func_error ${1+"$@"} exit $EXIT_FAILURE } # func_help # --------- # Echo long help message to standard output and exit. func_help () { $debug_cmd func_usage_message $ECHO "$long_help_message" exit 0 } # func_missing_arg ARGNAME # ------------------------ # Echo program name prefixed message to standard error and set global # exit_cmd. func_missing_arg () { $debug_cmd func_error "Missing argument for '$1'." exit_cmd=exit } # func_split_equals STRING # ------------------------ # Set func_split_equals_lhs and func_split_equals_rhs shell variables after # splitting STRING at the '=' sign. test -z "$_G_HAVE_XSI_OPS" \ && (eval 'x=a/b/c; test 5aa/bb/cc = "${#x}${x%%/*}${x%/*}${x#*/}${x##*/}"') 2>/dev/null \ && _G_HAVE_XSI_OPS=yes if test yes = "$_G_HAVE_XSI_OPS" then # This is an XSI compatible shell, allowing a faster implementation... eval 'func_split_equals () { $debug_cmd func_split_equals_lhs=${1%%=*} func_split_equals_rhs=${1#*=} test "x$func_split_equals_lhs" = "x$1" \ && func_split_equals_rhs= }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_split_equals () { $debug_cmd func_split_equals_lhs=`expr "x$1" : 'x\([^=]*\)'` func_split_equals_rhs= test "x$func_split_equals_lhs" = "x$1" \ || func_split_equals_rhs=`expr "x$1" : 'x[^=]*=\(.*\)$'` } fi #func_split_equals # func_split_short_opt SHORTOPT # ----------------------------- # Set func_split_short_opt_name and func_split_short_opt_arg shell # variables after splitting SHORTOPT after the 2nd character. if test yes = "$_G_HAVE_XSI_OPS" then # This is an XSI compatible shell, allowing a faster implementation... eval 'func_split_short_opt () { $debug_cmd func_split_short_opt_arg=${1#??} func_split_short_opt_name=${1%"$func_split_short_opt_arg"} }' else # ...otherwise fall back to using expr, which is often a shell builtin. func_split_short_opt () { $debug_cmd func_split_short_opt_name=`expr "x$1" : 'x-\(.\)'` func_split_short_opt_arg=`expr "x$1" : 'x-.\(.*\)$'` } fi #func_split_short_opt # func_usage # ---------- # Echo short help message to standard output and exit. func_usage () { $debug_cmd func_usage_message $ECHO "Run '$progname --help |${PAGER-more}' for full usage" exit 0 } # func_usage_message # ------------------ # Echo short help message to standard output. func_usage_message () { $debug_cmd eval \$ECHO \""Usage: $usage"\" echo $SED -n 's|^# || /^Written by/{ x;p;x } h /^Written by/q' < "$progpath" echo eval \$ECHO \""$usage_message"\" } # func_version # ------------ # Echo version message to standard output and exit. func_version () { $debug_cmd printf '%s\n' "$progname $scriptversion" $SED -n ' /(C)/!b go :more /\./!{ N s|\n# | | b more } :go /^# Written by /,/# warranty; / { s|^# || s|^# *$|| s|\((C)\)[ 0-9,-]*[ ,-]\([1-9][0-9]* \)|\1 \2| p } /^# Written by / { s|^# || p } /^warranty; /q' < "$progpath" exit $? } # Local variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'before-save-hook 'time-stamp) # time-stamp-pattern: "10/scriptversion=%:y-%02m-%02d.%02H; # UTC" # time-stamp-time-zone: "UTC" # End: # Set a version string. scriptversion='(GNU libtool) 2.4.6' # func_echo ARG... # ---------------- # Libtool also displays the current mode in messages, so override # funclib.sh func_echo with this custom definition. 
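# For example (illustrative only), while opt_mode=link is in effect:
#   func_echo "creating libfoo.la"
# prints "libtool: link: creating libfoo.la".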
func_echo () { $debug_cmd _G_message=$* func_echo_IFS=$IFS IFS=$nl for _G_line in $_G_message; do IFS=$func_echo_IFS $ECHO "$progname${opt_mode+: $opt_mode}: $_G_line" done IFS=$func_echo_IFS } # func_warning ARG... # ------------------- # Libtool warnings are not categorized, so override funclib.sh # func_warning with this simpler definition. func_warning () { $debug_cmd $warning_func ${1+"$@"} } ## ---------------- ## ## Options parsing. ## ## ---------------- ## # Hook in the functions to make sure our own options are parsed during # the option parsing loop. usage='$progpath [OPTION]... [MODE-ARG]...' # Short help message in response to '-h'. usage_message="Options: --config show all configuration variables --debug enable verbose shell tracing -n, --dry-run display commands without modifying any files --features display basic configuration information and exit --mode=MODE use operation mode MODE --no-warnings equivalent to '-Wnone' --preserve-dup-deps don't remove duplicate dependency libraries --quiet, --silent don't print informational messages --tag=TAG use configuration variables from tag TAG -v, --verbose print more informational messages than default --version print version information -W, --warnings=CATEGORY report the warnings falling in CATEGORY [all] -h, --help, --help-all print short, long, or detailed help message " # Additional text appended to 'usage_message' in response to '--help'. func_help () { $debug_cmd func_usage_message $ECHO "$long_help_message MODE must be one of the following: clean remove files from the build directory compile compile a source file into a libtool object execute automatically set library path, then run a program finish complete the installation of libtool libraries install install libraries or executables link create a library or an executable uninstall remove libraries from an installed directory MODE-ARGS vary depending on the MODE. When passed as first option, '--mode=MODE' may be abbreviated as 'MODE' or a unique abbreviation of that. Try '$progname --help --mode=MODE' for a more detailed description of MODE. When reporting a bug, please describe a test case to reproduce it and include the following information: host-triplet: $host shell: $SHELL compiler: $LTCC compiler flags: $LTCFLAGS linker: $LD (gnu? $with_gnu_ld) version: $progname $scriptversion Debian-2.4.6-2 automake: `($AUTOMAKE --version) 2>/dev/null |$SED 1q` autoconf: `($AUTOCONF --version) 2>/dev/null |$SED 1q` Report bugs to . GNU libtool home page: . General help using GNU software: ." exit 0 } # func_lo2o OBJECT-NAME # --------------------- # Transform OBJECT-NAME from a '.lo' suffix to the platform specific # object suffix. lo2o=s/\\.lo\$/.$objext/ o2lo=s/\\.$objext\$/.lo/ if test yes = "$_G_HAVE_XSI_OPS"; then eval 'func_lo2o () { case $1 in *.lo) func_lo2o_result=${1%.lo}.$objext ;; * ) func_lo2o_result=$1 ;; esac }' # func_xform LIBOBJ-OR-SOURCE # --------------------------- # Transform LIBOBJ-OR-SOURCE from a '.o' or '.c' (or otherwise) # suffix to a '.lo' libtool-object suffix. eval 'func_xform () { func_xform_result=${1%.*}.lo }' else # ...otherwise fall back to using sed. func_lo2o () { func_lo2o_result=`$ECHO "$1" | $SED "$lo2o"` } func_xform () { func_xform_result=`$ECHO "$1" | $SED 's|\.[^.]*$|.lo|'` } fi # func_fatal_configuration ARG... # ------------------------------- # Echo program name prefixed message to standard error, followed by # a configuration failure hint, and exit. 
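# For example (illustrative only),
#   func_fatal_configuration "not configured to build any kind of library"
# is meant to report the message together with a pointer to the $PACKAGE
# documentation and then exit.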
func_fatal_configuration () { func__fatal_error ${1+"$@"} \ "See the $PACKAGE documentation for more information." \ "Fatal configuration error." } # func_config # ----------- # Display the configuration for all the tags in this script. func_config () { re_begincf='^# ### BEGIN LIBTOOL' re_endcf='^# ### END LIBTOOL' # Default configuration. $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" # Now print the configurations for the tags. for tagname in $taglist; do $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" done exit $? } # func_features # ------------- # Display the features supported by this script. func_features () { echo "host: $host" if test yes = "$build_libtool_libs"; then echo "enable shared libraries" else echo "disable shared libraries" fi if test yes = "$build_old_libs"; then echo "enable static libraries" else echo "disable static libraries" fi exit $? } # func_enable_tag TAGNAME # ----------------------- # Verify that TAGNAME is valid, and either flag an error and exit, or # enable the TAGNAME tag. We also add TAGNAME to the global $taglist # variable here. func_enable_tag () { # Global variable: tagname=$1 re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" sed_extractcf=/$re_begincf/,/$re_endcf/p # Validate tagname. case $tagname in *[!-_A-Za-z0-9,/]*) func_fatal_error "invalid tag name: $tagname" ;; esac # Don't test for the "default" C tag, as we know it's # there but not specially marked. case $tagname in CC) ;; *) if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then taglist="$taglist $tagname" # Evaluate the configuration. Be careful to quote the path # and the sed script, to avoid splitting on whitespace, but # also don't use non-portable quotes within backquotes within # quotes we have to do it in 2 steps: extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` eval "$extractedcf" else func_error "ignoring unknown tag $tagname" fi ;; esac } # func_check_version_match # ------------------------ # Ensure that we are using m4 macros, and libtool script from the same # release of libtool. func_check_version_match () { if test "$package_revision" != "$macro_revision"; then if test "$VERSION" != "$macro_version"; then if test -z "$macro_version"; then cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from an older release. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, but the $progname: definition of this LT_INIT comes from $PACKAGE $macro_version. $progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION $progname: and run autoconf again. _LT_EOF fi else cat >&2 <<_LT_EOF $progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, $progname: but the definition of this LT_INIT comes from revision $macro_revision. $progname: You should recreate aclocal.m4 with macros from revision $package_revision $progname: of $PACKAGE $VERSION and run autoconf again. _LT_EOF fi exit $EXIT_MISMATCH fi } # libtool_options_prep [ARG]... # ----------------------------- # Preparation for options parsed by libtool. 
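# For example (illustrative only), the mode shorthand handled below rewrites
#   libtool clean foo.lo
# into the canonical form
#   libtool --mode clean foo.lo
# before the generic option loop sees the arguments.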
libtool_options_prep () { $debug_mode # Option defaults: opt_config=false opt_dlopen= opt_dry_run=false opt_help=false opt_mode= opt_preserve_dup_deps=false opt_quiet=false nonopt= preserve_args= # Shorthand for --mode=foo, only valid as the first argument case $1 in clean|clea|cle|cl) shift; set dummy --mode clean ${1+"$@"}; shift ;; compile|compil|compi|comp|com|co|c) shift; set dummy --mode compile ${1+"$@"}; shift ;; execute|execut|execu|exec|exe|ex|e) shift; set dummy --mode execute ${1+"$@"}; shift ;; finish|finis|fini|fin|fi|f) shift; set dummy --mode finish ${1+"$@"}; shift ;; install|instal|insta|inst|ins|in|i) shift; set dummy --mode install ${1+"$@"}; shift ;; link|lin|li|l) shift; set dummy --mode link ${1+"$@"}; shift ;; uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) shift; set dummy --mode uninstall ${1+"$@"}; shift ;; esac # Pass back the list of options. func_quote_for_eval ${1+"$@"} libtool_options_prep_result=$func_quote_for_eval_result } func_add_hook func_options_prep libtool_options_prep # libtool_parse_options [ARG]... # --------------------------------- # Provide handling for libtool specific options. libtool_parse_options () { $debug_cmd # Perform our own loop to consume as many options as possible in # each iteration. while test $# -gt 0; do _G_opt=$1 shift case $_G_opt in --dry-run|--dryrun|-n) opt_dry_run=: ;; --config) func_config ;; --dlopen|-dlopen) opt_dlopen="${opt_dlopen+$opt_dlopen }$1" shift ;; --preserve-dup-deps) opt_preserve_dup_deps=: ;; --features) func_features ;; --finish) set dummy --mode finish ${1+"$@"}; shift ;; --help) opt_help=: ;; --help-all) opt_help=': help-all' ;; --mode) test $# = 0 && func_missing_arg $_G_opt && break opt_mode=$1 case $1 in # Valid mode arguments: clean|compile|execute|finish|install|link|relink|uninstall) ;; # Catch anything else as an error *) func_error "invalid argument for $_G_opt" exit_cmd=exit break ;; esac shift ;; --no-silent|--no-quiet) opt_quiet=false func_append preserve_args " $_G_opt" ;; --no-warnings|--no-warning|--no-warn) opt_warning=false func_append preserve_args " $_G_opt" ;; --no-verbose) opt_verbose=false func_append preserve_args " $_G_opt" ;; --silent|--quiet) opt_quiet=: opt_verbose=false func_append preserve_args " $_G_opt" ;; --tag) test $# = 0 && func_missing_arg $_G_opt && break opt_tag=$1 func_append preserve_args " $_G_opt $1" func_enable_tag "$1" shift ;; --verbose|-v) opt_quiet=false opt_verbose=: func_append preserve_args " $_G_opt" ;; # An option not handled by this hook function: *) set dummy "$_G_opt" ${1+"$@"}; shift; break ;; esac done # save modified positional parameters for caller func_quote_for_eval ${1+"$@"} libtool_parse_options_result=$func_quote_for_eval_result } func_add_hook func_parse_options libtool_parse_options # libtool_validate_options [ARG]... # --------------------------------- # Perform any sanity checks on option settings and/or unconsumed # arguments. 
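# For example (illustrative only), an invocation such as
#   libtool --mode=compile -dlopen self foo.c
# is rejected here, because only execute mode may carry -dlopen flags.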
libtool_validate_options () { # save first non-option argument if test 0 -lt $#; then nonopt=$1 shift fi # preserve --debug test : = "$debug_cmd" || func_append preserve_args " --debug" case $host in # Solaris2 added to fix http://debbugs.gnu.org/cgi/bugreport.cgi?bug=16452 # see also: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=59788 *cygwin* | *mingw* | *pw32* | *cegcc* | *solaris2* | *os2*) # don't eliminate duplications in $postdeps and $predeps opt_duplicate_compiler_generated_deps=: ;; *) opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps ;; esac $opt_help || { # Sanity checks first: func_check_version_match test yes != "$build_libtool_libs" \ && test yes != "$build_old_libs" \ && func_fatal_configuration "not configured to build any kind of library" # Darwin sucks eval std_shrext=\"$shrext_cmds\" # Only execute mode is allowed to have -dlopen flags. if test -n "$opt_dlopen" && test execute != "$opt_mode"; then func_error "unrecognized option '-dlopen'" $ECHO "$help" 1>&2 exit $EXIT_FAILURE fi # Change the help message to a mode-specific one. generic_help=$help help="Try '$progname --help --mode=$opt_mode' for more information." } # Pass back the unparsed argument list func_quote_for_eval ${1+"$@"} libtool_validate_options_result=$func_quote_for_eval_result } func_add_hook func_validate_options libtool_validate_options # Process options as early as possible so that --help and --version # can return quickly. func_options ${1+"$@"} eval set dummy "$func_options_result"; shift ## ----------- ## ## Main. ## ## ----------- ## magic='%%%MAGIC variable%%%' magic_exe='%%%MAGIC EXE variable%%%' # Global variables. extracted_archives= extracted_serial=0 # If this variable is set in any of the actions, the command in it # will be execed at the end. This prevents here-documents from being # left over by shells. exec_cmd= # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } # func_generated_by_libtool # True iff stdin has been generated by Libtool. This function is only # a basic sanity check; it will hardly flush out determined imposters. func_generated_by_libtool_p () { $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 } # func_lalib_p file # True iff FILE is a libtool '.la' library or '.lo' object file. # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_lalib_p () { test -f "$1" && $SED -e 4q "$1" 2>/dev/null | func_generated_by_libtool_p } # func_lalib_unsafe_p file # True iff FILE is a libtool '.la' library or '.lo' object file. # This function implements the same check as func_lalib_p without # resorting to external programs. To this end, it redirects stdin and # closes it afterwards, without saving the original file descriptor. # As a safety measure, use it only where a negative result would be # fatal anyway. Works if 'file' does not exist. func_lalib_unsafe_p () { lalib_p=no if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then for lalib_p_l in 1 2 3 4 do read lalib_p_line case $lalib_p_line in \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; esac done exec 0<&5 5<&- fi test yes = "$lalib_p" } # func_ltwrapper_script_p file # True iff FILE is a libtool wrapper script # This function is only a basic sanity check; it will hardly flush out # determined imposters. 
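# For example (illustrative only, the variable name is hypothetical):
#   if func_ltwrapper_script_p "$file"; then
#     func_source "$file"   # wrapper scripts are plain sh and can be sourced
#   fi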
func_ltwrapper_script_p () { test -f "$1" && $lt_truncate_bin < "$1" 2>/dev/null | func_generated_by_libtool_p } # func_ltwrapper_executable_p file # True iff FILE is a libtool wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_executable_p () { func_ltwrapper_exec_suffix= case $1 in *.exe) ;; *) func_ltwrapper_exec_suffix=.exe ;; esac $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 } # func_ltwrapper_scriptname file # Assumes file is an ltwrapper_executable # uses $file to determine the appropriate filename for a # temporary ltwrapper_script. func_ltwrapper_scriptname () { func_dirname_and_basename "$1" "" "." func_stripname '' '.exe' "$func_basename_result" func_ltwrapper_scriptname_result=$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper } # func_ltwrapper_p file # True iff FILE is a libtool wrapper script or wrapper executable # This function is only a basic sanity check; it will hardly flush out # determined imposters. func_ltwrapper_p () { func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" } # func_execute_cmds commands fail_cmd # Execute tilde-delimited COMMANDS. # If FAIL_CMD is given, eval that upon failure. # FAIL_CMD may read-access the current command in variable CMD! func_execute_cmds () { $debug_cmd save_ifs=$IFS; IFS='~' for cmd in $1; do IFS=$sp$nl eval cmd=\"$cmd\" IFS=$save_ifs func_show_eval "$cmd" "${2-:}" done IFS=$save_ifs } # func_source file # Source FILE, adding directory component if necessary. # Note that it is not necessary on cygwin/mingw to append a dot to # FILE even if both FILE and FILE.exe exist: automatic-append-.exe # behavior happens only for exec(3), not for open(2)! Also, sourcing # 'FILE.' does not work on cygwin managed mounts. func_source () { $debug_cmd case $1 in */* | *\\*) . "$1" ;; *) . "./$1" ;; esac } # func_resolve_sysroot PATH # Replace a leading = in PATH with a sysroot. Store the result into # func_resolve_sysroot_result func_resolve_sysroot () { func_resolve_sysroot_result=$1 case $func_resolve_sysroot_result in =*) func_stripname '=' '' "$func_resolve_sysroot_result" func_resolve_sysroot_result=$lt_sysroot$func_stripname_result ;; esac } # func_replace_sysroot PATH # If PATH begins with the sysroot, replace it with = and # store the result into func_replace_sysroot_result. func_replace_sysroot () { case $lt_sysroot:$1 in ?*:"$lt_sysroot"*) func_stripname "$lt_sysroot" '' "$1" func_replace_sysroot_result='='$func_stripname_result ;; *) # Including no sysroot. func_replace_sysroot_result=$1 ;; esac } # func_infer_tag arg # Infer tagged configuration to use if any are available and # if one wasn't chosen via the "--tag" command line option. # Only attempt this if the compiler in the base compile # command doesn't match the default compiler. # arg is usually of the form 'gcc ...' func_infer_tag () { $debug_cmd if test -n "$available_tags" && test -z "$tagname"; then CC_quoted= for arg in $CC; do func_append_quoted CC_quoted "$arg" done CC_expanded=`func_echo_all $CC` CC_quoted_expanded=`func_echo_all $CC_quoted` case $@ in # Blanks in the command may have been stripped by the calling shell, # but not from the CC environment variable when configure was run. " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; # Blanks at the start of $base_compile will cause this to fail # if we don't check for them as well. 
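# Illustration of the branch below (names are examples only): with CC='gcc'
# configured as the default, a compile command that starts with 'g++'
# matches none of the patterns above, so the '*)' branch scans each tag
# listed in $available_tags (for instance CXX) for a
#   # ### BEGIN LIBTOOL TAG CONFIG: CXX
# section whose CC matches 'g++', and infers that tag.  Passing an explicit
#   libtool --tag=CXX --mode=compile g++ -c foo.cpp
# on the command line skips the inference entirely.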
*) for z in $available_tags; do if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then # Evaluate the configuration. eval "`$SED -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" CC_quoted= for arg in $CC; do # Double-quote args containing other shell metacharacters. func_append_quoted CC_quoted "$arg" done CC_expanded=`func_echo_all $CC` CC_quoted_expanded=`func_echo_all $CC_quoted` case "$@ " in " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) # The compiler in the base compile command matches # the one in the tagged configuration. # Assume this is the tagged configuration we want. tagname=$z break ;; esac fi done # If $tagname still isn't set, then no tagged configuration # was found and let the user know that the "--tag" command # line option must be used. if test -z "$tagname"; then func_echo "unable to infer tagged configuration" func_fatal_error "specify a tag with '--tag'" # else # func_verbose "using $tagname tagged configuration" fi ;; esac fi } # func_write_libtool_object output_name pic_name nonpic_name # Create a libtool object file (analogous to a ".la" file), # but don't create it if we're doing a dry run. func_write_libtool_object () { write_libobj=$1 if test yes = "$build_libtool_libs"; then write_lobj=\'$2\' else write_lobj=none fi if test yes = "$build_old_libs"; then write_oldobj=\'$3\' else write_oldobj=none fi $opt_dry_run || { cat >${write_libobj}T </dev/null` if test "$?" -eq 0 && test -n "$func_convert_core_file_wine_to_w32_tmp"; then func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | $SED -e "$sed_naive_backslashify"` else func_convert_core_file_wine_to_w32_result= fi fi } # end: func_convert_core_file_wine_to_w32 # func_convert_core_path_wine_to_w32 ARG # Helper function used by path conversion functions when $build is *nix, and # $host is mingw, cygwin, or some other w32 environment. Relies on a correctly # configured wine environment available, with the winepath program in $build's # $PATH. Assumes ARG has no leading or trailing path separator characters. # # ARG is path to be converted from $build format to win32. # Result is available in $func_convert_core_path_wine_to_w32_result. # Unconvertible file (directory) names in ARG are skipped; if no directory names # are convertible, then the result may be empty. func_convert_core_path_wine_to_w32 () { $debug_cmd # unfortunately, winepath doesn't convert paths, only file names func_convert_core_path_wine_to_w32_result= if test -n "$1"; then oldIFS=$IFS IFS=: for func_convert_core_path_wine_to_w32_f in $1; do IFS=$oldIFS func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" if test -n "$func_convert_core_file_wine_to_w32_result"; then if test -z "$func_convert_core_path_wine_to_w32_result"; then func_convert_core_path_wine_to_w32_result=$func_convert_core_file_wine_to_w32_result else func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" fi fi done IFS=$oldIFS fi } # end: func_convert_core_path_wine_to_w32 # func_cygpath ARGS... # Wrapper around calling the cygpath program via LT_CYGPATH. This is used when # when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) # $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. 
In case (1) or # (2), returns the Cygwin file name or path in func_cygpath_result (input # file name or path is assumed to be in w32 format, as previously converted # from $build's *nix or MSYS format). In case (3), returns the w32 file name # or path in func_cygpath_result (input file name or path is assumed to be in # Cygwin format). Returns an empty string on error. # # ARGS are passed to cygpath, with the last one being the file name or path to # be converted. # # Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH # environment variable; do not put it in $PATH. func_cygpath () { $debug_cmd if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` if test "$?" -ne 0; then # on failure, ensure result is empty func_cygpath_result= fi else func_cygpath_result= func_error "LT_CYGPATH is empty or specifies non-existent file: '$LT_CYGPATH'" fi } #end: func_cygpath # func_convert_core_msys_to_w32 ARG # Convert file name or path ARG from MSYS format to w32 format. Return # result in func_convert_core_msys_to_w32_result. func_convert_core_msys_to_w32 () { $debug_cmd # awkward: cmd appends spaces to result func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | $SED -e 's/[ ]*$//' -e "$sed_naive_backslashify"` } #end: func_convert_core_msys_to_w32 # func_convert_file_check ARG1 ARG2 # Verify that ARG1 (a file name in $build format) was converted to $host # format in ARG2. Otherwise, emit an error message, but continue (resetting # func_to_host_file_result to ARG1). func_convert_file_check () { $debug_cmd if test -z "$2" && test -n "$1"; then func_error "Could not determine host file name corresponding to" func_error " '$1'" func_error "Continuing, but uninstalled executables may not work." # Fallback: func_to_host_file_result=$1 fi } # end func_convert_file_check # func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH # Verify that FROM_PATH (a path in $build format) was converted to $host # format in TO_PATH. Otherwise, emit an error message, but continue, resetting # func_to_host_file_result to a simplistic fallback value (see below). func_convert_path_check () { $debug_cmd if test -z "$4" && test -n "$3"; then func_error "Could not determine the host path corresponding to" func_error " '$3'" func_error "Continuing, but uninstalled executables may not work." # Fallback. This is a deliberately simplistic "conversion" and # should not be "improved". See libtool.info. if test "x$1" != "x$2"; then lt_replace_pathsep_chars="s|$1|$2|g" func_to_host_path_result=`echo "$3" | $SED -e "$lt_replace_pathsep_chars"` else func_to_host_path_result=$3 fi fi } # end func_convert_path_check # func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG # Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT # and appending REPL if ORIG matches BACKPAT. func_convert_path_front_back_pathsep () { $debug_cmd case $4 in $1 ) func_to_host_path_result=$3$func_to_host_path_result ;; esac case $4 in $2 ) func_append func_to_host_path_result "$3" ;; esac } # end func_convert_path_front_back_pathsep ################################################## # $build to $host FILE NAME CONVERSION FUNCTIONS # ################################################## # invoked via '$to_host_file_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # Result will be available in $func_to_host_file_result. 
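# A sketch of how the dispatch is used (illustrative; the value of
# $to_host_file_cmd is fixed when this script is generated, and the Windows
# prefix depends on the local Cygwin installation):
#
#   to_host_file_cmd=func_convert_file_cygwin_to_w32
#   func_to_host_file '/usr/lib/libfoo.la'
#   $ECHO "$func_to_host_file_result"   # e.g. C:/cygwin64/usr/lib/libfoo.la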
# func_to_host_file ARG # Converts the file name ARG from $build format to $host format. Return result # in func_to_host_file_result. func_to_host_file () { $debug_cmd $to_host_file_cmd "$1" } # end func_to_host_file # func_to_tool_file ARG LAZY # converts the file name ARG from $build format to toolchain format. Return # result in func_to_tool_file_result. If the conversion in use is listed # in (the comma separated) LAZY, no conversion takes place. func_to_tool_file () { $debug_cmd case ,$2, in *,"$to_tool_file_cmd",*) func_to_tool_file_result=$1 ;; *) $to_tool_file_cmd "$1" func_to_tool_file_result=$func_to_host_file_result ;; esac } # end func_to_tool_file # func_convert_file_noop ARG # Copy ARG to func_to_host_file_result. func_convert_file_noop () { func_to_host_file_result=$1 } # end func_convert_file_noop # func_convert_file_msys_to_w32 ARG # Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_file_result. func_convert_file_msys_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_to_host_file_result=$func_convert_core_msys_to_w32_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_w32 # func_convert_file_cygwin_to_w32 ARG # Convert file name ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_file_cygwin_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then # because $build is cygwin, we call "the" cygpath in $PATH; no need to use # LT_CYGPATH in this case. func_to_host_file_result=`cygpath -m "$1"` fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_cygwin_to_w32 # func_convert_file_nix_to_w32 ARG # Convert file name ARG from *nix to w32 format. Requires a wine environment # and a working winepath. Returns result in func_to_host_file_result. func_convert_file_nix_to_w32 () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_file_wine_to_w32 "$1" func_to_host_file_result=$func_convert_core_file_wine_to_w32_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_w32 # func_convert_file_msys_to_cygwin ARG # Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. # Returns result in func_to_host_file_result. func_convert_file_msys_to_cygwin () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then func_convert_core_msys_to_w32 "$1" func_cygpath -u "$func_convert_core_msys_to_w32_result" func_to_host_file_result=$func_cygpath_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_msys_to_cygwin # func_convert_file_nix_to_cygwin ARG # Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed # in a wine environment, working winepath, and LT_CYGPATH set. Returns result # in func_to_host_file_result. func_convert_file_nix_to_cygwin () { $debug_cmd func_to_host_file_result=$1 if test -n "$1"; then # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. 
func_convert_core_file_wine_to_w32 "$1" func_cygpath -u "$func_convert_core_file_wine_to_w32_result" func_to_host_file_result=$func_cygpath_result fi func_convert_file_check "$1" "$func_to_host_file_result" } # end func_convert_file_nix_to_cygwin ############################################# # $build to $host PATH CONVERSION FUNCTIONS # ############################################# # invoked via '$to_host_path_cmd ARG' # # In each case, ARG is the path to be converted from $build to $host format. # The result will be available in $func_to_host_path_result. # # Path separators are also converted from $build format to $host format. If # ARG begins or ends with a path separator character, it is preserved (but # converted to $host format) on output. # # All path conversion functions are named using the following convention: # file name conversion function : func_convert_file_X_to_Y () # path conversion function : func_convert_path_X_to_Y () # where, for any given $build/$host combination the 'X_to_Y' value is the # same. If conversion functions are added for new $build/$host combinations, # the two new functions must follow this pattern, or func_init_to_host_path_cmd # will break. # func_init_to_host_path_cmd # Ensures that function "pointer" variable $to_host_path_cmd is set to the # appropriate value, based on the value of $to_host_file_cmd. to_host_path_cmd= func_init_to_host_path_cmd () { $debug_cmd if test -z "$to_host_path_cmd"; then func_stripname 'func_convert_file_' '' "$to_host_file_cmd" to_host_path_cmd=func_convert_path_$func_stripname_result fi } # func_to_host_path ARG # Converts the path ARG from $build format to $host format. Return result # in func_to_host_path_result. func_to_host_path () { $debug_cmd func_init_to_host_path_cmd $to_host_path_cmd "$1" } # end func_to_host_path # func_convert_path_noop ARG # Copy ARG to func_to_host_path_result. func_convert_path_noop () { func_to_host_path_result=$1 } # end func_convert_path_noop # func_convert_path_msys_to_w32 ARG # Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic # conversion to w32 is not available inside the cwrapper. Returns result in # func_to_host_path_result. func_convert_path_msys_to_w32 () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # Remove leading and trailing path separator characters from ARG. MSYS # behavior is inconsistent here; cygpath turns them into '.;' and ';.'; # and winepath ignores them completely. func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" func_to_host_path_result=$func_convert_core_msys_to_w32_result func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_msys_to_w32 # func_convert_path_cygwin_to_w32 ARG # Convert path ARG from Cygwin to w32 format. Returns result in # func_to_host_file_result. func_convert_path_cygwin_to_w32 () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"` func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_cygwin_to_w32 # func_convert_path_nix_to_w32 ARG # Convert path ARG from *nix to w32 format. 
Requires a wine environment and # a working winepath. Returns result in func_to_host_file_result. func_convert_path_nix_to_w32 () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" func_to_host_path_result=$func_convert_core_path_wine_to_w32_result func_convert_path_check : ";" \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" ";" "$1" fi } # end func_convert_path_nix_to_w32 # func_convert_path_msys_to_cygwin ARG # Convert path ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. # Returns result in func_to_host_file_result. func_convert_path_msys_to_cygwin () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # See func_convert_path_msys_to_w32: func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_msys_to_w32 "$func_to_host_path_tmp1" func_cygpath -u -p "$func_convert_core_msys_to_w32_result" func_to_host_path_result=$func_cygpath_result func_convert_path_check : : \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" : "$1" fi } # end func_convert_path_msys_to_cygwin # func_convert_path_nix_to_cygwin ARG # Convert path ARG from *nix to Cygwin format. Requires Cygwin installed in a # a wine environment, working winepath, and LT_CYGPATH set. Returns result in # func_to_host_file_result. func_convert_path_nix_to_cygwin () { $debug_cmd func_to_host_path_result=$1 if test -n "$1"; then # Remove leading and trailing path separator characters from # ARG. msys behavior is inconsistent here, cygpath turns them # into '.;' and ';.', and winepath ignores them completely. func_stripname : : "$1" func_to_host_path_tmp1=$func_stripname_result func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1" func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result" func_to_host_path_result=$func_cygpath_result func_convert_path_check : : \ "$func_to_host_path_tmp1" "$func_to_host_path_result" func_convert_path_front_back_pathsep ":*" "*:" : "$1" fi } # end func_convert_path_nix_to_cygwin # func_dll_def_p FILE # True iff FILE is a Windows DLL '.def' file. # Keep in sync with _LT_DLL_DEF_P in libtool.m4 func_dll_def_p () { $debug_cmd func_dll_def_p_tmp=`$SED -n \ -e 's/^[ ]*//' \ -e '/^\(;.*\)*$/d' \ -e 's/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p' \ -e q \ "$1"` test DEF = "$func_dll_def_p_tmp" } # func_mode_compile arg... func_mode_compile () { $debug_cmd # Get the compilation command and the source file. base_compile= srcfile=$nonopt # always keep a non-empty value in "srcfile" suppress_opt=yes suppress_output= arg_mode=normal libobj= later= pie_flag= for arg do case $arg_mode in arg ) # do not "continue". Instead, add this to base_compile lastarg=$arg arg_mode=normal ;; target ) libobj=$arg arg_mode=normal continue ;; normal ) # Accept any command-line options. case $arg in -o) test -n "$libobj" && \ func_fatal_error "you cannot specify '-o' more than once" arg_mode=target continue ;; -pie | -fpie | -fPIE) func_append pie_flag " $arg" continue ;; -shared | -static | -prefer-pic | -prefer-non-pic) func_append later " $arg" continue ;; -no-suppress) suppress_opt=no continue ;; -Xcompiler) arg_mode=arg # the next one goes into the "base_compile" arg list continue # The current "srcfile" will either be retained or ;; # replaced later. I would guess that would be a bug. 
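# For example, either spelling passes an extra flag straight through to the
# compiler when building an object (illustrative invocations):
#
#   libtool --mode=compile gcc -Xcompiler -pthread -c foo.c
#   libtool --mode=compile gcc -Wc,-pthread -c foo.c
#
# The -Xcompiler form consumes exactly one following argument; the -Wc, form
# may carry several flags separated by commas.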
-Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result lastarg= save_ifs=$IFS; IFS=, for arg in $args; do IFS=$save_ifs func_append_quoted lastarg "$arg" done IFS=$save_ifs func_stripname ' ' '' "$lastarg" lastarg=$func_stripname_result # Add the arguments to base_compile. func_append base_compile " $lastarg" continue ;; *) # Accept the current argument as the source file. # The previous "srcfile" becomes the current argument. # lastarg=$srcfile srcfile=$arg ;; esac # case $arg ;; esac # case $arg_mode # Aesthetically quote the previous argument. func_append_quoted base_compile "$lastarg" done # for arg case $arg_mode in arg) func_fatal_error "you must specify an argument for -Xcompile" ;; target) func_fatal_error "you must specify a target with '-o'" ;; *) # Get the name of the library object. test -z "$libobj" && { func_basename "$srcfile" libobj=$func_basename_result } ;; esac # Recognize several different file suffixes. # If the user specifies -o file.o, it is replaced with file.lo case $libobj in *.[cCFSifmso] | \ *.ada | *.adb | *.ads | *.asm | \ *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \ *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup) func_xform "$libobj" libobj=$func_xform_result ;; esac case $libobj in *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;; *) func_fatal_error "cannot determine name of library object from '$libobj'" ;; esac func_infer_tag $base_compile for arg in $later; do case $arg in -shared) test yes = "$build_libtool_libs" \ || func_fatal_configuration "cannot build a shared library" build_old_libs=no continue ;; -static) build_libtool_libs=no build_old_libs=yes continue ;; -prefer-pic) pic_mode=yes continue ;; -prefer-non-pic) pic_mode=no continue ;; esac done func_quote_for_eval "$libobj" test "X$libobj" != "X$func_quote_for_eval_result" \ && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \ && func_warning "libobj name '$libobj' may not contain shell special characters." func_dirname_and_basename "$obj" "/" "" objname=$func_basename_result xdir=$func_dirname_result lobj=$xdir$objdir/$objname test -z "$base_compile" && \ func_fatal_help "you must specify a compilation command" # Delete any leftover library objects. if test yes = "$build_old_libs"; then removelist="$obj $lobj $libobj ${libobj}T" else removelist="$lobj $libobj ${libobj}T" fi # On Cygwin there's no "real" PIC flag so we must build both object types case $host_os in cygwin* | mingw* | pw32* | os2* | cegcc*) pic_mode=default ;; esac if test no = "$pic_mode" && test pass_all != "$deplibs_check_method"; then # non-PIC code in shared libraries is not supported pic_mode=default fi # Calculate the filename of the output object if compiler does # not support -o with -c if test no = "$compiler_c_o"; then output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.$objext lockfile=$output_obj.lock else output_obj= need_locks=no lockfile= fi # Lock this critical section if it is needed # We use this script file to make the link, it avoids creating a new file if test yes = "$need_locks"; then until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done elif test warn = "$need_locks"; then if test -f "$lockfile"; then $ECHO "\ *** ERROR, $lockfile exists and contains: `cat $lockfile 2>/dev/null` This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support '-c' and '-o' together. 
If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi func_append removelist " $output_obj" $ECHO "$srcfile" > "$lockfile" fi $opt_dry_run || $RM $removelist func_append removelist " $lockfile" trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 srcfile=$func_to_tool_file_result func_quote_for_eval "$srcfile" qsrcfile=$func_quote_for_eval_result # Only build a PIC object if we are building libtool libraries. if test yes = "$build_libtool_libs"; then # Without this assignment, base_compile gets emptied. fbsd_hideous_sh_bug=$base_compile if test no != "$pic_mode"; then command="$base_compile $qsrcfile $pic_flag" else # Don't build PIC code command="$base_compile $qsrcfile" fi func_mkdir_p "$xdir$objdir" if test -z "$output_obj"; then # Place PIC objects in $objdir func_append command " -o $lobj" fi func_show_eval_locale "$command" \ 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' if test warn = "$need_locks" && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support '-c' and '-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." $opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed, then go on to compile the next one if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then func_show_eval '$MV "$output_obj" "$lobj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi # Allow error messages only from the first compilation. if test yes = "$suppress_opt"; then suppress_output=' >/dev/null 2>&1' fi fi # Only build a position-dependent object if we build old libraries. if test yes = "$build_old_libs"; then if test yes != "$pic_mode"; then # Don't build PIC code command="$base_compile $qsrcfile$pie_flag" else command="$base_compile $qsrcfile $pic_flag" fi if test yes = "$compiler_c_o"; then func_append command " -o $obj" fi # Suppress compiler output if we already did a PIC compilation. func_append command "$suppress_output" func_show_eval_locale "$command" \ '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' if test warn = "$need_locks" && test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then $ECHO "\ *** ERROR, $lockfile contains: `cat $lockfile 2>/dev/null` but it should contain: $srcfile This indicates that another process is trying to use the same temporary object file, and libtool could not work around it because your compiler does not support '-c' and '-o' together. If you repeat this compilation, it may succeed, by chance, but you had better avoid parallel builds (make -j) in this platform, or get a better compiler." 
$opt_dry_run || $RM $removelist exit $EXIT_FAILURE fi # Just move the object if needed if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then func_show_eval '$MV "$output_obj" "$obj"' \ 'error=$?; $opt_dry_run || $RM $removelist; exit $error' fi fi $opt_dry_run || { func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" # Unlock the critical section if it was locked if test no != "$need_locks"; then removelist=$lockfile $RM "$lockfile" fi } exit $EXIT_SUCCESS } $opt_help || { test compile = "$opt_mode" && func_mode_compile ${1+"$@"} } func_mode_help () { # We need to display help for each of the modes. case $opt_mode in "") # Generic help is extracted from the usage comments # at the start of this file. func_help ;; clean) $ECHO \ "Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... Remove files from the build directory. RM is the name of the program to use to delete files associated with each FILE (typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed to RM. If FILE is a libtool library, object or program, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." ;; compile) $ECHO \ "Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE Compile a source file into a libtool library object. This mode accepts the following additional options: -o OUTPUT-FILE set the output file name to OUTPUT-FILE -no-suppress do not suppress compiler output for multiple passes -prefer-pic try to build PIC objects only -prefer-non-pic try to build non-PIC objects only -shared do not build a '.o' file suitable for static linking -static only build a '.o' file suitable for static linking -Wc,FLAG pass FLAG directly to the compiler COMPILE-COMMAND is a command to be used in creating a 'standard' object file from the given SOURCEFILE. The output file name is determined by removing the directory component from SOURCEFILE, then substituting the C source code suffix '.c' with the library object suffix, '.lo'." ;; execute) $ECHO \ "Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... Automatically set library path, then run a program. This mode accepts the following additional options: -dlopen FILE add the directory containing FILE to the library path This mode sets the library path environment variable according to '-dlopen' flags. If any of the ARGS are libtool executable wrappers, then they are translated into their corresponding uninstalled binary, and any of their required library directories are added to the library path. Then, COMMAND is executed, with ARGS as arguments." ;; finish) $ECHO \ "Usage: $progname [OPTION]... --mode=finish [LIBDIR]... Complete the installation of libtool libraries. Each LIBDIR is a directory that contains libtool libraries. The commands that this mode executes may require superuser privileges. Use the '--dry-run' option if you just want to see what would be executed." ;; install) $ECHO \ "Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... Install executables or libraries. INSTALL-COMMAND is the installation command. The first component should be either the 'install' or 'cp' program. The following components of INSTALL-COMMAND are treated specially: -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation The rest of the components are interpreted as arguments to that command (only BSD-compatible install options are recognized)." ;; link) $ECHO \ "Usage: $progname [OPTION]... --mode=link LINK-COMMAND... 
Link object files or libraries together to form another library, or to create an executable program. LINK-COMMAND is a command using the C compiler that you would use to create a program from several object files. The following components of LINK-COMMAND are treated specially: -all-static do not do any dynamic linking at all -avoid-version do not add a version suffix if possible -bindir BINDIR specify path to binaries directory (for systems where libraries must be found in the PATH setting at runtime) -dlopen FILE '-dlpreopen' FILE if it cannot be dlopened at runtime -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) -export-symbols SYMFILE try to export only the symbols listed in SYMFILE -export-symbols-regex REGEX try to export only the symbols matching REGEX -LLIBDIR search LIBDIR for required installed libraries -lNAME OUTPUT-FILE requires the installed library libNAME -module build a library that can dlopened -no-fast-install disable the fast-install mode -no-install link a not-installable executable -no-undefined declare that a library does not refer to external symbols -o OUTPUT-FILE create OUTPUT-FILE from the specified objects -objectlist FILE use a list of object files found in FILE to specify objects -os2dllname NAME force a short DLL name on OS/2 (no effect on other OSes) -precious-files-regex REGEX don't remove output files matching REGEX -release RELEASE specify package release information -rpath LIBDIR the created library will eventually be installed in LIBDIR -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries -shared only do dynamic linking of libtool libraries -shrext SUFFIX override the standard shared library file extension -static do not do any dynamic linking of uninstalled libtool libraries -static-libtool-libs do not do any dynamic linking of libtool libraries -version-info CURRENT[:REVISION[:AGE]] specify library version info [each variable defaults to 0] -weak LIBNAME declare that the target provides the LIBNAME interface -Wc,FLAG -Xcompiler FLAG pass linker-specific FLAG directly to the compiler -Wl,FLAG -Xlinker FLAG pass linker-specific FLAG directly to the linker -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) All other options (arguments beginning with '-') are ignored. Every other argument is treated as a filename. Files ending in '.la' are treated as uninstalled libtool libraries, other files are standard or library object files. If the OUTPUT-FILE ends in '.la', then a libtool library is created, only library objects ('.lo' files) may be specified, and '-rpath' is required, except when creating a convenience library. If OUTPUT-FILE ends in '.a' or '.lib', then a standard library is created using 'ar' and 'ranlib', or on Windows using 'lib'. If OUTPUT-FILE ends in '.lo' or '.$objext', then a reloadable object file is created, otherwise an executable program is created." ;; uninstall) $ECHO \ "Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... Remove libraries from an installation directory. RM is the name of the program to use to delete files associated with each FILE (typically '/bin/rm'). RM-OPTIONS are options (such as '-f') to be passed to RM. If FILE is a libtool library, all the files associated with it are deleted. Otherwise, only FILE itself is deleted using RM." 
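# Putting the modes documented above together, a typical build/install cycle
# looks like this (illustrative; 'libfoo', the version triplet and the
# install prefix are placeholders):
#
#   libtool --mode=compile gcc -c foo.c -o foo.lo
#   libtool --mode=link gcc -o libfoo.la foo.lo -rpath /usr/local/lib \
#       -version-info 1:0:0
#   libtool --mode=install install -c libfoo.la /usr/local/lib/libfoo.la
#   libtool --mode=finish /usr/local/lib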
;; *) func_fatal_help "invalid operation mode '$opt_mode'" ;; esac echo $ECHO "Try '$progname --help' for more information about other modes." } # Now that we've collected a possible --mode arg, show help if necessary if $opt_help; then if test : = "$opt_help"; then func_mode_help else { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do func_mode_help done } | $SED -n '1p; 2,$s/^Usage:/ or: /p' { func_help noexit for opt_mode in compile link execute install finish uninstall clean; do echo func_mode_help done } | $SED '1d /^When reporting/,/^Report/{ H d } $x /information about other modes/d /more detailed .*MODE/d s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' fi exit $? fi # func_mode_execute arg... func_mode_execute () { $debug_cmd # The first argument is the command name. cmd=$nonopt test -z "$cmd" && \ func_fatal_help "you must specify a COMMAND" # Handle -dlopen flags immediately. for file in $opt_dlopen; do test -f "$file" \ || func_fatal_help "'$file' is not a file" dir= case $file in *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "'$lib' is not a valid libtool archive" # Read the libtool library. dlname= library_names= func_source "$file" # Skip this library if it cannot be dlopened. if test -z "$dlname"; then # Warn if it was a shared library. test -n "$library_names" && \ func_warning "'$file' was not linked with '-export-dynamic'" continue fi func_dirname "$file" "" "." dir=$func_dirname_result if test -f "$dir/$objdir/$dlname"; then func_append dir "/$objdir" else if test ! -f "$dir/$dlname"; then func_fatal_error "cannot find '$dlname' in '$dir' or '$dir/$objdir'" fi fi ;; *.lo) # Just add the directory containing the .lo file. func_dirname "$file" "" "." dir=$func_dirname_result ;; *) func_warning "'-dlopen' is ignored for non-libtool libraries and objects" continue ;; esac # Get the absolute pathname. absdir=`cd "$dir" && pwd` test -n "$absdir" && dir=$absdir # Now add the directory to shlibpath_var. if eval "test -z \"\$$shlibpath_var\""; then eval "$shlibpath_var=\"\$dir\"" else eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" fi done # This variable tells wrapper scripts just to set shlibpath_var # rather than running their programs. libtool_execute_magic=$magic # Check if any of the arguments is a wrapper script. args= for file do case $file in -* | *.la | *.lo ) ;; *) # Do a test to see if this is really a libtool program. if func_ltwrapper_script_p "$file"; then func_source "$file" # Transform arg to wrapped name. file=$progdir/$program elif func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" func_source "$func_ltwrapper_scriptname_result" # Transform arg to wrapped name. file=$progdir/$program fi ;; esac # Quote arguments (to preserve shell metacharacters). func_append_quoted args "$file" done if $opt_dry_run; then # Display what would be done. if test -n "$shlibpath_var"; then eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" echo "export $shlibpath_var" fi $ECHO "$cmd$args" exit $EXIT_SUCCESS else if test -n "$shlibpath_var"; then # Export the shlibpath_var. eval "export $shlibpath_var" fi # Restore saved environment variables for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES do eval "if test \"\${save_$lt_var+set}\" = set; then $lt_var=\$save_$lt_var; export $lt_var else $lt_unset $lt_var fi" done # Now prepare to actually exec the command. 
exec_cmd=\$cmd$args fi } test execute = "$opt_mode" && func_mode_execute ${1+"$@"} # func_mode_finish arg... func_mode_finish () { $debug_cmd libs= libdirs= admincmds= for opt in "$nonopt" ${1+"$@"} do if test -d "$opt"; then func_append libdirs " $opt" elif test -f "$opt"; then if func_lalib_unsafe_p "$opt"; then func_append libs " $opt" else func_warning "'$opt' is not a valid libtool archive" fi else func_fatal_error "invalid argument '$opt'" fi done if test -n "$libs"; then if test -n "$lt_sysroot"; then sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" else sysroot_cmd= fi # Remove sysroot references if $opt_dry_run; then for lib in $libs; do echo "removing references to $lt_sysroot and '=' prefixes from $lib" done else tmpdir=`func_mktempdir` for lib in $libs; do $SED -e "$sysroot_cmd s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ > $tmpdir/tmp-la mv -f $tmpdir/tmp-la $lib done ${RM}r "$tmpdir" fi fi if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then for libdir in $libdirs; do if test -n "$finish_cmds"; then # Do each command in the finish commands. func_execute_cmds "$finish_cmds" 'admincmds="$admincmds '"$cmd"'"' fi if test -n "$finish_eval"; then # Do the single finish_eval. eval cmds=\"$finish_eval\" $opt_dry_run || eval "$cmds" || func_append admincmds " $cmds" fi done fi # Exit here if they wanted silent mode. $opt_quiet && exit $EXIT_SUCCESS if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then echo "----------------------------------------------------------------------" echo "Libraries have been installed in:" for libdir in $libdirs; do $ECHO " $libdir" done echo echo "If you ever happen to want to link against installed libraries" echo "in a given directory, LIBDIR, you must either use libtool, and" echo "specify the full pathname of the library, or use the '-LLIBDIR'" echo "flag during linking and do at least one of the following:" if test -n "$shlibpath_var"; then echo " - add LIBDIR to the '$shlibpath_var' environment variable" echo " during execution" fi if test -n "$runpath_var"; then echo " - add LIBDIR to the '$runpath_var' environment variable" echo " during linking" fi if test -n "$hardcode_libdir_flag_spec"; then libdir=LIBDIR eval flag=\"$hardcode_libdir_flag_spec\" $ECHO " - use the '$flag' linker flag" fi if test -n "$admincmds"; then $ECHO " - have your system administrator run these commands:$admincmds" fi if test -f /etc/ld.so.conf; then echo " - have your system administrator add LIBDIR to '/etc/ld.so.conf'" fi echo echo "See any operating system documentation about shared libraries for" case $host in solaris2.[6789]|solaris2.1[0-9]) echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" echo "pages." ;; *) echo "more information, such as the ld(1) and ld.so(8) manual pages." ;; esac echo "----------------------------------------------------------------------" fi exit $EXIT_SUCCESS } test finish = "$opt_mode" && func_mode_finish ${1+"$@"} # func_mode_install arg... func_mode_install () { $debug_cmd # There may be an optional sh(1) argument at the beginning of # install_prog (especially on Windows NT). if test "$SHELL" = "$nonopt" || test /bin/sh = "$nonopt" || # Allow the use of GNU shtool's install command. case $nonopt in *shtool*) :;; *) false;; esac then # Aesthetically quote it. 
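# Example of the case handled here (hypothetical command line): when a
# Makefile runs
#   libtool --mode=install /bin/sh ./install-sh -c libfoo.la /usr/local/lib
# $nonopt is '/bin/sh', which is kept as a quoted prefix, and the next
# argument ('./install-sh') becomes the real installation program.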
func_quote_for_eval "$nonopt" install_prog="$func_quote_for_eval_result " arg=$1 shift else install_prog= arg=$nonopt fi # The real first argument should be the name of the installation program. # Aesthetically quote it. func_quote_for_eval "$arg" func_append install_prog "$func_quote_for_eval_result" install_shared_prog=$install_prog case " $install_prog " in *[\\\ /]cp\ *) install_cp=: ;; *) install_cp=false ;; esac # We need to accept at least all the BSD install flags. dest= files= opts= prev= install_type= isdir=false stripme= no_mode=: for arg do arg2= if test -n "$dest"; then func_append files " $dest" dest=$arg continue fi case $arg in -d) isdir=: ;; -f) if $install_cp; then :; else prev=$arg fi ;; -g | -m | -o) prev=$arg ;; -s) stripme=" -s" continue ;; -*) ;; *) # If the previous option needed an argument, then skip it. if test -n "$prev"; then if test X-m = "X$prev" && test -n "$install_override_mode"; then arg2=$install_override_mode no_mode=false fi prev= else dest=$arg continue fi ;; esac # Aesthetically quote the argument. func_quote_for_eval "$arg" func_append install_prog " $func_quote_for_eval_result" if test -n "$arg2"; then func_quote_for_eval "$arg2" fi func_append install_shared_prog " $func_quote_for_eval_result" done test -z "$install_prog" && \ func_fatal_help "you must specify an install program" test -n "$prev" && \ func_fatal_help "the '$prev' option requires an argument" if test -n "$install_override_mode" && $no_mode; then if $install_cp; then :; else func_quote_for_eval "$install_override_mode" func_append install_shared_prog " -m $func_quote_for_eval_result" fi fi if test -z "$files"; then if test -z "$dest"; then func_fatal_help "no file or destination specified" else func_fatal_help "you must specify a destination" fi fi # Strip any trailing slash from the destination. func_stripname '' '/' "$dest" dest=$func_stripname_result # Check to see that the destination is a directory. test -d "$dest" && isdir=: if $isdir; then destdir=$dest destname= else func_dirname_and_basename "$dest" "" "." destdir=$func_dirname_result destname=$func_basename_result # Not a directory, so check to see that there is only one file specified. set dummy $files; shift test "$#" -gt 1 && \ func_fatal_help "'$dest' is not a directory" fi case $destdir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) for file in $files; do case $file in *.lo) ;; *) func_fatal_help "'$destdir' must be an absolute directory name" ;; esac done ;; esac # This variable tells wrapper scripts just to set variables rather # than running their programs. libtool_install_magic=$magic staticlibs= future_libdirs= current_libdirs= for file in $files; do # Do each installation. case $file in *.$libext) # Do the static libraries later. func_append staticlibs " $file" ;; *.la) func_resolve_sysroot "$file" file=$func_resolve_sysroot_result # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$file" \ || func_fatal_help "'$file' is not a valid libtool archive" library_names= old_library= relink_command= func_source "$file" # Add the libdir to current_libdirs if it is the destination. if test "X$destdir" = "X$libdir"; then case "$current_libdirs " in *" $libdir "*) ;; *) func_append current_libdirs " $libdir" ;; esac else # Note the libdir as a future libdir. 
case "$future_libdirs " in *" $libdir "*) ;; *) func_append future_libdirs " $libdir" ;; esac fi func_dirname "$file" "/" "" dir=$func_dirname_result func_append dir "$objdir" if test -n "$relink_command"; then # Determine the prefix the user has applied to our future dir. inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` # Don't allow the user to place us outside of our expected # location b/c this prevents finding dependent libraries that # are installed to the same prefix. # At present, this check doesn't affect windows .dll's that # are installed into $libdir/../bin (currently, that works fine) # but it's something to keep an eye on. test "$inst_prefix_dir" = "$destdir" && \ func_fatal_error "error: cannot install '$file' to a directory not ending in $libdir" if test -n "$inst_prefix_dir"; then # Stick the inst_prefix_dir data into the link command. relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` else relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` fi func_warning "relinking '$file'" func_show_eval "$relink_command" \ 'func_fatal_error "error: relink '\''$file'\'' with the above command before installing it"' fi # See the names of the shared library. set dummy $library_names; shift if test -n "$1"; then realname=$1 shift srcname=$realname test -n "$relink_command" && srcname=${realname}T # Install the shared library and build the symlinks. func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ 'exit $?' tstripme=$stripme case $host_os in cygwin* | mingw* | pw32* | cegcc*) case $realname in *.dll.a) tstripme= ;; esac ;; os2*) case $realname in *_dll.a) tstripme= ;; esac ;; esac if test -n "$tstripme" && test -n "$striplib"; then func_show_eval "$striplib $destdir/$realname" 'exit $?' fi if test "$#" -gt 0; then # Delete the old symlinks, and create new ones. # Try 'ln -sf' first, because the 'ln' binary might depend on # the symlink we replace! Solaris /bin/ln does not understand -f, # so we also need to try rm && ln -s. for linkname do test "$linkname" != "$realname" \ && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" done fi # Do each command in the postinstall commands. lib=$destdir/$realname func_execute_cmds "$postinstall_cmds" 'exit $?' fi # Install the pseudo-library for information purposes. func_basename "$file" name=$func_basename_result instname=$dir/${name}i func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' # Maybe install the static library, too. test -n "$old_library" && func_append staticlibs " $dir/$old_library" ;; *.lo) # Install (i.e. copy) a libtool object. # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile=$destdir/$destname else func_basename "$file" destfile=$func_basename_result destfile=$destdir/$destfile fi # Deduce the name of the destination old-style object file. case $destfile in *.lo) func_lo2o "$destfile" staticdest=$func_lo2o_result ;; *.$objext) staticdest=$destfile destfile= ;; *) func_fatal_help "cannot copy a libtool object to '$destfile'" ;; esac # Install the libtool object if requested. test -n "$destfile" && \ func_show_eval "$install_prog $file $destfile" 'exit $?' # Install the old object if enabled. if test yes = "$build_old_libs"; then # Deduce the name of the old-style object file. func_lo2o "$file" staticobj=$func_lo2o_result func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' 
fi exit $EXIT_SUCCESS ;; *) # Figure out destination file name, if it wasn't already specified. if test -n "$destname"; then destfile=$destdir/$destname else func_basename "$file" destfile=$func_basename_result destfile=$destdir/$destfile fi # If the file is missing, and there is a .exe on the end, strip it # because it is most likely a libtool script we actually want to # install stripped_ext= case $file in *.exe) if test ! -f "$file"; then func_stripname '' '.exe' "$file" file=$func_stripname_result stripped_ext=.exe fi ;; esac # Do a test to see if this is really a libtool program. case $host in *cygwin* | *mingw*) if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" wrapper=$func_ltwrapper_scriptname_result else func_stripname '' '.exe' "$file" wrapper=$func_stripname_result fi ;; *) wrapper=$file ;; esac if func_ltwrapper_script_p "$wrapper"; then notinst_deplibs= relink_command= func_source "$wrapper" # Check the variables that should have been set. test -z "$generated_by_libtool_version" && \ func_fatal_error "invalid libtool wrapper script '$wrapper'" finalize=: for lib in $notinst_deplibs; do # Check to see that each library is installed. libdir= if test -f "$lib"; then func_source "$lib" fi libfile=$libdir/`$ECHO "$lib" | $SED 's%^.*/%%g'` if test -n "$libdir" && test ! -f "$libfile"; then func_warning "'$lib' has not been installed in '$libdir'" finalize=false fi done relink_command= func_source "$wrapper" outputname= if test no = "$fast_install" && test -n "$relink_command"; then $opt_dry_run || { if $finalize; then tmpdir=`func_mktempdir` func_basename "$file$stripped_ext" file=$func_basename_result outputname=$tmpdir/$file # Replace the output file specification. relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` $opt_quiet || { func_quote_for_expand "$relink_command" eval "func_echo $func_quote_for_expand_result" } if eval "$relink_command"; then : else func_error "error: relink '$file' with the above command before installing it" $opt_dry_run || ${RM}r "$tmpdir" continue fi file=$outputname else func_warning "cannot relink '$file'" fi } else # Install the binary that we compiled earlier. file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` fi fi # remove .exe since cygwin /usr/bin/install will append another # one anyway case $install_prog,$host in */usr/bin/install*,*cygwin*) case $file:$destfile in *.exe:*.exe) # this is ok ;; *.exe:*) destfile=$destfile.exe ;; *:*.exe) func_stripname '' '.exe' "$destfile" destfile=$func_stripname_result ;; esac ;; esac func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' $opt_dry_run || if test -n "$outputname"; then ${RM}r "$tmpdir" fi ;; esac done for file in $staticlibs; do func_basename "$file" name=$func_basename_result # Set up the ranlib parameters. oldlib=$destdir/$name func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result func_show_eval "$install_prog \$file \$oldlib" 'exit $?' if test -n "$stripme" && test -n "$old_striplib"; then func_show_eval "$old_striplib $tool_oldlib" 'exit $?' fi # Do each command in the postinstall commands. func_execute_cmds "$old_postinstall_cmds" 'exit $?' done test -n "$future_libdirs" && \ func_warning "remember to run '$progname --finish$future_libdirs'" if test -n "$current_libdirs"; then # Maybe just do a dry run. 
$opt_dry_run && current_libdirs=" -n$current_libdirs" exec_cmd='$SHELL "$progpath" $preserve_args --finish$current_libdirs' else exit $EXIT_SUCCESS fi } test install = "$opt_mode" && func_mode_install ${1+"$@"} # func_generate_dlsyms outputname originator pic_p # Extract symbols from dlprefiles and create ${outputname}S.o with # a dlpreopen symbol table. func_generate_dlsyms () { $debug_cmd my_outputname=$1 my_originator=$2 my_pic_p=${3-false} my_prefix=`$ECHO "$my_originator" | $SED 's%[^a-zA-Z0-9]%_%g'` my_dlsyms= if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then if test -n "$NM" && test -n "$global_symbol_pipe"; then my_dlsyms=${my_outputname}S.c else func_error "not configured to extract global symbols from dlpreopened files" fi fi if test -n "$my_dlsyms"; then case $my_dlsyms in "") ;; *.c) # Discover the nlist of each of the dlfiles. nlist=$output_objdir/$my_outputname.nm func_show_eval "$RM $nlist ${nlist}S ${nlist}T" # Parse the name list into a source file. func_verbose "creating $output_objdir/$my_dlsyms" $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ /* $my_dlsyms - symbol resolution table for '$my_outputname' dlsym emulation. */ /* Generated by $PROGRAM (GNU $PACKAGE) $VERSION */ #ifdef __cplusplus extern \"C\" { #endif #if defined __GNUC__ && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) #pragma GCC diagnostic ignored \"-Wstrict-prototypes\" #endif /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) /* External symbol declarations for the compiler. */\ " if test yes = "$dlself"; then func_verbose "generating symbol list for '$output'" $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" # Add our own program objects to the symbol list. 
progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` for progfile in $progfiles; do func_to_tool_file "$progfile" func_convert_file_msys_to_w32 func_verbose "extracting global C symbols from '$func_to_tool_file_result'" $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" done if test -n "$exclude_expsyms"; then $opt_dry_run || { eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi if test -n "$export_symbols_regex"; then $opt_dry_run || { eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' } fi # Prepare the list of exported symbols if test -z "$export_symbols"; then export_symbols=$output_objdir/$outputname.exp $opt_dry_run || { $RM $export_symbols eval "$SED -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' ;; esac } else $opt_dry_run || { eval "$SED -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' eval '$MV "$nlist"T "$nlist"' case $host in *cygwin* | *mingw* | *cegcc* ) eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' ;; esac } fi fi for dlprefile in $dlprefiles; do func_verbose "extracting global C symbols from '$dlprefile'" func_basename "$dlprefile" name=$func_basename_result case $host in *cygwin* | *mingw* | *cegcc* ) # if an import library, we need to obtain dlname if func_win32_import_lib_p "$dlprefile"; then func_tr_sh "$dlprefile" eval "curr_lafile=\$libfile_$func_tr_sh_result" dlprefile_dlbasename= if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then # Use subshell, to avoid clobbering current variable values dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` if test -n "$dlprefile_dlname"; then func_basename "$dlprefile_dlname" dlprefile_dlbasename=$func_basename_result else # no lafile. user explicitly requested -dlpreopen . $sharedlib_from_linklib_cmd "$dlprefile" dlprefile_dlbasename=$sharedlib_from_linklib_result fi fi $opt_dry_run || { if test -n "$dlprefile_dlbasename"; then eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' else func_warning "Could not compute DLL name from $name" eval '$ECHO ": $name " >> "$nlist"' fi func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" } else # not an import lib $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } fi ;; *) $opt_dry_run || { eval '$ECHO ": $name " >> "$nlist"' func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" } ;; esac done $opt_dry_run || { # Make sure we have at least an empty file. test -f "$nlist" || : > "$nlist" if test -n "$exclude_expsyms"; then $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T $MV "$nlist"T "$nlist" fi # Try sorting and uniquifying the output. 
	  if $GREP -v "^: " < "$nlist" |
	      if sort -k 3 </dev/null >/dev/null 2>&1; then
		sort -k 3
	      else
		sort +2
	      fi |
	      uniq > "$nlist"S; then
	    :
	  else
	    $GREP -v "^: " < "$nlist" > "$nlist"S
	  fi

	  if test -f "$nlist"S; then
	    eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"'
	  else
	    echo '/* NONE */' >> "$output_objdir/$my_dlsyms"
	  fi

	  func_show_eval '$RM "${nlist}I"'
	  if test -n "$global_symbol_to_import"; then
	    eval "$global_symbol_to_import"' < "$nlist"S > "$nlist"I'
	  fi

	  echo >> "$output_objdir/$my_dlsyms" "\

/* The mapping between symbol names and symbols. */
typedef struct {
  const char *name;
  void *address;
} lt_dlsymlist;
extern LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[];\
"

	  if test -s "$nlist"I; then
	    echo >> "$output_objdir/$my_dlsyms" "\
static void lt_syminit(void)
{
  LT_DLSYM_CONST lt_dlsymlist *symbol = lt_${my_prefix}_LTX_preloaded_symbols;
  for (; symbol->name; ++symbol)
    {"
	    $SED 's/.*/      if (STREQ (symbol->name, \"&\")) symbol->address = (void *) \&&;/' < "$nlist"I >> "$output_objdir/$my_dlsyms"
	    echo >> "$output_objdir/$my_dlsyms" "\
    }
}"
	  fi
	  echo >> "$output_objdir/$my_dlsyms" "\

LT_DLSYM_CONST lt_dlsymlist
lt_${my_prefix}_LTX_preloaded_symbols[] =
{ {\"$my_originator\", (void *) 0},"

	  if test -s "$nlist"I; then
	    echo >> "$output_objdir/$my_dlsyms" "\
  {\"@INIT@\", (void *) &lt_syminit},"
	  fi

	  case $need_lib_prefix in
	  no)
	    eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms"
	    ;;
	  *)
	    eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms"
	    ;;
	  esac
	  echo >> "$output_objdir/$my_dlsyms" "\
  {0, (void *) 0}
};

/* This works around a problem in FreeBSD linker */
#ifdef FREEBSD_WORKAROUND
static const void *lt_preloaded_setup() {
  return lt_${my_prefix}_LTX_preloaded_symbols;
}
#endif

#ifdef __cplusplus
}
#endif\
"
	} # !$opt_dry_run

	pic_flag_for_symtable=
	case "$compile_command " in
	*" -static "*) ;;
	*)
	  case $host in
	  # compiling the symbol table file with pic_flag works around
	  # a FreeBSD bug that causes programs to crash when -lm is
	  # linked before any other PIC object. But we must not use
	  # pic_flag when linking with -static. The problem exists in
	  # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1.
	  *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*)
	    pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;;
	  *-*-hpux*)
	    pic_flag_for_symtable=" $pic_flag" ;;
	  *)
	    $my_pic_p && pic_flag_for_symtable=" $pic_flag"
	    ;;
	  esac
	  ;;
	esac
	symtab_cflags=
	for arg in $LTCFLAGS; do
	  case $arg in
	  -pie | -fpie | -fPIE) ;;
	  *) func_append symtab_cflags " $arg" ;;
	  esac
	done

	# Now compile the dynamic symbol file.
	func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?'

	# Clean up the generated files.
	func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T" "${nlist}I"'

	# Transform the symbol file into the correct name.
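	# At this point compile_command and finalize_command still contain the
	# literal @SYMFILE@ token added when -dlopen/-dlpreopen was first seen;
	# the case statement below replaces it with the compiled symbol-table
	# object (plus the .def file on cygwin/mingw/cegcc), e.g. something like
	# .libs/fooS.o for an output named 'foo' (hypothetical name).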
	symfileobj=$output_objdir/${my_outputname}S.$objext

	case $host in
	*cygwin* | *mingw* | *cegcc* )
	  if test -f "$output_objdir/$my_outputname.def"; then
	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"`
	  else
	    compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
	    finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
	  fi
	  ;;
	*)
	  compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"`
	  finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"`
	  ;;
	esac
	;;
      *)
	func_fatal_error "unknown suffix for '$my_dlsyms'"
	;;
      esac
    else
      # We keep going just in case the user didn't refer to
      # lt_preloaded_symbols. The linker will fail if global_symbol_pipe
      # really was required.

      # Nullify the symbol file.
      compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"`
      finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"`
    fi
}

# func_cygming_gnu_implib_p ARG
# This predicate returns with zero status (TRUE) if
# ARG is a GNU/binutils-style import library. Returns
# with nonzero status (FALSE) otherwise.
func_cygming_gnu_implib_p ()
{
  $debug_cmd

  func_to_tool_file "$1" func_convert_file_msys_to_w32
  func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'`
  test -n "$func_cygming_gnu_implib_tmp"
}

# func_cygming_ms_implib_p ARG
# This predicate returns with zero status (TRUE) if
# ARG is an MS-style import library. Returns
# with nonzero status (FALSE) otherwise.
func_cygming_ms_implib_p ()
{
  $debug_cmd

  func_to_tool_file "$1" func_convert_file_msys_to_w32
  func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'`
  test -n "$func_cygming_ms_implib_tmp"
}

# func_win32_libid arg
# return the library type of file 'arg'
#
# Need a lot of goo to handle *both* DLLs and import libs
# Has to be a shell function in order to 'eat' the argument
# that is supplied when $file_magic_command is called.
# Despite the name, also deal with 64 bit binaries.
func_win32_libid ()
{
  $debug_cmd

  win32_libid_type=unknown
  win32_fileres=`file -L $1 2>/dev/null`
  case $win32_fileres in
  *ar\ archive\ import\ library*) # definitely import
    win32_libid_type="x86 archive import"
    ;;
  *ar\ archive*) # could be an import, or static
    # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD.
    if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null |
       $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then
      case $nm_interface in
      "MS dumpbin")
	if func_cygming_ms_implib_p "$1" ||
	   func_cygming_gnu_implib_p "$1"
	then
	  win32_nmres=import
	else
	  win32_nmres=
	fi
	;;
      *)
	func_to_tool_file "$1" func_convert_file_msys_to_w32
	win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" |
	  $SED -n -e '
	    1,100{
		/ I /{
		    s|.*|import|
		    p
		    q
		}
	    }'`
	;;
      esac
      case $win32_nmres in
      import*) win32_libid_type="x86 archive import";;
      *) win32_libid_type="x86 archive static";;
      esac
    fi
    ;;
  *DLL*)
    win32_libid_type="x86 DLL"
    ;;
  *executable*) # but shell scripts are "executable" too...
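    # file(1) reports plain scripts as "executable" as well, so the nested
    # check below narrows the match to PE images before classifying the file
    # as a DLL.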
    case $win32_fileres in
    *MS\ Windows\ PE\ Intel*)
      win32_libid_type="x86 DLL"
      ;;
    esac
    ;;
  esac
  $ECHO "$win32_libid_type"
}

# func_cygming_dll_for_implib ARG
#
# Platform-specific function to extract the
# name of the DLL associated with the specified
# import library ARG.
# Invoked by eval'ing the libtool variable
#    $sharedlib_from_linklib_cmd
# Result is available in the variable
#    $sharedlib_from_linklib_result
func_cygming_dll_for_implib ()
{
  $debug_cmd

  sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"`
}

# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs
#
# This is the core of a fallback implementation of a
# platform-specific function to extract the name of the
# DLL associated with the specified import library LIBNAME.
#
# SECTION_NAME is either .idata$6 or .idata$7, depending
# on the platform and compiler that created the implib.
#
# Echoes the name of the DLL associated with the
# specified import library.
func_cygming_dll_for_implib_fallback_core ()
{
  $debug_cmd

  match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"`
  $OBJDUMP -s --section "$1" "$2" 2>/dev/null |
    $SED '/^Contents of section '"$match_literal"':/{
      # Place marker at beginning of archive member dllname section
      s/.*/====MARK====/
      p
      d
    }
    # These lines can sometimes be longer than 43 characters, but
    # are always uninteresting
    /:[ ]*file format pe[i]\{,1\}-/d
    /^In archive [^:]*:/d
    # Ensure marker is printed
    /^====MARK====/p
    # Remove all lines with less than 43 characters
    /^.\{43\}/!d
    # From remaining lines, remove first 43 characters
    s/^.\{43\}//' |
    $SED -n '
      # Join marker and all lines until next marker into a single line
      /^====MARK====/ b para
      H
      $ b para
      b
      :para
      x
      s/\n//g
      # Remove the marker
      s/^====MARK====//
      # Remove trailing dots and whitespace
      s/[\. \t]*$//
      # Print
      /./p' |
    # we now have a list, one entry per line, of the stringified
    # contents of the appropriate section of all members of the
    # archive that possess that section. Heuristic: eliminate
    # all those that have a first or second character that is
    # a '.' (that is, objdump's representation of an unprintable
    # character.) This should work for all archives with less than
    # 0x302f exports -- but will fail for DLLs whose name actually
    # begins with a literal '.' or a single character followed by
    # a '.'.
    #
    # Of those that remain, print the first one.
    $SED -e '/^\./d;/^.\./d;q'
}

# func_cygming_dll_for_implib_fallback ARG
# Platform-specific function to extract the
# name of the DLL associated with the specified
# import library ARG.
#
# This fallback implementation is for use when $DLLTOOL
# does not support the --identify-strict option.
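# Illustrative example (hypothetical names): for a binutils-built import
# library libfoo.dll.a, scanning .idata$7 as below would typically leave
# something like 'cygfoo-2.dll' in $sharedlib_from_linklib_result.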
# Invoked by eval'ing the libtool variable # $sharedlib_from_linklib_cmd # Result is available in the variable # $sharedlib_from_linklib_result func_cygming_dll_for_implib_fallback () { $debug_cmd if func_cygming_gnu_implib_p "$1"; then # binutils import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` elif func_cygming_ms_implib_p "$1"; then # ms-generated import library sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` else # unknown sharedlib_from_linklib_result= fi } # func_extract_an_archive dir oldlib func_extract_an_archive () { $debug_cmd f_ex_an_ar_dir=$1; shift f_ex_an_ar_oldlib=$1 if test yes = "$lock_old_archive_extraction"; then lockfile=$f_ex_an_ar_oldlib.lock until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do func_echo "Waiting for $lockfile to be removed" sleep 2 done fi func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ 'stat=$?; rm -f "$lockfile"; exit $stat' if test yes = "$lock_old_archive_extraction"; then $opt_dry_run || rm -f "$lockfile" fi if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then : else func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" fi } # func_extract_archives gentop oldlib ... func_extract_archives () { $debug_cmd my_gentop=$1; shift my_oldlibs=${1+"$@"} my_oldobjs= my_xlib= my_xabs= my_xdir= for my_xlib in $my_oldlibs; do # Extract the objects. case $my_xlib in [\\/]* | [A-Za-z]:[\\/]*) my_xabs=$my_xlib ;; *) my_xabs=`pwd`"/$my_xlib" ;; esac func_basename "$my_xlib" my_xlib=$func_basename_result my_xlib_u=$my_xlib while :; do case " $extracted_archives " in *" $my_xlib_u "*) func_arith $extracted_serial + 1 extracted_serial=$func_arith_result my_xlib_u=lt$extracted_serial-$my_xlib ;; *) break ;; esac done extracted_archives="$extracted_archives $my_xlib_u" my_xdir=$my_gentop/$my_xlib_u func_mkdir_p "$my_xdir" case $host in *-darwin*) func_verbose "Extracting $my_xabs" # Do not bother doing anything if just a dry run $opt_dry_run || { darwin_orig_dir=`pwd` cd $my_xdir || exit $? 
darwin_archive=$my_xabs darwin_curdir=`pwd` func_basename "$darwin_archive" darwin_base_archive=$func_basename_result darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` if test -n "$darwin_arches"; then darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` darwin_arch= func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" for darwin_arch in $darwin_arches; do func_mkdir_p "unfat-$$/$darwin_base_archive-$darwin_arch" $LIPO -thin $darwin_arch -output "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" "$darwin_archive" cd "unfat-$$/$darwin_base_archive-$darwin_arch" func_extract_an_archive "`pwd`" "$darwin_base_archive" cd "$darwin_curdir" $RM "unfat-$$/$darwin_base_archive-$darwin_arch/$darwin_base_archive" done # $darwin_arches ## Okay now we've a bunch of thin objects, gotta fatten them up :) darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$sed_basename" | sort -u` darwin_file= darwin_files= for darwin_file in $darwin_filelist; do darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` $LIPO -create -output "$darwin_file" $darwin_files done # $darwin_filelist $RM -rf unfat-$$ cd "$darwin_orig_dir" else cd $darwin_orig_dir func_extract_an_archive "$my_xdir" "$my_xabs" fi # $darwin_arches } # !$opt_dry_run ;; *) func_extract_an_archive "$my_xdir" "$my_xabs" ;; esac my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` done func_extract_archives_result=$my_oldobjs } # func_emit_wrapper [arg=no] # # Emit a libtool wrapper script on stdout. # Don't directly open a file because we may want to # incorporate the script contents within a cygwin/mingw # wrapper executable. Must ONLY be called from within # func_mode_link because it depends on a number of variables # set therein. # # ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR # variable will take. If 'yes', then the emitted script # will assume that the directory where it is stored is # the $objdir directory. This is a cygwin/mingw-specific # behavior. func_emit_wrapper () { func_emit_wrapper_arg1=${1-no} $ECHO "\ #! $SHELL # $output - temporary wrapper script for $objdir/$outputname # Generated by $PROGRAM (GNU $PACKAGE) $VERSION # # The $output program cannot be directly executed until all the libtool # libraries that it depends on are installed. # # This wrapper script should never be moved out of the build directory. # If it is, it will not operate correctly. # Sed substitution that helps us do robust quoting. It backslashifies # metacharacters that are still active within double-quoted strings. sed_quote_subst='$sed_quote_subst' # Be Bourne compatible if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac fi BIN_SH=xpg4; export BIN_SH # for Tru64 DUALCASE=1; export DUALCASE # for MKS sh # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH relink_command=\"$relink_command\" # This environment variable determines our operation mode. 
if test \"\$libtool_install_magic\" = \"$magic\"; then # install mode needs the following variables: generated_by_libtool_version='$macro_version' notinst_deplibs='$notinst_deplibs' else # When we are sourced in execute mode, \$file and \$ECHO are already set. if test \"\$libtool_execute_magic\" != \"$magic\"; then file=\"\$0\"" qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` $ECHO "\ # A function that is used when there is no print builtin or printf. func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } ECHO=\"$qECHO\" fi # Very basic option parsing. These options are (a) specific to # the libtool wrapper, (b) are identical between the wrapper # /script/ and the wrapper /executable/ that is used only on # windows platforms, and (c) all begin with the string "--lt-" # (application programs are unlikely to have options that match # this pattern). # # There are only two supported options: --lt-debug and # --lt-dump-script. There is, deliberately, no --lt-help. # # The first argument to this parsing function should be the # script's $0 value, followed by "$@". lt_option_debug= func_parse_lt_options () { lt_script_arg0=\$0 shift for lt_opt do case \"\$lt_opt\" in --lt-debug) lt_option_debug=1 ;; --lt-dump-script) lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` cat \"\$lt_dump_D/\$lt_dump_F\" exit 0 ;; --lt-*) \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 exit 1 ;; esac done # Print the debug banner immediately: if test -n \"\$lt_option_debug\"; then echo \"$outputname:$output:\$LINENO: libtool wrapper (GNU $PACKAGE) $VERSION\" 1>&2 fi } # Used when --lt-debug. Prints its arguments to stdout # (redirection is the responsibility of the caller) func_lt_dump_args () { lt_dump_args_N=1; for lt_arg do \$ECHO \"$outputname:$output:\$LINENO: newargv[\$lt_dump_args_N]: \$lt_arg\" lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` done } # Core function for launching the target application func_exec_program_core () { " case $host in # Backslashes separate directories on plain windows *-*-mingw | *-*-os2* | *-cegcc*) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir\\\\\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} " ;; *) $ECHO "\ if test -n \"\$lt_option_debug\"; then \$ECHO \"$outputname:$output:\$LINENO: newargv[0]: \$progdir/\$program\" 1>&2 func_lt_dump_args \${1+\"\$@\"} 1>&2 fi exec \"\$progdir/\$program\" \${1+\"\$@\"} " ;; esac $ECHO "\ \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 exit 1 } # A function to encapsulate launching the target application # Strips options in the --lt-* namespace from \$@ and # launches target application with the remaining arguments. func_exec_program () { case \" \$* \" in *\\ --lt-*) for lt_wr_arg do case \$lt_wr_arg in --lt-*) ;; *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; esac shift done ;; esac func_exec_program_core \${1+\"\$@\"} } # Parse options func_parse_lt_options \"\$0\" \${1+\"\$@\"} # Find the directory that this script lives in. thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` test \"x\$thisdir\" = \"x\$file\" && thisdir=. # Follow symbolic links until we get to the real thisdir. 
file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` while test -n \"\$file\"; do destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` # If there was a directory component, then change thisdir. if test \"x\$destdir\" != \"x\$file\"; then case \"\$destdir\" in [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; *) thisdir=\"\$thisdir/\$destdir\" ;; esac fi file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` done # Usually 'no', except on cygwin/mingw when embedded into # the cwrapper. WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then # special case for '.' if test \"\$thisdir\" = \".\"; then thisdir=\`pwd\` fi # remove .libs from thisdir case \"\$thisdir\" in *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; $objdir ) thisdir=. ;; esac fi # Try to get the absolute directory name. absdir=\`cd \"\$thisdir\" && pwd\` test -n \"\$absdir\" && thisdir=\"\$absdir\" " if test yes = "$fast_install"; then $ECHO "\ program=lt-'$outputname'$exeext progdir=\"\$thisdir/$objdir\" if test ! -f \"\$progdir/\$program\" || { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | $SED 1q\`; \\ test \"X\$file\" != \"X\$progdir/\$program\"; }; then file=\"\$\$-\$program\" if test ! -d \"\$progdir\"; then $MKDIR \"\$progdir\" else $RM \"\$progdir/\$file\" fi" $ECHO "\ # relink executable if necessary if test -n \"\$relink_command\"; then if relink_command_output=\`eval \$relink_command 2>&1\`; then : else \$ECHO \"\$relink_command_output\" >&2 $RM \"\$progdir/\$file\" exit 1 fi fi $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || { $RM \"\$progdir/\$program\"; $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } $RM \"\$progdir/\$file\" fi" else $ECHO "\ program='$outputname' progdir=\"\$thisdir/$objdir\" " fi $ECHO "\ if test -f \"\$progdir/\$program\"; then" # fixup the dll searchpath if we need to. # # Fix the DLL searchpath if we need to. Do this before prepending # to shlibpath, because on Windows, both are PATH and uninstalled # libraries must come first. if test -n "$dllsearchpath"; then $ECHO "\ # Add the dll search path components to the executable PATH PATH=$dllsearchpath:\$PATH " fi # Export our shlibpath_var if we have one. if test yes = "$shlibpath_overrides_runpath" && test -n "$shlibpath_var" && test -n "$temp_rpath"; then $ECHO "\ # Add our own library path to $shlibpath_var $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" # Some systems cannot cope with colon-terminated $shlibpath_var # The second colon is a workaround for a bug in BeOS R4 sed $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` export $shlibpath_var " fi $ECHO "\ if test \"\$libtool_execute_magic\" != \"$magic\"; then # Run the actual program with our arguments. func_exec_program \${1+\"\$@\"} fi else # The program doesn't exist. \$ECHO \"\$0: error: '\$progdir/\$program' does not exist\" 1>&2 \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 exit 1 fi fi\ " } # func_emit_cwrapperexe_src # emit the source code for a wrapper executable on stdout # Must ONLY be called from within func_mode_link because # it depends on a number of variable set therein. 
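# Usage sketch (illustrative, hypothetical program name): both the wrapper
# script emitted above and the wrapper executable emitted below accept the
# same two --lt- options in front of the real arguments, e.g.
#   ./hello --lt-dump-script         # print the wrapper script and exit
#   ./hello --lt-debug arg1 arg2     # trace progdir/program and newargv, then exec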
func_emit_cwrapperexe_src () { cat < #include #ifdef _MSC_VER # include # include # include #else # include # include # ifdef __CYGWIN__ # include # endif #endif #include #include #include #include #include #include #include #include #define STREQ(s1, s2) (strcmp ((s1), (s2)) == 0) /* declarations of non-ANSI functions */ #if defined __MINGW32__ # ifdef __STRICT_ANSI__ int _putenv (const char *); # endif #elif defined __CYGWIN__ # ifdef __STRICT_ANSI__ char *realpath (const char *, char *); int putenv (char *); int setenv (const char *, const char *, int); # endif /* #elif defined other_platform || defined ... */ #endif /* portability defines, excluding path handling macros */ #if defined _MSC_VER # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv # define S_IXUSR _S_IEXEC #elif defined __MINGW32__ # define setmode _setmode # define stat _stat # define chmod _chmod # define getcwd _getcwd # define putenv _putenv #elif defined __CYGWIN__ # define HAVE_SETENV # define FOPEN_WB "wb" /* #elif defined other platforms ... */ #endif #if defined PATH_MAX # define LT_PATHMAX PATH_MAX #elif defined MAXPATHLEN # define LT_PATHMAX MAXPATHLEN #else # define LT_PATHMAX 1024 #endif #ifndef S_IXOTH # define S_IXOTH 0 #endif #ifndef S_IXGRP # define S_IXGRP 0 #endif /* path handling portability macros */ #ifndef DIR_SEPARATOR # define DIR_SEPARATOR '/' # define PATH_SEPARATOR ':' #endif #if defined _WIN32 || defined __MSDOS__ || defined __DJGPP__ || \ defined __OS2__ # define HAVE_DOS_BASED_FILE_SYSTEM # define FOPEN_WB "wb" # ifndef DIR_SEPARATOR_2 # define DIR_SEPARATOR_2 '\\' # endif # ifndef PATH_SEPARATOR_2 # define PATH_SEPARATOR_2 ';' # endif #endif #ifndef DIR_SEPARATOR_2 # define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) #else /* DIR_SEPARATOR_2 */ # define IS_DIR_SEPARATOR(ch) \ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) #endif /* DIR_SEPARATOR_2 */ #ifndef PATH_SEPARATOR_2 # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) #else /* PATH_SEPARATOR_2 */ # define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) #endif /* PATH_SEPARATOR_2 */ #ifndef FOPEN_WB # define FOPEN_WB "w" #endif #ifndef _O_BINARY # define _O_BINARY 0 #endif #define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) #define XFREE(stale) do { \ if (stale) { free (stale); stale = 0; } \ } while (0) #if defined LT_DEBUGWRAPPER static int lt_debug = 1; #else static int lt_debug = 0; #endif const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ void *xmalloc (size_t num); char *xstrdup (const char *string); const char *base_name (const char *name); char *find_executable (const char *wrapper); char *chase_symlinks (const char *pathspec); int make_executable (const char *path); int check_executable (const char *path); char *strendzap (char *str, const char *pat); void lt_debugprintf (const char *file, int line, const char *fmt, ...); void lt_fatal (const char *file, int line, const char *message, ...); static const char *nonnull (const char *s); static const char *nonempty (const char *s); void lt_setenv (const char *name, const char *value); char *lt_extend_str (const char *orig_value, const char *add, int to_end); void lt_update_exe_path (const char *name, const char *value); void lt_update_lib_path (const char *name, const char *value); char **prepare_spawn (char **argv); void lt_dump_script (FILE *f); EOF cat <= 0) && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) return 1; else return 0; } int make_executable (const 
char *path) { int rval = 0; struct stat st; lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", nonempty (path)); if ((!path) || (!*path)) return 0; if (stat (path, &st) >= 0) { rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); } return rval; } /* Searches for the full path of the wrapper. Returns newly allocated full path name if found, NULL otherwise Does not chase symlinks, even on platforms that support them. */ char * find_executable (const char *wrapper) { int has_slash = 0; const char *p; const char *p_next; /* static buffer for getcwd */ char tmp[LT_PATHMAX + 1]; size_t tmp_len; char *concat_name; lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", nonempty (wrapper)); if ((wrapper == NULL) || (*wrapper == '\0')) return NULL; /* Absolute path? */ #if defined HAVE_DOS_BASED_FILE_SYSTEM if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } else { #endif if (IS_DIR_SEPARATOR (wrapper[0])) { concat_name = xstrdup (wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } #if defined HAVE_DOS_BASED_FILE_SYSTEM } #endif for (p = wrapper; *p; p++) if (*p == '/') { has_slash = 1; break; } if (!has_slash) { /* no slashes; search PATH */ const char *path = getenv ("PATH"); if (path != NULL) { for (p = path; *p; p = p_next) { const char *q; size_t p_len; for (q = p; *q; q++) if (IS_PATH_SEPARATOR (*q)) break; p_len = (size_t) (q - p); p_next = (*q == '\0' ? q : q + 1); if (p_len == 0) { /* empty path: current directory */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); } else { concat_name = XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, p, p_len); concat_name[p_len] = '/'; strcpy (concat_name + p_len + 1, wrapper); } if (check_executable (concat_name)) return concat_name; XFREE (concat_name); } } /* not found in PATH; assume curdir */ } /* Relative path | not found in path: prepend cwd */ if (getcwd (tmp, LT_PATHMAX) == NULL) lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", nonnull (strerror (errno))); tmp_len = strlen (tmp); concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); memcpy (concat_name, tmp, tmp_len); concat_name[tmp_len] = '/'; strcpy (concat_name + tmp_len + 1, wrapper); if (check_executable (concat_name)) return concat_name; XFREE (concat_name); return NULL; } char * chase_symlinks (const char *pathspec) { #ifndef S_ISLNK return xstrdup (pathspec); #else char buf[LT_PATHMAX]; struct stat s; char *tmp_pathspec = xstrdup (pathspec); char *p; int has_symlinks = 0; while (strlen (tmp_pathspec) && !has_symlinks) { lt_debugprintf (__FILE__, __LINE__, "checking path component for symlinks: %s\n", tmp_pathspec); if (lstat (tmp_pathspec, &s) == 0) { if (S_ISLNK (s.st_mode) != 0) { has_symlinks = 1; break; } /* search backwards for last DIR_SEPARATOR */ p = tmp_pathspec + strlen (tmp_pathspec) - 1; while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) p--; if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) { /* no more DIR_SEPARATORS left */ break; } *p = '\0'; } else { lt_fatal (__FILE__, __LINE__, "error accessing file \"%s\": %s", tmp_pathspec, nonnull (strerror (errno))); } } XFREE 
(tmp_pathspec); if (!has_symlinks) { return xstrdup (pathspec); } tmp_pathspec = realpath (pathspec, buf); if (tmp_pathspec == 0) { lt_fatal (__FILE__, __LINE__, "could not follow symlinks for %s", pathspec); } return xstrdup (tmp_pathspec); #endif } char * strendzap (char *str, const char *pat) { size_t len, patlen; assert (str != NULL); assert (pat != NULL); len = strlen (str); patlen = strlen (pat); if (patlen <= len) { str += len - patlen; if (STREQ (str, pat)) *str = '\0'; } return str; } void lt_debugprintf (const char *file, int line, const char *fmt, ...) { va_list args; if (lt_debug) { (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); va_start (args, fmt); (void) vfprintf (stderr, fmt, args); va_end (args); } } static void lt_error_core (int exit_status, const char *file, int line, const char *mode, const char *message, va_list ap) { fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); vfprintf (stderr, message, ap); fprintf (stderr, ".\n"); if (exit_status >= 0) exit (exit_status); } void lt_fatal (const char *file, int line, const char *message, ...) { va_list ap; va_start (ap, message); lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); va_end (ap); } static const char * nonnull (const char *s) { return s ? s : "(null)"; } static const char * nonempty (const char *s) { return (s && !*s) ? "(empty)" : nonnull (s); } void lt_setenv (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_setenv) setting '%s' to '%s'\n", nonnull (name), nonnull (value)); { #ifdef HAVE_SETENV /* always make a copy, for consistency with !HAVE_SETENV */ char *str = xstrdup (value); setenv (name, str, 1); #else size_t len = strlen (name) + 1 + strlen (value) + 1; char *str = XMALLOC (char, len); sprintf (str, "%s=%s", name, value); if (putenv (str) != EXIT_SUCCESS) { XFREE (str); } #endif } } char * lt_extend_str (const char *orig_value, const char *add, int to_end) { char *new_value; if (orig_value && *orig_value) { size_t orig_value_len = strlen (orig_value); size_t add_len = strlen (add); new_value = XMALLOC (char, add_len + orig_value_len + 1); if (to_end) { strcpy (new_value, orig_value); strcpy (new_value + orig_value_len, add); } else { strcpy (new_value, add); strcpy (new_value + add_len, orig_value); } } else { new_value = xstrdup (add); } return new_value; } void lt_update_exe_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); /* some systems can't cope with a ':'-terminated path #' */ size_t len = strlen (new_value); while ((len > 0) && IS_PATH_SEPARATOR (new_value[len-1])) { new_value[--len] = '\0'; } lt_setenv (name, new_value); XFREE (new_value); } } void lt_update_lib_path (const char *name, const char *value) { lt_debugprintf (__FILE__, __LINE__, "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", nonnull (name), nonnull (value)); if (name && *name && value && *value) { char *new_value = lt_extend_str (getenv (name), value, 0); lt_setenv (name, new_value); XFREE (new_value); } } EOF case $host_os in mingw*) cat <<"EOF" /* Prepares an argument vector before calling spawn(). Note that spawn() does not by itself call the command interpreter (getenv ("COMSPEC") != NULL ? 
getenv ("COMSPEC") : ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); GetVersionEx(&v); v.dwPlatformId == VER_PLATFORM_WIN32_NT; }) ? "cmd.exe" : "command.com"). Instead it simply concatenates the arguments, separated by ' ', and calls CreateProcess(). We must quote the arguments since Win32 CreateProcess() interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a special way: - Space and tab are interpreted as delimiters. They are not treated as delimiters if they are surrounded by double quotes: "...". - Unescaped double quotes are removed from the input. Their only effect is that within double quotes, space and tab are treated like normal characters. - Backslashes not followed by double quotes are not special. - But 2*n+1 backslashes followed by a double quote become n backslashes followed by a double quote (n >= 0): \" -> " \\\" -> \" \\\\\" -> \\" */ #define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" #define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" char ** prepare_spawn (char **argv) { size_t argc; char **new_argv; size_t i; /* Count number of arguments. */ for (argc = 0; argv[argc] != NULL; argc++) ; /* Allocate new argument vector. */ new_argv = XMALLOC (char *, argc + 1); /* Put quoted arguments into the new argument vector. */ for (i = 0; i < argc; i++) { const char *string = argv[i]; if (string[0] == '\0') new_argv[i] = xstrdup ("\"\""); else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) { int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); size_t length; unsigned int backslashes; const char *s; char *quoted_string; char *p; length = 0; backslashes = 0; if (quote_around) length++; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') length += backslashes + 1; length++; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) length += backslashes + 1; quoted_string = XMALLOC (char, length + 1); p = quoted_string; backslashes = 0; if (quote_around) *p++ = '"'; for (s = string; *s != '\0'; s++) { char c = *s; if (c == '"') { unsigned int j; for (j = backslashes + 1; j > 0; j--) *p++ = '\\'; } *p++ = c; if (c == '\\') backslashes++; else backslashes = 0; } if (quote_around) { unsigned int j; for (j = backslashes; j > 0; j--) *p++ = '\\'; *p++ = '"'; } *p = '\0'; new_argv[i] = quoted_string; } else new_argv[i] = (char *) string; } new_argv[argc] = NULL; return new_argv; } EOF ;; esac cat <<"EOF" void lt_dump_script (FILE* f) { EOF func_emit_wrapper yes | $SED -n -e ' s/^\(.\{79\}\)\(..*\)/\1\ \2/ h s/\([\\"]\)/\\\1/g s/$/\\n/ s/\([^\n]*\).*/ fputs ("\1", f);/p g D' cat <<"EOF" } EOF } # end: func_emit_cwrapperexe_src # func_win32_import_lib_p ARG # True if ARG is an import lib, as indicated by $file_magic_cmd func_win32_import_lib_p () { $debug_cmd case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in *import*) : ;; *) false ;; esac } # func_suncc_cstd_abi # !!ONLY CALL THIS FOR SUN CC AFTER $compile_command IS FULLY EXPANDED!! # Several compiler flags select an ABI that is incompatible with the # Cstd library. Avoid specifying it if any are in CXXFLAGS. 
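# Illustrative: if the expanded $compile_command carries -compat=g, a
# -std=c++NN flag, -library=stdcxx4 or -library=stlport4, the function below
# sets suncc_use_cstd_abi=no; otherwise it remains yes.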
func_suncc_cstd_abi () { $debug_cmd case " $compile_command " in *" -compat=g "*|*\ -std=c++[0-9][0-9]\ *|*" -library=stdcxx4 "*|*" -library=stlport4 "*) suncc_use_cstd_abi=no ;; *) suncc_use_cstd_abi=yes ;; esac } # func_mode_link arg... func_mode_link () { $debug_cmd case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) # It is impossible to link a dll without this setting, and # we shouldn't force the makefile maintainer to figure out # what system we are compiling for in order to pass an extra # flag for every libtool invocation. # allow_undefined=no # FIXME: Unfortunately, there are problems with the above when trying # to make a dll that has undefined symbols, in which case not # even a static library is built. For now, we need to specify # -no-undefined on the libtool link line when we can be certain # that all symbols are satisfied, otherwise we get a static library. allow_undefined=yes ;; *) allow_undefined=yes ;; esac libtool_args=$nonopt base_compile="$nonopt $@" compile_command=$nonopt finalize_command=$nonopt compile_rpath= finalize_rpath= compile_shlibpath= finalize_shlibpath= convenience= old_convenience= deplibs= old_deplibs= compiler_flags= linker_flags= dllsearchpath= lib_search_path=`pwd` inst_prefix_dir= new_inherited_linker_flags= avoid_version=no bindir= dlfiles= dlprefiles= dlself=no export_dynamic=no export_symbols= export_symbols_regex= generated= libobjs= ltlibs= module=no no_install=no objs= os2dllname= non_pic_objects= precious_files_regex= prefer_static_libs=no preload=false prev= prevarg= release= rpath= xrpath= perm_rpath= temp_rpath= thread_safe=no vinfo= vinfo_number=no weak_libs= single_module=$wl-single_module func_infer_tag $base_compile # We need to know -static, to get the right output filenames. for arg do case $arg in -shared) test yes != "$build_libtool_libs" \ && func_fatal_configuration "cannot build a shared library" build_old_libs=no break ;; -all-static | -static | -static-libtool-libs) case $arg in -all-static) if test yes = "$build_libtool_libs" && test -z "$link_static_flag"; then func_warning "complete static linking is impossible in this configuration" fi if test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; -static) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=built ;; -static-libtool-libs) if test -z "$pic_flag" && test -n "$link_static_flag"; then dlopen_self=$dlopen_self_static fi prefer_static_libs=yes ;; esac build_libtool_libs=no build_old_libs=yes break ;; esac done # See if our shared archives depend on static archives. test -n "$old_archive_from_new_cmds" && build_old_libs=yes # Go through the arguments, transforming them on the way. while test "$#" -gt 0; do arg=$1 shift func_quote_for_eval "$arg" qarg=$func_quote_for_eval_unquoted_result func_append libtool_args " $func_quote_for_eval_result" # If the previous option needs an argument, assign it. if test -n "$prev"; then case $prev in output) func_append compile_command " @OUTPUT@" func_append finalize_command " @OUTPUT@" ;; esac case $prev in bindir) bindir=$arg prev= continue ;; dlfiles|dlprefiles) $preload || { # Add the symbol object into the linking commands. func_append compile_command " @SYMFILE@" func_append finalize_command " @SYMFILE@" preload=: } case $arg in *.la | *.lo) ;; # We handle these cases below. 
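	  # The remaining -dlopen/-dlpreopen arguments are handled in the arms
	  # below: 'force' only forces export_dynamic so the program exports its
	  # own symbols, 'self' requests dlopening the program itself, and any
	  # other name is recorded in dlfiles or dlprefiles.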
force) if test no = "$dlself"; then dlself=needless export_dynamic=yes fi prev= continue ;; self) if test dlprefiles = "$prev"; then dlself=yes elif test dlfiles = "$prev" && test yes != "$dlopen_self"; then dlself=yes else dlself=needless export_dynamic=yes fi prev= continue ;; *) if test dlfiles = "$prev"; then func_append dlfiles " $arg" else func_append dlprefiles " $arg" fi prev= continue ;; esac ;; expsyms) export_symbols=$arg test -f "$arg" \ || func_fatal_error "symbol file '$arg' does not exist" prev= continue ;; expsyms_regex) export_symbols_regex=$arg prev= continue ;; framework) case $host in *-*-darwin*) case "$deplibs " in *" $qarg.ltframework "*) ;; *) func_append deplibs " $qarg.ltframework" # this is fixed later ;; esac ;; esac prev= continue ;; inst_prefix) inst_prefix_dir=$arg prev= continue ;; mllvm) # Clang does not use LLVM to link, so we can simply discard any # '-mllvm $arg' options when doing the link step. prev= continue ;; objectlist) if test -f "$arg"; then save_arg=$arg moreargs= for fil in `cat "$save_arg"` do # func_append moreargs " $fil" arg=$fil # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test none = "$pic_object" && test none = "$non_pic_object"; then func_fatal_error "cannot find name of object for '$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result if test none != "$pic_object"; then # Prepend the subdirectory the object is found in. pic_object=$xdir$pic_object if test dlfiles = "$prev"; then if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test dlprefiles = "$prev"; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg=$pic_object fi # Non-PIC object. if test none != "$non_pic_object"; then # Prepend the subdirectory the object is found in. non_pic_object=$xdir$non_pic_object # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test none = "$pic_object"; then arg=$non_pic_object fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object=$pic_object func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "'$arg' is not a valid libtool object" fi fi done else func_fatal_error "link input file '$arg' does not exist" fi arg=$save_arg prev= continue ;; os2dllname) os2dllname=$arg prev= continue ;; precious_regex) precious_files_regex=$arg prev= continue ;; release) release=-$arg prev= continue ;; rpath | xrpath) # We need an absolute path. 
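	  # (rpath accumulates the -rpath installation directories of libtool
	  # libraries; xrpath accumulates the extra -R run-time search paths.
	  # Both are required to be absolute and are deduplicated below.)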
case $arg in [\\/]* | [A-Za-z]:[\\/]*) ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac if test rpath = "$prev"; then case "$rpath " in *" $arg "*) ;; *) func_append rpath " $arg" ;; esac else case "$xrpath " in *" $arg "*) ;; *) func_append xrpath " $arg" ;; esac fi prev= continue ;; shrext) shrext_cmds=$arg prev= continue ;; weak) func_append weak_libs " $arg" prev= continue ;; xcclinker) func_append linker_flags " $qarg" func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xcompiler) func_append compiler_flags " $qarg" prev= func_append compile_command " $qarg" func_append finalize_command " $qarg" continue ;; xlinker) func_append linker_flags " $qarg" func_append compiler_flags " $wl$qarg" prev= func_append compile_command " $wl$qarg" func_append finalize_command " $wl$qarg" continue ;; *) eval "$prev=\"\$arg\"" prev= continue ;; esac fi # test -n "$prev" prevarg=$arg case $arg in -all-static) if test -n "$link_static_flag"; then # See comment for -static flag below, for more details. func_append compile_command " $link_static_flag" func_append finalize_command " $link_static_flag" fi continue ;; -allow-undefined) # FIXME: remove this flag sometime in the future. func_fatal_error "'-allow-undefined' must not be used because it is the default" ;; -avoid-version) avoid_version=yes continue ;; -bindir) prev=bindir continue ;; -dlopen) prev=dlfiles continue ;; -dlpreopen) prev=dlprefiles continue ;; -export-dynamic) export_dynamic=yes continue ;; -export-symbols | -export-symbols-regex) if test -n "$export_symbols" || test -n "$export_symbols_regex"; then func_fatal_error "more than one -exported-symbols argument is not allowed" fi if test X-export-symbols = "X$arg"; then prev=expsyms else prev=expsyms_regex fi continue ;; -framework) prev=framework continue ;; -inst-prefix-dir) prev=inst_prefix continue ;; # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* # so, if we see these flags be careful not to treat them like -L -L[A-Z][A-Z]*:*) case $with_gcc/$host in no/*-*-irix* | /*-*-irix*) func_append compile_command " $arg" func_append finalize_command " $arg" ;; esac continue ;; -L*) func_stripname "-L" '' "$arg" if test -z "$func_stripname_result"; then if test "$#" -gt 0; then func_fatal_error "require no space between '-L' and '$1'" else func_fatal_error "need path for '-L' option" fi fi func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # We need an absolute path. 
case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; *) absdir=`cd "$dir" && pwd` test -z "$absdir" && \ func_fatal_error "cannot determine absolute directory name of '$dir'" dir=$absdir ;; esac case "$deplibs " in *" -L$dir "* | *" $arg "*) # Will only happen for absolute or sysroot arguments ;; *) # Preserve sysroot, but never include relative directories case $dir in [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; *) func_append deplibs " -L$dir" ;; esac func_append lib_search_path " $dir" ;; esac case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` case :$dllsearchpath: in *":$dir:"*) ;; ::) dllsearchpath=$dir;; *) func_append dllsearchpath ":$dir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac continue ;; -l*) if test X-lc = "X$arg" || test X-lm = "X$arg"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) # These systems don't actually have a C or math library (as such) continue ;; *-*-os2*) # These systems don't actually have a C library (as such) test X-lc = "X$arg" && continue ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) # Do not include libc due to us having libc/libc_r. test X-lc = "X$arg" && continue ;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C and math libraries are in the System framework func_append deplibs " System.ltframework" continue ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype test X-lc = "X$arg" && continue ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work test X-lc = "X$arg" && continue ;; esac elif test X-lc_r = "X$arg"; then case $host in *-*-openbsd* | *-*-freebsd* | *-*-dragonfly* | *-*-bitrig*) # Do not include libc_r directly, use -pthread flag. continue ;; esac fi func_append deplibs " $arg" continue ;; -mllvm) prev=mllvm continue ;; -module) module=yes continue ;; # Tru64 UNIX uses -model [arg] to determine the layout of C++ # classes, name mangling, and exception handling. # Darwin uses the -arch flag to determine output architecture. -model|-arch|-isysroot|--sysroot) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" prev=xcompiler continue ;; -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) func_append compiler_flags " $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case "$new_inherited_linker_flags " in *" $arg "*) ;; * ) func_append new_inherited_linker_flags " $arg" ;; esac continue ;; -multi_module) single_module=$wl-multi_module continue ;; -no-fast-install) fast_install=no continue ;; -no-install) case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) # The PATH hackery in wrapper scripts is required on Windows # and Darwin in order for the loader to find any dlls it needs. 
func_warning "'-no-install' is ignored for $host" func_warning "assuming '-no-fast-install' instead" fast_install=no ;; *) no_install=yes ;; esac continue ;; -no-undefined) allow_undefined=no continue ;; -objectlist) prev=objectlist continue ;; -os2dllname) prev=os2dllname continue ;; -o) prev=output ;; -precious-files-regex) prev=precious_regex continue ;; -release) prev=release continue ;; -rpath) prev=rpath continue ;; -R) prev=xrpath continue ;; -R*) func_stripname '-R' '' "$arg" dir=$func_stripname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) ;; =*) func_stripname '=' '' "$dir" dir=$lt_sysroot$func_stripname_result ;; *) func_fatal_error "only absolute run-paths are allowed" ;; esac case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac continue ;; -shared) # The effects of -shared are defined in a previous loop. continue ;; -shrext) prev=shrext continue ;; -static | -static-libtool-libs) # The effects of -static are defined in a previous loop. # We used to do the same as -all-static on platforms that # didn't have a PIC flag, but the assumption that the effects # would be equivalent was wrong. It would break on at least # Digital Unix and AIX. continue ;; -thread-safe) thread_safe=yes continue ;; -version-info) prev=vinfo continue ;; -version-number) prev=vinfo vinfo_number=yes continue ;; -weak) prev=weak continue ;; -Wc,*) func_stripname '-Wc,' '' "$arg" args=$func_stripname_result arg= save_ifs=$IFS; IFS=, for flag in $args; do IFS=$save_ifs func_quote_for_eval "$flag" func_append arg " $func_quote_for_eval_result" func_append compiler_flags " $func_quote_for_eval_result" done IFS=$save_ifs func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Wl,*) func_stripname '-Wl,' '' "$arg" args=$func_stripname_result arg= save_ifs=$IFS; IFS=, for flag in $args; do IFS=$save_ifs func_quote_for_eval "$flag" func_append arg " $wl$func_quote_for_eval_result" func_append compiler_flags " $wl$func_quote_for_eval_result" func_append linker_flags " $func_quote_for_eval_result" done IFS=$save_ifs func_stripname ' ' '' "$arg" arg=$func_stripname_result ;; -Xcompiler) prev=xcompiler continue ;; -Xlinker) prev=xlinker continue ;; -XCClinker) prev=xcclinker continue ;; # -msg_* for osf cc -msg_*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; # Flags to be passed through unchanged, with rationale: # -64, -mips[0-9] enable 64-bit mode for the SGI compiler # -r[0-9][0-9]* specify processor for the SGI compiler # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler # +DA*, +DD* enable 64-bit mode for the HP compiler # -q* compiler args for the IBM compiler # -m*, -t[45]*, -txscale* architecture-specific flags for GCC # -F/path path to uninstalled frameworks, gcc on darwin # -p, -pg, --coverage, -fprofile-* profiling flags for GCC # -fstack-protector* stack protector flags for GCC # @file GCC response files # -tp=* Portland pgcc target processor selection # --sysroot=* for sysroot support # -O*, -g*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization # -specs=* GCC specs files # -stdlib=* select c++ std lib with clang # -fsanitize=* Clang/GCC memory and address sanitizer -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ -O*|-g*|-flto*|-fwhopr*|-fuse-linker-plugin|-fstack-protector*|-stdlib=*| \ -specs=*|-fsanitize=*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result func_append compile_command " $arg" func_append 
finalize_command " $arg" func_append compiler_flags " $arg" continue ;; -Z*) if test os2 = "`expr $host : '.*\(os2\)'`"; then # OS/2 uses -Zxxx to specify OS/2-specific options compiler_flags="$compiler_flags $arg" func_append compile_command " $arg" func_append finalize_command " $arg" case $arg in -Zlinker | -Zstack) prev=xcompiler ;; esac continue else # Otherwise treat like 'Some other compiler flag' below func_quote_for_eval "$arg" arg=$func_quote_for_eval_result fi ;; # Some other compiler flag. -* | +*) func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; *.$objext) # A standard object. func_append objs " $arg" ;; *.lo) # A libtool-controlled object. # Check to see that this really is a libtool object. if func_lalib_unsafe_p "$arg"; then pic_object= non_pic_object= # Read the .lo file func_source "$arg" if test -z "$pic_object" || test -z "$non_pic_object" || test none = "$pic_object" && test none = "$non_pic_object"; then func_fatal_error "cannot find name of object for '$arg'" fi # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result test none = "$pic_object" || { # Prepend the subdirectory the object is found in. pic_object=$xdir$pic_object if test dlfiles = "$prev"; then if test yes = "$build_libtool_libs" && test yes = "$dlopen_support"; then func_append dlfiles " $pic_object" prev= continue else # If libtool objects are unsupported, then we need to preload. prev=dlprefiles fi fi # CHECK ME: I think I busted this. -Ossama if test dlprefiles = "$prev"; then # Preload the old-style object. func_append dlprefiles " $pic_object" prev= fi # A PIC object. func_append libobjs " $pic_object" arg=$pic_object } # Non-PIC object. if test none != "$non_pic_object"; then # Prepend the subdirectory the object is found in. non_pic_object=$xdir$non_pic_object # A standard non-PIC object func_append non_pic_objects " $non_pic_object" if test -z "$pic_object" || test none = "$pic_object"; then arg=$non_pic_object fi else # If the PIC object exists, use it instead. # $xdir was prepended to $pic_object above. non_pic_object=$pic_object func_append non_pic_objects " $non_pic_object" fi else # Only an error if not doing a dry-run. if $opt_dry_run; then # Extract subdirectory from the argument. func_dirname "$arg" "/" "" xdir=$func_dirname_result func_lo2o "$arg" pic_object=$xdir$objdir/$func_lo2o_result non_pic_object=$xdir$func_lo2o_result func_append libobjs " $pic_object" func_append non_pic_objects " $non_pic_object" else func_fatal_error "'$arg' is not a valid libtool object" fi fi ;; *.$libext) # An archive. func_append deplibs " $arg" func_append old_deplibs " $arg" continue ;; *.la) # A libtool-controlled library. func_resolve_sysroot "$arg" if test dlfiles = "$prev"; then # This library was specified with -dlopen. func_append dlfiles " $func_resolve_sysroot_result" prev= elif test dlprefiles = "$prev"; then # The library was specified with -dlpreopen. func_append dlprefiles " $func_resolve_sysroot_result" prev= else func_append deplibs " $func_resolve_sysroot_result" fi continue ;; # Some other compiler argument. *) # Unknown arguments in both finalize_command and compile_command need # to be aesthetically quoted because they are evaled later. func_quote_for_eval "$arg" arg=$func_quote_for_eval_result ;; esac # arg # Now actually substitute the argument into the commands. 
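      # Illustrative example (hypothetical invocation): for
      #   libtool --mode=link gcc -O2 -g -o libfoo.la foo.lo -rpath /usr/lib
      # the -O2 and -g flags matched the pass-through patterns above and were
      # already appended to the commands there, foo.lo was recorded in libobjs
      # and $arg rewritten to its PIC object, and /usr/lib was collected via
      # prev=rpath; whatever remains in $arg is appended here.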
if test -n "$arg"; then func_append compile_command " $arg" func_append finalize_command " $arg" fi done # argument parsing loop test -n "$prev" && \ func_fatal_help "the '$prevarg' option requires an argument" if test yes = "$export_dynamic" && test -n "$export_dynamic_flag_spec"; then eval arg=\"$export_dynamic_flag_spec\" func_append compile_command " $arg" func_append finalize_command " $arg" fi oldlibs= # calculate the name of the file, without its directory func_basename "$output" outputname=$func_basename_result libobjs_save=$libobjs if test -n "$shlibpath_var"; then # get the directories listed in $shlibpath_var eval shlib_search_path=\`\$ECHO \"\$$shlibpath_var\" \| \$SED \'s/:/ /g\'\` else shlib_search_path= fi eval sys_lib_search_path=\"$sys_lib_search_path_spec\" eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" # Definition is injected by LT_CONFIG during libtool generation. func_munge_path_list sys_lib_dlsearch_path "$LT_SYS_LIBRARY_PATH" func_dirname "$output" "/" "" output_objdir=$func_dirname_result$objdir func_to_tool_file "$output_objdir/" tool_output_objdir=$func_to_tool_file_result # Create the object directory. func_mkdir_p "$output_objdir" # Determine the type of output case $output in "") func_fatal_help "you must specify an output file" ;; *.$libext) linkmode=oldlib ;; *.lo | *.$objext) linkmode=obj ;; *.la) linkmode=lib ;; *) linkmode=prog ;; # Anything else should be a program. esac specialdeplibs= libs= # Find all interdependent deplibs by searching for libraries # that are linked more than once (e.g. -la -lb -la) for deplib in $deplibs; do if $opt_preserve_dup_deps; then case "$libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append libs " $deplib" done if test lib = "$linkmode"; then libs="$predeps $libs $compiler_lib_search_path $postdeps" # Compute libraries that are listed more than once in $predeps # $postdeps and mark them as special (i.e., whose duplicates are # not to be eliminated). pre_post_deps= if $opt_duplicate_compiler_generated_deps; then for pre_post_dep in $predeps $postdeps; do case "$pre_post_deps " in *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; esac func_append pre_post_deps " $pre_post_dep" done fi pre_post_deps= fi deplibs= newdependency_libs= newlib_search_path= need_relink=no # whether we're linking any uninstalled libtool libraries notinst_deplibs= # not-installed libtool libraries notinst_path= # paths that contain not-installed libtool libraries case $linkmode in lib) passes="conv dlpreopen link" for file in $dlfiles $dlprefiles; do case $file in *.la) ;; *) func_fatal_help "libraries can '-dlopen' only libtool libraries: $file" ;; esac done ;; prog) compile_deplibs= finalize_deplibs= alldeplibs=false newdlfiles= newdlprefiles= passes="conv scan dlopen dlpreopen link" ;; *) passes="conv" ;; esac for pass in $passes; do # The preopen pass in lib mode reverses $deplibs; put it back here # so that -L comes before libs that need it for instance... 
if test lib,link = "$linkmode,$pass"; then ## FIXME: Find the place where the list is rebuilt in the wrong ## order, and fix it there properly tmp_deplibs= for deplib in $deplibs; do tmp_deplibs="$deplib $tmp_deplibs" done deplibs=$tmp_deplibs fi if test lib,link = "$linkmode,$pass" || test prog,scan = "$linkmode,$pass"; then libs=$deplibs deplibs= fi if test prog = "$linkmode"; then case $pass in dlopen) libs=$dlfiles ;; dlpreopen) libs=$dlprefiles ;; link) libs="$deplibs %DEPLIBS%" test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" ;; esac fi if test lib,dlpreopen = "$linkmode,$pass"; then # Collect and forward deplibs of preopened libtool libs for lib in $dlprefiles; do # Ignore non-libtool-libs dependency_libs= func_resolve_sysroot "$lib" case $lib in *.la) func_source "$func_resolve_sysroot_result" ;; esac # Collect preopened libtool deplibs, except any this library # has declared as weak libs for deplib in $dependency_libs; do func_basename "$deplib" deplib_base=$func_basename_result case " $weak_libs " in *" $deplib_base "*) ;; *) func_append deplibs " $deplib" ;; esac done done libs=$dlprefiles fi if test dlopen = "$pass"; then # Collect dlpreopened libraries save_deplibs=$deplibs deplibs= fi for deplib in $libs; do lib= found=false case $deplib in -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) if test prog,link = "$linkmode,$pass"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append compiler_flags " $deplib" if test lib = "$linkmode"; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -l*) if test lib != "$linkmode" && test prog != "$linkmode"; then func_warning "'-l' is ignored for archives/objects" continue fi func_stripname '-l' '' "$deplib" name=$func_stripname_result if test lib = "$linkmode"; then searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" else searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" fi for searchdir in $searchdirs; do for search_ext in .la $std_shrext .so .a; do # Search the libtool library lib=$searchdir/lib$name$search_ext if test -f "$lib"; then if test .la = "$search_ext"; then found=: else found=false fi break 2 fi done done if $found; then # deplib is a libtool library # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, # We need to do some special things here, and not later. if test yes = "$allow_libtool_libs_with_static_runtimes"; then case " $predeps $postdeps " in *" $deplib "*) if func_lalib_p "$lib"; then library_names= old_library= func_source "$lib" for l in $old_library $library_names; do ll=$l done if test "X$ll" = "X$old_library"; then # only static version available found=false func_dirname "$lib" "" "." 
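# (libtool's shell helpers cannot return strings directly, so by convention
# each func_* leaves its answer in a matching *_result variable:
# func_dirname just above sets $func_dirname_result, func_stripname sets
# $func_stripname_result, and so on.)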
ladir=$func_dirname_result lib=$ladir/$old_library if test prog,link = "$linkmode,$pass"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" fi continue fi fi ;; *) ;; esac fi else # deplib doesn't seem to be a libtool library if test prog,link = "$linkmode,$pass"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" test lib = "$linkmode" && newdependency_libs="$deplib $newdependency_libs" fi continue fi ;; # -l *.ltframework) if test prog,link = "$linkmode,$pass"; then compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else deplibs="$deplib $deplibs" if test lib = "$linkmode"; then case "$new_inherited_linker_flags " in *" $deplib "*) ;; * ) func_append new_inherited_linker_flags " $deplib" ;; esac fi fi continue ;; -L*) case $linkmode in lib) deplibs="$deplib $deplibs" test conv = "$pass" && continue newdependency_libs="$deplib $newdependency_libs" func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; prog) if test conv = "$pass"; then deplibs="$deplib $deplibs" continue fi if test scan = "$pass"; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; *) func_warning "'-L' is ignored for archives/objects" ;; esac # linkmode continue ;; # -L -R*) if test link = "$pass"; then func_stripname '-R' '' "$deplib" func_resolve_sysroot "$func_stripname_result" dir=$func_resolve_sysroot_result # Make sure the xrpath contains only unique directories. case "$xrpath " in *" $dir "*) ;; *) func_append xrpath " $dir" ;; esac fi deplibs="$deplib $deplibs" continue ;; *.la) func_resolve_sysroot "$deplib" lib=$func_resolve_sysroot_result ;; *.$libext) if test conv = "$pass"; then deplibs="$deplib $deplibs" continue fi case $linkmode in lib) # Linking convenience modules into shared libraries is allowed, # but linking other static libraries is non-portable. case " $dlpreconveniencelibs " in *" $deplib "*) ;; *) valid_a_lib=false case $deplibs_check_method in match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ | $EGREP "$match_pattern_regex" > /dev/null; then valid_a_lib=: fi ;; pass_all) valid_a_lib=: ;; esac if $valid_a_lib; then echo $ECHO "*** Warning: Linking the shared library $output against the" $ECHO "*** static library $deplib is not portable!" deplibs="$deplib $deplibs" else echo $ECHO "*** Warning: Trying to link with static lib archive $deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because the file extensions .$libext of this argument makes me believe" echo "*** that it is just a static archive that I should not use here." 
fi ;; esac continue ;; prog) if test link != "$pass"; then deplibs="$deplib $deplibs" else compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" fi continue ;; esac # linkmode ;; # *.$libext *.lo | *.$objext) if test conv = "$pass"; then deplibs="$deplib $deplibs" elif test prog = "$linkmode"; then if test dlpreopen = "$pass" || test yes != "$dlopen_support" || test no = "$build_libtool_libs"; then # If there is no dlopen support or we're linking statically, # we need to preload. func_append newdlprefiles " $deplib" compile_deplibs="$deplib $compile_deplibs" finalize_deplibs="$deplib $finalize_deplibs" else func_append newdlfiles " $deplib" fi fi continue ;; %DEPLIBS%) alldeplibs=: continue ;; esac # case $deplib $found || test -f "$lib" \ || func_fatal_error "cannot find the library '$lib' or unhandled argument '$deplib'" # Check to see that this really is a libtool archive. func_lalib_unsafe_p "$lib" \ || func_fatal_error "'$lib' is not a valid libtool archive" func_dirname "$lib" "" "." ladir=$func_dirname_result dlname= dlopen= dlpreopen= libdir= library_names= old_library= inherited_linker_flags= # If the library was installed with an old release of libtool, # it will not redefine variables installed, or shouldnotlink installed=yes shouldnotlink=no avoidtemprpath= # Read the .la file func_source "$lib" # Convert "-framework foo" to "foo.ltframework" if test -n "$inherited_linker_flags"; then tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do case " $new_inherited_linker_flags " in *" $tmp_inherited_linker_flag "*) ;; *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; esac done fi dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` if test lib,link = "$linkmode,$pass" || test prog,scan = "$linkmode,$pass" || { test prog != "$linkmode" && test lib != "$linkmode"; }; then test -n "$dlopen" && func_append dlfiles " $dlopen" test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" fi if test conv = "$pass"; then # Only check for convenience libraries deplibs="$lib $deplibs" if test -z "$libdir"; then if test -z "$old_library"; then func_fatal_error "cannot find name of link library for '$lib'" fi # It is a libtool convenience library, so add in its objects. func_append convenience " $ladir/$objdir/$old_library" func_append old_convenience " $ladir/$objdir/$old_library" tmp_libs= for deplib in $dependency_libs; do deplibs="$deplib $deplibs" if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done elif test prog != "$linkmode" && test lib != "$linkmode"; then func_fatal_error "'$lib' is not a convenience library" fi continue fi # $pass = conv # Get the name of the library we link against. linklib= if test -n "$old_library" && { test yes = "$prefer_static_libs" || test built,no = "$prefer_static_libs,$installed"; }; then linklib=$old_library else for l in $old_library $library_names; do linklib=$l done fi if test -z "$linklib"; then func_fatal_error "cannot find name of link library for '$lib'" fi # This library was specified with -dlopen. 
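# In sketch form: a -dlopen'ed libtool library is normally just recorded in
# $newdlfiles here, so the program can load its $dlname at run time (e.g.
# through libltdl).  Only when that is not possible -- no shared dlname, no
# dlopen support on the host, or a static-only build -- does it fall back to
# -dlpreopen behaviour and get preloaded via $dlprefiles, together with its
# dependency_libs.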
if test dlopen = "$pass"; then test -z "$libdir" \ && func_fatal_error "cannot -dlopen a convenience library: '$lib'" if test -z "$dlname" || test yes != "$dlopen_support" || test no = "$build_libtool_libs" then # If there is no dlname, no dlopen support or we're linking # statically, we need to preload. We also need to preload any # dependent libraries so libltdl's deplib preloader doesn't # bomb out in the load deplibs phase. func_append dlprefiles " $lib $dependency_libs" else func_append newdlfiles " $lib" fi continue fi # $pass = dlopen # We need an absolute path. case $ladir in [\\/]* | [A-Za-z]:[\\/]*) abs_ladir=$ladir ;; *) abs_ladir=`cd "$ladir" && pwd` if test -z "$abs_ladir"; then func_warning "cannot determine absolute directory name of '$ladir'" func_warning "passing it literally to the linker, although it might fail" abs_ladir=$ladir fi ;; esac func_basename "$lib" laname=$func_basename_result # Find the relevant object directory and library name. if test yes = "$installed"; then if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then func_warning "library '$lib' was moved." dir=$ladir absdir=$abs_ladir libdir=$abs_ladir else dir=$lt_sysroot$libdir absdir=$lt_sysroot$libdir fi test yes = "$hardcode_automatic" && avoidtemprpath=yes else if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then dir=$ladir absdir=$abs_ladir # Remove this search path later func_append notinst_path " $abs_ladir" else dir=$ladir/$objdir absdir=$abs_ladir/$objdir # Remove this search path later func_append notinst_path " $abs_ladir" fi fi # $installed = yes func_stripname 'lib' '.la' "$laname" name=$func_stripname_result # This library was specified with -dlpreopen. if test dlpreopen = "$pass"; then if test -z "$libdir" && test prog = "$linkmode"; then func_fatal_error "only libraries may -dlpreopen a convenience library: '$lib'" fi case $host in # special handling for platforms with PE-DLLs. *cygwin* | *mingw* | *cegcc* ) # Linker will automatically link against shared library if both # static and shared are present. Therefore, ensure we extract # symbols from the import library if a shared library is present # (otherwise, the dlopen module name will be incorrect). We do # this by putting the import library name into $newdlprefiles. # We recover the dlopen module name by 'saving' the la file # name in a special purpose variable, and (later) extracting the # dlname from the la file. if test -n "$dlname"; then func_tr_sh "$dir/$linklib" eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" func_append newdlprefiles " $dir/$linklib" else func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" fi ;; * ) # Prefer using a static library (so that no silly _DYNAMIC symbols # are required to link). if test -n "$old_library"; then func_append newdlprefiles " $dir/$old_library" # Keep a list of preopened convenience libraries to check # that they are being used correctly in the link pass. test -z "$libdir" && \ func_append dlpreconveniencelibs " $dir/$old_library" # Otherwise, use the dlname, so that lt_dlopen finds it. 
elif test -n "$dlname"; then func_append newdlprefiles " $dir/$dlname" else func_append newdlprefiles " $dir/$linklib" fi ;; esac fi # $pass = dlpreopen if test -z "$libdir"; then # Link the convenience library if test lib = "$linkmode"; then deplibs="$dir/$old_library $deplibs" elif test prog,link = "$linkmode,$pass"; then compile_deplibs="$dir/$old_library $compile_deplibs" finalize_deplibs="$dir/$old_library $finalize_deplibs" else deplibs="$lib $deplibs" # used for prog,scan pass fi continue fi if test prog = "$linkmode" && test link != "$pass"; then func_append newlib_search_path " $ladir" deplibs="$lib $deplibs" linkalldeplibs=false if test no != "$link_all_deplibs" || test -z "$library_names" || test no = "$build_libtool_libs"; then linkalldeplibs=: fi tmp_libs= for deplib in $dependency_libs; do case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result" func_append newlib_search_path " $func_resolve_sysroot_result" ;; esac # Need to link against all dependency_libs? if $linkalldeplibs; then deplibs="$deplib $deplibs" else # Need to hardcode shared library paths # or/and link against static libraries newdependency_libs="$deplib $newdependency_libs" fi if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $deplib "*) func_append specialdeplibs " $deplib" ;; esac fi func_append tmp_libs " $deplib" done # for deplib continue fi # $linkmode = prog... if test prog,link = "$linkmode,$pass"; then if test -n "$library_names" && { { test no = "$prefer_static_libs" || test built,yes = "$prefer_static_libs,$installed"; } || test -z "$old_library"; }; then # We need to hardcode the library path if test -n "$shlibpath_var" && test -z "$avoidtemprpath"; then # Make sure the rpath contains only unique directories. case $temp_rpath: in *"$absdir:"*) ;; *) func_append temp_rpath "$absdir:" ;; esac fi # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi # $linkmode,$pass = prog,link... if $alldeplibs && { test pass_all = "$deplibs_check_method" || { test yes = "$build_libtool_libs" && test -n "$library_names"; }; }; then # We only need to search for static libraries continue fi fi link_static=no # Whether the deplib will be linked statically use_static_libs=$prefer_static_libs if test built = "$use_static_libs" && test yes = "$installed"; then use_static_libs=no fi if test -n "$library_names" && { test no = "$use_static_libs" || test -z "$old_library"; }; then case $host in *cygwin* | *mingw* | *cegcc* | *os2*) # No point in relinking DLLs because paths are not encoded func_append notinst_deplibs " $lib" need_relink=no ;; *) if test no = "$installed"; then func_append notinst_deplibs " $lib" need_relink=yes fi ;; esac # This is a shared library # Warn about portability, can't link against -module's on some # systems (darwin). Don't bleat about dlopened modules though! 
dlopenmodule= for dlpremoduletest in $dlprefiles; do if test "X$dlpremoduletest" = "X$lib"; then dlopenmodule=$dlpremoduletest break fi done if test -z "$dlopenmodule" && test yes = "$shouldnotlink" && test link = "$pass"; then echo if test prog = "$linkmode"; then $ECHO "*** Warning: Linking the executable $output against the loadable module" else $ECHO "*** Warning: Linking the shared library $output against the loadable module" fi $ECHO "*** $linklib is not portable!" fi if test lib = "$linkmode" && test yes = "$hardcode_into_libs"; then # Hardcode the library path. # Skip directories that are in the system default run-time # search path. case " $sys_lib_dlsearch_path " in *" $absdir "*) ;; *) case "$compile_rpath " in *" $absdir "*) ;; *) func_append compile_rpath " $absdir" ;; esac ;; esac case " $sys_lib_dlsearch_path " in *" $libdir "*) ;; *) case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac ;; esac fi if test -n "$old_archive_from_expsyms_cmds"; then # figure out the soname set dummy $library_names shift realname=$1 shift libname=`eval "\\$ECHO \"$libname_spec\""` # use dlname if we got it. it's perfectly good, no? if test -n "$dlname"; then soname=$dlname elif test -n "$soname_spec"; then # bleh windows case $host in *cygwin* | mingw* | *cegcc* | *os2*) func_arith $current - $age major=$func_arith_result versuffix=-$major ;; esac eval soname=\"$soname_spec\" else soname=$realname fi # Make a new name for the extract_expsyms_cmds to use soroot=$soname func_basename "$soroot" soname=$func_basename_result func_stripname 'lib' '.dll' "$soname" newlib=libimp-$func_stripname_result.a # If the library has no export list, then create one now if test -f "$output_objdir/$soname-def"; then : else func_verbose "extracting exported symbol list from '$soname'" func_execute_cmds "$extract_expsyms_cmds" 'exit $?' fi # Create $newlib if test -f "$output_objdir/$newlib"; then :; else func_verbose "generating import library for '$soname'" func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
fi # make sure the library variables are pointing to the new library dir=$output_objdir linklib=$newlib fi # test -n "$old_archive_from_expsyms_cmds" if test prog = "$linkmode" || test relink != "$opt_mode"; then add_shlibpath= add_dir= add= lib_linked=yes case $hardcode_action in immediate | unsupported) if test no = "$hardcode_direct"; then add=$dir/$linklib case $host in *-*-sco3.2v5.0.[024]*) add_dir=-L$dir ;; *-*-sysv4*uw2*) add_dir=-L$dir ;; *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ *-*-unixware7*) add_dir=-L$dir ;; *-*-darwin* ) # if the lib is a (non-dlopened) module then we cannot # link against it, someone is ignoring the earlier warnings if /usr/bin/file -L $add 2> /dev/null | $GREP ": [^:]* bundle" >/dev/null; then if test "X$dlopenmodule" != "X$lib"; then $ECHO "*** Warning: lib $linklib is a module, not a shared library" if test -z "$old_library"; then echo echo "*** And there doesn't seem to be a static archive available" echo "*** The link will probably fail, sorry" else add=$dir/$old_library fi elif test -n "$old_library"; then add=$dir/$old_library fi fi esac elif test no = "$hardcode_minus_L"; then case $host in *-*-sunos*) add_shlibpath=$dir ;; esac add_dir=-L$dir add=-l$name elif test no = "$hardcode_shlibpath_var"; then add_shlibpath=$dir add=-l$name else lib_linked=no fi ;; relink) if test yes = "$hardcode_direct" && test no = "$hardcode_direct_absolute"; then add=$dir/$linklib elif test yes = "$hardcode_minus_L"; then add_dir=-L$absdir # Try looking first in the location we're being installed to. if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add=-l$name elif test yes = "$hardcode_shlibpath_var"; then add_shlibpath=$dir add=-l$name else lib_linked=no fi ;; *) lib_linked=no ;; esac if test yes != "$lib_linked"; then func_fatal_configuration "unsupported hardcode properties" fi if test -n "$add_shlibpath"; then case :$compile_shlibpath: in *":$add_shlibpath:"*) ;; *) func_append compile_shlibpath "$add_shlibpath:" ;; esac fi if test prog = "$linkmode"; then test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" test -n "$add" && compile_deplibs="$add $compile_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" if test yes != "$hardcode_direct" && test yes != "$hardcode_minus_L" && test yes = "$hardcode_shlibpath_var"; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac fi fi fi if test prog = "$linkmode" || test relink = "$opt_mode"; then add_shlibpath= add_dir= add= # Finalize command for both is simple: just hardcode it. if test yes = "$hardcode_direct" && test no = "$hardcode_direct_absolute"; then add=$libdir/$linklib elif test yes = "$hardcode_minus_L"; then add_dir=-L$libdir add=-l$name elif test yes = "$hardcode_shlibpath_var"; then case :$finalize_shlibpath: in *":$libdir:"*) ;; *) func_append finalize_shlibpath "$libdir:" ;; esac add=-l$name elif test yes = "$hardcode_automatic"; then if test -n "$inst_prefix_dir" && test -f "$inst_prefix_dir$libdir/$linklib"; then add=$inst_prefix_dir$libdir/$linklib else add=$libdir/$linklib fi else # We cannot seem to hardcode it, guess we'll fake it. add_dir=-L$libdir # Try looking first in the location we're being installed to. 
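# ($inst_prefix_dir is passed down when relinking at install time into a
# staged tree, typically the DESTDIR root.  For example, with
# inst_prefix_dir=/tmp/stage and libdir=/usr/lib, the staged copy in
# -L/tmp/stage/usr/lib is searched before the final -L/usr/lib.)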
if test -n "$inst_prefix_dir"; then case $libdir in [\\/]*) func_append add_dir " -L$inst_prefix_dir$libdir" ;; esac fi add=-l$name fi if test prog = "$linkmode"; then test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" test -n "$add" && finalize_deplibs="$add $finalize_deplibs" else test -n "$add_dir" && deplibs="$add_dir $deplibs" test -n "$add" && deplibs="$add $deplibs" fi fi elif test prog = "$linkmode"; then # Here we assume that one of hardcode_direct or hardcode_minus_L # is not unsupported. This is valid on all known static and # shared platforms. if test unsupported != "$hardcode_direct"; then test -n "$old_library" && linklib=$old_library compile_deplibs="$dir/$linklib $compile_deplibs" finalize_deplibs="$dir/$linklib $finalize_deplibs" else compile_deplibs="-l$name -L$dir $compile_deplibs" finalize_deplibs="-l$name -L$dir $finalize_deplibs" fi elif test yes = "$build_libtool_libs"; then # Not a shared library if test pass_all != "$deplibs_check_method"; then # We're trying link a shared library against a static one # but the system doesn't support it. # Just print a warning and add the library to dependency_libs so # that the program can be linked against the static library. echo $ECHO "*** Warning: This system cannot link to static lib archive $lib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have." if test yes = "$module"; then echo "*** But as you try to build a module library, libtool will still create " echo "*** a static module, that should work as long as the dlopening application" echo "*** is linked with the -dlopen flag to resolve symbols at runtime." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using 'nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** 'nm' from GNU binutils and a full rebuild may help." fi if test no = "$build_old_libs"; then build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi else deplibs="$dir/$old_library $deplibs" link_static=yes fi fi # link shared/static library? if test lib = "$linkmode"; then if test -n "$dependency_libs" && { test yes != "$hardcode_into_libs" || test yes = "$build_old_libs" || test yes = "$link_static"; }; then # Extract -R from dependency_libs temp_deplibs= for libdir in $dependency_libs; do case $libdir in -R*) func_stripname '-R' '' "$libdir" temp_xrpath=$func_stripname_result case " $xrpath " in *" $temp_xrpath "*) ;; *) func_append xrpath " $temp_xrpath";; esac;; *) func_append temp_deplibs " $libdir";; esac done dependency_libs=$temp_deplibs fi func_append newlib_search_path " $absdir" # Link against this library test no = "$link_static" && newdependency_libs="$abs_ladir/$laname $newdependency_libs" # ... 
and its dependency_libs tmp_libs= for deplib in $dependency_libs; do newdependency_libs="$deplib $newdependency_libs" case $deplib in -L*) func_stripname '-L' '' "$deplib" func_resolve_sysroot "$func_stripname_result";; *) func_resolve_sysroot "$deplib" ;; esac if $opt_preserve_dup_deps; then case "$tmp_libs " in *" $func_resolve_sysroot_result "*) func_append specialdeplibs " $func_resolve_sysroot_result" ;; esac fi func_append tmp_libs " $func_resolve_sysroot_result" done if test no != "$link_all_deplibs"; then # Add the search paths of all dependency libraries for deplib in $dependency_libs; do path= case $deplib in -L*) path=$deplib ;; *.la) func_resolve_sysroot "$deplib" deplib=$func_resolve_sysroot_result func_dirname "$deplib" "" "." dir=$func_dirname_result # We need an absolute path. case $dir in [\\/]* | [A-Za-z]:[\\/]*) absdir=$dir ;; *) absdir=`cd "$dir" && pwd` if test -z "$absdir"; then func_warning "cannot determine absolute directory name of '$dir'" absdir=$dir fi ;; esac if $GREP "^installed=no" $deplib > /dev/null; then case $host in *-*-darwin*) depdepl= eval deplibrary_names=`$SED -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` if test -n "$deplibrary_names"; then for tmp in $deplibrary_names; do depdepl=$tmp done if test -f "$absdir/$objdir/$depdepl"; then depdepl=$absdir/$objdir/$depdepl darwin_install_name=`$OTOOL -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` if test -z "$darwin_install_name"; then darwin_install_name=`$OTOOL64 -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` fi func_append compiler_flags " $wl-dylib_file $wl$darwin_install_name:$depdepl" func_append linker_flags " -dylib_file $darwin_install_name:$depdepl" path= fi fi ;; *) path=-L$absdir/$objdir ;; esac else eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` test -z "$libdir" && \ func_fatal_error "'$deplib' is not a valid libtool archive" test "$absdir" != "$libdir" && \ func_warning "'$deplib' seems to be moved" path=-L$absdir fi ;; esac case " $deplibs " in *" $path "*) ;; *) deplibs="$path $deplibs" ;; esac done fi # link_all_deplibs != no fi # linkmode = lib done # for deplib in $libs if test link = "$pass"; then if test prog = "$linkmode"; then compile_deplibs="$new_inherited_linker_flags $compile_deplibs" finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" else compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` fi fi dependency_libs=$newdependency_libs if test dlpreopen = "$pass"; then # Link the dlpreopened libraries before other libraries for deplib in $save_deplibs; do deplibs="$deplib $deplibs" done fi if test dlopen != "$pass"; then test conv = "$pass" || { # Make sure lib_search_path contains only unique directories. 
lib_search_path= for dir in $newlib_search_path; do case "$lib_search_path " in *" $dir "*) ;; *) func_append lib_search_path " $dir" ;; esac done newlib_search_path= } if test prog,link = "$linkmode,$pass"; then vars="compile_deplibs finalize_deplibs" else vars=deplibs fi for var in $vars dependency_libs; do # Add libraries to $var in reverse order eval tmp_libs=\"\$$var\" new_libs= for deplib in $tmp_libs; do # FIXME: Pedantically, this is the right thing to do, so # that some nasty dependency loop isn't accidentally # broken: #new_libs="$deplib $new_libs" # Pragmatically, this seems to cause very few problems in # practice: case $deplib in -L*) new_libs="$deplib $new_libs" ;; -R*) ;; *) # And here is the reason: when a library appears more # than once as an explicit dependence of a library, or # is implicitly linked in more than once by the # compiler, it is considered special, and multiple # occurrences thereof are not removed. Compare this # with having the same library being listed as a # dependency of multiple other libraries: in this case, # we know (pedantically, we assume) the library does not # need to be listed more than once, so we keep only the # last copy. This is not always right, but it is rare # enough that we require users that really mean to play # such unportable linking tricks to link the library # using -Wl,-lname, so that libtool does not consider it # for duplicate removal. case " $specialdeplibs " in *" $deplib "*) new_libs="$deplib $new_libs" ;; *) case " $new_libs " in *" $deplib "*) ;; *) new_libs="$deplib $new_libs" ;; esac ;; esac ;; esac done tmp_libs= for deplib in $new_libs; do case $deplib in -L*) case " $tmp_libs " in *" $deplib "*) ;; *) func_append tmp_libs " $deplib" ;; esac ;; *) func_append tmp_libs " $deplib" ;; esac done eval $var=\"$tmp_libs\" done # for var fi # Add Sun CC postdeps if required: test CXX = "$tagname" && { case $host_os in linux*) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 func_suncc_cstd_abi if test no != "$suncc_use_cstd_abi"; then func_append postdeps ' -library=Cstd -library=Crun' fi ;; esac ;; solaris*) func_cc_basename "$CC" case $func_cc_basename_result in CC* | sunCC*) func_suncc_cstd_abi if test no != "$suncc_use_cstd_abi"; then func_append postdeps ' -library=Cstd -library=Crun' fi ;; esac ;; esac } # Last step: remove runtime libs from dependency_libs # (they stay in deplibs) tmp_libs= for i in $dependency_libs; do case " $predeps $postdeps $compiler_lib_search_path " in *" $i "*) i= ;; esac if test -n "$i"; then func_append tmp_libs " $i" fi done dependency_libs=$tmp_libs done # for pass if test prog = "$linkmode"; then dlfiles=$newdlfiles fi if test prog = "$linkmode" || test lib = "$linkmode"; then dlprefiles=$newdlprefiles fi case $linkmode in oldlib) if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then func_warning "'-dlopen' is ignored for archives" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "'-l' and '-L' are ignored for archives" ;; esac test -n "$rpath" && \ func_warning "'-rpath' is ignored for archives" test -n "$xrpath" && \ func_warning "'-R' is ignored for archives" test -n "$vinfo" && \ func_warning "'-version-info/-version-number' is ignored for archives" test -n "$release" && \ func_warning "'-release' is ignored for archives" test -n "$export_symbols$export_symbols_regex" && \ func_warning "'-export-symbols' is ignored for archives" # Now set the variables for building old libraries. 
build_libtool_libs=no oldlibs=$output func_append objs "$old_deplibs" ;; lib) # Make sure we only generate libraries of the form 'libNAME.la'. case $outputname in lib*) func_stripname 'lib' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" ;; *) test no = "$module" \ && func_fatal_help "libtool library '$output' must begin with 'lib'" if test no != "$need_lib_prefix"; then # Add the "lib" prefix for modules if required func_stripname '' '.la' "$outputname" name=$func_stripname_result eval shared_ext=\"$shrext_cmds\" eval libname=\"$libname_spec\" else func_stripname '' '.la' "$outputname" libname=$func_stripname_result fi ;; esac if test -n "$objs"; then if test pass_all != "$deplibs_check_method"; then func_fatal_error "cannot build libtool library '$output' from non-libtool objects on this host:$objs" else echo $ECHO "*** Warning: Linking the shared library $output against the non-libtool" $ECHO "*** objects $objs is not portable!" func_append libobjs " $objs" fi fi test no = "$dlself" \ || func_warning "'-dlopen self' is ignored for libtool libraries" set dummy $rpath shift test 1 -lt "$#" \ && func_warning "ignoring multiple '-rpath's for a libtool library" install_libdir=$1 oldlibs= if test -z "$rpath"; then if test yes = "$build_libtool_libs"; then # Building a libtool convenience library. # Some compilers have problems with a '.al' extension so # convenience libraries should have the same extension an # archive normally would. oldlibs="$output_objdir/$libname.$libext $oldlibs" build_libtool_libs=convenience build_old_libs=yes fi test -n "$vinfo" && \ func_warning "'-version-info/-version-number' is ignored for convenience libraries" test -n "$release" && \ func_warning "'-release' is ignored for convenience libraries" else # Parse the version information argument. save_ifs=$IFS; IFS=: set dummy $vinfo 0 0 0 shift IFS=$save_ifs test -n "$7" && \ func_fatal_help "too many parameters to '-version-info'" # convert absolute version numbers to libtool ages # this retains compatibility with .la files and attempts # to make the code below a bit more comprehensible case $vinfo_number in yes) number_major=$1 number_minor=$2 number_revision=$3 # # There are really only two kinds -- those that # use the current revision as the major version # and those that subtract age and use age as # a minor version. But, then there is irix # that has an extra 1 added just for fun # case $version_type in # correct linux to gnu/linux during the next big refactor darwin|freebsd-elf|linux|osf|windows|none) func_arith $number_major + $number_minor current=$func_arith_result age=$number_minor revision=$number_revision ;; freebsd-aout|qnx|sunos) current=$number_major revision=$number_minor age=0 ;; irix|nonstopux) func_arith $number_major + $number_minor current=$func_arith_result age=$number_minor revision=$number_minor lt_irix_increment=no ;; *) func_fatal_configuration "$modename: unknown library version type '$version_type'" ;; esac ;; no) current=$1 revision=$2 age=$3 ;; esac # Check that each of the things are valid numbers. 
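# A worked example of the CURRENT:REVISION:AGE scheme handled below (numbers
# are illustrative): -version-info 3:2:1 gives current=3, revision=2, age=1.
# On linux/gnu-style hosts the code further down computes
# major = current - age = 2 and versuffix = .2.1.2, so the shared object
# typically ends up as libNAME.so.2.1.2 with soname libNAME.so.2; windows
# keeps only current - age and uses a -2 suffix instead, because of DOS 8.3
# name limits.  Other hosts differ as per the case statement below.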
case $current in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "CURRENT '$current' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac case $revision in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "REVISION '$revision' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac case $age in 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; *) func_error "AGE '$age' must be a nonnegative integer" func_fatal_error "'$vinfo' is not valid version information" ;; esac if test "$age" -gt "$current"; then func_error "AGE '$age' is greater than the current interface number '$current'" func_fatal_error "'$vinfo' is not valid version information" fi # Calculate the version variables. major= versuffix= verstring= case $version_type in none) ;; darwin) # Like Linux, but with the current version available in # verstring for coding it into the library header func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision # Darwin ld doesn't like 0 for these options... func_arith $current + 1 minor_current=$func_arith_result xlcverstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" # On Darwin other compilers case $CC in nagfor*) verstring="$wl-compatibility_version $wl$minor_current $wl-current_version $wl$minor_current.$revision" ;; *) verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" ;; esac ;; freebsd-aout) major=.$current versuffix=.$current.$revision ;; freebsd-elf) func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision ;; irix | nonstopux) if test no = "$lt_irix_increment"; then func_arith $current - $age else func_arith $current - $age + 1 fi major=$func_arith_result case $version_type in nonstopux) verstring_prefix=nonstopux ;; *) verstring_prefix=sgi ;; esac verstring=$verstring_prefix$major.$revision # Add in all the interfaces that we are compatible with. loop=$revision while test 0 -ne "$loop"; do func_arith $revision - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring=$verstring_prefix$major.$iface:$verstring done # Before this point, $major must not contain '.'. major=.$major versuffix=$major.$revision ;; linux) # correct to gnu/linux during the next big refactor func_arith $current - $age major=.$func_arith_result versuffix=$major.$age.$revision ;; osf) func_arith $current - $age major=.$func_arith_result versuffix=.$current.$age.$revision verstring=$current.$age.$revision # Add in all the interfaces that we are compatible with. loop=$age while test 0 -ne "$loop"; do func_arith $current - $loop iface=$func_arith_result func_arith $loop - 1 loop=$func_arith_result verstring=$verstring:$iface.0 done # Make executables depend on our current version. func_append verstring ":$current.0" ;; qnx) major=.$current versuffix=.$current ;; sco) major=.$current versuffix=.$current ;; sunos) major=.$current versuffix=.$current.$revision ;; windows) # Use '-' rather than '.', since we only want one # extension on DOS 8.3 file systems. 
func_arith $current - $age major=$func_arith_result versuffix=-$major ;; *) func_fatal_configuration "unknown library version type '$version_type'" ;; esac # Clear the version info if we defaulted, and they specified a release. if test -z "$vinfo" && test -n "$release"; then major= case $version_type in darwin) # we can't check for "0.0" in archive_cmds due to quoting # problems, so we reset it completely verstring= ;; *) verstring=0.0 ;; esac if test no = "$need_version"; then versuffix= else versuffix=.0.0 fi fi # Remove version info from name if versioning should be avoided if test yes,no = "$avoid_version,$need_version"; then major= versuffix= verstring= fi # Check to see if the archive will have undefined symbols. if test yes = "$allow_undefined"; then if test unsupported = "$allow_undefined_flag"; then if test yes = "$build_old_libs"; then func_warning "undefined symbols not allowed in $host shared libraries; building static only" build_libtool_libs=no else func_fatal_error "can't build $host shared library unless -no-undefined is specified" fi fi else # Don't allow undefined symbols. allow_undefined_flag=$no_undefined_flag fi fi func_generate_dlsyms "$libname" "$libname" : func_append libobjs " $symfileobj" test " " = "$libobjs" && libobjs= if test relink != "$opt_mode"; then # Remove our outputs, but don't remove object files since they # may have been created when compiling PIC objects. removelist= tempremovelist=`$ECHO "$output_objdir/*"` for p in $tempremovelist; do case $p in *.$objext | *.gcno) ;; $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/$libname$release.*) if test -n "$precious_files_regex"; then if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 then continue fi fi func_append removelist " $p" ;; *) ;; esac done test -n "$removelist" && \ func_show_eval "${RM}r \$removelist" fi # Now set the variables for building old libraries. if test yes = "$build_old_libs" && test convenience != "$build_libtool_libs"; then func_append oldlibs " $output_objdir/$libname.$libext" # Transform .lo files to .o files. oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; $lo2o" | $NL2SP` fi # Eliminate all temporary directories. #for path in $notinst_path; do # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` #done if test -n "$xrpath"; then # If the user specified any rpath flags, then add them. temp_xrpath= for libdir in $xrpath; do func_replace_sysroot "$libdir" func_append temp_xrpath " -R$func_replace_sysroot_result" case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done if test yes != "$hardcode_into_libs" || test yes = "$build_old_libs"; then dependency_libs="$temp_xrpath $dependency_libs" fi fi # Make sure dlfiles contains only unique files that won't be dlpreopened old_dlfiles=$dlfiles dlfiles= for lib in $old_dlfiles; do case " $dlprefiles $dlfiles " in *" $lib "*) ;; *) func_append dlfiles " $lib" ;; esac done # Make sure dlprefiles contains only unique files old_dlprefiles=$dlprefiles dlprefiles= for lib in $old_dlprefiles; do case "$dlprefiles " in *" $lib "*) ;; *) func_append dlprefiles " $lib" ;; esac done if test yes = "$build_libtool_libs"; then if test -n "$rpath"; then case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*) # these systems don't actually have a c library (as such)! 
;; *-*-rhapsody* | *-*-darwin1.[012]) # Rhapsody C library is in the System framework func_append deplibs " System.ltframework" ;; *-*-netbsd*) # Don't link with libc until the a.out ld.so is fixed. ;; *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) # Do not include libc due to us having libc/libc_r. ;; *-*-sco3.2v5* | *-*-sco5v6*) # Causes problems with __ctype ;; *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) # Compiler inserts libc in the correct place for threads to work ;; *) # Add libc to deplibs on all other systems if necessary. if test yes = "$build_libtool_need_lc"; then func_append deplibs " -lc" fi ;; esac fi # Transform deplibs into only deplibs that can be linked in shared. name_save=$name libname_save=$libname release_save=$release versuffix_save=$versuffix major_save=$major # I'm not sure if I'm treating the release correctly. I think # release should show up in the -l (ie -lgmp5) so we don't want to # add it in twice. Is that correct? release= versuffix= major= newdeplibs= droppeddeps=no case $deplibs_check_method in pass_all) # Don't check for shared/static. Everything works. # This might be a little naive. We might want to check # whether the library exists or not. But this is on # osf3 & osf4 and I'm not really sure... Just # implementing what was already the behavior. newdeplibs=$deplibs ;; test_compile) # This code stresses the "libraries are programs" paradigm to its # limits. Maybe even breaks it. We compile a program, linking it # against the deplibs as a proxy for the library. Then we can check # whether they linked in statically or dynamically with ldd. $opt_dry_run || $RM conftest.c cat > conftest.c </dev/null` $nocaseglob else potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null` fi for potent_lib in $potential_libs; do # Follow soft links. if ls -lLd "$potent_lib" 2>/dev/null | $GREP " -> " >/dev/null; then continue fi # The statement above tries to avoid entering an # endless loop below, in case of cyclic links. # We might still enter an endless loop, since a link # loop can be closed while we follow links, # but so what? potlib=$potent_lib while test -h "$potlib" 2>/dev/null; do potliblink=`ls -ld $potlib | $SED 's/.* -> //'` case $potliblink in [\\/]* | [A-Za-z]:[\\/]*) potlib=$potliblink;; *) potlib=`$ECHO "$potlib" | $SED 's|[^/]*$||'`"$potliblink";; esac done if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | $SED -e 10q | $EGREP "$file_magic_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib= break 2 fi done done fi if test -n "$a_deplib"; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib"; then $ECHO "*** with $libname but no candidates were found. (...for file magic test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a file magic. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. 
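# (The surrounding case decides, per $deplibs_check_method from the libtool
# configuration, how to verify that each declared dependency really has a
# shared version: "pass_all" trusts everything, "test_compile" links a
# throwaway program and inspects it with ldd, "file_magic" -- partly shown
# above -- inspects candidate files with $file_magic_cmd, "match_pattern"
# below matches their names against a regex, and "none"/"unknown" simply
# drop the declared inter-library dependencies.)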
;; match_pattern*) set dummy $deplibs_check_method; shift match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` for a_deplib in $deplibs; do case $a_deplib in -l*) func_stripname -l '' "$a_deplib" name=$func_stripname_result if test yes = "$allow_libtool_libs_with_static_runtimes"; then case " $predeps $postdeps " in *" $a_deplib "*) func_append newdeplibs " $a_deplib" a_deplib= ;; esac fi if test -n "$a_deplib"; then libname=`eval "\\$ECHO \"$libname_spec\""` for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do potential_libs=`ls $i/$libname[.-]* 2>/dev/null` for potent_lib in $potential_libs; do potlib=$potent_lib # see symlink-check above in file_magic test if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ $EGREP "$match_pattern_regex" > /dev/null; then func_append newdeplibs " $a_deplib" a_deplib= break 2 fi done done fi if test -n "$a_deplib"; then droppeddeps=yes echo $ECHO "*** Warning: linker path does not have real file for library $a_deplib." echo "*** I have the capability to make that library automatically link in when" echo "*** you link to this library. But I can only do this if you have a" echo "*** shared version of the library, which you do not appear to have" echo "*** because I did check the linker path looking for a file starting" if test -z "$potlib"; then $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" else $ECHO "*** with $libname and none of the candidates passed a file format test" $ECHO "*** using a regex pattern. Last file checked: $potlib" fi fi ;; *) # Add a -L argument. func_append newdeplibs " $a_deplib" ;; esac done # Gone through all deplibs. ;; none | unknown | *) newdeplibs= tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` if test yes = "$allow_libtool_libs_with_static_runtimes"; then for i in $predeps $postdeps; do # can't use Xsed below, because $i might contain '/' tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s|$i||"` done fi case $tmp_deplibs in *[!\ \ ]*) echo if test none = "$deplibs_check_method"; then echo "*** Warning: inter-library dependencies are not supported in this platform." else echo "*** Warning: inter-library dependencies are not known to be supported." fi echo "*** All declared inter-library dependencies are being dropped." droppeddeps=yes ;; esac ;; esac versuffix=$versuffix_save major=$major_save release=$release_save libname=$libname_save name=$name_save case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library with the System framework newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac if test yes = "$droppeddeps"; then if test yes = "$module"; then echo echo "*** Warning: libtool could not satisfy all declared inter-library" $ECHO "*** dependencies of module $libname. Therefore, libtool will create" echo "*** a static module, that should work as long as the dlopening" echo "*** application is linked with the -dlopen flag." if test -z "$global_symbol_pipe"; then echo echo "*** However, this would only work if libtool was able to extract symbol" echo "*** lists from a program, using 'nm' or equivalent, but libtool could" echo "*** not find such a program. So, this module is probably useless." echo "*** 'nm' from GNU binutils and a full rebuild may help." 
fi if test no = "$build_old_libs"; then oldlibs=$output_objdir/$libname.$libext build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi else echo "*** The inter-library dependencies that have been dropped here will be" echo "*** automatically added whenever a program is linked with this library" echo "*** or is declared to -dlopen it." if test no = "$allow_undefined"; then echo echo "*** Since this library must not contain undefined symbols," echo "*** because either the platform does not support them or" echo "*** it was explicitly requested with -no-undefined," echo "*** libtool will only create a static version of it." if test no = "$build_old_libs"; then oldlibs=$output_objdir/$libname.$libext build_libtool_libs=module build_old_libs=yes else build_libtool_libs=no fi fi fi fi # Done checking deplibs! deplibs=$newdeplibs fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" case $host in *-*-darwin*) newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done deplibs=$new_libs # All the library-specific variables (install_libdir is set above). library_names= old_library= dlname= # Test again, we may have decided not to build it any more if test yes = "$build_libtool_libs"; then # Remove $wl instances when linking with ld. # FIXME: should test the right _cmds variable. case $archive_cmds in *\$LD\ *) wl= ;; esac if test yes = "$hardcode_into_libs"; then # Hardcode the library paths hardcode_libdirs= dep_rpath= rpath=$finalize_rpath test relink = "$opt_mode" || rpath=$compile_rpath$rpath for libdir in $rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then func_replace_sysroot "$libdir" libdir=$func_replace_sysroot_result if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append dep_rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" fi if test -n "$runpath_var" && test -n "$perm_rpath"; then # We should set the runpath_var. 
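# (On hosts without a per-directory hardcode flag spec, the accumulated
# directories are instead exported through $runpath_var -- commonly
# LD_RUN_PATH -- so the linker itself embeds the run-time search path.)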
rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" fi test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" fi shlibpath=$finalize_shlibpath test relink = "$opt_mode" || shlibpath=$compile_shlibpath$shlibpath if test -n "$shlibpath"; then eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" fi # Get the real and link names of the library. eval shared_ext=\"$shrext_cmds\" eval library_names=\"$library_names_spec\" set dummy $library_names shift realname=$1 shift if test -n "$soname_spec"; then eval soname=\"$soname_spec\" else soname=$realname fi if test -z "$dlname"; then dlname=$soname fi lib=$output_objdir/$realname linknames= for link do func_append linknames " $link" done # Use standard objects if they are pic test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` test "X$libobjs" = "X " && libobjs= delfiles= if test -n "$export_symbols" && test -n "$include_expsyms"; then $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" export_symbols=$output_objdir/$libname.uexp func_append delfiles " $export_symbols" fi orig_export_symbols= case $host_os in cygwin* | mingw* | cegcc*) if test -n "$export_symbols" && test -z "$export_symbols_regex"; then # exporting using user supplied symfile func_dll_def_p "$export_symbols" || { # and it's NOT already a .def file. Must figure out # which of the given symbols are data symbols and tag # them as such. So, trigger use of export_symbols_cmds. # export_symbols gets reassigned inside the "prepare # the list of exported symbols" if statement, so the # include_expsyms logic still works. orig_export_symbols=$export_symbols export_symbols= always_export_symbols=yes } fi ;; esac # Prepare the list of exported symbols if test -z "$export_symbols"; then if test yes = "$always_export_symbols" || test -n "$export_symbols_regex"; then func_verbose "generating symbol list for '$libname.la'" export_symbols=$output_objdir/$libname.exp $opt_dry_run || $RM $export_symbols cmds=$export_symbols_cmds save_ifs=$IFS; IFS='~' for cmd1 in $cmds; do IFS=$save_ifs # Take the normal branch if the nm_file_list_spec branch # doesn't work or if tool conversion is not needed. case $nm_file_list_spec~$to_tool_file_cmd in *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) try_normal_branch=yes eval cmd=\"$cmd1\" func_len " $cmd" len=$func_len_result ;; *) try_normal_branch=no ;; esac if test yes = "$try_normal_branch" \ && { test "$len" -lt "$max_cmd_len" \ || test "$max_cmd_len" -le -1; } then func_show_eval "$cmd" 'exit $?' skipped_export=false elif test -n "$nm_file_list_spec"; then func_basename "$output" output_la=$func_basename_result save_libobjs=$libobjs save_output=$output output=$output_objdir/$output_la.nm func_to_tool_file "$output" libobjs=$nm_file_list_spec$func_to_tool_file_result func_append delfiles " $output" func_verbose "creating $NM input file list: $output" for obj in $save_libobjs; do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > "$output" eval cmd=\"$cmd1\" func_show_eval "$cmd" 'exit $?' output=$save_output libobjs=$save_libobjs skipped_export=false else # The command line is too long to execute in one step. func_verbose "using reloadable object file for export list..." skipped_export=: # Break out early, otherwise skipped_export may be # set to false by a later but shorter cmd. 
break fi done IFS=$save_ifs if test -n "$export_symbols_regex" && test : != "$skipped_export"; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi fi if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols=$export_symbols test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test : != "$skipped_export" && test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for '$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands, which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi tmp_deplibs= for test_deplib in $deplibs; do case " $convenience " in *" $test_deplib "*) ;; *) func_append tmp_deplibs " $test_deplib" ;; esac done deplibs=$tmp_deplibs if test -n "$convenience"; then if test -n "$whole_archive_flag_spec" && test yes = "$compiler_needs_object" && test -z "$libobjs"; then # extract the archives, so we have objects to list. # TODO: could optimize this to just extract one archive. whole_archive_flag_spec= fi if test -n "$whole_archive_flag_spec"; then save_libobjs=$libobjs eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= else gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $convenience func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi fi if test yes = "$thread_safe" && test -n "$thread_safe_flag_spec"; then eval flag=\"$thread_safe_flag_spec\" func_append linker_flags " $flag" fi # Make a backup of the uninstalled library when relinking if test relink = "$opt_mode"; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? fi # Do each of the archive commands. if test yes = "$module" && test -n "$module_cmds"; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then eval test_cmds=\"$module_expsym_cmds\" cmds=$module_expsym_cmds else eval test_cmds=\"$module_cmds\" cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then eval test_cmds=\"$archive_expsym_cmds\" cmds=$archive_expsym_cmds else eval test_cmds=\"$archive_cmds\" cmds=$archive_cmds fi fi if test : != "$skipped_export" && func_len " $test_cmds" && len=$func_len_result && test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then : else # The command line is too long to link in one step, link piecewise # or, if using GNU ld and skipped_export is not :, use a linker # script. # Save the value of $output and $libobjs because we want to # use them later. 
If we have whole_archive_flag_spec, we # want to use save_libobjs as it was before # whole_archive_flag_spec was expanded, because we can't # assume the linker understands whole_archive_flag_spec. # This may have to be revisited, in case too many # convenience libraries get linked in and end up exceeding # the spec. if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then save_libobjs=$libobjs fi save_output=$output func_basename "$output" output_la=$func_basename_result # Clear the reloadable object creation command queue and # initialize k to one. test_cmds= concat_cmds= objlist= last_robj= k=1 if test -n "$save_libobjs" && test : != "$skipped_export" && test yes = "$with_gnu_ld"; then output=$output_objdir/$output_la.lnkscript func_verbose "creating GNU ld script: $output" echo 'INPUT (' > $output for obj in $save_libobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done echo ')' >> $output func_append delfiles " $output" func_to_tool_file "$output" output=$func_to_tool_file_result elif test -n "$save_libobjs" && test : != "$skipped_export" && test -n "$file_list_spec"; then output=$output_objdir/$output_la.lnk func_verbose "creating linker input file list: $output" : > $output set x $save_libobjs shift firstobj= if test yes = "$compiler_needs_object"; then firstobj="$1 " shift fi for obj do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" >> $output done func_append delfiles " $output" func_to_tool_file "$output" output=$firstobj\"$file_list_spec$func_to_tool_file_result\" else if test -n "$save_libobjs"; then func_verbose "creating reloadable object files..." output=$output_objdir/$output_la-$k.$objext eval test_cmds=\"$reload_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 # Loop over the list of objects to be linked. for obj in $save_libobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result if test -z "$objlist" || test "$len" -lt "$max_cmd_len"; then func_append objlist " $obj" else # The command $test_cmds is almost too long, add a # command to the queue. if test 1 -eq "$k"; then # The first file doesn't have a previous command to add. reload_objs=$objlist eval concat_cmds=\"$reload_cmds\" else # All subsequent reloadable object files will link in # the last one created. reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" fi last_robj=$output_objdir/$output_la-$k.$objext func_arith $k + 1 k=$func_arith_result output=$output_objdir/$output_la-$k.$objext objlist=" $obj" func_len " $last_robj" func_arith $len0 + $func_len_result len=$func_arith_result fi done # Handle the remaining objects by creating one last # reloadable object file. All subsequent reloadable object # files will link in the last one created. test -z "$concat_cmds" || concat_cmds=$concat_cmds~ reload_objs="$objlist $last_robj" eval concat_cmds=\"\$concat_cmds$reload_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi func_append delfiles " $output" else output= fi ${skipped_export-false} && { func_verbose "generating symbol list for '$libname.la'" export_symbols=$output_objdir/$libname.exp $opt_dry_run || $RM $export_symbols libobjs=$output # Append the command to create the export file. 
test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" if test -n "$last_robj"; then eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" fi } test -n "$save_libobjs" && func_verbose "creating a temporary reloadable object file: $output" # Loop through the commands generated above and execute them. save_ifs=$IFS; IFS='~' for cmd in $concat_cmds; do IFS=$save_ifs $opt_quiet || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? # Restore the uninstalled library and exit if test relink = "$opt_mode"; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS=$save_ifs if test -n "$export_symbols_regex" && ${skipped_export-false}; then func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' func_show_eval '$MV "${export_symbols}T" "$export_symbols"' fi fi ${skipped_export-false} && { if test -n "$export_symbols" && test -n "$include_expsyms"; then tmp_export_symbols=$export_symbols test -n "$orig_export_symbols" && tmp_export_symbols=$orig_export_symbols $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' fi if test -n "$orig_export_symbols"; then # The given exports_symbols file has to be filtered, so filter it. func_verbose "filter symbol list for '$libname.la' to tag DATA exports" # FIXME: $output_objdir/$libname.filter potentially contains lots of # 's' commands, which not all seds can handle. GNU sed should be fine # though. Also, the filter scales superlinearly with the number of # global variables. join(1) would be nice here, but unfortunately # isn't a blessed tool. $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter func_append delfiles " $export_symbols $output_objdir/$libname.filter" export_symbols=$output_objdir/$libname.def $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols fi } libobjs=$output # Restore the value of output. output=$save_output if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then eval libobjs=\"\$libobjs $whole_archive_flag_spec\" test "X$libobjs" = "X " && libobjs= fi # Expand the library linking commands again to reset the # value of $libobjs for piecewise linking. # Do each of the archive commands. if test yes = "$module" && test -n "$module_cmds"; then if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then cmds=$module_expsym_cmds else cmds=$module_cmds fi else if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then cmds=$archive_expsym_cmds else cmds=$archive_cmds fi fi fi if test -n "$delfiles"; then # Append the command to remove temporary files to $cmds. eval cmds=\"\$cmds~\$RM $delfiles\" fi # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append libobjs " $func_extract_archives_result" test "X$libobjs" = "X " && libobjs= fi save_ifs=$IFS; IFS='~' for cmd in $cmds; do IFS=$sp$nl eval cmd=\"$cmd\" IFS=$save_ifs $opt_quiet || { func_quote_for_expand "$cmd" eval "func_echo $func_quote_for_expand_result" } $opt_dry_run || eval "$cmd" || { lt_exit=$? 
# Restore the uninstalled library and exit if test relink = "$opt_mode"; then ( cd "$output_objdir" && \ $RM "${realname}T" && \ $MV "${realname}U" "$realname" ) fi exit $lt_exit } done IFS=$save_ifs # Restore the uninstalled library and exit if test relink = "$opt_mode"; then $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? if test -n "$convenience"; then if test -z "$whole_archive_flag_spec"; then func_show_eval '${RM}r "$gentop"' fi fi exit $EXIT_SUCCESS fi # Create links to the real library. for linkname in $linknames; do if test "$realname" != "$linkname"; then func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' fi done # If -module or -export-dynamic was specified, set the dlname. if test yes = "$module" || test yes = "$export_dynamic"; then # On all known operating systems, these are identical. dlname=$soname fi fi ;; obj) if test -n "$dlfiles$dlprefiles" || test no != "$dlself"; then func_warning "'-dlopen' is ignored for objects" fi case " $deplibs" in *\ -l* | *\ -L*) func_warning "'-l' and '-L' are ignored for objects" ;; esac test -n "$rpath" && \ func_warning "'-rpath' is ignored for objects" test -n "$xrpath" && \ func_warning "'-R' is ignored for objects" test -n "$vinfo" && \ func_warning "'-version-info' is ignored for objects" test -n "$release" && \ func_warning "'-release' is ignored for objects" case $output in *.lo) test -n "$objs$old_deplibs" && \ func_fatal_error "cannot build library object '$output' from non-libtool objects" libobj=$output func_lo2o "$libobj" obj=$func_lo2o_result ;; *) libobj= obj=$output ;; esac # Delete the old objects. $opt_dry_run || $RM $obj $libobj # Objects from convenience libraries. This assumes # single-version convenience libraries. Whenever we create # different ones for PIC/non-PIC, this we'll have to duplicate # the extraction. reload_conv_objs= gentop= # if reload_cmds runs $LD directly, get rid of -Wl from # whole_archive_flag_spec and hope we can get by with turning comma # into space. case $reload_cmds in *\$LD[\ \$]*) wl= ;; esac if test -n "$convenience"; then if test -n "$whole_archive_flag_spec"; then eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" test -n "$wl" || tmp_whole_archive_flags=`$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` reload_conv_objs=$reload_objs\ $tmp_whole_archive_flags else gentop=$output_objdir/${obj}x func_append generated " $gentop" func_extract_archives $gentop $convenience reload_conv_objs="$reload_objs $func_extract_archives_result" fi fi # If we're not building shared, we need to use non_pic_objs test yes = "$build_libtool_libs" || libobjs=$non_pic_objects # Create the old-style object. reload_objs=$objs$old_deplibs' '`$ECHO "$libobjs" | $SP2NL | $SED "/\.$libext$/d; /\.lib$/d; $lo2o" | $NL2SP`' '$reload_conv_objs output=$obj func_execute_cmds "$reload_cmds" 'exit $?' # Exit if we aren't doing a library object file. if test -z "$libobj"; then if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS fi test yes = "$build_libtool_libs" || { if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi # Create an invalid libtool object if no PIC, so that we don't # accidentally link it into a program. # $show "echo timestamp > $libobj" # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? 
exit $EXIT_SUCCESS } if test -n "$pic_flag" || test default != "$pic_mode"; then # Only do commands if we really have different PIC objects. reload_objs="$libobjs $reload_conv_objs" output=$libobj func_execute_cmds "$reload_cmds" 'exit $?' fi if test -n "$gentop"; then func_show_eval '${RM}r "$gentop"' fi exit $EXIT_SUCCESS ;; prog) case $host in *cygwin*) func_stripname '' '.exe' "$output" output=$func_stripname_result.exe;; esac test -n "$vinfo" && \ func_warning "'-version-info' is ignored for programs" test -n "$release" && \ func_warning "'-release' is ignored for programs" $preload \ && test unknown,unknown,unknown = "$dlopen_support,$dlopen_self,$dlopen_self_static" \ && func_warning "'LT_INIT([dlopen])' not used. Assuming no dlopen support." case $host in *-*-rhapsody* | *-*-darwin1.[012]) # On Rhapsody replace the C library is the System framework compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` ;; esac case $host in *-*-darwin*) # Don't allow lazy linking, it breaks C++ global constructors # But is supposedly fixed on 10.4 or later (yay!). if test CXX = "$tagname"; then case ${MACOSX_DEPLOYMENT_TARGET-10.0} in 10.[0123]) func_append compile_command " $wl-bind_at_load" func_append finalize_command " $wl-bind_at_load" ;; esac fi # Time to change all our "foo.ltframework" stuff back to "-framework foo" compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` ;; esac # move library search paths that coincide with paths to not yet # installed libraries to the beginning of the library search list new_libs= for path in $notinst_path; do case " $new_libs " in *" -L$path/$objdir "*) ;; *) case " $compile_deplibs " in *" -L$path/$objdir "*) func_append new_libs " -L$path/$objdir" ;; esac ;; esac done for deplib in $compile_deplibs; do case $deplib in -L*) case " $new_libs " in *" $deplib "*) ;; *) func_append new_libs " $deplib" ;; esac ;; *) func_append new_libs " $deplib" ;; esac done compile_deplibs=$new_libs func_append compile_command " $compile_deplibs" func_append finalize_command " $finalize_deplibs" if test -n "$rpath$xrpath"; then # If the user specified any rpath flags, then add them. for libdir in $rpath $xrpath; do # This is the magic to use -rpath. case "$finalize_rpath " in *" $libdir "*) ;; *) func_append finalize_rpath " $libdir" ;; esac done fi # Now hardcode the library paths rpath= hardcode_libdirs= for libdir in $compile_rpath $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. 
case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$perm_rpath " in *" $libdir "*) ;; *) func_append perm_rpath " $libdir" ;; esac fi case $host in *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) testbindir=`$ECHO "$libdir" | $SED -e 's*/lib$*/bin*'` case :$dllsearchpath: in *":$libdir:"*) ;; ::) dllsearchpath=$libdir;; *) func_append dllsearchpath ":$libdir";; esac case :$dllsearchpath: in *":$testbindir:"*) ;; ::) dllsearchpath=$testbindir;; *) func_append dllsearchpath ":$testbindir";; esac ;; esac done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval rpath=\" $hardcode_libdir_flag_spec\" fi compile_rpath=$rpath rpath= hardcode_libdirs= for libdir in $finalize_rpath; do if test -n "$hardcode_libdir_flag_spec"; then if test -n "$hardcode_libdir_separator"; then if test -z "$hardcode_libdirs"; then hardcode_libdirs=$libdir else # Just accumulate the unique libdirs. case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) ;; *) func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" ;; esac fi else eval flag=\"$hardcode_libdir_flag_spec\" func_append rpath " $flag" fi elif test -n "$runpath_var"; then case "$finalize_perm_rpath " in *" $libdir "*) ;; *) func_append finalize_perm_rpath " $libdir" ;; esac fi done # Substitute the hardcoded libdirs into the rpath. if test -n "$hardcode_libdir_separator" && test -n "$hardcode_libdirs"; then libdir=$hardcode_libdirs eval rpath=\" $hardcode_libdir_flag_spec\" fi finalize_rpath=$rpath if test -n "$libobjs" && test yes = "$build_old_libs"; then # Transform all the library objects into standard objects. compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` fi func_generate_dlsyms "$outputname" "@PROGRAM@" false # template prelinking step if test -n "$prelink_cmds"; then func_execute_cmds "$prelink_cmds" 'exit $?' fi wrappers_required=: case $host in *cegcc* | *mingw32ce*) # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. wrappers_required=false ;; *cygwin* | *mingw* ) test yes = "$build_libtool_libs" || wrappers_required=false ;; *) if test no = "$need_relink" || test yes != "$build_libtool_libs"; then wrappers_required=false fi ;; esac $wrappers_required || { # Replace the output file specification. compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` link_command=$compile_command$compile_rpath # We have no uninstalled library dependencies, so finalize right now. exit_status=0 func_show_eval "$link_command" 'exit_status=$?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Delete the generated files. 
if test -f "$output_objdir/${outputname}S.$objext"; then func_show_eval '$RM "$output_objdir/${outputname}S.$objext"' fi exit $exit_status } if test -n "$compile_shlibpath$finalize_shlibpath"; then compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" fi if test -n "$finalize_shlibpath"; then finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" fi compile_var= finalize_var= if test -n "$runpath_var"; then if test -n "$perm_rpath"; then # We should set the runpath_var. rpath= for dir in $perm_rpath; do func_append rpath "$dir:" done compile_var="$runpath_var=\"$rpath\$$runpath_var\" " fi if test -n "$finalize_perm_rpath"; then # We should set the runpath_var. rpath= for dir in $finalize_perm_rpath; do func_append rpath "$dir:" done finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " fi fi if test yes = "$no_install"; then # We don't need to create a wrapper script. link_command=$compile_var$compile_command$compile_rpath # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` # Delete the old output file. $opt_dry_run || $RM $output # Link the executable and exit func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi exit $EXIT_SUCCESS fi case $hardcode_action,$fast_install in relink,*) # Fast installation is not supported link_command=$compile_var$compile_command$compile_rpath relink_command=$finalize_var$finalize_command$finalize_rpath func_warning "this platform does not like uninstalled shared libraries" func_warning "'$output' will be relinked during installation" ;; *,yes) link_command=$finalize_var$compile_command$finalize_rpath relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` ;; *,no) link_command=$compile_var$compile_command$compile_rpath relink_command=$finalize_var$finalize_command$finalize_rpath ;; *,needless) link_command=$finalize_var$compile_command$finalize_rpath relink_command= ;; esac # Replace the output file specification. link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` # Delete the old output files. $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname func_show_eval "$link_command" 'exit $?' if test -n "$postlink_cmds"; then func_to_tool_file "$output_objdir/$outputname" postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` func_execute_cmds "$postlink_cmds" 'exit $?' fi # Now create the wrapper script. func_verbose "creating $output" # Quote the relink command for shipping. 
if test -n "$relink_command"; then # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done relink_command="(cd `pwd`; $relink_command)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` fi # Only actually do things if not in dry run mode. $opt_dry_run || { # win32 will think the script is a binary if it has # a .exe suffix, so we strip it off here. case $output in *.exe) func_stripname '' '.exe' "$output" output=$func_stripname_result ;; esac # test for cygwin because mv fails w/o .exe extensions case $host in *cygwin*) exeext=.exe func_stripname '' '.exe' "$outputname" outputname=$func_stripname_result ;; *) exeext= ;; esac case $host in *cygwin* | *mingw* ) func_dirname_and_basename "$output" "" "." output_name=$func_basename_result output_path=$func_dirname_result cwrappersource=$output_path/$objdir/lt-$output_name.c cwrapper=$output_path/$output_name.exe $RM $cwrappersource $cwrapper trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 func_emit_cwrapperexe_src > $cwrappersource # The wrapper executable is built using the $host compiler, # because it contains $host paths and files. If cross- # compiling, it, like the target executable, must be # executed on the $host or under an emulation environment. $opt_dry_run || { $LTCC $LTCFLAGS -o $cwrapper $cwrappersource $STRIP $cwrapper } # Now, create the wrapper script for func_source use: func_ltwrapper_scriptname $cwrapper $RM $func_ltwrapper_scriptname_result trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 $opt_dry_run || { # note: this script will not be executed, so do not chmod. if test "x$build" = "x$host"; then $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result else func_emit_wrapper no > $func_ltwrapper_scriptname_result fi } ;; * ) $RM $output trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 func_emit_wrapper no > $output chmod +x $output ;; esac } exit $EXIT_SUCCESS ;; esac # See if we need to build an old-fashioned archive. for oldlib in $oldlibs; do case $build_libtool_libs in convenience) oldobjs="$libobjs_save $symfileobj" addlibs=$convenience build_libtool_libs=no ;; module) oldobjs=$libobjs_save addlibs=$old_convenience build_libtool_libs=no ;; *) oldobjs="$old_deplibs $non_pic_objects" $preload && test -f "$symfileobj" \ && func_append oldobjs " $symfileobj" addlibs=$old_convenience ;; esac if test -n "$addlibs"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $addlibs func_append oldobjs " $func_extract_archives_result" fi # Do each command in the archive commands. if test -n "$old_archive_from_new_cmds" && test yes = "$build_libtool_libs"; then cmds=$old_archive_from_new_cmds else # Add any objects from preloaded convenience libraries if test -n "$dlprefiles"; then gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_extract_archives $gentop $dlprefiles func_append oldobjs " $func_extract_archives_result" fi # POSIX demands no paths to be encoded in archives. 
We have # to avoid creating archives with duplicate basenames if we # might have to extract them afterwards, e.g., when creating a # static archive out of a convenience library, or when linking # the entirety of a libtool archive into another (currently # not supported by libtool). if (for obj in $oldobjs do func_basename "$obj" $ECHO "$func_basename_result" done | sort | sort -uc >/dev/null 2>&1); then : else echo "copying selected object files to avoid basename conflicts..." gentop=$output_objdir/${outputname}x func_append generated " $gentop" func_mkdir_p "$gentop" save_oldobjs=$oldobjs oldobjs= counter=1 for obj in $save_oldobjs do func_basename "$obj" objbase=$func_basename_result case " $oldobjs " in " ") oldobjs=$obj ;; *[\ /]"$objbase "*) while :; do # Make sure we don't pick an alternate name that also # overlaps. newobj=lt$counter-$objbase func_arith $counter + 1 counter=$func_arith_result case " $oldobjs " in *[\ /]"$newobj "*) ;; *) if test ! -f "$gentop/$newobj"; then break; fi ;; esac done func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" func_append oldobjs " $gentop/$newobj" ;; *) func_append oldobjs " $obj" ;; esac done fi func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 tool_oldlib=$func_to_tool_file_result eval cmds=\"$old_archive_cmds\" func_len " $cmds" len=$func_len_result if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then cmds=$old_archive_cmds elif test -n "$archiver_list_spec"; then func_verbose "using command file archive linking..." for obj in $oldobjs do func_to_tool_file "$obj" $ECHO "$func_to_tool_file_result" done > $output_objdir/$libname.libcmd func_to_tool_file "$output_objdir/$libname.libcmd" oldobjs=" $archiver_list_spec$func_to_tool_file_result" cmds=$old_archive_cmds else # the command line is too long to link in one step, link in parts func_verbose "using piecewise archive linking..." save_RANLIB=$RANLIB RANLIB=: objlist= concat_cmds= save_oldobjs=$oldobjs oldobjs= # Is there a better way of finding the last object in the list? for obj in $save_oldobjs do last_oldobj=$obj done eval test_cmds=\"$old_archive_cmds\" func_len " $test_cmds" len0=$func_len_result len=$len0 for obj in $save_oldobjs do func_len " $obj" func_arith $len + $func_len_result len=$func_arith_result func_append objlist " $obj" if test "$len" -lt "$max_cmd_len"; then : else # the above command should be used before it gets too long oldobjs=$objlist if test "$obj" = "$last_oldobj"; then RANLIB=$save_RANLIB fi test -z "$concat_cmds" || concat_cmds=$concat_cmds~ eval concat_cmds=\"\$concat_cmds$old_archive_cmds\" objlist= len=$len0 fi done RANLIB=$save_RANLIB oldobjs=$objlist if test -z "$oldobjs"; then eval cmds=\"\$concat_cmds\" else eval cmds=\"\$concat_cmds~\$old_archive_cmds\" fi fi fi func_execute_cmds "$cmds" 'exit $?' done test -n "$generated" && \ func_show_eval "${RM}r$generated" # Now create the libtool archive. 
case $output in *.la) old_library= test yes = "$build_old_libs" && old_library=$libname.$libext func_verbose "creating $output" # Preserve any variables that may affect compiler behavior for var in $variables_saved_for_relink; do if eval test -z \"\${$var+set}\"; then relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" elif eval var_value=\$$var; test -z "$var_value"; then relink_command="$var=; export $var; $relink_command" else func_quote_for_eval "$var_value" relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" fi done # Quote the link command for shipping. relink_command="(cd `pwd`; $SHELL \"$progpath\" $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` if test yes = "$hardcode_automatic"; then relink_command= fi # Only create the output if not a dry run. $opt_dry_run || { for installed in no yes; do if test yes = "$installed"; then if test -z "$install_libdir"; then break fi output=$output_objdir/${outputname}i # Replace all uninstalled libtool libraries with the installed ones newdependency_libs= for deplib in $dependency_libs; do case $deplib in *.la) func_basename "$deplib" name=$func_basename_result func_resolve_sysroot "$deplib" eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` test -z "$libdir" && \ func_fatal_error "'$deplib' is not a valid libtool archive" func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" ;; -L*) func_stripname -L '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -L$func_replace_sysroot_result" ;; -R*) func_stripname -R '' "$deplib" func_replace_sysroot "$func_stripname_result" func_append newdependency_libs " -R$func_replace_sysroot_result" ;; *) func_append newdependency_libs " $deplib" ;; esac done dependency_libs=$newdependency_libs newdlfiles= for lib in $dlfiles; do case $lib in *.la) func_basename "$lib" name=$func_basename_result eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "'$lib' is not a valid libtool archive" func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" ;; *) func_append newdlfiles " $lib" ;; esac done dlfiles=$newdlfiles newdlprefiles= for lib in $dlprefiles; do case $lib in *.la) # Only pass preopened files to the pseudo-archive (for # eventual linking with the app. 
that links it) if we # didn't already link the preopened objects directly into # the library: func_basename "$lib" name=$func_basename_result eval libdir=`$SED -n -e 's/^libdir=\(.*\)$/\1/p' $lib` test -z "$libdir" && \ func_fatal_error "'$lib' is not a valid libtool archive" func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" ;; esac done dlprefiles=$newdlprefiles else newdlfiles= for lib in $dlfiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlfiles " $abs" done dlfiles=$newdlfiles newdlprefiles= for lib in $dlprefiles; do case $lib in [\\/]* | [A-Za-z]:[\\/]*) abs=$lib ;; *) abs=`pwd`"/$lib" ;; esac func_append newdlprefiles " $abs" done dlprefiles=$newdlprefiles fi $RM $output # place dlname in correct position for cygwin # In fact, it would be nice if we could use this code for all target # systems that can't hard-code library paths into their executables # and that have no shared library path variable independent of PATH, # but it turns out we can't easily determine that from inspecting # libtool variables, so we have to hard-code the OSs to which it # applies here; at the moment, that means platforms that use the PE # object format with DLL files. See the long comment at the top of # tests/bindir.at for full details. tdlname=$dlname case $host,$output,$installed,$module,$dlname in *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) # If a -bindir argument was supplied, place the dll there. if test -n "$bindir"; then func_relative_path "$install_libdir" "$bindir" tdlname=$func_relative_path_result/$dlname else # Otherwise fall back on heuristic. tdlname=../bin/$dlname fi ;; esac $ECHO > $output "\ # $outputname - a libtool library file # Generated by $PROGRAM (GNU $PACKAGE) $VERSION # # Please DO NOT delete this file! # It is necessary for linking the library. # The name that we can dlopen(3). dlname='$tdlname' # Names of this library. library_names='$library_names' # The name of the static archive. old_library='$old_library' # Linker flags that cannot go in dependency_libs. inherited_linker_flags='$new_inherited_linker_flags' # Libraries that this one depends upon. dependency_libs='$dependency_libs' # Names of additional weak libraries provided by this library weak_library_names='$weak_libs' # Version information for $libname. current=$current age=$age revision=$revision # Is this an already installed library? installed=$installed # Should we warn about portability when linking against -modules? shouldnotlink=$module # Files to dlopen/dlpreopen dlopen='$dlfiles' dlpreopen='$dlprefiles' # Directory that this library needs to be installed in: libdir='$install_libdir'" if test no,yes = "$installed,$need_relink"; then $ECHO >> $output "\ relink_command=\"$relink_command\"" fi done } # Do a symbolic link so that the libtool archive can be found in # LD_LIBRARY_PATH before the program is installed. func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' ;; esac exit $EXIT_SUCCESS } if test link = "$opt_mode" || test relink = "$opt_mode"; then func_mode_link ${1+"$@"} fi # func_mode_uninstall arg... func_mode_uninstall () { $debug_cmd RM=$nonopt files= rmforce=false exit_status=0 # This variable tells wrapper scripts just to set variables rather # than running their programs. 
libtool_install_magic=$magic for arg do case $arg in -f) func_append RM " $arg"; rmforce=: ;; -*) func_append RM " $arg" ;; *) func_append files " $arg" ;; esac done test -z "$RM" && \ func_fatal_help "you must specify an RM program" rmdirs= for file in $files; do func_dirname "$file" "" "." dir=$func_dirname_result if test . = "$dir"; then odir=$objdir else odir=$dir/$objdir fi func_basename "$file" name=$func_basename_result test uninstall = "$opt_mode" && odir=$dir # Remember odir for removal later, being careful to avoid duplicates if test clean = "$opt_mode"; then case " $rmdirs " in *" $odir "*) ;; *) func_append rmdirs " $odir" ;; esac fi # Don't error if the file doesn't exist and rm -f was used. if { test -L "$file"; } >/dev/null 2>&1 || { test -h "$file"; } >/dev/null 2>&1 || test -f "$file"; then : elif test -d "$file"; then exit_status=1 continue elif $rmforce; then continue fi rmfiles=$file case $name in *.la) # Possibly a libtool archive, so verify it. if func_lalib_p "$file"; then func_source $dir/$name # Delete the libtool libraries and symlinks. for n in $library_names; do func_append rmfiles " $odir/$n" done test -n "$old_library" && func_append rmfiles " $odir/$old_library" case $opt_mode in clean) case " $library_names " in *" $dlname "*) ;; *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; esac test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" ;; uninstall) if test -n "$library_names"; then # Do each command in the postuninstall commands. func_execute_cmds "$postuninstall_cmds" '$rmforce || exit_status=1' fi if test -n "$old_library"; then # Do each command in the old_postuninstall commands. func_execute_cmds "$old_postuninstall_cmds" '$rmforce || exit_status=1' fi # FIXME: should reinstall the best remaining shared library. ;; esac fi ;; *.lo) # Possibly a libtool object, so verify it. if func_lalib_p "$file"; then # Read the .lo file func_source $dir/$name # Add PIC object to the list of files to remove. if test -n "$pic_object" && test none != "$pic_object"; then func_append rmfiles " $dir/$pic_object" fi # Add non-PIC object to the list of files to remove. if test -n "$non_pic_object" && test none != "$non_pic_object"; then func_append rmfiles " $dir/$non_pic_object" fi fi ;; *) if test clean = "$opt_mode"; then noexename=$name case $file in *.exe) func_stripname '' '.exe' "$file" file=$func_stripname_result func_stripname '' '.exe' "$name" noexename=$func_stripname_result # $file with .exe has already been added to rmfiles, # add $file without .exe func_append rmfiles " $file" ;; esac # Do a test to see if this is a libtool program. 
if func_ltwrapper_p "$file"; then if func_ltwrapper_executable_p "$file"; then func_ltwrapper_scriptname "$file" relink_command= func_source $func_ltwrapper_scriptname_result func_append rmfiles " $func_ltwrapper_scriptname_result" else relink_command= func_source $dir/$noexename fi # note $name still contains .exe if it was in $file originally # as does the version of $file that was added into $rmfiles func_append rmfiles " $odir/$name $odir/${name}S.$objext" if test yes = "$fast_install" && test -n "$relink_command"; then func_append rmfiles " $odir/lt-$name" fi if test "X$noexename" != "X$name"; then func_append rmfiles " $odir/lt-$noexename.c" fi fi fi ;; esac func_show_eval "$RM $rmfiles" 'exit_status=1' done # Try to remove the $objdir's in the directories where we deleted files for dir in $rmdirs; do if test -d "$dir"; then func_show_eval "rmdir $dir >/dev/null 2>&1" fi done exit $exit_status } if test uninstall = "$opt_mode" || test clean = "$opt_mode"; then func_mode_uninstall ${1+"$@"} fi test -z "$opt_mode" && { help=$generic_help func_fatal_help "you must specify a MODE" } test -z "$exec_cmd" && \ func_fatal_help "invalid operation mode '$opt_mode'" if test -n "$exec_cmd"; then eval exec "$exec_cmd" exit $EXIT_FAILURE fi exit $exit_status # The TAGs below are defined such that we never get into a situation # where we disable both kinds of libraries. Given conflicting # choices, we go for a static library, that is the most portable, # since we can't tell whether shared libraries were disabled because # the user asked for that or because the platform doesn't support # them. This is particularly important on AIX, because we don't # support having both static and shared libraries enabled at the same # time on that platform, so we default to a shared-only configuration. # If a disable-shared tag is given, we'll fallback to a static-only # configuration. But we'll never go from static-only to shared-only. # ### BEGIN LIBTOOL TAG CONFIG: disable-shared build_libtool_libs=no build_old_libs=yes # ### END LIBTOOL TAG CONFIG: disable-shared # ### BEGIN LIBTOOL TAG CONFIG: disable-static build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` # ### END LIBTOOL TAG CONFIG: disable-static # Local Variables: # mode:shell-script # sh-indentation:2 # End: recoll-1.26.3/query/0000755000175000017500000000000013570165410011233 500000000000000recoll-1.26.3/query/location.hh0000644000175000017500000001212413347664027013316 00000000000000// A Bison parser, made by GNU Bison 3.0.4. // Locations for Bison parsers in C++ // Copyright (C) 2002-2015 Free Software Foundation, Inc. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see . // As a special exception, you may create a larger work that contains // part or all of the Bison parser skeleton and distribute that work // under terms of your choice, so long as that work isn't itself a // parser generator using the skeleton or a modified version thereof // as a parser skeleton. 
Alternatively, if you modify or redistribute // the parser skeleton itself, you may (at your option) remove this // special exception, which will cause the skeleton and the resulting // Bison output files to be licensed under the GNU General Public // License without this special exception. // This special exception was added by the Free Software Foundation in // version 2.2 of Bison. /** ** \file location.hh ** Define the yy::location class. */ #ifndef YY_YY_LOCATION_HH_INCLUDED # define YY_YY_LOCATION_HH_INCLUDED # include "position.hh" namespace yy { #line 46 "location.hh" // location.cc:296 /// Abstract a location. class location { public: /// Construct a location from \a b to \a e. location (const position& b, const position& e) : begin (b) , end (e) { } /// Construct a 0-width location in \a p. explicit location (const position& p = position ()) : begin (p) , end (p) { } /// Construct a 0-width location in \a f, \a l, \a c. explicit location (std::string* f, unsigned int l = 1u, unsigned int c = 1u) : begin (f, l, c) , end (f, l, c) { } /// Initialization. void initialize (std::string* f = YY_NULLPTR, unsigned int l = 1u, unsigned int c = 1u) { begin.initialize (f, l, c); end = begin; } /** \name Line and Column related manipulators ** \{ */ public: /// Reset initial location to final location. void step () { begin = end; } /// Extend the current location to the COUNT next columns. void columns (int count = 1) { end += count; } /// Extend the current location to the COUNT next lines. void lines (int count = 1) { end.lines (count); } /** \} */ public: /// Beginning of the located region. position begin; /// End of the located region. position end; }; /// Join two locations, in place. inline location& operator+= (location& res, const location& end) { res.end = end.end; return res; } /// Join two locations. inline location operator+ (location res, const location& end) { return res += end; } /// Add \a width columns to the end position, in place. inline location& operator+= (location& res, int width) { res.columns (width); return res; } /// Add \a width columns to the end position. inline location operator+ (location res, int width) { return res += width; } /// Subtract \a width columns to the end position, in place. inline location& operator-= (location& res, int width) { return res += -width; } /// Subtract \a width columns to the end position. inline location operator- (location res, int width) { return res -= width; } /// Compare two location objects. inline bool operator== (const location& loc1, const location& loc2) { return loc1.begin == loc2.begin && loc1.end == loc2.end; } /// Compare two location objects. inline bool operator!= (const location& loc1, const location& loc2) { return !(loc1 == loc2); } /** \brief Intercept output stream redirection. ** \param ostr the destination output stream ** \param loc a reference to the location to redirect ** ** Avoid duplicate information. */ template inline std::basic_ostream& operator<< (std::basic_ostream& ostr, const location& loc) { unsigned int end_col = 0 < loc.end.column ? loc.end.column - 1 : 0; ostr << loc.begin; if (loc.end.filename && (!loc.begin.filename || *loc.begin.filename != *loc.end.filename)) ostr << '-' << loc.end.filename << ':' << loc.end.line << '.' << end_col; else if (loc.begin.line < loc.end.line) ostr << '-' << loc.end.line << '.' 
<< end_col; else if (loc.begin.column < end_col) ostr << '-' << end_col; return ostr; } } // yy #line 192 "location.hh" // location.cc:296 #endif // !YY_YY_LOCATION_HH_INCLUDED recoll-1.26.3/query/docseq.h0000644000175000017500000002122513566424763012622 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _DOCSEQ_H_INCLUDED_ #define _DOCSEQ_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include #include #include "rcldoc.h" #include "hldata.h" // Need this for the "Snippet" class def. #include "rclquery.h" // A result list entry. struct ResListEntry { Rcl::Doc doc; std::string subHeader; }; /** Sort specification. */ class DocSeqSortSpec { public: DocSeqSortSpec() : desc(false) {} bool isNotNull() const {return !field.empty();} void reset() {field.erase();} std::string field; bool desc; }; /** Filtering spec. This is only used to filter by doc category for now, hence the rather specialized interface */ class DocSeqFiltSpec { public: DocSeqFiltSpec() {} enum Crit {DSFS_MIMETYPE, DSFS_QLANG, DSFS_PASSALL}; void orCrit(Crit crit, const std::string& value) { crits.push_back(crit); values.push_back(value); } std::vector crits; std::vector values; void reset() {crits.clear(); values.clear();} bool isNotNull() const {return crits.size() != 0;} }; /** Interface for a list of documents coming from some source. The result list display data may come from different sources (ie: history or Db query), and be post-processed (DocSeqSorted). Additional functionality like filtering/sorting can either be obtained by stacking DocSequence objects (ie: sorting history), or by native capability (ex: docseqdb can sort and filter). The implementation might be nicer by using more sophisticated c++ with multiple inheritance of sort and filter virtual interfaces, but the current one will have to do for now. */ class DocSequence { public: DocSequence(const std::string &t) : m_title(t) {} virtual ~DocSequence() {} /** Get document at given rank. * * @param num document rank in sequence * @param doc return data * @param sh subheader to display before this result (ie: date change * inside history) * @return true if ok, false for error or end of data */ virtual bool getDoc(int num, Rcl::Doc &doc, std::string *sh = 0) = 0; /** Get next page of documents. This accumulates entries into the result * list parameter (doesn't reset it). */ virtual int getSeqSlice(int offs, int cnt, std::vector& result); /** Get abstract for document. This is special because it may take time. 
* The default is to return the input doc's abstract fields, but some * sequences can compute a better value (ie: docseqdb) */ virtual bool getAbstract(Rcl::Doc& doc, std::vector& abs) { abs.push_back(doc.meta[Rcl::Doc::keyabs]); return true; } virtual bool getAbstract(Rcl::Doc& doc, std::vector& abs, int, bool) { abs.push_back(Rcl::Snippet(0, doc.meta[Rcl::Doc::keyabs])); return true; } virtual int getFirstMatchPage(Rcl::Doc&, std::string&) { return -1; } /** Get duplicates. */ virtual bool docDups(const Rcl::Doc&, std::vector&) { return false; } virtual bool getEnclosing(Rcl::Doc&, Rcl::Doc&); /** Get estimated total count in results */ virtual int getResCnt() = 0; /** Get title for result list */ virtual std::string title() { return m_title; } /** Can do snippets ? */ virtual bool snippetsCapable() { return false; } /** Get description for underlying query */ virtual std::string getDescription() = 0; /** Get search terms (for highlighting abstracts). Some sequences * may have no associated search terms. Implement this for them. */ virtual void getTerms(HighlightData& hld) { hld.clear(); } virtual std::list expand(Rcl::Doc &) { return std::list(); } virtual std::string getReason() { return m_reason; } /** Optional functionality. */ virtual bool canFilter() {return false;} virtual bool canSort() {return false;} virtual bool setFiltSpec(const DocSeqFiltSpec &) {return false;} virtual bool setSortSpec(const DocSeqSortSpec &) {return false;} virtual std::shared_ptr getSourceSeq() { return std::shared_ptr();} static void set_translations(const std::string& sort, const std::string& filt) { o_sort_trans = sort; o_filt_trans = filt; } protected: friend class DocSeqModifier; virtual std::shared_ptr getDb() = 0; static std::mutex o_dblock; static std::string o_sort_trans; static std::string o_filt_trans; std::string m_reason; private: std::string m_title; }; /** A modifier has a child sequence which does the real work and does * something with the results. Some operations are just delegated */ class DocSeqModifier : public DocSequence { public: DocSeqModifier(std::shared_ptr iseq) : DocSequence(""), m_seq(iseq) {} virtual ~DocSeqModifier() {} virtual bool getAbstract(Rcl::Doc& doc, std::vector& abs) override{ if (!m_seq) return false; return m_seq->getAbstract(doc, abs); } virtual bool getAbstract(Rcl::Doc& doc, std::vector& abs, int maxlen, bool bypage) override { if (!m_seq) return false; return m_seq->getAbstract(doc, abs, maxlen, bypage); } /** Get duplicates. 
*/ virtual bool docDups(const Rcl::Doc& doc, std::vector& dups) override { if (!m_seq) return false; return m_seq->docDups(doc, dups); } virtual bool snippetsCapable() override { if (!m_seq) return false; return m_seq->snippetsCapable(); } virtual std::string getDescription() override { if (!m_seq) return ""; return m_seq->getDescription(); } virtual void getTerms(HighlightData& hld) override { if (!m_seq) return; m_seq->getTerms(hld); } virtual bool getEnclosing(Rcl::Doc& doc, Rcl::Doc& pdoc) override { if (!m_seq) return false; return m_seq->getEnclosing(doc, pdoc); } virtual std::string getReason() override { if (!m_seq) return string(); return m_seq->getReason(); } virtual std::string title() override { return m_seq->title(); } virtual std::shared_ptr getSourceSeq() override { return m_seq; } protected: virtual std::shared_ptr getDb() override { if (!m_seq) return 0; return m_seq->getDb(); } std::shared_ptr m_seq; }; class RclConfig; // A DocSource can juggle docseqs of different kinds to implement // sorting and filtering in ways depending on the base seqs capabilities class DocSource : public DocSeqModifier { public: DocSource(RclConfig *config, std::shared_ptr iseq) : DocSeqModifier(iseq), m_config(config) {} virtual bool canFilter() {return true;} virtual bool canSort() {return true;} virtual bool setFiltSpec(const DocSeqFiltSpec &); virtual bool setSortSpec(const DocSeqSortSpec &); virtual bool getDoc(int num, Rcl::Doc &doc, std::string *sh = 0) { if (!m_seq) return false; return m_seq->getDoc(num, doc, sh); } virtual int getResCnt() { if (!m_seq) return 0; return m_seq->getResCnt(); } virtual std::string title(); private: bool buildStack(); void stripStack(); RclConfig *m_config; DocSeqFiltSpec m_fspec; DocSeqSortSpec m_sspec; }; #endif /* _DOCSEQ_H_ */ recoll-1.26.3/query/wasaparse.hpp0000644000175000017500000003402413347664027013667 00000000000000// A Bison parser, made by GNU Bison 3.0.4. // Skeleton interface for Bison LALR(1) parsers in C++ // Copyright (C) 2002-2015 Free Software Foundation, Inc. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see . // As a special exception, you may create a larger work that contains // part or all of the Bison parser skeleton and distribute that work // under terms of your choice, so long as that work isn't itself a // parser generator using the skeleton or a modified version thereof // as a parser skeleton. Alternatively, if you modify or redistribute // the parser skeleton itself, you may (at your option) remove this // special exception, which will cause the skeleton and the resulting // Bison output files to be licensed under the GNU General Public // License without this special exception. // This special exception was added by the Free Software Foundation in // version 2.2 of Bison. /** ** \file y.tab.h ** Define the yy::parser class. */ // C++ LALR(1) parser skeleton written by Akim Demaille. 
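// Illustrative usage sketch for the DocSequence interface declared in
// query/docseq.h above. This sketch is not part of the recoll sources:
// the helper name dumpSequence() and the choice to print the abstract are
// assumptions for illustration; only title(), getResCnt(), getDoc() and
// Rcl::Doc::meta / Rcl::Doc::keyabs come from the headers shown in this
// archive. It assumes an already constructed sequence (for example a
// DocSequenceDb held in a std::shared_ptr<DocSequence>).
#include <iostream>
#include <memory>
#include <string>

#include "docseq.h"

// Walk a result sequence and print each entry's subheader and abstract.
static void dumpSequence(const std::shared_ptr<DocSequence>& seq)
{
    if (!seq)
        return;
    std::cout << "Result list: " << seq->title() << "\n";
    // getResCnt() is documented as an estimate; a false return from
    // getDoc() signals an error or the real end of data.
    int cnt = seq->getResCnt();
    for (int num = 0; num < cnt; num++) {
        Rcl::Doc doc;
        std::string subheader;
        if (!seq->getDoc(num, doc, &subheader))
            break;
        if (!subheader.empty())
            std::cout << "== " << subheader << "\n";
        std::cout << doc.meta[Rcl::Doc::keyabs] << "\n";
    }
}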
#ifndef YY_YY_Y_TAB_H_INCLUDED # define YY_YY_Y_TAB_H_INCLUDED # include // std::abort # include # include # include # include # include "stack.hh" # include "location.hh" #ifndef YY_ATTRIBUTE # if (defined __GNUC__ \ && (2 < __GNUC__ || (__GNUC__ == 2 && 96 <= __GNUC_MINOR__))) \ || defined __SUNPRO_C && 0x5110 <= __SUNPRO_C # define YY_ATTRIBUTE(Spec) __attribute__(Spec) # else # define YY_ATTRIBUTE(Spec) /* empty */ # endif #endif #ifndef YY_ATTRIBUTE_PURE # define YY_ATTRIBUTE_PURE YY_ATTRIBUTE ((__pure__)) #endif #ifndef YY_ATTRIBUTE_UNUSED # define YY_ATTRIBUTE_UNUSED YY_ATTRIBUTE ((__unused__)) #endif #if !defined _Noreturn \ && (!defined __STDC_VERSION__ || __STDC_VERSION__ < 201112) # if defined _MSC_VER && 1200 <= _MSC_VER # define _Noreturn __declspec (noreturn) # else # define _Noreturn YY_ATTRIBUTE ((__noreturn__)) # endif #endif /* Suppress unused-variable warnings by "using" E. */ #if ! defined lint || defined __GNUC__ # define YYUSE(E) ((void) (E)) #else # define YYUSE(E) /* empty */ #endif #if defined __GNUC__ && 407 <= __GNUC__ * 100 + __GNUC_MINOR__ /* Suppress an incorrect diagnostic about yylval being uninitialized. */ # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN \ _Pragma ("GCC diagnostic push") \ _Pragma ("GCC diagnostic ignored \"-Wuninitialized\"")\ _Pragma ("GCC diagnostic ignored \"-Wmaybe-uninitialized\"") # define YY_IGNORE_MAYBE_UNINITIALIZED_END \ _Pragma ("GCC diagnostic pop") #else # define YY_INITIAL_VALUE(Value) Value #endif #ifndef YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_BEGIN # define YY_IGNORE_MAYBE_UNINITIALIZED_END #endif #ifndef YY_INITIAL_VALUE # define YY_INITIAL_VALUE(Value) /* Nothing. */ #endif /* Debug traces. */ #ifndef YYDEBUG # define YYDEBUG 0 #endif namespace yy { #line 114 "y.tab.h" // lalr1.cc:377 /// A Bison parser. class parser { public: #ifndef YYSTYPE /// Symbol semantic values. union semantic_type { #line 46 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:377 std::string *str; Rcl::SearchDataClauseRange *rg; Rcl::SearchDataClauseSimple *cl; Rcl::SearchData *sd; #line 135 "y.tab.h" // lalr1.cc:377 }; #else typedef YYSTYPE semantic_type; #endif /// Symbol locations. typedef location location_type; /// Syntax errors thrown from user actions. struct syntax_error : std::runtime_error { syntax_error (const location_type& l, const std::string& m); location_type location; }; /// Tokens. struct token { enum yytokentype { WORD = 258, QUOTED = 259, QUALIFIERS = 260, AND = 261, UCONCAT = 262, OR = 263, EQUALS = 264, CONTAINS = 265, SMALLEREQ = 266, SMALLER = 267, GREATEREQ = 268, GREATER = 269, RANGE = 270 }; }; /// (External) token type, as returned by yylex. typedef token::yytokentype token_type; /// Symbol type: an internal symbol number. typedef int symbol_number_type; /// The symbol type number to denote an empty symbol. enum { empty_symbol = -2 }; /// Internal symbol number for tokens (subsumed by symbol_number_type). typedef unsigned char token_number_type; /// A complete symbol. /// /// Expects its Base type to provide access to the symbol type /// via type_get(). /// /// Provide access to semantic value and location. template struct basic_symbol : Base { /// Alias to Base. typedef Base super_type; /// Default constructor. basic_symbol (); /// Copy constructor. basic_symbol (const basic_symbol& other); /// Constructor for valueless symbols. basic_symbol (typename Base::kind_type t, const location_type& l); /// Constructor for symbols with semantic value. 
basic_symbol (typename Base::kind_type t, const semantic_type& v, const location_type& l); /// Destroy the symbol. ~basic_symbol (); /// Destroy contents, and record that is empty. void clear (); /// Whether empty. bool empty () const; /// Destructive move, \a s is emptied into this. void move (basic_symbol& s); /// The semantic value. semantic_type value; /// The location. location_type location; private: /// Assignment operator. basic_symbol& operator= (const basic_symbol& other); }; /// Type access provider for token (enum) based symbols. struct by_type { /// Default constructor. by_type (); /// Copy constructor. by_type (const by_type& other); /// The symbol type as needed by the constructor. typedef token_type kind_type; /// Constructor from (external) token numbers. by_type (kind_type t); /// Record that this symbol is empty. void clear (); /// Steal the symbol type from \a that. void move (by_type& that); /// The (internal) type number (corresponding to \a type). /// \a empty when empty. symbol_number_type type_get () const; /// The token. token_type token () const; /// The symbol type. /// \a empty_symbol when empty. /// An int, not token_number_type, to be able to store empty_symbol. int type; }; /// "External" symbols: returned by the scanner. typedef basic_symbol symbol_type; /// Build a parser object. parser (WasaParserDriver* d_yyarg); virtual ~parser (); /// Parse. /// \returns 0 iff parsing succeeded. virtual int parse (); #if YYDEBUG /// The current debugging stream. std::ostream& debug_stream () const YY_ATTRIBUTE_PURE; /// Set the current debugging stream. void set_debug_stream (std::ostream &); /// Type for debugging levels. typedef int debug_level_type; /// The current debugging level. debug_level_type debug_level () const YY_ATTRIBUTE_PURE; /// Set the current debugging level. void set_debug_level (debug_level_type l); #endif /// Report a syntax error. /// \param loc where the syntax error is found. /// \param msg a description of the syntax error. virtual void error (const location_type& loc, const std::string& msg); /// Report a syntax error. void error (const syntax_error& err); private: /// This class is not copyable. parser (const parser&); parser& operator= (const parser&); /// State numbers. typedef int state_type; /// Generate an error message. /// \param yystate the state where the error occurred. /// \param yyla the lookahead token. virtual std::string yysyntax_error_ (state_type yystate, const symbol_type& yyla) const; /// Compute post-reduction state. /// \param yystate the current state /// \param yysym the nonterminal to push on the stack state_type yy_lr_goto_state_ (state_type yystate, int yysym); /// Whether the given \c yypact_ value indicates a defaulted state. /// \param yyvalue the value to check static bool yy_pact_value_is_default_ (int yyvalue); /// Whether the given \c yytable_ value indicates a syntax error. /// \param yyvalue the value to check static bool yy_table_value_is_error_ (int yyvalue); static const signed char yypact_ninf_; static const signed char yytable_ninf_; /// Convert a scanner token number \a t to a symbol number. static token_number_type yytranslate_ (int t); // Tables. // YYPACT[STATE-NUM] -- Index in YYTABLE of the portion describing // STATE-NUM. static const signed char yypact_[]; // YYDEFACT[STATE-NUM] -- Default reduction number in state STATE-NUM. // Performed when YYTABLE does not specify something else to do. Zero // means the default is an error. static const unsigned char yydefact_[]; // YYPGOTO[NTERM-NUM]. 
static const signed char yypgoto_[]; // YYDEFGOTO[NTERM-NUM]. static const signed char yydefgoto_[]; // YYTABLE[YYPACT[STATE-NUM]] -- What to do in state STATE-NUM. If // positive, shift that token. If negative, reduce the rule whose // number is the opposite. If YYTABLE_NINF, syntax error. static const signed char yytable_[]; static const signed char yycheck_[]; // YYSTOS[STATE-NUM] -- The (internal number of the) accessing // symbol of state STATE-NUM. static const unsigned char yystos_[]; // YYR1[YYN] -- Symbol number of symbol that rule YYN derives. static const unsigned char yyr1_[]; // YYR2[YYN] -- Number of symbols on the right hand side of rule YYN. static const unsigned char yyr2_[]; /// Convert the symbol name \a n to a form suitable for a diagnostic. static std::string yytnamerr_ (const char *n); /// For a symbol, its name in clear. static const char* const yytname_[]; #if YYDEBUG // YYRLINE[YYN] -- Source line where rule number YYN was defined. static const unsigned short int yyrline_[]; /// Report on the debug stream that the rule \a r is going to be reduced. virtual void yy_reduce_print_ (int r); /// Print the state stack on the debug stream. virtual void yystack_print_ (); // Debugging. int yydebug_; std::ostream* yycdebug_; /// \brief Display a symbol type, value and location. /// \param yyo The output stream. /// \param yysym The symbol. template void yy_print_ (std::ostream& yyo, const basic_symbol& yysym) const; #endif /// \brief Reclaim the memory associated to a symbol. /// \param yymsg Why this token is reclaimed. /// If null, print nothing. /// \param yysym The symbol. template void yy_destroy_ (const char* yymsg, basic_symbol& yysym) const; private: /// Type access provider for state based symbols. struct by_state { /// Default constructor. by_state (); /// The symbol type as needed by the constructor. typedef state_type kind_type; /// Constructor. by_state (kind_type s); /// Copy constructor. by_state (const by_state& other); /// Record that this symbol is empty. void clear (); /// Steal the symbol type from \a that. void move (by_state& that); /// The (internal) type number (corresponding to \a state). /// \a empty_symbol when empty. symbol_number_type type_get () const; /// The state number used to denote an empty symbol. enum { empty_state = -1 }; /// The state. /// \a empty when empty. state_type state; }; /// "Internal" symbol: element of the stack. struct stack_symbol_type : basic_symbol { /// Superclass. typedef basic_symbol super_type; /// Construct an empty symbol. stack_symbol_type (); /// Steal the contents from \a sym to build this. stack_symbol_type (state_type s, symbol_type& sym); /// Assignment, needed by push_back. stack_symbol_type& operator= (const stack_symbol_type& that); }; /// Stack type. typedef stack stack_type; /// The stack. stack_type yystack_; /// Push a new state on the stack. /// \param m a debug message to display /// if null, no trace is output. /// \param s the symbol /// \warning the contents of \a s.value is stolen. void yypush_ (const char* m, stack_symbol_type& s); /// Push a new look ahead token on the state on the stack. /// \param m a debug message to display /// if null, no trace is output. /// \param s the state /// \param sym the symbol (for its value and location). /// \warning the contents of \a s.value is stolen. void yypush_ (const char* m, state_type s, symbol_type& sym); /// Pop \a n symbols the three stacks. void yypop_ (unsigned int n = 1); /// Constants. 
enum { yyeof_ = 0, yylast_ = 60, ///< Last index in yytable_. yynnts_ = 8, ///< Number of nonterminal symbols. yyfinal_ = 14, ///< Termination state number. yyterror_ = 1, yyerrcode_ = 256, yyntokens_ = 19 ///< Number of tokens. }; // User arguments. WasaParserDriver* d; }; } // yy #line 491 "y.tab.h" // lalr1.cc:377 #endif // !YY_YY_Y_TAB_H_INCLUDED recoll-1.26.3/query/recollqmain.cpp0000644000175000017500000000201413533651561014170 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ // Takes a query and run it, no gui, results to stdout #include "autoconfig.h" #include #include "rclconfig.h" #include "recollq.h" static RclConfig *rclconfig; int main(int argc, char **argv) { return(recollq(&rclconfig, argc, argv)); } recoll-1.26.3/query/recollq.h0000644000175000017500000000204013533651561012767 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _recollq_h_included_ #define _recollq_h_included_ /// Execute query, print results to stdout. This is just an api to the /// recollq command line program. class RclConfig; extern int recollq(RclConfig **cfp, int argc, char **argv); #endif /* _recollq_h_included_ */ recoll-1.26.3/query/reslistpager.cpp0000644000175000017500000004120413566424763014402 00000000000000/* Copyright (C) 2007-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include #include #include #include #include using std::ostringstream; using std::endl; using std::list; #include "cstr.h" #include "reslistpager.h" #include "log.h" #include "rclconfig.h" #include "smallut.h" #include "rclutil.h" #include "plaintorich.h" #include "mimehandler.h" #include "transcode.h" // Default highlighter. No need for locking, this is query-only. static const string cstr_hlfontcolor(""); static const string cstr_hlendfont(""); class PlainToRichHtReslist : public PlainToRich { public: virtual string startMatch(unsigned int) { return cstr_hlfontcolor; } virtual string endMatch() { return cstr_hlendfont; } }; static PlainToRichHtReslist g_hiliter; ResListPager::ResListPager(int pagesize) : m_pagesize(pagesize), m_newpagesize(pagesize), m_resultsInCurrentPage(0), m_winfirst(-1), m_hasNext(true), m_hiliter(&g_hiliter) { } void ResListPager::resultPageNext() { if (!m_docSource) { LOGDEB("ResListPager::resultPageNext: null source\n"); return; } int resCnt = m_docSource->getResCnt(); LOGDEB("ResListPager::resultPageNext: rescnt " << resCnt << ", winfirst " << m_winfirst << "\n"); if (m_winfirst < 0) { m_winfirst = 0; } else { m_winfirst += int(m_respage.size()); } // Get the next page of results. Note that we look ahead by one to // determine if there is actually a next page vector npage; int pagelen = m_docSource->getSeqSlice(m_winfirst, m_pagesize + 1, npage); // If page was truncated, there is no next m_hasNext = (pagelen == m_pagesize + 1); // Get rid of the possible excess result if (pagelen == m_pagesize + 1) { npage.resize(m_pagesize); pagelen--; } if (pagelen <= 0) { // No results ? This can only happen on the first page or if the // actual result list size is a multiple of the page pref (else // there would have been no Next on the last page) if (m_winfirst > 0) { // Have already results. Let them show, just disable the // Next button. We'd need to remove the Next link from the page // too. // Restore the m_winfirst value, let the current result vector alone m_winfirst -= int(m_respage.size()); } else { // No results at all (on first page) m_winfirst = -1; } return; } m_resultsInCurrentPage = pagelen; m_respage = npage; } static string maybeEscapeHtml(const string& fld) { if (fld.compare(0, cstr_fldhtm.size(), cstr_fldhtm)) return escapeHtml(fld); else return fld.substr(cstr_fldhtm.size()); } void ResListPager::resultPageFor(int docnum) { if (!m_docSource) { LOGDEB("ResListPager::resultPageFor: null source\n"); return; } int resCnt = m_docSource->getResCnt(); LOGDEB("ResListPager::resultPageFor(" << docnum << "): rescnt " << resCnt << ", winfirst " << m_winfirst << "\n"); m_winfirst = (docnum / m_pagesize) * m_pagesize; // Get the next page of results. vector npage; int pagelen = m_docSource->getSeqSlice(m_winfirst, m_pagesize, npage); // If page was truncated, there is no next m_hasNext = (pagelen == m_pagesize); if (pagelen <= 0) { m_winfirst = -1; return; } m_respage = npage; } void ResListPager::displayDoc(RclConfig *config, int i, Rcl::Doc& doc, const HighlightData& hdata, const string& sh) { ostringstream chunk; // Determine icon to display if any string iconurl = iconUrl(config, doc); // Printable url: either utf-8 if transcoding succeeds, or url-encoded string url; printableUrl(config->getDefCharset(), doc.url, url); // Same as url, but with file:// possibly stripped. output by %u instead // of %U. 
string urlOrLocal; urlOrLocal = fileurltolocalpath(url); if (urlOrLocal.empty()) urlOrLocal = url; // Make title out of file name if none yet string titleOrFilename; string utf8fn; doc.getmeta(Rcl::Doc::keytt, &titleOrFilename); doc.getmeta(Rcl::Doc::keyfn, &utf8fn); if (utf8fn.empty()) { utf8fn = path_getsimple(url); } if (titleOrFilename.empty()) { titleOrFilename = utf8fn; } // Url for the parent directory. We strip the file:// part for local // paths string parenturl = url_parentfolder(url); { string localpath = fileurltolocalpath(parenturl); if (!localpath.empty()) parenturl = localpath; } // Result number char numbuf[20]; int docnumforlinks = m_winfirst + 1 + i; sprintf(numbuf, "%d", docnumforlinks); // Document date: either doc or file modification times string datebuf; if (!doc.dmtime.empty() || !doc.fmtime.empty()) { char cdate[100]; cdate[0] = 0; time_t mtime = doc.dmtime.empty() ? atoll(doc.fmtime.c_str()) : atoll(doc.dmtime.c_str()); struct tm *tm = localtime(&mtime); strftime(cdate, 99, dateFormat().c_str(), tm); transcode(cdate, datebuf, RclConfig::getLocaleCharset(), "UTF-8"); } // Size information. We print both doc and file if they differ a lot int64_t fsize = -1, dsize = -1; if (!doc.dbytes.empty()) dsize = static_cast(atoll(doc.dbytes.c_str())); if (!doc.fbytes.empty()) fsize = static_cast(atoll(doc.fbytes.c_str())); string sizebuf; if (dsize > 0) { sizebuf = displayableBytes(dsize); if (fsize > 10 * dsize && fsize - dsize > 1000) sizebuf += string(" / ") + displayableBytes(fsize); } else if (fsize >= 0) { sizebuf = displayableBytes(fsize); } string richabst; bool needabstract = parFormat().find("%A") != string::npos; if (needabstract && m_docSource) { vector vabs; m_docSource->getAbstract(doc, vabs); m_hiliter->set_inputhtml(false); for (vector::const_iterator it = vabs.begin(); it != vabs.end(); it++) { if (!it->empty()) { // No need to call escapeHtml(), plaintorich handles it list lr; // There may be data like page numbers before the snippet text. // will be in brackets. string::size_type bckt = it->find("]"); if (bckt == string::npos) { m_hiliter->plaintorich(*it, lr, hdata); } else { m_hiliter->plaintorich(it->substr(bckt), lr, hdata); lr.front() = it->substr(0, bckt) + lr.front(); } richabst += lr.front(); richabst += absSep(); } } } // Links; Uses utilities from mimehandler.h ostringstream linksbuf; if (canIntern(&doc, config)) { linksbuf << "" << trans("Preview") << "  "; } if (canOpen(&doc, config)) { linksbuf << "" << trans("Open") << ""; } ostringstream snipsbuf; if (doc.haspages) { snipsbuf << "" << trans("Snippets") << "  "; linksbuf << "  " << snipsbuf.str(); } string collapscnt; if (doc.getmeta(Rcl::Doc::keycc, &collapscnt) && !collapscnt.empty()) { ostringstream collpsbuf; int clc = atoi(collapscnt.c_str()) + 1; collpsbuf << "" << trans("Dups") << "(" << clc << ")" << "  "; linksbuf << "  " << collpsbuf.str(); } // Build the result list paragraph: // Subheader: this is used by history if (!sh.empty()) chunk << "

" << sh << "

\n

"; else chunk << "

"; char xdocidbuf[100]; sprintf(xdocidbuf, "%lu", doc.xdocid); // Configurable stuff map subs; subs["A"] = !richabst.empty() ? richabst : ""; subs["D"] = datebuf; subs["E"] = snipsbuf.str(); subs["I"] = iconurl; subs["i"] = doc.ipath; subs["K"] = !doc.meta[Rcl::Doc::keykw].empty() ? string("[") + maybeEscapeHtml(doc.meta[Rcl::Doc::keykw]) + "]" : ""; subs["L"] = linksbuf.str(); subs["N"] = numbuf; subs["M"] = doc.mimetype; subs["P"] = parenturl; subs["R"] = doc.meta[Rcl::Doc::keyrr]; subs["S"] = sizebuf; subs["T"] = maybeEscapeHtml(titleOrFilename); subs["t"] = maybeEscapeHtml(doc.meta[Rcl::Doc::keytt]); subs["U"] = url; subs["u"] = urlOrLocal; subs["x"] = xdocidbuf; // Let %(xx) access all metadata. HTML-neuter everything: for (const auto& entry : doc.meta) { if (!entry.first.empty()) subs[entry.first] = maybeEscapeHtml(entry.second); } string formatted; pcSubst(parFormat(), formatted, subs); chunk << formatted; chunk << "

" << endl; // This was to force qt 4.x to clear the margins (which it should do // anyway because of the paragraph's style), but we finally took // the table approach for 1.15 for now (in guiutils.cpp) // chunk << "
" << endl; LOGDEB2("Chunk: [" << chunk.rdbuf()->str() << "]\n"); append(chunk.rdbuf()->str(), i, doc); } bool ResListPager::getDoc(int num, Rcl::Doc& doc) { if (m_winfirst < 0 || m_respage.size() == 0) return false; if (num < m_winfirst || num >= m_winfirst + int(m_respage.size())) return false; doc = m_respage[num-m_winfirst].doc; return true; } void ResListPager::displayPage(RclConfig *config) { LOGDEB("ResListPager::displayPage. linkPrefix: " << linkPrefix() << "\n"); if (!m_docSource) { LOGDEB("ResListPager::displayPage: null source\n"); return; } if (m_winfirst < 0 && !pageEmpty()) { LOGDEB("ResListPager::displayPage: sequence error: winfirst < 0\n"); return; } ostringstream chunk; // Display list header // We could use a but the textedit doesnt display // it prominently // Note: have to append text in chunks that make sense // html-wise. If we break things up too much, the editor // gets confused. Hence the use of the 'chunk' text // accumulator // Also note that there can be results beyond the estimated resCnt. chunk << "<html><head>" << endl << "<meta http-equiv=\"content-type\"" << " content=\"text/html; charset=utf-8\">" << endl << headerContent() << "</head><body>" << endl << pageTop() << "<p><span style=\"font-size:110%;\"><b>" << m_docSource->title() << "</b></span>   "; if (pageEmpty()) { chunk << trans("<p><b>No results found</b><br>"); string reason = m_docSource->getReason(); if (!reason.empty()) { chunk << "<blockquote>" << escapeHtml(reason) << "</blockquote></p>"; } else { HighlightData hldata; m_docSource->getTerms(hldata); vector<string> uterms(hldata.uterms.begin(), hldata.uterms.end()); if (!uterms.empty()) { map<string, vector<string> > spellings; suggest(uterms, spellings); if (!spellings.empty()) { if (o_index_stripchars) { chunk << trans("<p><i>Alternate spellings (accents suppressed): </i>") << "<br /><blockquote>"; } else { chunk << trans("<p><i>Alternate spellings: </i>") << "<br /><blockquote>"; } for (const auto& entry: spellings) { chunk << "<b>" << entry.first << "</b> : "; for (const auto& spelling : entry.second) { chunk << spelling << " "; } chunk << "<br />"; } chunk << "</blockquote></p>"; } } } } else { unsigned int resCnt = m_docSource->getResCnt(); if (m_winfirst + m_respage.size() < resCnt) { chunk << trans("Documents") << " <b>" << m_winfirst + 1 << "-" << m_winfirst + m_respage.size() << "</b> " << trans("out of at least") << " " << resCnt << " " << trans("for") << " " ; } else { chunk << trans("Documents") << " <b>" << m_winfirst + 1 << "-" << m_winfirst + m_respage.size() << "</b> " << trans("for") << " "; } } chunk << detailsLink(); if (hasPrev() || hasNext()) { chunk << "  "; if (hasPrev()) { chunk << "<a href=\"" << linkPrefix() + prevUrl() + "\"><b>" << trans("Previous") << "</b></a>   "; } if (hasNext()) { chunk << "<a href=\"" << linkPrefix() + nextUrl() + "\"><b>" << trans("Next") << "</b></a>"; } } chunk << "</p>" << endl; append(chunk.rdbuf()->str()); chunk.rdbuf()->str(""); if (pageEmpty()) return; HighlightData hdata; m_docSource->getTerms(hdata); // Emit data for result entry paragraph. 
Do it in chunks that make sense // html-wise, else our client may get confused for (int i = 0; i < (int)m_respage.size(); i++) { Rcl::Doc& doc(m_respage[i].doc); string& sh(m_respage[i].subHeader); displayDoc(config, i, doc, hdata, sh); } // Footer chunk << "<p align=\"center\">"; if (hasPrev() || hasNext()) { if (hasPrev()) { chunk << "<a href=\"" + linkPrefix() + prevUrl() + "\"><b>" << trans("Previous") << "</b></a>   "; } if (hasNext()) { chunk << "<a href=\"" << linkPrefix() + nextUrl() + "\"><b>" << trans("Next") << "</b></a>"; } } chunk << "</p>" << endl; chunk << "</body></html>" << endl; append(chunk.rdbuf()->str()); } // Default implementations for things that should be implemented by // specializations string ResListPager::nextUrl() { return "n-1"; } string ResListPager::prevUrl() { return "p-1"; } string ResListPager::iconUrl(RclConfig *config, Rcl::Doc& doc) { string apptag; doc.getmeta(Rcl::Doc::keyapptg, &apptag); return path_pathtofileurl(config->getMimeIconPath(doc.mimetype, apptag)); } bool ResListPager::append(const string& data) { fprintf(stderr, "%s", data.c_str()); return true; } string ResListPager::trans(const string& in) { return in; } string ResListPager::detailsLink() { string chunk = string("<a href=\"") + linkPrefix() + "H-1\">"; chunk += trans("(show query)") + "</a>"; return chunk; } const string &ResListPager::parFormat() { static const string cstr_format("<img src=\"%I\" align=\"left\">" "%R %S %L   <b>%T</b><br>" "%M %D   <i>%U</i><br>" "%A %K"); return cstr_format; } const string &ResListPager::dateFormat() { static const string cstr_format(" %Y-%m-%d %H:%M:%S %z"); return cstr_format; } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/sortseq.h�����������������������������������������������������������������������0000644�0001750�0001750�00000003222�13533651561�013031� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _SORTSEQ_H_INCLUDED_ #define _SORTSEQ_H_INCLUDED_ #include "autoconfig.h" #include <vector> #include <string> #include <memory> #include "docseq.h" /** * A sorted sequence is created from the first N documents of another one, * and sorts them according to the given criteria. */ class DocSeqSorted : public DocSeqModifier { public: DocSeqSorted(std::shared_ptr<DocSequence> iseq, DocSeqSortSpec &sortspec) : DocSeqModifier(iseq) { setSortSpec(sortspec); } virtual ~DocSeqSorted() {} virtual bool canSort() {return true;} virtual bool setSortSpec(const DocSeqSortSpec &sortspec); virtual bool getDoc(int num, Rcl::Doc &doc, string *sh = 0); virtual int getResCnt() {return int(m_docsp.size());} private: DocSeqSortSpec m_spec; std::vector<Rcl::Doc> m_docs; std::vector<Rcl::Doc *> m_docsp; }; #endif /* _SORTSEQ_H_INCLUDED_ */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/plaintorich.cpp�����������������������������������������������������������������0000644�0001750�0001750�00000031652�13566424763�014220� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <limits.h> #include <string> #include <utility> #include <list> #include <set> #include <vector> #include <unordered_map> #include <algorithm> #include <regex> using std::vector; using std::list; using std::pair; using std::set; using std::unordered_map; #include "rcldb.h" #include "rclconfig.h" #include "log.h" #include "textsplit.h" #include "utf8iter.h" #include "smallut.h" #include "chrono.h" #include "plaintorich.h" #include "cancelcheck.h" #include "unacpp.h" // Text splitter used to take note of the position of query terms // inside the result text. This is then used to insert highlight tags. class TextSplitPTR : public TextSplit { public: // Out: begin and end byte positions of query terms/groups in text vector<GroupMatchEntry> m_tboffs; TextSplitPTR(const HighlightData& hdata) : m_wcount(0), m_hdata(hdata) { // We separate single terms and groups and extract the group // terms for computing positions list before looking for group // matches. Single terms are stored with a reference to the // entry they come with. 
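        // Illustrative example (not taken from the source): for a query such
        // as [foo "bar baz"], the TGK_TERM entry for "foo" ends up in m_terms
        // with its group index, while "bar" and "baz" are added to m_gterms;
        // the phrase group itself is only resolved later by matchGroups(),
        // from the word positions recorded in takeword().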
for (unsigned int i = 0; i < hdata.index_term_groups.size(); i++) { const HighlightData::TermGroup& tg(hdata.index_term_groups[i]); if (tg.kind == HighlightData::TermGroup::TGK_TERM) { m_terms[tg.term] = i; } else { for (const auto& group : tg.orgroups) { for (const auto& term : group) { m_gterms.insert(term); } } } } } // Accept word and its position. If word is search term, add // highlight zone definition. If word is part of search group // (phrase or near), update positions list. virtual bool takeword(const std::string& term, int pos, int bts, int bte) { string dumb = term; if (o_index_stripchars) { if (!unacmaybefold(term, dumb, "UTF-8", UNACOP_UNACFOLD)) { LOGINFO("PlainToRich::takeword: unac failed for [" << term << "]\n"); return true; } } LOGDEB2("Input dumbbed term: '" << dumb << "' " << pos << " " << bts << " " << bte << "\n"); // If this word is a search term, remember its byte-offset span. map<string, size_t>::const_iterator it = m_terms.find(dumb); if (it != m_terms.end()) { m_tboffs.push_back(GroupMatchEntry(bts, bte, it->second)); } // If word is part of a search group, update its positions list if (m_gterms.find(dumb) != m_gterms.end()) { // Term group (phrase/near) handling m_plists[dumb].push_back(pos); m_gpostobytes[pos] = pair<int,int>(bts, bte); LOGDEB2("Recorded bpos for " << pos << ": " << bts << " " << bte << "\n"); } // Check for cancellation request if ((m_wcount++ & 0xfff) == 0) CancelCheck::instance().checkCancel(); return true; } // Must be called after the split to find the phrase/near match positions virtual bool matchGroups(); private: // Word count. Used to call checkCancel from time to time. int m_wcount; // In: user query terms map<string, size_t> m_terms; // m_gterms holds all the terms in m_groups, as a set for quick lookup set<string> m_gterms; const HighlightData& m_hdata; // group/near terms word positions. unordered_map<string, vector<int> > m_plists; unordered_map<int, pair<int, int> > m_gpostobytes; }; // Look for matches to PHRASE and NEAR term groups and finalize the // matched regions list (sort it by increasing start then decreasing // length) bool TextSplitPTR::matchGroups() { for (unsigned int i = 0; i < m_hdata.index_term_groups.size(); i++) { if (m_hdata.index_term_groups[i].kind != HighlightData::TermGroup::TGK_TERM) { matchGroup(m_hdata, i, m_plists, m_gpostobytes, m_tboffs); } } // Sort regions by increasing start and decreasing width. // The output process will skip overlapping entries. std::sort(m_tboffs.begin(), m_tboffs.end(), [](const GroupMatchEntry& a, const GroupMatchEntry& b) -> bool { if (a.offs.first != b.offs.first) return a.offs.first < b.offs.first; return a.offs.second > b.offs.second; } ); return true; } // Replace HTTP(s) urls in text/plain with proper HTML anchors so that // they become clickable in the preview. We don't make a lot of effort // for validating, or catching things which are probably urls but miss // a scheme (e.g. www.xxx.com/index.html), because complicated. static const string urlRE = "(https?://[[:alnum:]~_/.%?&=,#@]+)[[:space:]|]"; static const string urlRep{"<a href=\"$1\">$1</a>"}; static std::regex url_re(urlRE); static string activate_urls(const string& in) { return std::regex_replace(in, url_re, urlRep); } // Fix result text for display inside the gui text window. // // We call overridden functions to output header data, beginnings and ends of // matches etc. 
// // If the input is text, we output the result in chunks, arranging not // to cut in the middle of a tag, which would confuse qtextedit. If // the input is html, the body is always a single output chunk. bool PlainToRich::plaintorich(const string& in, list<string>& out, // Output chunk list const HighlightData& hdata, int chunksize) { Chrono chron; bool ret = true; LOGDEB1("plaintorichich: in: [" << in << "]\n"); m_hdata = &hdata; // Compute the positions for the query terms. We use the text // splitter to break the text into words, and compare the words to // the search terms, TextSplitPTR splitter(hdata); // Note: the splitter returns the term locations in byte, not // character, offsets. splitter.text_to_words(in); LOGDEB2("plaintorich: split done " << chron.millis() << " mS\n"); // Compute the positions for NEAR and PHRASE groups. splitter.matchGroups(); LOGDEB2("plaintorich: group match done " << chron.millis() << " mS\n"); out.clear(); out.push_back(""); list<string>::iterator olit = out.begin(); // Rich text output *olit = header(); // No term matches. Happens, for example on a snippet selected for // a term match when we are actually looking for a group match // (the snippet generator does this...). if (splitter.m_tboffs.empty()) { LOGDEB1("plaintorich: no term matches\n"); ret = false; } // Iterator for the list of input term positions. We use it to // output highlight tags and to compute term positions in the // output text vector<GroupMatchEntry>::iterator tPosIt = splitter.m_tboffs.begin(); vector<GroupMatchEntry>::iterator tPosEnd = splitter.m_tboffs.end(); #if 0 for (vector<pair<int, int> >::const_iterator it = splitter.m_tboffs.begin(); it != splitter.m_tboffs.end(); it++) { LOGDEB2("plaintorich: region: " << it->first << " "<<it->second<< "\n"); } #endif // Input character iterator Utf8Iter chariter(in); // State variables used to limit the number of consecutive empty lines, // convert all eol to '\n', and preserve some indentation int eol = 0; int hadcr = 0; int inindent = 1; // HTML state bool intag = false, inparamvalue = false; // My tag state int inrcltag = 0; string::size_type headend = 0; if (m_inputhtml) { headend = in.find("</head>"); if (headend == string::npos) headend = in.find("</HEAD>"); if (headend != string::npos) headend += 7; } for (string::size_type pos = 0; pos != string::npos; pos = chariter++) { // Check from time to time if we need to stop if ((pos & 0xfff) == 0) { CancelCheck::instance().checkCancel(); } // If we still have terms positions, check (byte) position. If // we are at or after a term match, mark. if (tPosIt != tPosEnd) { int ibyteidx = int(chariter.getBpos()); if (ibyteidx == tPosIt->offs.first) { if (!intag && ibyteidx >= (int)headend) { *olit += startMatch((unsigned int)(tPosIt->grpidx)); } inrcltag = 1; } else if (ibyteidx == tPosIt->offs.second) { // Output end of match region tags if (!intag && ibyteidx > (int)headend) { *olit += endMatch(); } // Skip all highlight areas that would overlap this one int crend = tPosIt->offs.second; while (tPosIt != splitter.m_tboffs.end() && tPosIt->offs.first < crend) tPosIt++; inrcltag = 0; } } unsigned int car = *chariter; if (car == '\n') { if (!hadcr) eol++; hadcr = 0; continue; } else if (car == '\r') { hadcr++; eol++; continue; } else if (eol) { // Got non eol char in line break state. Do line break; inindent = 1; hadcr = 0; if (eol > 2) eol = 2; while (eol) { if (!m_inputhtml && m_eolbr) *olit += "<br>"; *olit += "\n"; eol--; } // Maybe end this chunk, begin next. 
Don't do it on html // there is just no way to do it right (qtextedit cant grok // chunks cut in the middle of <a></a> for example). if (!m_inputhtml && !inrcltag && olit->size() > (unsigned int)chunksize) { if (m_activatelinks) { *olit = activate_urls(*olit); } out.push_back(string(startChunk())); olit++; } } switch (car) { case '<': inindent = 0; if (m_inputhtml) { if (!inparamvalue) intag = true; chariter.appendchartostring(*olit); } else { *olit += "<"; } break; case '>': inindent = 0; if (m_inputhtml) { if (!inparamvalue) intag = false; } chariter.appendchartostring(*olit); break; case '&': inindent = 0; if (m_inputhtml) { chariter.appendchartostring(*olit); } else { *olit += "&"; } break; case '"': inindent = 0; if (m_inputhtml && intag) { inparamvalue = !inparamvalue; } chariter.appendchartostring(*olit); break; case ' ': if (m_eolbr && inindent) { *olit += " "; } else { chariter.appendchartostring(*olit); } break; case '\t': if (m_eolbr && inindent) { *olit += "    "; } else { chariter.appendchartostring(*olit); } break; default: inindent = 0; chariter.appendchartostring(*olit); } } // End chariter loop #if 0 { FILE *fp = fopen("/tmp/debugplaintorich", "a"); fprintf(fp, "BEGINOFPLAINTORICHOUTPUT\n"); for (list<string>::iterator it = out.begin(); it != out.end(); it++) { fprintf(fp, "BEGINOFPLAINTORICHCHUNK\n"); fprintf(fp, "%s", it->c_str()); fprintf(fp, "ENDOFPLAINTORICHCHUNK\n"); } fprintf(fp, "ENDOFPLAINTORICHOUTPUT\n"); fclose(fp); } #endif LOGDEB2("plaintorich: done " << chron.millis() << " mS\n"); if (!m_inputhtml && m_activatelinks) { out.back() = activate_urls(out.back()); } return ret; } ��������������������������������������������������������������������������������������recoll-1.26.3/query/sortseq.cpp���������������������������������������������������������������������0000644�0001750�0001750�00000004362�13533651561�013372� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <algorithm> #include "log.h" #include "sortseq.h" using std::string; class CompareDocs { DocSeqSortSpec ss; public: CompareDocs(const DocSeqSortSpec &sortspec) : ss(sortspec) {} // It's not too clear in the std::sort doc what this should do. This // behaves as operator< int operator()(const Rcl::Doc *x, const Rcl::Doc *y) { LOGDEB1("Comparing .. \n" ); const auto xit = x->meta.find(ss.field); const auto yit = y->meta.find(ss.field); if (xit == x->meta.end() || yit == y->meta.end()) return 0; return ss.desc ? 
yit->second < xit->second : xit->second < yit->second; } }; bool DocSeqSorted::setSortSpec(const DocSeqSortSpec &sortspec) { LOGDEB("DocSeqSorted::setSortSpec\n" ); m_spec = sortspec; int count = m_seq->getResCnt(); LOGDEB("DocSeqSorted:: count " << (count) << "\n" ); m_docs.resize(count); int i; for (i = 0; i < count; i++) { if (!m_seq->getDoc(i, m_docs[i])) { LOGERR("DocSeqSorted: getDoc failed for doc " << (i) << "\n" ); count = i; break; } } m_docs.resize(count); m_docsp.resize(count); for (i = 0; i < count; i++) m_docsp[i] = &m_docs[i]; CompareDocs cmp(sortspec); sort(m_docsp.begin(), m_docsp.end(), cmp); return true; } bool DocSeqSorted::getDoc(int num, Rcl::Doc &doc, string *) { LOGDEB("DocSeqSorted::getDoc(" << (num) << ")\n" ); if (num < 0 || num >= int(m_docsp.size())) return false; doc = *m_docsp[num]; return true; } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/wasaparserdriver.h��������������������������������������������������������������0000644�0001750�0001750�00000005311�13533651561�014716� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _WASAPARSERDRIVER_H_INCLUDED_ #define _WASAPARSERDRIVER_H_INCLUDED_ #include <string> #include <stack> #include <vector> #include "smallut.h" class WasaParserDriver; namespace Rcl { class SearchData; class SearchDataClauseSimple; } namespace yy { class parser; } class RclConfig; class WasaParserDriver { public: WasaParserDriver(const RclConfig *c, const std::string sl, const std::string& as); ~WasaParserDriver(); Rcl::SearchData *parse(const std::string&); bool addClause(Rcl::SearchData *sd, Rcl::SearchDataClauseSimple* cl); int GETCHAR(); void UNGETCHAR(int c); std::string& qualifiers() { return m_qualifiers; } void setreason(const std::string& reason) { m_reason = reason; } const std::string& getreason() const { return m_reason; } private: friend class yy::parser; std::string m_stemlang; std::string m_autosuffs; const RclConfig *m_config; // input string. std::string m_input; // Current position in m_input unsigned int m_index; // Characters pushed-back, ready for next getchar. std::stack<int> m_returns; // Result, set by parser. 
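    // (deleted and recreated on each call to parse())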
Rcl::SearchData *m_result; // Storage for top level filters std::vector<std::string> m_filetypes; std::vector<std::string> m_nfiletypes; bool m_haveDates; DateInterval m_dates; // Restrict to date interval size_t m_maxSize; size_t m_minSize; std::string m_reason; // Let the quoted string reader store qualifiers in there, simpler // than handling this in the parser, because their nature is // determined by the absence of white space after the closing // dquote. e.g "some term"abc. We could avoid this by making white // space a token. std::string m_qualifiers; }; #endif /* _WASAPARSERDRIVER_H_INCLUDED_ */ �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseq.cpp����������������������������������������������������������������������0000644�0001750�0001750�00000006666�13533651561�013161� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include "docseq.h" #include "filtseq.h" #include "sortseq.h" #include "log.h" #include "internfile.h" std::mutex DocSequence::o_dblock; string DocSequence::o_sort_trans; string DocSequence::o_filt_trans; int DocSequence::getSeqSlice(int offs, int cnt, vector<ResListEntry>& result) { int ret = 0; for (int num = offs; num < offs + cnt; num++, ret++) { result.push_back(ResListEntry()); if (!getDoc(num, result.back().doc, &result.back().subHeader)) { result.pop_back(); return ret; } } return ret; } bool DocSequence::getEnclosing(Rcl::Doc& doc, Rcl::Doc& pdoc) { std::shared_ptr<Rcl::Db> db = getDb(); if (!db) { LOGERR("DocSequence::getEnclosing: no db\n" ); return false; } std::unique_lock<std::mutex> locker(o_dblock); string udi; if (!FileInterner::getEnclosingUDI(doc, udi)) return false; bool dbret = db->getDoc(udi, doc, pdoc); return dbret && pdoc.pc != -1; } // Remove stacked modifying sources (sort, filter) until we get to a real one void DocSource::stripStack() { if (!m_seq) return; while (m_seq->getSourceSeq()) { m_seq = m_seq->getSourceSeq(); } } bool DocSource::buildStack() { LOGDEB2("DocSource::buildStack()\n" ); stripStack(); if (!m_seq) return false; // Filtering must be done before sorting, (which may // truncates the original list) if (m_seq->canFilter()) { if (!m_seq->setFiltSpec(m_fspec)) { LOGERR("DocSource::buildStack: setfiltspec failed\n" ); } } else { if (m_fspec.isNotNull()) { m_seq = std::shared_ptr<DocSequence>(new DocSeqFiltered(m_config, m_seq, m_fspec)); } } if (m_seq->canSort()) { if (!m_seq->setSortSpec(m_sspec)) { LOGERR("DocSource::buildStack: setsortspec failed\n" ); } } else { if (m_sspec.isNotNull()) { m_seq = std::shared_ptr<DocSequence>(new DocSeqSorted(m_seq, m_sspec)); } } return true; } string DocSource::title() { if (!m_seq) return string(); string qual; if (m_fspec.isNotNull() && !m_sspec.isNotNull()) qual = string(" (") + o_filt_trans + string(")"); else if (!m_fspec.isNotNull() && m_sspec.isNotNull()) qual = string(" (") + o_sort_trans + string(")"); else if (m_fspec.isNotNull() && m_sspec.isNotNull()) qual = string(" (") + o_sort_trans + string(",") + o_filt_trans + string(")"); return m_seq->title() + qual; } bool DocSource::setFiltSpec(const DocSeqFiltSpec &f) { LOGDEB2("DocSource::setFiltSpec\n" ); m_fspec = f; buildStack(); return true; } bool DocSource::setSortSpec(const DocSeqSortSpec &s) { LOGDEB2("DocSource::setSortSpec\n" ); m_sspec = s; buildStack(); return true; } ��������������������������������������������������������������������������recoll-1.26.3/query/dynconf.h�����������������������������������������������������������������������0000644�0001750�0001750�00000013105�13533651561�012772� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2017 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _DYNCONF_H_INCLUDED_ #define _DYNCONF_H_INCLUDED_ /** * Dynamic configuration storage * * This used to be called "history" because of the initial usage. * Used to store some parameters which would fit neither in recoll.conf, * basically because they change a lot, nor in the QT preferences file, mostly * because they are specific to a configuration directory. * Examples: * - History of documents selected for preview * - Active and inactive external databases (depend on the * configuration directory) * - ... * * The storage is performed in a ConfSimple file, with subkeys and * encodings which depend on the data stored. Under each section, the keys * are sequential numeric, so this basically manages a set of lists. * * The code ensures that a a given value (as defined by the * DynConfEntry::equal() method) is only stored once. If this is * undesirable, equal() should always return false. */ #include <string> #include <list> #include <vector> #include "conftree.h" #include "base64.h" /** Interface for a stored object. */ class DynConfEntry { public: virtual ~DynConfEntry() {} /** Decode object-as-string coming out from storage */ virtual bool decode(const std::string &value) = 0; /** Encode object state into state for storing */ virtual bool encode(std::string& value) = 0; /** Compare objects */ virtual bool equal(const DynConfEntry &other) = 0; }; /** Stored object specialization for generic string storage */ class RclSListEntry : public DynConfEntry { public: RclSListEntry() {} virtual ~RclSListEntry() {} RclSListEntry(const std::string& v) : value(v) { } virtual bool decode(const std::string &enc) { base64_decode(enc, value); return true; } virtual bool encode(std::string& enc) { base64_encode(value, enc); return true; } virtual bool equal(const DynConfEntry& other) { const RclSListEntry& e = dynamic_cast<const RclSListEntry&>(other); return e.value == value; } std::string value; }; /** The dynamic configuration class */ class RclDynConf { public: RclDynConf(const std::string &fn); bool ro() { return m_data.getStatus() == ConfSimple::STATUS_RO; } bool rw() { return m_data.getStatus() == ConfSimple::STATUS_RW; } bool ok() { return m_data.getStatus() != ConfSimple::STATUS_ERROR; } std::string getFilename() { return m_data.getFilename(); } // Generic methods bool eraseAll(const std::string& sk); /** Insert new entry for section sk * @param sk section this is for * @param n new entry * @param s a scratch entry used for decoding and comparisons, * avoiding templating the routine for the actual entry type. */ bool insertNew(const std::string& sk, DynConfEntry &n, DynConfEntry &s, int maxlen = -1); // General method to extract entries. Maybe there would be a way to // express the fact that Type should derive from DynConfEntry, not // too sure how. We are just certain (further down) that it does // have a decode() method. It's up to the user that they call // insertNew() and getEntries() for the same type... 
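    // Hypothetical usage sketch (section name and file path are made up
    // for illustration only):
    //   RclDynConf cf("/tmp/dynconf.txt");
    //   RclSListEntry ne("some value"), scratch;
    //   cf.insertNew("mysection", ne, scratch);
    //   auto lst = cf.getEntries<std::vector, RclSListEntry>("mysection");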
template <template <class, class> class Container, class Type> Container<Type, std::allocator<Type>> getEntries(const std::string& sk); // Specialized methods for simple strings bool enterString(const std::string sk, const std::string value, int maxlen = -1); template <template <class, class> class Container> Container<std::string, std::allocator<std::string>> getStringEntries(const std::string& sk); private: unsigned int m_mlen; ConfSimple m_data; }; template <template <class, class> class Container, class Type> Container<Type, std::allocator<Type>> RclDynConf::getEntries(const std::string& sk) { Container<Type, std::allocator<Type>> out; Type entry; std::vector<std::string> names = m_data.getNames(sk); for (const auto& name : names) { std::string value; if (m_data.get(name, value, sk)) { if (!entry.decode(value)) continue; out.push_back(entry); } } return out; } template <template <class, class> class Container> Container<std::string, std::allocator<std::string>> RclDynConf::getStringEntries(const std::string& sk) { std::vector<RclSListEntry> el = getEntries<std::vector, RclSListEntry>(sk); Container<std::string, std::allocator<std::string>> sl; for (const auto& entry : el) { sl.push_back(entry.value); } return sl; } // Defined subkeys. Values in dynconf.cpp // History extern const std::string docHistSubKey; // All external indexes extern const std::string allEdbsSk; // Active external indexes extern const std::string actEdbsSk; // Advanced search history extern const std::string advSearchHistSk; #endif /* _DYNCONF_H_INCLUDED_ */ �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseqhist.cpp������������������������������������������������������������������0000644�0001750�0001750�00000011452�13533651561�014036� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "docseqhist.h" #include <stdio.h> #include <math.h> #include <time.h> #include <cmath> using std::vector; #include "rcldb.h" #include "fileudi.h" #include "base64.h" #include "log.h" #include "smallut.h" // Encode document history entry: // U + Unix time + base64 of udi // The U distinguishes udi-based entries from older fn+ipath ones bool RclDHistoryEntry::encode(string& value) { string budi, bdir; base64_encode(udi, budi); base64_encode(dbdir, bdir); value = string("V ") + lltodecstr(unixtime) + " " + budi + " " + bdir; return true; } // Decode. We support historical entries which were like "time b64fn [b64ipath]" // Previous entry format is "U time b64udi" // Current entry format "V time b64udi [b64dir]" bool RclDHistoryEntry::decode(const string &value) { vector<string> vall; stringToStrings(value, vall); vector<string>::const_iterator it = vall.begin(); udi.clear(); dbdir.clear(); string fn, ipath; switch (vall.size()) { case 2: // Old fn+ipath, null ipath case unixtime = atoll((*it++).c_str()); base64_decode(*it++, fn); break; case 3: if (!it->compare("U") || !it->compare("V")) { // New udi-based entry, no dir it++; unixtime = atoll((*it++).c_str()); base64_decode(*it++, udi); } else { // Old fn + ipath. We happen to know how to build an udi unixtime = atoll((*it++).c_str()); base64_decode(*it++, fn); base64_decode(*it, ipath); } break; case 4: // New udi-based entry, with directory it++; unixtime = atoll((*it++).c_str()); base64_decode(*it++, udi); base64_decode(*it++, dbdir); break; default: return false; } if (!fn.empty()) { // Old style entry found, make an udi, using the fs udi maker make_udi(fn, ipath, udi); } LOGDEB1("RclDHistoryEntry::decode: udi [" << udi << "] dbdir [" << dbdir << "]\n"); return true; } bool RclDHistoryEntry::equal(const DynConfEntry& other) { const RclDHistoryEntry& e = dynamic_cast<const RclDHistoryEntry&>(other); return e.udi == udi && e.dbdir == dbdir; } bool historyEnterDoc(Rcl::Db *db, RclDynConf *dncf, const Rcl::Doc& doc) { string udi; if (db && doc.getmeta(Rcl::Doc::keyudi, &udi)) { std::string dbdir = db->whatIndexForResultDoc(doc); LOGDEB("historyEnterDoc: [" << udi << ", " << dbdir << "] into " << dncf->getFilename() << "\n"); RclDHistoryEntry ne(time(0), udi, dbdir); RclDHistoryEntry scratch; return dncf->insertNew(docHistSubKey, ne, scratch, 200); } else { LOGDEB("historyEnterDoc: doc has no udi\n"); } return false; } vector<RclDHistoryEntry> getDocHistory(RclDynConf* dncf) { return dncf->getEntries<std::vector, RclDHistoryEntry>(docHistSubKey); } bool DocSequenceHistory::getDoc(int num, Rcl::Doc &doc, string *sh) { // Retrieve history list if (!m_hist) return false; if (m_history.empty()) m_history = getDocHistory(m_hist); if (num < 0 || num >= (int)m_history.size()) return false; // We get the history oldest first, but our users expect newest first RclDHistoryEntry& hentry = m_history[m_history.size() - 1 - num]; if (sh) { if (m_prevtime < 0 || abs(m_prevtime - hentry.unixtime) > 86400) { m_prevtime = hentry.unixtime; time_t t = (time_t)(hentry.unixtime); *sh = string(ctime(&t)); // Get rid of the final \n in ctime sh->erase(sh->length()-1); } else { sh->erase(); } } bool ret = m_db->getDoc(hentry.udi, hentry.dbdir, doc); if (!ret || doc.pc == -1) { doc.url = "UNKNOWN"; doc.ipath = ""; } // Ensure the snippets link won't be shown as it does not make // sense (no query terms...) 
doc.haspages = 0; return ret; } int DocSequenceHistory::getResCnt() { if (m_history.empty()) m_history = getDocHistory(m_hist); return int(m_history.size()); } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/wasaparseaux.cpp����������������������������������������������������������������0000644�0001750�0001750�00000017703�13533651561�014401� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <iostream> #include "wasatorcl.h" #include "wasaparserdriver.h" #include "searchdata.h" #include "log.h" #define YYDEBUG 1 // bison-generated file #include "wasaparse.hpp" using namespace std; using namespace Rcl; void yy::parser::error (const location_type& l, const std::string& m) { d->setreason(m); } SearchData *wasaStringToRcl(const RclConfig *config, const std::string& stemlang, const std::string& query, string &reason, const std::string& autosuffs) { WasaParserDriver d(config, stemlang, autosuffs); SearchData *sd = d.parse(query); if (!sd) reason = d.getreason(); return sd; } WasaParserDriver::WasaParserDriver(const RclConfig *c, const std::string sl, const std::string& as) : m_stemlang(sl), m_autosuffs(as), m_config(c), m_index(0), m_result(0), m_haveDates(false), m_maxSize((size_t)-1), m_minSize((size_t)-1) { } WasaParserDriver::~WasaParserDriver() { } SearchData *WasaParserDriver::parse(const std::string& in) { m_input = in; m_index = 0; delete m_result; m_result = 0; m_returns = stack<int>(); yy::parser parser(this); parser.set_debug_level(0); if (parser.parse() != 0) { delete m_result; m_result = 0; } if (m_result == 0) return m_result; // Set the top level filters (types, dates, size) for (vector<string>::const_iterator it = m_filetypes.begin(); it != m_filetypes.end(); it++) { m_result->addFiletype(*it); } for (vector<string>::const_iterator it = m_nfiletypes.begin(); it != m_nfiletypes.end(); it++) { m_result->remFiletype(*it); } if (m_haveDates) { m_result->setDateSpan(&m_dates); } if (m_minSize != (size_t)-1) { m_result->setMinSize(m_minSize); } if (m_maxSize != (size_t)-1) { m_result->setMaxSize(m_maxSize); } //if (m_result) m_result->dump(cout); return m_result; } int WasaParserDriver::GETCHAR() { if (!m_returns.empty()) { int c = m_returns.top(); m_returns.pop(); return c; } if (m_index < m_input.size()) return m_input[m_index++]; return 0; } 
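// GETCHAR()/UNGETCHAR() give the hand-written lexer character-at-a-time
// access to the query string, with arbitrary push-back. Illustrative
// sequence (assuming some driver instance d, not taken from the source):
//   d.UNGETCHAR('x');      // push one character back
//   int c = d.GETCHAR();   // returns 'x' from the push-back stack
//   c = d.GETCHAR();       // returns the next input character, or 0 at end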
void WasaParserDriver::UNGETCHAR(int c) { m_returns.push(c); } // Add clause to query, handling special pseudo-clauses for size/date // etc. (mostly determined on field name). bool WasaParserDriver::addClause(SearchData *sd, SearchDataClauseSimple* cl) { if (cl->getfield().empty()) { // Simple clause with empty field spec. // Possibly change terms found in the "autosuffs" list into "ext" // field queries if (!m_autosuffs.empty()) { vector<string> asfv; if (stringToStrings(m_autosuffs, asfv)) { if (find_if(asfv.begin(), asfv.end(), StringIcmpPred(cl->gettext())) != asfv.end()) { cl->setfield("ext"); cl->addModifier(SearchDataClause::SDCM_NOSTEMMING); } } } return sd->addClause(cl); } const string& ofld = cl->getfield(); string fld = stringtolower(ofld); // MIME types and categories if (!fld.compare("mime") || !fld.compare("format")) { if (cl->getexclude()) { m_nfiletypes.push_back(cl->gettext()); } else { m_filetypes.push_back(cl->gettext()); } delete cl; return false; } if (!fld.compare("rclcat") || !fld.compare("type")) { vector<string> mtypes; if (m_config && m_config->getMimeCatTypes(cl->gettext(), mtypes)) { for (vector<string>::iterator mit = mtypes.begin(); mit != mtypes.end(); mit++) { if (cl->getexclude()) { m_nfiletypes.push_back(*mit); } else { m_filetypes.push_back(*mit); } } } delete cl; return false; } // Handle "date" spec if (!fld.compare("date")) { DateInterval di; if (!parsedateinterval(cl->gettext(), &di)) { LOGERR("Bad date interval format: " << (cl->gettext()) << "\n" ); m_reason = "Bad date interval format"; delete cl; return false; } LOGDEB("addClause:: date span: " << di.y1 << "-" << di.m1 << "-" << di.d1 << "/" << di.y2 << "-" << di.m2 << "-" << di.d2 << "\n"); m_haveDates = true; m_dates = di; delete cl; return false; } // Handle "size" spec if (!fld.compare("size")) { char *cp; size_t size = strtoll(cl->gettext().c_str(), &cp, 10); if (*cp != 0) { switch (*cp) { case 'k': case 'K': size *= 1000;break; case 'm': case 'M': size *= 1000*1000;break; case 'g': case 'G': size *= 1000*1000*1000;break; case 't': case 'T': size *= size_t(1000)*1000*1000*1000;break; default: m_reason = string("Bad multiplier suffix: ") + *cp; delete cl; return false; } } SearchDataClause::Relation rel = cl->getrel(); delete cl; switch (rel) { case SearchDataClause::REL_EQUALS: m_maxSize = m_minSize = size; break; case SearchDataClause::REL_LT: case SearchDataClause::REL_LTE: m_maxSize = size; break; case SearchDataClause::REL_GT: case SearchDataClause::REL_GTE: m_minSize = size; break; default: m_reason = "Bad relation operator with size query. Use > < or ="; return false; } return false; } if (!fld.compare("dir")) { // dir filtering special case SearchDataClausePath *nclause = new SearchDataClausePath(cl->gettext(), cl->getexclude()); delete cl; return sd->addClause(nclause); } if (cl->getTp() == SCLT_OR || cl->getTp() == SCLT_AND) { // If this is a normal clause and the term has commas or // slashes inside, take it as a list, turn the slashes/commas // to spaces, leave unquoted. Otherwise, this would end up as // a phrase query. This is a handy way to enter multiple terms // to be searched inside a field. We interpret ',' as AND, and // '/' as OR. No mixes allowed and ',' wins. 
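        // Illustrative examples (not from the original comment):
        // "author:john,paul,george" becomes an AND of the three terms in the
        // author field, "ext:png/jpg/gif" an OR of the three values, and a
        // mixed spec like "a,b/c" is handled as the ',' (AND) case.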
SClType tp = SCLT_FILENAME;// impossible value string ns = neutchars(cl->gettext(), ","); if (ns.compare(cl->gettext())) { // had ',' tp = SCLT_AND; } else { ns = neutchars(cl->gettext(), "/"); if (ns.compare(cl->gettext())) { // had not ',' but has '/' tp = SCLT_OR; } } if (tp != SCLT_FILENAME) { SearchDataClauseSimple *ncl = new SearchDataClauseSimple(tp, ns, ofld); delete cl; return sd->addClause(ncl); } } return sd->addClause(cl); } �������������������������������������������������������������recoll-1.26.3/query/position.hh���������������������������������������������������������������������0000644�0001750�0001750�00000011341�13347664027�013352� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// A Bison parser, made by GNU Bison 3.0.4. // Positions for Bison parsers in C++ // Copyright (C) 2002-2015 Free Software Foundation, Inc. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // As a special exception, you may create a larger work that contains // part or all of the Bison parser skeleton and distribute that work // under terms of your choice, so long as that work isn't itself a // parser generator using the skeleton or a modified version thereof // as a parser skeleton. Alternatively, if you modify or redistribute // the parser skeleton itself, you may (at your option) remove this // special exception, which will cause the skeleton and the resulting // Bison output files to be licensed under the GNU General Public // License without this special exception. // This special exception was added by the Free Software Foundation in // version 2.2 of Bison. /** ** \file position.hh ** Define the yy::position class. */ #ifndef YY_YY_POSITION_HH_INCLUDED # define YY_YY_POSITION_HH_INCLUDED # include <algorithm> // std::max # include <iostream> # include <string> # ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # endif namespace yy { #line 56 "position.hh" // location.cc:296 /// Abstract a position. class position { public: /// Construct a position. explicit position (std::string* f = YY_NULLPTR, unsigned int l = 1u, unsigned int c = 1u) : filename (f) , line (l) , column (c) { } /// Initialization. void initialize (std::string* fn = YY_NULLPTR, unsigned int l = 1u, unsigned int c = 1u) { filename = fn; line = l; column = c; } /** \name Line and Column related manipulators ** \{ */ /// (line related) Advance to the COUNT next lines. void lines (int count = 1) { if (count) { column = 1u; line = add_ (line, count, 1); } } /// (column related) Advance to the COUNT next columns. 
void columns (int count = 1) { column = add_ (column, count, 1); } /** \} */ /// File name to which this position refers. std::string* filename; /// Current line number. unsigned int line; /// Current column number. unsigned int column; private: /// Compute max(min, lhs+rhs) (provided min <= lhs). static unsigned int add_ (unsigned int lhs, int rhs, unsigned int min) { return (0 < rhs || -static_cast<unsigned int>(rhs) < lhs ? rhs + lhs : min); } }; /// Add \a width columns, in place. inline position& operator+= (position& res, int width) { res.columns (width); return res; } /// Add \a width columns. inline position operator+ (position res, int width) { return res += width; } /// Subtract \a width columns, in place. inline position& operator-= (position& res, int width) { return res += -width; } /// Subtract \a width columns. inline position operator- (position res, int width) { return res -= width; } /// Compare two position objects. inline bool operator== (const position& pos1, const position& pos2) { return (pos1.line == pos2.line && pos1.column == pos2.column && (pos1.filename == pos2.filename || (pos1.filename && pos2.filename && *pos1.filename == *pos2.filename))); } /// Compare two position objects. inline bool operator!= (const position& pos1, const position& pos2) { return !(pos1 == pos2); } /** \brief Intercept output stream redirection. ** \param ostr the destination output stream ** \param pos a reference to the position to redirect */ template <typename YYChar> inline std::basic_ostream<YYChar>& operator<< (std::basic_ostream<YYChar>& ostr, const position& pos) { if (pos.filename) ostr << *pos.filename << ':'; return ostr << pos.line << '.' << pos.column; } } // yy #line 180 "position.hh" // location.cc:296 #endif // !YY_YY_POSITION_HH_INCLUDED �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/plaintorich.h�������������������������������������������������������������������0000644�0001750�0001750�00000006512�13533651561�013652� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PLAINTORICH_H_INCLUDED_ #define _PLAINTORICH_H_INCLUDED_ #include <string> #include <list> #include "hldata.h" #include "cstr.h" /** * A class for highlighting search results. 
Overridable methods allow * for different styles. We can handle plain text or html input. In the latter * case, we may fail to highligt term groups if they are mixed with HTML * tags (ex: firstterm <b>2ndterm</b>). */ class PlainToRich { public: virtual ~PlainToRich() {} void set_inputhtml(bool v) { m_inputhtml = v; } void set_activatelinks(bool v) { m_activatelinks = v; } /** * Transform plain text for highlighting search terms, ie in the * preview window or result list entries. * * The actual tags used for highlighting and anchoring are * determined by deriving from this class which handles the searching for * terms and groups, but there is an assumption that the output will be * html-like: we escape characters like < or & * * Finding the search terms is relatively complicated because of * phrase/near searches, which need group highlights. As a matter * of simplification, we handle "phrase" as "near", not filtering * on word order. * * @param in raw text out of internfile. * @param out rich text output, divided in chunks (to help our caller * avoid inserting half tags into textedit which doesnt like it) * @param in hdata terms and groups to be highlighted. These are * lowercase and unaccented. * @param chunksize max size of chunks in output list */ virtual bool plaintorich(const std::string &in, std::list<std::string> &out, const HighlightData& hdata, int chunksize = 50000 ); /* Overridable output methods for headers, highlighting and marking tags */ virtual std::string header() { return cstr_null; } /** Return match prefix (e.g.: <div class="match">). @param groupidx the index into hdata.groups */ virtual std::string startMatch(unsigned int) { return cstr_null; } /** Return data for end of match area (e.g.: </div>). */ virtual std::string endMatch() { return cstr_null; } virtual std::string startChunk() { return cstr_null; } protected: bool m_inputhtml{false}; // Use <br> to break plain text lines (else caller has used a <pre> tag) bool m_eolbr{false}; const HighlightData *m_hdata{0}; bool m_activatelinks{false}; }; #endif /* _PLAINTORICH_H_INCLUDED_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseqhist.h��������������������������������������������������������������������0000644�0001750�0001750�00000004746�13533651561�013513� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _DOCSEQHIST_H_INCLUDED_ #define _DOCSEQHIST_H_INCLUDED_ #include <time.h> #include <vector> #include <memory> #include "docseq.h" #include "dynconf.h" namespace Rcl { class Db; } /** DynConf Document history entry */ class RclDHistoryEntry : public DynConfEntry { public: RclDHistoryEntry() : unixtime(0) {} RclDHistoryEntry(time_t t, const std::string& u, const std::string& d) : unixtime(t), udi(u), dbdir(d) {} virtual ~RclDHistoryEntry() {} virtual bool decode(const std::string &value); virtual bool encode(std::string& value); virtual bool equal(const DynConfEntry& other); time_t unixtime; std::string udi; std::string dbdir; }; /** A DocSequence coming from the history file. * History is kept as a list of urls. This queries the db to fetch * metadata for an url key */ class DocSequenceHistory : public DocSequence { public: DocSequenceHistory(std::shared_ptr<Rcl::Db> db, RclDynConf *h, const std::string &t) : DocSequence(t), m_db(db), m_hist(h) {} virtual ~DocSequenceHistory() {} virtual bool getDoc(int num, Rcl::Doc &doc, std::string *sh = 0); virtual int getResCnt(); virtual std::string getDescription() {return m_description;} void setDescription(const std::string& desc) {m_description = desc;} protected: virtual std::shared_ptr<Rcl::Db> getDb() { return m_db; } private: std::shared_ptr<Rcl::Db> m_db; RclDynConf *m_hist; time_t m_prevtime{-1}; std::string m_description; // This is just an nls translated 'doc history' std::vector<RclDHistoryEntry> m_history; }; extern bool historyEnterDoc(Rcl::Db *db, RclDynConf *dncf, const Rcl::Doc& doc); #endif /* _DOCSEQ_H_INCLUDED_ */ ��������������������������recoll-1.26.3/query/wasaparse.ypp�������������������������������������������������������������������0000644�0001750�0001750�00000030053�13347664027�013706� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������%{ #define YYDEBUG 1 #include "autoconfig.h" #include <stdio.h> #include <iostream> #include <string> #include "searchdata.h" #include "wasaparserdriver.h" #include "wasaparse.hpp" using namespace std; //#define LOG_PARSER #ifdef LOG_PARSER #define LOGP(X) {cerr << X;} #else #define LOGP(X) #endif int yylex(yy::parser::semantic_type *, yy::parser::location_type *, WasaParserDriver *); void yyerror(char const *); static void qualify(Rcl::SearchDataClauseDist *, const string &); static void addSubQuery(WasaParserDriver *d, Rcl::SearchData *sd, Rcl::SearchData *sq) { if (sd && sq) sd->addClause( new Rcl::SearchDataClauseSub(std::shared_ptr<Rcl::SearchData>(sq))); } %} %skeleton "lalr1.cc" %defines %locations %error-verbose %parse-param {WasaParserDriver* d} %lex-param {WasaParserDriver* d} %union { std::string *str; Rcl::SearchDataClauseRange *rg; Rcl::SearchDataClauseSimple *cl; Rcl::SearchData *sd; } %destructor {delete $$;} <str> %type <cl> qualquote %type <cl> fieldexpr %type <rg> range %type <cl> term %type <sd> query %type <str> complexfieldname /* Non operator tokens need precedence because of the possibility of concatenation which needs to have lower prec than OR */ %left <str> WORD %left <str> QUOTED %left <str> QUALIFIERS %left AND UCONCAT '(' '-' %left OR %token EQUALS CONTAINS SMALLEREQ SMALLER GREATEREQ GREATER RANGE %% topquery: query { // It's possible that we end up 
with no query (e.g.: because just a // date filter was set, no terms). Allocate an empty query so that we // have something to set the global criteria on (this will yield a // Xapian search like <alldocuments> FILTER xxx if ($1 == 0) d->m_result = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); else d->m_result = $1; } query: query query %prec UCONCAT { LOGP("q: query query\n"); Rcl::SearchData *sd = 0; if ($1 || $2) { sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); addSubQuery(d, sd, $1); addSubQuery(d, sd, $2); } $$ = sd; } | query AND query { LOGP("q: query AND query\n"); Rcl::SearchData *sd = 0; if ($1 || $3) { sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); addSubQuery(d, sd, $1); addSubQuery(d, sd, $3); } $$ = sd; } | query OR query { LOGP("query: query OR query\n"); Rcl::SearchData *top = 0; if ($1 || $3) { top = new Rcl::SearchData(Rcl::SCLT_OR, d->m_stemlang); addSubQuery(d, top, $1); addSubQuery(d, top, $3); } $$ = top; } | '(' query ')' { LOGP("q: ( query )\n"); $$ = $2; } | fieldexpr %prec UCONCAT { LOGP("q: fieldexpr\n"); Rcl::SearchData *sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); if (d->addClause(sd, $1)) { $$ = sd; } else { delete sd; $$ = 0; } } ; fieldexpr: term { LOGP("fe: simple fieldexpr: " << $1->gettext() << endl); $$ = $1; } | complexfieldname EQUALS term { LOGP("fe: " << *$1 << " = " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_EQUALS); $$ = $3; delete $1; } | complexfieldname CONTAINS term { LOGP("fe: " << *$1 << " : " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_CONTAINS); $$ = $3; delete $1; } | complexfieldname CONTAINS range { LOGP("fe: " << *$1 << " : " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_CONTAINS); $$ = $3; delete $1; } | complexfieldname SMALLER term { LOGP("fe: " << *$1 << " < " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_LT); $$ = $3; delete $1; } | complexfieldname SMALLEREQ term { LOGP("fe: " << *$1 << " <= " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_LTE); $$ = $3; delete $1; } | complexfieldname GREATER term { LOGP("fe: " << *$1 << " > " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_GT); $$ = $3; delete $1; } | complexfieldname GREATEREQ term { LOGP("fe: " << *$1 << " >= " << $3->gettext() << endl); $3->setfield(*$1); $3->setrel(Rcl::SearchDataClause::REL_GTE); $$ = $3; delete $1; } | '-' fieldexpr { LOGP("fe: - fieldexpr[" << $2->gettext() << "]" << endl); $2->setexclude(true); $$ = $2; } ; /* Deal with field names like dc:title */ complexfieldname: WORD { LOGP("cfn: WORD" << endl); $$ = $1; } | complexfieldname CONTAINS WORD { LOGP("cfn: complexfieldname ':' WORD" << endl); $$ = new string(*$1 + string(":") + *$3); delete $1; delete $3; } range: WORD RANGE WORD { LOGP("Range: " << *$1 << string(" .. ") << *$3 << endl); $$ = new Rcl::SearchDataClauseRange(*$1, *$3); delete $1; delete $3; } | RANGE WORD { LOGP("Range: " << "" << string(" .. ") << *$2 << endl); $$ = new Rcl::SearchDataClauseRange("", *$2); delete $2; } | WORD RANGE { LOGP("Range: " << *$1 << string(" .. 
") << "" << endl); $$ = new Rcl::SearchDataClauseRange(*$1, ""); delete $1; } ; term: WORD { LOGP("term[" << *$1 << "]" << endl); $$ = new Rcl::SearchDataClauseSimple(Rcl::SCLT_AND, *$1); delete $1; } | qualquote { $$ = $1; } qualquote: QUOTED { LOGP("QUOTED[" << *$1 << "]" << endl); $$ = new Rcl::SearchDataClauseDist(Rcl::SCLT_PHRASE, *$1, 0); delete $1; } | QUOTED QUALIFIERS { LOGP("QUOTED[" << *$1 << "] QUALIFIERS[" << *$2 << "]" << endl); Rcl::SearchDataClauseDist *cl = new Rcl::SearchDataClauseDist(Rcl::SCLT_PHRASE, *$1, 0); qualify(cl, *$2); $$ = cl; delete $1; delete $2; } %% #include <ctype.h> // Look for int at index, skip and return new index found? value. static unsigned int qualGetInt(const string& q, unsigned int cur, int *pval) { unsigned int ncur = cur; if (cur < q.size() - 1) { char *endptr; int val = strtol(&q[cur + 1], &endptr, 10); if (endptr != &q[cur + 1]) { ncur += endptr - &q[cur + 1]; *pval = val; } } return ncur; } static void qualify(Rcl::SearchDataClauseDist *cl, const string& quals) { // cerr << "qualify(" << cl << ", " << quals << ")" << endl; for (unsigned int i = 0; i < quals.length(); i++) { //fprintf(stderr, "qual char %c\n", quals[i]); switch (quals[i]) { case 'b': cl->setWeight(10.0); break; case 'c': break; case 'C': cl->addModifier(Rcl::SearchDataClause::SDCM_CASESENS); break; case 'd': break; case 'D': cl->addModifier(Rcl::SearchDataClause::SDCM_DIACSENS); break; case 'e': cl->addModifier(Rcl::SearchDataClause::SDCM_CASESENS); cl->addModifier(Rcl::SearchDataClause::SDCM_DIACSENS); cl->addModifier(Rcl::SearchDataClause::SDCM_NOSTEMMING); break; case 'l': cl->addModifier(Rcl::SearchDataClause::SDCM_NOSTEMMING); break; case 'L': break; case 'o': { int slack = 10; i = qualGetInt(quals, i, &slack); cl->setslack(slack); //cerr << "set slack " << cl->getslack() << " done" << endl; } break; case 'p': cl->setTp(Rcl::SCLT_NEAR); if (cl->getslack() == 0) { cl->setslack(10); //cerr << "set slack " << cl->getslack() << " done" << endl; } break; case 's': cl->addModifier(Rcl::SearchDataClause::SDCM_NOSYNS); break; case 'S': break; case '.':case '0':case '1':case '2':case '3':case '4': case '5':case '6':case '7':case '8':case '9': { int n = 0; float factor = 1.0; if (sscanf(&(quals[i]), "%f %n", &factor, &n)) { if (factor != 1.0) { cl->setWeight(factor); } } if (n > 0) i += n - 1; } default: break; } } } // specialstartchars are special only at the beginning of a token // (e.g. doctor-who is a term, not 2 terms separated by '-') static const string specialstartchars("-"); // specialinchars are special everywhere except inside a quoted string static const string specialinchars(":=<>()"); // Called with the first dquote already read static int parseString(WasaParserDriver *d, yy::parser::semantic_type *yylval) { string* value = new string(); d->qualifiers().clear(); int c; while ((c = d->GETCHAR())) { switch (c) { case '\\': /* Escape: get next char */ c = d->GETCHAR(); if (c == 0) { value->push_back(c); goto out; } value->push_back(c); break; case '"': /* End of string. 
Look for qualifiers */ while ((c = d->GETCHAR()) && (isalnum(c) || c == '.')) d->qualifiers().push_back(c); d->UNGETCHAR(c); goto out; default: value->push_back(c); } } out: //cerr << "GOT QUOTED ["<<value<<"] quals [" << d->qualifiers() << "]" << endl; yylval->str = value; return yy::parser::token::QUOTED; } int yylex(yy::parser::semantic_type *yylval, yy::parser::location_type *, WasaParserDriver *d) { if (!d->qualifiers().empty()) { yylval->str = new string(); yylval->str->swap(d->qualifiers()); return yy::parser::token::QUALIFIERS; } int c; /* Skip white space. */ while ((c = d->GETCHAR()) && isspace(c)) continue; if (c == 0) return 0; if (specialstartchars.find_first_of(c) != string::npos) { //cerr << "yylex: return " << c << endl; return c; } // field-term relations, and ranges switch (c) { case '=': return yy::parser::token::EQUALS; case ':': return yy::parser::token::CONTAINS; case '<': { int c1 = d->GETCHAR(); if (c1 == '=') { return yy::parser::token::SMALLEREQ; } else { d->UNGETCHAR(c1); return yy::parser::token::SMALLER; } } case '.': { int c1 = d->GETCHAR(); if (c1 == '.') { return yy::parser::token::RANGE; } else { d->UNGETCHAR(c1); break; } } case '>': { int c1 = d->GETCHAR(); if (c1 == '=') { return yy::parser::token::GREATEREQ; } else { d->UNGETCHAR(c1); return yy::parser::token::GREATER; } } case '(': case ')': return c; } if (c == '"') return parseString(d, yylval); d->UNGETCHAR(c); // Other chars start a term or field name or reserved word string* word = new string(); while ((c = d->GETCHAR())) { if (isspace(c)) { //cerr << "Word broken by whitespace" << endl; break; } else if (specialinchars.find_first_of(c) != string::npos) { //cerr << "Word broken by special char" << endl; d->UNGETCHAR(c); break; } else if (c == '.') { int c1 = d->GETCHAR(); if (c1 == '.') { d->UNGETCHAR(c1); d->UNGETCHAR(c); break; } else { d->UNGETCHAR(c1); word->push_back(c); } } else if (c == 0) { //cerr << "Word broken by EOF" << endl; break; } else { word->push_back(c); } } if (!word->compare("AND") || !word->compare("&&")) { delete word; return yy::parser::token::AND; } else if (!word->compare("OR") || !word->compare("||")) { delete word; return yy::parser::token::OR; } // cerr << "Got word [" << word << "]" << endl; yylval->str = word; return yy::parser::token::WORD; } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/recollq.cpp���������������������������������������������������������������������0000644�0001750�0001750�00000032770�13567757034�013350� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any 
later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ // Takes a query and run it, no gui, results to stdout #include <stdio.h> #include <stdlib.h> #include <errno.h> #include <string.h> #include <limits.h> #include <iostream> #include <list> #include <string> #include "rcldb.h" #include "rclquery.h" #include "rclconfig.h" #include "pathut.h" #include "rclinit.h" #include "log.h" #include "wasatorcl.h" #include "internfile.h" #include "wipedir.h" #include "transcode.h" #include "textsplit.h" #include "smallut.h" #include "chrono.h" #include "base64.h" using namespace std; bool dump_contents(RclConfig *rclconfig, Rcl::Doc& idoc) { FileInterner interner(idoc, rclconfig, FileInterner::FIF_forPreview); Rcl::Doc fdoc; string ipath = idoc.ipath; if (interner.internfile(fdoc, ipath)) { cout << fdoc.text << endl; } else { cout << "Cant turn to text:" << idoc.url << " | " << idoc.ipath << endl; } return true; } void output_fields(vector<string> fields, Rcl::Doc& doc, Rcl::Query& query, Rcl::Db& rcldb, bool printnames) { if (fields.empty()) { map<string,string>::const_iterator it; for (const auto& entry : doc.meta) { fields.push_back(entry.first); } } for (vector<string>::const_iterator it = fields.begin(); it != fields.end(); it++) { string out; if (!it->compare("abstract")) { string abstract; query.makeDocAbstract(doc, abstract); base64_encode(abstract, out); } else if (!it->compare("xdocid")) { char cdocid[30]; sprintf(cdocid, "%lu", (unsigned long)doc.xdocid); base64_encode(cdocid, out); } else { base64_encode(doc.meta[*it], out); } // Before printnames existed, recollq printed a single blank for empty // fields. This is a problem when printing names and using strtok, but // have to keep the old behaviour when printnames is not set. if (!(out.empty() && printnames)) { if (printnames) cout << *it << " "; cout << out << " "; } } cout << endl; } static char *thisprog; static char usage [] = " -P: Show the date span for all the documents present in the index.\n" " [-o|-a|-f] [-q] <query string>\n" " Runs a recoll query and displays result lines. \n" " Default: will interpret the argument(s) as a xesam query string.\n" " Query elements: \n" " * Implicit AND, exclusion, field spec: t1 -t2 title:t3\n" " * OR has priority: t1 OR t2 t3 OR t4 means (t1 OR t2) AND (t3 OR t4)\n" " * Phrase: \"t1 t2\" (needs additional quoting on cmd line)\n" " -o Emulate the GUI simple search in ANY TERM mode.\n" " -a Emulate the GUI simple search in ALL TERMS mode.\n" " -f Emulate the GUI simple search in filename mode.\n" " -q is just ignored (compatibility with the recoll GUI command line).\n" "Common options:\n" " -c <configdir> : specify config directory, overriding $RECOLL_CONFDIR.\n" " -C : collapse duplicates\n" " -d also dump file contents.\n" " -n [first-]<cnt> define the result slice. The default value for [first]\n" " is 0. Without the option, the default max count is 2000.\n" " Use n=0 for no limit.\n" " -b : basic. 
Just output urls, no mime types or titles.\n" " -Q : no result lines, just the processed query and result count.\n" " -m : dump the whole document meta[] array for each result.\n" " -A : output the document abstracts.\n" " -S fld : sort by field <fld>.\n" " -D : sort descending.\n" " -s stemlang : set stemming language to use (must exist in index...).\n" " Use -s \"\" to turn off stem expansion.\n" " -T <synonyms file>: use the parameter (Thesaurus) for word expansion.\n" " -i <dbdir> : additional index, several can be given.\n" " -e use url encoding (%xx) for urls.\n" " -E use exact result count instead of lower bound estimate" " -F <field name list> : output exactly these fields for each result.\n" " The field values are encoded in base64, output in one line and \n" " separated by one space character. This is the recommended format \n" " for use by other programs. Use a normal query with option -m to \n" " see the field names. Use -F '' to output all fields, but you probably\n" " also want option -N in this case.\n" " -N : with -F, print the (plain text) field names before the field values.\n" ; static void Usage(void) { cerr << thisprog << ": usage:" << endl << usage; exit(1); } // BEWARE COMPATIBILITY WITH recoll OPTIONS letters static int op_flags; #define OPT_A 0x1 // GUI: -a same #define OPT_a 0x2 #define OPT_b 0x4 #define OPT_C 0x8 // GUI: -c same #define OPT_c 0x10 #define OPT_D 0x20 #define OPT_d 0x40 #define OPT_e 0x80 #define OPT_F 0x100 // GUI: -f same #define OPT_f 0x200 // GUI uses -h for help. us: usage #define OPT_i 0x400 // GUI uses -L to set language of messages // GUI: -l same #define OPT_l 0x800 #define OPT_m 0x1000 #define OPT_N 0x2000 #define OPT_n 0x4000 // GUI: -o same #define OPT_o 0x8000 #define OPT_P 0x10000 #define OPT_Q 0x20000 // GUI: -q same #define OPT_q 0x40000 #define OPT_S 0x80000 #define OPT_s 0x100000 #define OPT_T 0x200000 // GUI: -t use command line, us: ignored #define OPT_t 0x400000 // GUI uses -v : show version. 
Us: usage // GUI uses -w : open minimized #define OPT_E 0x800000 int recollq(RclConfig **cfp, int argc, char **argv) { string a_config; string sortfield; string stemlang("english"); list<string> extra_dbs; string sf; vector<string> fields; string syngroupsfn; int firstres = 0; int maxcount = 2000; thisprog = argv[0]; argc--; argv++; while (argc > 0 && **argv == '-') { (*argv)++; if (!(**argv)) /* Cas du "adb - core" */ Usage(); while (**argv) switch (*(*argv)++) { case '-': // -- : end of options if (*(*argv) != 0) Usage(); goto endopts; case 'A': op_flags |= OPT_A; break; case 'a': op_flags |= OPT_a; break; case 'b': op_flags |= OPT_b; break; case 'C': op_flags |= OPT_C; break; case 'c': op_flags |= OPT_c; if (argc < 2) Usage(); a_config = *(++argv); argc--; goto b1; case 'd': op_flags |= OPT_d; break; case 'D': op_flags |= OPT_D; break; case 'E': op_flags |= OPT_E; break; case 'e': op_flags |= OPT_e; break; case 'f': op_flags |= OPT_f; break; case 'F': op_flags |= OPT_F; if (argc < 2) Usage(); sf = *(++argv); argc--; goto b1; case 'i': op_flags |= OPT_i; if (argc < 2) Usage(); extra_dbs.push_back(*(++argv)); argc--; goto b1; case 'l': op_flags |= OPT_l; break; case 'm': op_flags |= OPT_m; break; case 'N': op_flags |= OPT_N; break; case 'n': op_flags |= OPT_n; if (argc < 2) Usage(); { string rescnt = *(++argv); string::size_type dash = rescnt.find("-"); if (dash != string::npos) { firstres = atoi(rescnt.substr(0, dash).c_str()); if (dash < rescnt.size()-1) { maxcount = atoi(rescnt.substr(dash+1).c_str()); } } else { maxcount = atoi(rescnt.c_str()); } if (maxcount <= 0) maxcount = INT_MAX; } argc--; goto b1; case 'o': op_flags |= OPT_o; break; case 'P': op_flags |= OPT_P; break; case 'q': op_flags |= OPT_q; break; case 'Q': op_flags |= OPT_Q; break; case 'S': op_flags |= OPT_S; if (argc < 2) Usage(); sortfield = *(++argv); argc--; goto b1; case 's': op_flags |= OPT_s; if (argc < 2) Usage(); stemlang = *(++argv); argc--; goto b1; case 't': op_flags |= OPT_t; break; case 'T': op_flags |= OPT_T; if (argc < 2) Usage(); syngroupsfn = *(++argv); argc--; goto b1; default: Usage(); break; } b1: argc--; argv++; } endopts: string reason; *cfp = recollinit(0, 0, 0, reason, &a_config); RclConfig *rclconfig = *cfp; if (!rclconfig || !rclconfig->ok()) { fprintf(stderr, "Recoll init failed: %s\n", reason.c_str()); exit(1); } if (argc < 1 && !(op_flags & OPT_P)) { Usage(); } if (op_flags & OPT_F) { if (op_flags & (OPT_b|OPT_d|OPT_b|OPT_Q|OPT_m|OPT_A)) Usage(); stringToStrings(sf, fields); } Rcl::Db rcldb(rclconfig); if (!extra_dbs.empty()) { for (list<string>::iterator it = extra_dbs.begin(); it != extra_dbs.end(); it++) { if (!rcldb.addQueryDb(*it)) { cerr << "Can't add index: " << *it << endl; exit(1); } } } if (!syngroupsfn.empty()) { if (!rcldb.setSynGroupsFile(syngroupsfn)) { cerr << "Can't use synonyms file: " << syngroupsfn << endl; exit(1); } } if (!rcldb.open(Rcl::Db::DbRO)) { cerr << "Cant open database in " << rclconfig->getDbDir() << " reason: " << rcldb.getReason() << endl; exit(1); } if (op_flags & OPT_P) { int minyear, maxyear; if (!rcldb.maxYearSpan(&minyear, &maxyear)) { cerr << "maxYearSpan failed: " << rcldb.getReason() << endl; exit(1); } else { cout << "Min year " << minyear << " Max year " << maxyear << endl; exit(0); } } if (argc < 1) { Usage(); } string qs = *argv++;argc--; while (argc > 0) { qs += string(" ") + *argv++;argc--; } { string uq; string charset = rclconfig->getDefCharset(true); int ercnt; if (!transcode(qs, uq, charset, "UTF-8", &ercnt)) { fprintf(stderr, "Can't 
convert command line args to utf-8\n"); exit(1); } else if (ercnt) { fprintf(stderr, "%d errors while converting arguments from %s " "to utf-8\n", ercnt, charset.c_str()); } qs = uq; } Rcl::SearchData *sd = 0; if (op_flags & (OPT_a|OPT_o|OPT_f)) { sd = new Rcl::SearchData(Rcl::SCLT_OR, stemlang); Rcl::SearchDataClause *clp = 0; if (op_flags & OPT_f) { clp = new Rcl::SearchDataClauseFilename(qs); } else { clp = new Rcl::SearchDataClauseSimple((op_flags & OPT_o)? Rcl::SCLT_OR : Rcl::SCLT_AND, qs); } if (sd) sd->addClause(clp); } else { sd = wasaStringToRcl(rclconfig, stemlang, qs, reason); } if (!sd) { cerr << "Query string interpretation failed: " << reason << endl; return 1; } std::shared_ptr<Rcl::SearchData> rq(sd); Rcl::Query query(&rcldb); if (op_flags & OPT_C) { query.setCollapseDuplicates(true); } if (op_flags & OPT_S) { query.setSortBy(sortfield, (op_flags & OPT_D) ? false : true); } Chrono chron; if (!query.setQuery(rq)) { cerr << "Query setup failed: " << query.getReason() << endl; return(1); } int cnt; if (op_flags & OPT_E) { cnt = query.getResCnt(-1, true); } else { cnt = query.getResCnt(); } if (!(op_flags & OPT_b)) { cout << "Recoll query: " << rq->getDescription() << endl; if (firstres == 0) { if (cnt <= maxcount) cout << cnt << " results" << endl; else cout << cnt << " results (printing " << maxcount << " max):" << endl; } else { cout << "Printing at most " << cnt - (firstres+maxcount) << " results from first " << firstres << endl; } } if (op_flags & OPT_Q) cout << "Query setup took " << chron.millis() << " mS" << endl; if (op_flags & OPT_Q) return(0); for (int i = firstres; i < firstres + maxcount; i++) { Rcl::Doc doc; if (!query.getDoc(i, doc)) break; if (op_flags & OPT_F) { output_fields(fields, doc, query, rcldb, op_flags & OPT_N); continue; } if (op_flags & OPT_e) doc.url = url_encode(doc.url); if (op_flags & OPT_b) { cout << doc.url << endl; } else { string titleorfn = doc.meta[Rcl::Doc::keytt]; if (titleorfn.empty()) titleorfn = doc.meta[Rcl::Doc::keyfn]; if (titleorfn.empty()) { string url; printableUrl(rclconfig->getDefCharset(), doc.url, url); titleorfn = path_getsimple(url); } char cpc[20]; sprintf(cpc, "%d", doc.pc); cout << doc.mimetype << "\t" << "[" << doc.url << "]" << "\t" << "[" << titleorfn << "]" << "\t" << doc.fbytes << "\tbytes" << "\t" << endl; if (op_flags & OPT_m) { for (const auto ent : doc.meta) { cout << ent.first << " = " << ent.second << endl; } } if (op_flags & OPT_A) { string abstract; if (query.makeDocAbstract(doc, abstract)) { cout << "ABSTRACT" << endl; cout << abstract << endl; cout << "/ABSTRACT" << endl; } } } if (op_flags & OPT_d) { dump_contents(rclconfig, doc); } } return 0; } ��������recoll-1.26.3/query/reslistpager.h������������������������������������������������������������������0000644�0001750�0001750�00000010574�13533651561�014045� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _reslistpager_h_included_ #define _reslistpager_h_included_ #include "autoconfig.h" #include <vector> #include <memory> #include "docseq.h" #include "hldata.h" class RclConfig; class PlainToRich; /** * Manage a paged HTML result list. */ class ResListPager { public: ResListPager(int pagesize=10); virtual ~ResListPager() {} void setHighLighter(PlainToRich *ptr) { m_hiliter = ptr; } void setDocSource(std::shared_ptr<DocSequence> src, int winfirst = -1) { m_pagesize = m_newpagesize; m_winfirst = winfirst; m_hasNext = true; m_docSource = src; m_respage.clear(); } void setPageSize(int ps) { m_newpagesize = ps; } int pageNumber() { if (m_winfirst < 0 || m_pagesize <= 0) return -1; return m_winfirst / m_pagesize; } int pageFirstDocNum() { return m_winfirst; } int pageLastDocNum() { if (m_winfirst < 0 || m_respage.size() == 0) return -1; return m_winfirst + int(m_respage.size()) - 1; } virtual int pageSize() const {return m_pagesize;} void pageNext(); bool hasNext() {return m_hasNext;} bool hasPrev() {return m_winfirst > 0;} bool atBot() {return m_winfirst <= 0;} void resultPageFirst() { m_winfirst = -1; m_pagesize = m_newpagesize; resultPageNext(); } void resultPageBack() { if (m_winfirst <= 0) return; m_winfirst -= m_resultsInCurrentPage + m_pagesize; resultPageNext(); } void resultPageNext(); void resultPageFor(int docnum); void displayPage(RclConfig *); void displayDoc(RclConfig *, int idx, Rcl::Doc& doc, const HighlightData& hdata, const string& sh = ""); bool pageEmpty() {return m_respage.size() == 0;} string queryDescription() { return m_docSource ? m_docSource->getDescription() : ""; } bool getDoc(int num, Rcl::Doc &doc); // Things that need to be reimplemented in the subclass: virtual bool append(const string& data); virtual bool append(const string& data, int, const Rcl::Doc&) { return append(data); } // Translation function. This is reimplemented in the qt reslist // object For this to work, the strings must be duplicated inside // reslist.cpp (see the QT_TR_NOOP in there). Very very unwieldy. // To repeat: any change to a string used with trans() inside // reslistpager.cpp must be reflected in the string table inside // reslist.cpp for translation to work. 
virtual string trans(const string& in); virtual string detailsLink(); virtual const string &parFormat(); virtual const string &dateFormat(); virtual string nextUrl(); virtual string prevUrl(); virtual string pageTop() {return string();} virtual string headerContent() {return string();} virtual string iconUrl(RclConfig *, Rcl::Doc& doc); virtual void suggest(const std::vector<std::string>, std::map<std::string, std::vector<std::string> >& sugg){ sugg.clear(); } virtual string absSep() {return "…";} virtual string linkPrefix() {return "";} private: int m_pagesize; int m_newpagesize; int m_resultsInCurrentPage; // First docnum (from docseq) in current page int m_winfirst; bool m_hasNext; PlainToRich *m_hiliter; std::shared_ptr<DocSequence> m_docSource; std::vector<ResListEntry> m_respage; }; #endif /* _reslistpager_h_included_ */ ������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/dynconf.cpp���������������������������������������������������������������������0000644�0001750�0001750�00000013321�13533651561�013325� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef TEST_HISTORY #include "safeunistd.h" #include "dynconf.h" #include "base64.h" #include "smallut.h" #include "log.h" using namespace std; // Well known keys for history and external indexes. const string docHistSubKey = "docs"; const string allEdbsSk = "allExtDbs"; const string actEdbsSk = "actExtDbs"; const string advSearchHistSk = "advSearchHist"; RclDynConf::RclDynConf(const std::string &fn) : m_data(fn.c_str()) { if (m_data.getStatus() != ConfSimple::STATUS_RW) { // Maybe the config dir is readonly, in which case we try to // open readonly, but we must also handle the case where the // history file does not exist if (access(fn.c_str(), 0) != 0) { m_data = ConfSimple(string(), 1); } else { m_data = ConfSimple(fn.c_str(), 1); } } } bool RclDynConf::insertNew(const string &sk, DynConfEntry &n, DynConfEntry &s, int maxlen) { if (!rw()) { LOGDEB("RclDynConf::insertNew: not writable\n"); return false; } // Is this doc already in list ? 
If it is we remove the old entry vector<string> names = m_data.getNames(sk); vector<string>::const_iterator it; bool changed = false; for (it = names.begin(); it != names.end(); it++) { string oval; if (!m_data.get(*it, oval, sk)) { LOGDEB("No data for " << *it << "\n"); continue; } s.decode(oval); if (s.equal(n)) { LOGDEB("Erasing old entry\n"); m_data.erase(*it, sk); changed = true; } } // Maybe reget things if (changed) names = m_data.getNames(sk); // Need to prune ? if (maxlen > 0 && names.size() >= (unsigned int)maxlen) { // Need to erase entries until we're back to size. Note that // we don't ever reset numbers. Problems will arise when // history is 4 billion entries old it = names.begin(); for (unsigned int i = 0; i < names.size() - maxlen + 1; i++, it++) { m_data.erase(*it, sk); } } // Increment highest number unsigned int hi = names.empty() ? 0 : (unsigned int)atoi(names.back().c_str()); hi++; char nname[20]; sprintf(nname, "%010u", hi); string value; n.encode(value); LOGDEB1("Encoded value [" << value << "] (" << value.size() << ")\n"); if (!m_data.set(string(nname), value, sk)) { LOGERR("RclDynConf::insertNew: set failed\n"); return false; } return true; } bool RclDynConf::eraseAll(const string &sk) { if (!rw()) { LOGDEB("RclDynConf::eraseAll: not writable\n"); return false; } for (const auto& nm : m_data.getNames(sk)) { m_data.erase(nm, sk); } return true; } // Specialization for plain strings /////////////////////////////////// bool RclDynConf::enterString(const string sk, const string value, int maxlen) { if (!rw()) { LOGDEB("RclDynConf::enterString: not writable\n"); return false; } RclSListEntry ne(value); RclSListEntry scratch; return insertNew(sk, ne, scratch, maxlen); } #else #include <string> #include <iostream> #include "history.h" #include "log.h" #ifndef NO_NAMESPACES using namespace std; #endif static string thisprog; static string usage = "trhist [opts] <filename>\n" " [-s <subkey>]: specify subkey (default: RclDynConf::docHistSubKey)\n" " [-e] : erase all\n" " [-a <string>] enter string (needs -s, no good for history entries\n" "\n" ; static void Usage(void) { cerr << thisprog << ": usage:\n" << usage; exit(1); } static int op_flags; #define OPT_e 0x2 #define OPT_s 0x4 #define OPT_a 0x8 int main(int argc, char **argv) { string sk = "docs"; string value; thisprog = argv[0]; argc--; argv++; while (argc > 0 && **argv == '-') { (*argv)++; if (!(**argv)) /* Cas du "adb - core" */ Usage(); while (**argv) switch (*(*argv)++) { case 'a': op_flags |= OPT_a; if (argc < 2) Usage(); value = *(++argv); argc--; goto b1; case 's': op_flags |= OPT_s; if (argc < 2) Usage(); sk = *(++argv); argc--; goto b1; case 'e': op_flags |= OPT_e; break; default: Usage(); break; } b1: argc--; argv++; } if (argc != 1) Usage(); string filename = *argv++;argc--; RclDynConf hist(filename, 5); DebugLog::getdbl()->setloglevel(DEBDEB1); DebugLog::setfilename("stderr"); if (op_flags & OPT_e) { hist.eraseAll(sk); } else if (op_flags & OPT_a) { if (!(op_flags & OPT_s)) Usage(); hist.enterString(sk, value); } else { for (int i = 0; i < 10; i++) { char docname[200]; sprintf(docname, "A very long document document name" "is very long indeed and this is the end of " "it here and exactly here:\n%d", i); hist.enterDoc(string(docname), "ipathx"); } list<RclDHistoryEntry> hlist = hist.getDocHistory(); for (list<RclDHistoryEntry>::const_iterator it = hlist.begin(); it != hlist.end(); it++) { printf("[%ld] [%s] [%s]\n", it->unixtime, it->fn.c_str(), it->ipath.c_str()); } } } #endif 
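// Illustrative usage sketch, not part of the original file (assumes the
// normal build, i.e. TEST_HISTORY undefined so dynconf.h is included above).
// Entries are stored under a subkey with auto-numbered "%010u" keys: an equal
// entry is erased first, and the oldest entries are pruned once maxlen is
// reached. The file path and subkey name below are hypothetical.
static void exampleDynConfUse()
{
    RclDynConf hist("/tmp/recoll-dynconf-example");
    hist.enterString("someStrings", "first", 3);
    hist.enterString("someStrings", "second", 3);
    // Re-entering an equal value removes the old numbered key and appends a
    // new one, making "first" the most recent entry again.
    hist.enterString("someStrings", "first", 3);
}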
���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/wasaparse.cpp�������������������������������������������������������������������0000644�0001750�0001750�00000136137�13347664027�013672� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// A Bison parser, made by GNU Bison 3.0.4. // Skeleton implementation for Bison LALR(1) parsers in C++ // Copyright (C) 2002-2015 Free Software Foundation, Inc. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // As a special exception, you may create a larger work that contains // part or all of the Bison parser skeleton and distribute that work // under terms of your choice, so long as that work isn't itself a // parser generator using the skeleton or a modified version thereof // as a parser skeleton. Alternatively, if you modify or redistribute // the parser skeleton itself, you may (at your option) remove this // special exception, which will cause the skeleton and the resulting // Bison output files to be licensed under the GNU General Public // License without this special exception. // This special exception was added by the Free Software Foundation in // version 2.2 of Bison. // First part of user declarations. #line 1 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:404 #define YYDEBUG 1 #include "autoconfig.h" #include <stdio.h> #include <iostream> #include <string> #include "searchdata.h" #include "wasaparserdriver.h" #include "wasaparse.hpp" using namespace std; //#define LOG_PARSER #ifdef LOG_PARSER #define LOGP(X) {cerr << X;} #else #define LOGP(X) #endif int yylex(yy::parser::semantic_type *, yy::parser::location_type *, WasaParserDriver *); void yyerror(char const *); static void qualify(Rcl::SearchDataClauseDist *, const string &); static void addSubQuery(WasaParserDriver *d, Rcl::SearchData *sd, Rcl::SearchData *sq) { if (sd && sq) sd->addClause( new Rcl::SearchDataClauseSub(std::shared_ptr<Rcl::SearchData>(sq))); } #line 73 "y.tab.c" // lalr1.cc:404 # ifndef YY_NULLPTR # if defined __cplusplus && 201103L <= __cplusplus # define YY_NULLPTR nullptr # else # define YY_NULLPTR 0 # endif # endif // User implementation prologue. #line 87 "y.tab.c" // lalr1.cc:412 #ifndef YY_ # if defined YYENABLE_NLS && YYENABLE_NLS # if ENABLE_NLS # include <libintl.h> // FIXME: INFRINGES ON USER NAME SPACE. 
# define YY_(msgid) dgettext ("bison-runtime", msgid) # endif # endif # ifndef YY_ # define YY_(msgid) msgid # endif #endif #define YYRHSLOC(Rhs, K) ((Rhs)[K].location) /* YYLLOC_DEFAULT -- Set CURRENT to span from RHS[1] to RHS[N]. If N is 0, then set CURRENT to the empty location which ends the previous symbol: RHS[0] (always defined). */ # ifndef YYLLOC_DEFAULT # define YYLLOC_DEFAULT(Current, Rhs, N) \ do \ if (N) \ { \ (Current).begin = YYRHSLOC (Rhs, 1).begin; \ (Current).end = YYRHSLOC (Rhs, N).end; \ } \ else \ { \ (Current).begin = (Current).end = YYRHSLOC (Rhs, 0).end; \ } \ while (/*CONSTCOND*/ false) # endif // Suppress unused-variable warnings by "using" E. #define YYUSE(E) ((void) (E)) // Enable debugging if requested. #if YYDEBUG // A pseudo ostream that takes yydebug_ into account. # define YYCDEBUG if (yydebug_) (*yycdebug_) # define YY_SYMBOL_PRINT(Title, Symbol) \ do { \ if (yydebug_) \ { \ *yycdebug_ << Title << ' '; \ yy_print_ (*yycdebug_, Symbol); \ *yycdebug_ << std::endl; \ } \ } while (false) # define YY_REDUCE_PRINT(Rule) \ do { \ if (yydebug_) \ yy_reduce_print_ (Rule); \ } while (false) # define YY_STACK_PRINT() \ do { \ if (yydebug_) \ yystack_print_ (); \ } while (false) #else // !YYDEBUG # define YYCDEBUG if (false) std::cerr # define YY_SYMBOL_PRINT(Title, Symbol) YYUSE(Symbol) # define YY_REDUCE_PRINT(Rule) static_cast<void>(0) # define YY_STACK_PRINT() static_cast<void>(0) #endif // !YYDEBUG #define yyerrok (yyerrstatus_ = 0) #define yyclearin (yyla.clear ()) #define YYACCEPT goto yyacceptlab #define YYABORT goto yyabortlab #define YYERROR goto yyerrorlab #define YYRECOVERING() (!!yyerrstatus_) namespace yy { #line 173 "y.tab.c" // lalr1.cc:479 /* Return YYSTR after stripping away unnecessary quotes and backslashes, so that it's suitable for yyerror. The heuristic is that double-quoting is unnecessary unless the string contains an apostrophe, a comma, or backslash (other than backslash-backslash). YYSTR is taken from yytname. */ std::string parser::yytnamerr_ (const char *yystr) { if (*yystr == '"') { std::string yyr = ""; char const *yyp = yystr; for (;;) switch (*++yyp) { case '\'': case ',': goto do_not_strip_quotes; case '\\': if (*++yyp != '\\') goto do_not_strip_quotes; // Fall through. default: yyr += *yyp; break; case '"': return yyr; } do_not_strip_quotes: ; } return yystr; } /// Build a parser object. parser::parser (WasaParserDriver* d_yyarg) : #if YYDEBUG yydebug_ (false), yycdebug_ (&std::cerr), #endif d (d_yyarg) {} parser::~parser () {} /*---------------. | Symbol types. | `---------------*/ inline parser::syntax_error::syntax_error (const location_type& l, const std::string& m) : std::runtime_error (m) , location (l) {} // basic_symbol. template <typename Base> inline parser::basic_symbol<Base>::basic_symbol () : value () {} template <typename Base> inline parser::basic_symbol<Base>::basic_symbol (const basic_symbol& other) : Base (other) , value () , location (other.location) { value = other.value; } template <typename Base> inline parser::basic_symbol<Base>::basic_symbol (typename Base::kind_type t, const semantic_type& v, const location_type& l) : Base (t) , value (v) , location (l) {} /// Constructor for valueless symbols. 
template <typename Base> inline parser::basic_symbol<Base>::basic_symbol (typename Base::kind_type t, const location_type& l) : Base (t) , value () , location (l) {} template <typename Base> inline parser::basic_symbol<Base>::~basic_symbol () { clear (); } template <typename Base> inline void parser::basic_symbol<Base>::clear () { Base::clear (); } template <typename Base> inline bool parser::basic_symbol<Base>::empty () const { return Base::type_get () == empty_symbol; } template <typename Base> inline void parser::basic_symbol<Base>::move (basic_symbol& s) { super_type::move(s); value = s.value; location = s.location; } // by_type. inline parser::by_type::by_type () : type (empty_symbol) {} inline parser::by_type::by_type (const by_type& other) : type (other.type) {} inline parser::by_type::by_type (token_type t) : type (yytranslate_ (t)) {} inline void parser::by_type::clear () { type = empty_symbol; } inline void parser::by_type::move (by_type& that) { type = that.type; that.clear (); } inline int parser::by_type::type_get () const { return type; } // by_state. inline parser::by_state::by_state () : state (empty_state) {} inline parser::by_state::by_state (const by_state& other) : state (other.state) {} inline void parser::by_state::clear () { state = empty_state; } inline void parser::by_state::move (by_state& that) { state = that.state; that.clear (); } inline parser::by_state::by_state (state_type s) : state (s) {} inline parser::symbol_number_type parser::by_state::type_get () const { if (state == empty_state) return empty_symbol; else return yystos_[state]; } inline parser::stack_symbol_type::stack_symbol_type () {} inline parser::stack_symbol_type::stack_symbol_type (state_type s, symbol_type& that) : super_type (s, that.location) { value = that.value; // that is emptied. that.type = empty_symbol; } inline parser::stack_symbol_type& parser::stack_symbol_type::operator= (const stack_symbol_type& that) { state = that.state; value = that.value; location = that.location; return *this; } template <typename Base> inline void parser::yy_destroy_ (const char* yymsg, basic_symbol<Base>& yysym) const { if (yymsg) YY_SYMBOL_PRINT (yymsg, yysym); // User destructor. switch (yysym.type_get ()) { case 3: // WORD #line 52 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:614 {delete (yysym.value.str);} #line 426 "y.tab.c" // lalr1.cc:614 break; case 4: // QUOTED #line 52 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:614 {delete (yysym.value.str);} #line 433 "y.tab.c" // lalr1.cc:614 break; case 5: // QUALIFIERS #line 52 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:614 {delete (yysym.value.str);} #line 440 "y.tab.c" // lalr1.cc:614 break; case 23: // complexfieldname #line 52 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:614 {delete (yysym.value.str);} #line 447 "y.tab.c" // lalr1.cc:614 break; default: break; } } #if YYDEBUG template <typename Base> void parser::yy_print_ (std::ostream& yyo, const basic_symbol<Base>& yysym) const { std::ostream& yyoutput = yyo; YYUSE (yyoutput); symbol_number_type yytype = yysym.type_get (); // Avoid a (spurious) G++ 4.8 warning about "array subscript is // below array bounds". if (yysym.empty ()) std::abort (); yyo << (yytype < yyntokens_ ? 
"token" : "nterm") << ' ' << yytname_[yytype] << " (" << yysym.location << ": "; YYUSE (yytype); yyo << ')'; } #endif inline void parser::yypush_ (const char* m, state_type s, symbol_type& sym) { stack_symbol_type t (s, sym); yypush_ (m, t); } inline void parser::yypush_ (const char* m, stack_symbol_type& s) { if (m) YY_SYMBOL_PRINT (m, s); yystack_.push (s); } inline void parser::yypop_ (unsigned int n) { yystack_.pop (n); } #if YYDEBUG std::ostream& parser::debug_stream () const { return *yycdebug_; } void parser::set_debug_stream (std::ostream& o) { yycdebug_ = &o; } parser::debug_level_type parser::debug_level () const { return yydebug_; } void parser::set_debug_level (debug_level_type l) { yydebug_ = l; } #endif // YYDEBUG inline parser::state_type parser::yy_lr_goto_state_ (state_type yystate, int yysym) { int yyr = yypgoto_[yysym - yyntokens_] + yystate; if (0 <= yyr && yyr <= yylast_ && yycheck_[yyr] == yystate) return yytable_[yyr]; else return yydefgoto_[yysym - yyntokens_]; } inline bool parser::yy_pact_value_is_default_ (int yyvalue) { return yyvalue == yypact_ninf_; } inline bool parser::yy_table_value_is_error_ (int yyvalue) { return yyvalue == yytable_ninf_; } int parser::parse () { // State. int yyn; /// Length of the RHS of the rule being reduced. int yylen = 0; // Error handling. int yynerrs_ = 0; int yyerrstatus_ = 0; /// The lookahead symbol. symbol_type yyla; /// The locations where the error started and ended. stack_symbol_type yyerror_range[3]; /// The return value of parse (). int yyresult; // FIXME: This shoud be completely indented. It is not yet to // avoid gratuitous conflicts when merging into the master branch. try { YYCDEBUG << "Starting parse" << std::endl; /* Initialize the stack. The initial state will be set in yynewstate, since the latter expects the semantical and the location values to have been already stored, initialize these stacks with a primary value. */ yystack_.clear (); yypush_ (YY_NULLPTR, 0, yyla); // A new symbol was pushed on the stack. yynewstate: YYCDEBUG << "Entering state " << yystack_[0].state << std::endl; // Accept? if (yystack_[0].state == yyfinal_) goto yyacceptlab; goto yybackup; // Backup. yybackup: // Try to take a decision without lookahead. yyn = yypact_[yystack_[0].state]; if (yy_pact_value_is_default_ (yyn)) goto yydefault; // Read a lookahead token. if (yyla.empty ()) { YYCDEBUG << "Reading a token: "; try { yyla.type = yytranslate_ (yylex (&yyla.value, &yyla.location, d)); } catch (const syntax_error& yyexc) { error (yyexc); goto yyerrlab1; } } YY_SYMBOL_PRINT ("Next token is", yyla); /* If the proper action on seeing token YYLA.TYPE is to reduce or to detect an error, take that action. */ yyn += yyla.type_get (); if (yyn < 0 || yylast_ < yyn || yycheck_[yyn] != yyla.type_get ()) goto yydefault; // Reduce or error. yyn = yytable_[yyn]; if (yyn <= 0) { if (yy_table_value_is_error_ (yyn)) goto yyerrlab; yyn = -yyn; goto yyreduce; } // Count tokens shifted since error; after three, turn off error status. if (yyerrstatus_) --yyerrstatus_; // Shift the lookahead token. yypush_ ("Shifting", yyn, yyla); goto yynewstate; /*-----------------------------------------------------------. | yydefault -- do the default action for the current state. | `-----------------------------------------------------------*/ yydefault: yyn = yydefact_[yystack_[0].state]; if (yyn == 0) goto yyerrlab; goto yyreduce; /*-----------------------------. | yyreduce -- Do a reduction. 
| `-----------------------------*/ yyreduce: yylen = yyr2_[yyn]; { stack_symbol_type yylhs; yylhs.state = yy_lr_goto_state_(yystack_[yylen].state, yyr1_[yyn]); /* If YYLEN is nonzero, implement the default value of the action: '$$ = $1'. Otherwise, use the top of the stack. Otherwise, the following line sets YYLHS.VALUE to garbage. This behavior is undocumented and Bison users should not rely upon it. */ if (yylen) yylhs.value = yystack_[yylen - 1].value; else yylhs.value = yystack_[0].value; // Compute the default @$. { slice<stack_symbol_type, stack_type> slice (yystack_, yylen); YYLLOC_DEFAULT (yylhs.location, slice, yylen); } // Perform the reduction. YY_REDUCE_PRINT (yyn); try { switch (yyn) { case 2: #line 74 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { // It's possible that we end up with no query (e.g.: because just a // date filter was set, no terms). Allocate an empty query so that we // have something to set the global criteria on (this will yield a // Xapian search like <alldocuments> FILTER xxx if ((yystack_[0].value.sd) == 0) d->m_result = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); else d->m_result = (yystack_[0].value.sd); } #line 695 "y.tab.c" // lalr1.cc:859 break; case 3: #line 87 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("q: query query\n"); Rcl::SearchData *sd = 0; if ((yystack_[1].value.sd) || (yystack_[0].value.sd)) { sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); addSubQuery(d, sd, (yystack_[1].value.sd)); addSubQuery(d, sd, (yystack_[0].value.sd)); } (yylhs.value.sd) = sd; } #line 710 "y.tab.c" // lalr1.cc:859 break; case 4: #line 98 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("q: query AND query\n"); Rcl::SearchData *sd = 0; if ((yystack_[2].value.sd) || (yystack_[0].value.sd)) { sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); addSubQuery(d, sd, (yystack_[2].value.sd)); addSubQuery(d, sd, (yystack_[0].value.sd)); } (yylhs.value.sd) = sd; } #line 725 "y.tab.c" // lalr1.cc:859 break; case 5: #line 109 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("query: query OR query\n"); Rcl::SearchData *top = 0; if ((yystack_[2].value.sd) || (yystack_[0].value.sd)) { top = new Rcl::SearchData(Rcl::SCLT_OR, d->m_stemlang); addSubQuery(d, top, (yystack_[2].value.sd)); addSubQuery(d, top, (yystack_[0].value.sd)); } (yylhs.value.sd) = top; } #line 740 "y.tab.c" // lalr1.cc:859 break; case 6: #line 120 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("q: ( query )\n"); (yylhs.value.sd) = (yystack_[1].value.sd); } #line 749 "y.tab.c" // lalr1.cc:859 break; case 7: #line 126 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("q: fieldexpr\n"); Rcl::SearchData *sd = new Rcl::SearchData(Rcl::SCLT_AND, d->m_stemlang); if (d->addClause(sd, (yystack_[0].value.cl))) { (yylhs.value.sd) = sd; } else { delete sd; (yylhs.value.sd) = 0; } } #line 764 "y.tab.c" // lalr1.cc:859 break; case 8: #line 139 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: simple fieldexpr: " << (yystack_[0].value.cl)->gettext() << endl); (yylhs.value.cl) = (yystack_[0].value.cl); } #line 773 "y.tab.c" // lalr1.cc:859 break; case 9: #line 144 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " = " << (yystack_[0].value.cl)->gettext() << endl); 
(yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_EQUALS); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 785 "y.tab.c" // lalr1.cc:859 break; case 10: #line 152 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " : " << (yystack_[0].value.cl)->gettext() << endl); (yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_CONTAINS); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 797 "y.tab.c" // lalr1.cc:859 break; case 11: #line 160 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " : " << (yystack_[0].value.rg)->gettext() << endl); (yystack_[0].value.rg)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.rg)->setrel(Rcl::SearchDataClause::REL_CONTAINS); (yylhs.value.cl) = (yystack_[0].value.rg); delete (yystack_[2].value.str); } #line 809 "y.tab.c" // lalr1.cc:859 break; case 12: #line 168 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " < " << (yystack_[0].value.cl)->gettext() << endl); (yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_LT); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 821 "y.tab.c" // lalr1.cc:859 break; case 13: #line 176 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " <= " << (yystack_[0].value.cl)->gettext() << endl); (yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_LTE); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 833 "y.tab.c" // lalr1.cc:859 break; case 14: #line 184 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " > " << (yystack_[0].value.cl)->gettext() << endl); (yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_GT); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 845 "y.tab.c" // lalr1.cc:859 break; case 15: #line 192 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: " << *(yystack_[2].value.str) << " >= " << (yystack_[0].value.cl)->gettext() << endl); (yystack_[0].value.cl)->setfield(*(yystack_[2].value.str)); (yystack_[0].value.cl)->setrel(Rcl::SearchDataClause::REL_GTE); (yylhs.value.cl) = (yystack_[0].value.cl); delete (yystack_[2].value.str); } #line 857 "y.tab.c" // lalr1.cc:859 break; case 16: #line 200 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("fe: - fieldexpr[" << (yystack_[0].value.cl)->gettext() << "]" << endl); (yystack_[0].value.cl)->setexclude(true); (yylhs.value.cl) = (yystack_[0].value.cl); } #line 867 "y.tab.c" // lalr1.cc:859 break; case 17: #line 210 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("cfn: WORD" << endl); (yylhs.value.str) = (yystack_[0].value.str); } #line 876 "y.tab.c" // lalr1.cc:859 break; case 18: #line 216 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("cfn: complexfieldname ':' 
WORD" << endl); (yylhs.value.str) = new string(*(yystack_[2].value.str) + string(":") + *(yystack_[0].value.str)); delete (yystack_[2].value.str); delete (yystack_[0].value.str); } #line 887 "y.tab.c" // lalr1.cc:859 break; case 19: #line 225 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("Range: " << *(yystack_[2].value.str) << string(" .. ") << *(yystack_[0].value.str) << endl); (yylhs.value.rg) = new Rcl::SearchDataClauseRange(*(yystack_[2].value.str), *(yystack_[0].value.str)); delete (yystack_[2].value.str); delete (yystack_[0].value.str); } #line 898 "y.tab.c" // lalr1.cc:859 break; case 20: #line 233 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("Range: " << "" << string(" .. ") << *(yystack_[0].value.str) << endl); (yylhs.value.rg) = new Rcl::SearchDataClauseRange("", *(yystack_[0].value.str)); delete (yystack_[0].value.str); } #line 908 "y.tab.c" // lalr1.cc:859 break; case 21: #line 240 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("Range: " << *(yystack_[1].value.str) << string(" .. ") << "" << endl); (yylhs.value.rg) = new Rcl::SearchDataClauseRange(*(yystack_[1].value.str), ""); delete (yystack_[1].value.str); } #line 918 "y.tab.c" // lalr1.cc:859 break; case 22: #line 249 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("term[" << *(yystack_[0].value.str) << "]" << endl); (yylhs.value.cl) = new Rcl::SearchDataClauseSimple(Rcl::SCLT_AND, *(yystack_[0].value.str)); delete (yystack_[0].value.str); } #line 928 "y.tab.c" // lalr1.cc:859 break; case 23: #line 255 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { (yylhs.value.cl) = (yystack_[0].value.cl); } #line 936 "y.tab.c" // lalr1.cc:859 break; case 24: #line 261 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("QUOTED[" << *(yystack_[0].value.str) << "]" << endl); (yylhs.value.cl) = new Rcl::SearchDataClauseDist(Rcl::SCLT_PHRASE, *(yystack_[0].value.str), 0); delete (yystack_[0].value.str); } #line 946 "y.tab.c" // lalr1.cc:859 break; case 25: #line 267 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:859 { LOGP("QUOTED[" << *(yystack_[1].value.str) << "] QUALIFIERS[" << *(yystack_[0].value.str) << "]" << endl); Rcl::SearchDataClauseDist *cl = new Rcl::SearchDataClauseDist(Rcl::SCLT_PHRASE, *(yystack_[1].value.str), 0); qualify(cl, *(yystack_[0].value.str)); (yylhs.value.cl) = cl; delete (yystack_[1].value.str); delete (yystack_[0].value.str); } #line 960 "y.tab.c" // lalr1.cc:859 break; #line 964 "y.tab.c" // lalr1.cc:859 default: break; } } catch (const syntax_error& yyexc) { error (yyexc); YYERROR; } YY_SYMBOL_PRINT ("-> $$ =", yylhs); yypop_ (yylen); yylen = 0; YY_STACK_PRINT (); // Shift the result of the reduction. yypush_ (YY_NULLPTR, yylhs); } goto yynewstate; /*--------------------------------------. | yyerrlab -- here on detecting error. | `--------------------------------------*/ yyerrlab: // If not already recovering from an error, report this error. if (!yyerrstatus_) { ++yynerrs_; error (yyla.location, yysyntax_error_ (yystack_[0].state, yyla)); } yyerror_range[1].location = yyla.location; if (yyerrstatus_ == 3) { /* If just tried and failed to reuse lookahead token after an error, discard it. */ // Return failure if at end of input. 
if (yyla.type_get () == yyeof_) YYABORT; else if (!yyla.empty ()) { yy_destroy_ ("Error: discarding", yyla); yyla.clear (); } } // Else will try to reuse lookahead token after shifting the error token. goto yyerrlab1; /*---------------------------------------------------. | yyerrorlab -- error raised explicitly by YYERROR. | `---------------------------------------------------*/ yyerrorlab: /* Pacify compilers like GCC when the user code never invokes YYERROR and the label yyerrorlab therefore never appears in user code. */ if (false) goto yyerrorlab; yyerror_range[1].location = yystack_[yylen - 1].location; /* Do not reclaim the symbols of the rule whose action triggered this YYERROR. */ yypop_ (yylen); yylen = 0; goto yyerrlab1; /*-------------------------------------------------------------. | yyerrlab1 -- common code for both syntax error and YYERROR. | `-------------------------------------------------------------*/ yyerrlab1: yyerrstatus_ = 3; // Each real token shifted decrements this. { stack_symbol_type error_token; for (;;) { yyn = yypact_[yystack_[0].state]; if (!yy_pact_value_is_default_ (yyn)) { yyn += yyterror_; if (0 <= yyn && yyn <= yylast_ && yycheck_[yyn] == yyterror_) { yyn = yytable_[yyn]; if (0 < yyn) break; } } // Pop the current state because it cannot handle the error token. if (yystack_.size () == 1) YYABORT; yyerror_range[1].location = yystack_[0].location; yy_destroy_ ("Error: popping", yystack_[0]); yypop_ (); YY_STACK_PRINT (); } yyerror_range[2].location = yyla.location; YYLLOC_DEFAULT (error_token.location, yyerror_range, 2); // Shift the error token. error_token.state = yyn; yypush_ ("Shifting", error_token); } goto yynewstate; // Accept. yyacceptlab: yyresult = 0; goto yyreturn; // Abort. yyabortlab: yyresult = 1; goto yyreturn; yyreturn: if (!yyla.empty ()) yy_destroy_ ("Cleanup: discarding lookahead", yyla); /* Do not reclaim the symbols of the rule whose action triggered this YYABORT or YYACCEPT. */ yypop_ (yylen); while (1 < yystack_.size ()) { yy_destroy_ ("Cleanup: popping", yystack_[0]); yypop_ (); } return yyresult; } catch (...) { YYCDEBUG << "Exception caught: cleaning lookahead and stack" << std::endl; // Do not try to display the values of the reclaimed symbols, // as their printer might throw an exception. if (!yyla.empty ()) yy_destroy_ (YY_NULLPTR, yyla); while (1 < yystack_.size ()) { yy_destroy_ (YY_NULLPTR, yystack_[0]); yypop_ (); } throw; } } void parser::error (const syntax_error& yyexc) { error (yyexc.location, yyexc.what()); } // Generate an error message. std::string parser::yysyntax_error_ (state_type yystate, const symbol_type& yyla) const { // Number of reported tokens (one for the "unexpected", one per // "expected"). size_t yycount = 0; // Its maximum. enum { YYERROR_VERBOSE_ARGS_MAXIMUM = 5 }; // Arguments of yyformat. char const *yyarg[YYERROR_VERBOSE_ARGS_MAXIMUM]; /* There are many possibilities here to consider: - If this state is a consistent state with a default action, then the only way this function was invoked is if the default action is an error action. In that case, don't check for expected tokens because there are none. - The only way there can be no lookahead present (in yyla) is if this state is a consistent state with a default action. Thus, detecting the absence of a lookahead is sufficient to determine that there is no unexpected or expected token to report. In that case, just report a simple "syntax error". 
- Don't assume there isn't a lookahead just because this state is a consistent state with a default action. There might have been a previous inconsistent state, consistent state with a non-default action, or user semantic action that manipulated yyla. (However, yyla is currently not documented for users.) - Of course, the expected token list depends on states to have correct lookahead information, and it depends on the parser not to perform extra reductions after fetching a lookahead from the scanner and before detecting a syntax error. Thus, state merging (from LALR or IELR) and default reductions corrupt the expected token list. However, the list is correct for canonical LR with one exception: it will still contain any token that will not be accepted due to an error action in a later state. */ if (!yyla.empty ()) { int yytoken = yyla.type_get (); yyarg[yycount++] = yytname_[yytoken]; int yyn = yypact_[yystate]; if (!yy_pact_value_is_default_ (yyn)) { /* Start YYX at -YYN if negative to avoid negative indexes in YYCHECK. In other words, skip the first -YYN actions for this state because they are default actions. */ int yyxbegin = yyn < 0 ? -yyn : 0; // Stay within bounds of both yycheck and yytname. int yychecklim = yylast_ - yyn + 1; int yyxend = yychecklim < yyntokens_ ? yychecklim : yyntokens_; for (int yyx = yyxbegin; yyx < yyxend; ++yyx) if (yycheck_[yyx + yyn] == yyx && yyx != yyterror_ && !yy_table_value_is_error_ (yytable_[yyx + yyn])) { if (yycount == YYERROR_VERBOSE_ARGS_MAXIMUM) { yycount = 1; break; } else yyarg[yycount++] = yytname_[yyx]; } } } char const* yyformat = YY_NULLPTR; switch (yycount) { #define YYCASE_(N, S) \ case N: \ yyformat = S; \ break YYCASE_(0, YY_("syntax error")); YYCASE_(1, YY_("syntax error, unexpected %s")); YYCASE_(2, YY_("syntax error, unexpected %s, expecting %s")); YYCASE_(3, YY_("syntax error, unexpected %s, expecting %s or %s")); YYCASE_(4, YY_("syntax error, unexpected %s, expecting %s or %s or %s")); YYCASE_(5, YY_("syntax error, unexpected %s, expecting %s or %s or %s or %s")); #undef YYCASE_ } std::string yyres; // Argument number. 
size_t yyi = 0; for (char const* yyp = yyformat; *yyp; ++yyp) if (yyp[0] == '%' && yyp[1] == 's' && yyi < yycount) { yyres += yytnamerr_ (yyarg[yyi++]); ++yyp; } else yyres += *yyp; return yyres; } const signed char parser::yypact_ninf_ = -3; const signed char parser::yytable_ninf_ = -19; const signed char parser::yypact_[] = { 31, 32, 3, 31, 33, 6, 14, -3, 38, -3, -3, -3, 1, -3, -3, 31, 31, 4, -2, 9, -2, -2, -2, -2, -3, 4, -3, -3, -3, 16, 18, -3, -3, -3, -3, -3, -3, 22, -3, -3 }; const unsigned char parser::yydefact_[] = { 0, 22, 24, 0, 0, 0, 2, 7, 0, 8, 23, 25, 0, 16, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 6, 4, 5, 22, 9, 22, 0, 11, 10, 13, 12, 15, 14, 21, 20, 19 }; const signed char parser::yypgoto_[] = { -3, -3, 0, 34, -3, -3, 37, -3 }; const signed char parser::yydefgoto_[] = { -1, 5, 17, 7, 8, 31, 9, 10 }; const signed char parser::yytable_[] = { 6, 27, 2, 12, 1, 2, 14, 15, 11, 3, 4, 16, 29, 2, 16, 25, 26, 1, 2, 24, 15, 38, 3, 4, 16, 39, 30, -18, -18, -18, -18, -18, -18, 37, 1, 2, 1, 2, 13, 3, 4, 0, 4, -17, -17, -17, -17, -17, -17, 18, 19, 20, 21, 22, 23, 28, 32, 33, 34, 35, 36 }; const signed char parser::yycheck_[] = { 0, 3, 4, 3, 3, 4, 0, 6, 5, 8, 9, 10, 3, 4, 10, 15, 16, 3, 4, 18, 6, 3, 8, 9, 10, 3, 17, 11, 12, 13, 14, 15, 16, 17, 3, 4, 3, 4, 4, 8, 9, -1, 9, 11, 12, 13, 14, 15, 16, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 22, 23 }; const unsigned char parser::yystos_[] = { 0, 3, 4, 8, 9, 20, 21, 22, 23, 25, 26, 5, 21, 22, 0, 6, 10, 21, 11, 12, 13, 14, 15, 16, 18, 21, 21, 3, 25, 3, 17, 24, 25, 25, 25, 25, 25, 17, 3, 3 }; const unsigned char parser::yyr1_[] = { 0, 19, 20, 21, 21, 21, 21, 21, 22, 22, 22, 22, 22, 22, 22, 22, 22, 23, 23, 24, 24, 24, 25, 25, 26, 26 }; const unsigned char parser::yyr2_[] = { 0, 2, 1, 2, 3, 3, 3, 1, 1, 3, 3, 3, 3, 3, 3, 3, 2, 1, 3, 3, 2, 2, 1, 1, 1, 2 }; // YYTNAME[SYMBOL-NUM] -- String name of the symbol SYMBOL-NUM. // First, the terminals, then, starting at \a yyntokens_, nonterminals. const char* const parser::yytname_[] = { "$end", "error", "$undefined", "WORD", "QUOTED", "QUALIFIERS", "AND", "UCONCAT", "'('", "'-'", "OR", "EQUALS", "CONTAINS", "SMALLEREQ", "SMALLER", "GREATEREQ", "GREATER", "RANGE", "')'", "$accept", "topquery", "query", "fieldexpr", "complexfieldname", "range", "term", "qualquote", YY_NULLPTR }; #if YYDEBUG const unsigned short int parser::yyrline_[] = { 0, 73, 73, 86, 97, 108, 119, 125, 138, 143, 151, 159, 167, 175, 183, 191, 199, 209, 215, 224, 232, 239, 248, 254, 260, 266 }; // Print the state stack on the debug stream. void parser::yystack_print_ () { *yycdebug_ << "Stack now"; for (stack_type::const_iterator i = yystack_.begin (), i_end = yystack_.end (); i != i_end; ++i) *yycdebug_ << ' ' << i->state; *yycdebug_ << std::endl; } // Report on the debug stream that the rule \a yyrule is going to be reduced. void parser::yy_reduce_print_ (int yyrule) { unsigned int yylno = yyrline_[yyrule]; int yynrhs = yyr2_[yyrule]; // Print the symbols being reduced, and their result. *yycdebug_ << "Reducing stack by rule " << yyrule - 1 << " (line " << yylno << "):" << std::endl; // The symbols being reduced. for (int yyi = 0; yyi < yynrhs; yyi++) YY_SYMBOL_PRINT (" $" << yyi + 1 << " =", yystack_[(yynrhs) - (yyi + 1)]); } #endif // YYDEBUG // Symbol number corresponding to token number t. 
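// (Editorial note, not part of the Bison output.) A sketch of what the
// translate_table in yytranslate_() below encodes, assuming the usual Bison
// numbering in which named tokens start at 258: literal characters index the
// table directly by their character code, e.g. '(' (40) -> symbol 8,
// ')' (41) -> 18 and '-' (45) -> 9, matching their slots in yytname_ above,
// while the named tokens WORD..RANGE (258..270, user_token_number_max_) map
// through the tail of the table to symbols 3..7 and 10..17. Codes above
// user_token_number_max_ fall back to undef_token_.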
inline parser::token_number_type parser::yytranslate_ (int t) { static const token_number_type translate_table[] = { 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 8, 18, 2, 2, 2, 9, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 3, 4, 5, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17 }; const unsigned int user_token_number_max_ = 270; const token_number_type undef_token_ = 2; if (static_cast<int>(t) <= yyeof_) return yyeof_; else if (static_cast<unsigned int> (t) <= user_token_number_max_) return translate_table[t]; else return undef_token_; } } // yy #line 1404 "y.tab.c" // lalr1.cc:1167 #line 278 "/home/dockes/projets/fulltext/recoll/src/query/wasaparse.ypp" // lalr1.cc:1168 #include <ctype.h> // Look for int at index, skip and return new index found? value. static unsigned int qualGetInt(const string& q, unsigned int cur, int *pval) { unsigned int ncur = cur; if (cur < q.size() - 1) { char *endptr; int val = strtol(&q[cur + 1], &endptr, 10); if (endptr != &q[cur + 1]) { ncur += endptr - &q[cur + 1]; *pval = val; } } return ncur; } static void qualify(Rcl::SearchDataClauseDist *cl, const string& quals) { // cerr << "qualify(" << cl << ", " << quals << ")" << endl; for (unsigned int i = 0; i < quals.length(); i++) { //fprintf(stderr, "qual char %c\n", quals[i]); switch (quals[i]) { case 'b': cl->setWeight(10.0); break; case 'c': break; case 'C': cl->addModifier(Rcl::SearchDataClause::SDCM_CASESENS); break; case 'd': break; case 'D': cl->addModifier(Rcl::SearchDataClause::SDCM_DIACSENS); break; case 'e': cl->addModifier(Rcl::SearchDataClause::SDCM_CASESENS); cl->addModifier(Rcl::SearchDataClause::SDCM_DIACSENS); cl->addModifier(Rcl::SearchDataClause::SDCM_NOSTEMMING); break; case 'l': cl->addModifier(Rcl::SearchDataClause::SDCM_NOSTEMMING); break; case 'L': break; case 'o': { int slack = 10; i = qualGetInt(quals, i, &slack); cl->setslack(slack); //cerr << "set slack " << cl->getslack() << " done" << endl; } break; case 'p': cl->setTp(Rcl::SCLT_NEAR); if (cl->getslack() == 0) { cl->setslack(10); //cerr << "set slack " << cl->getslack() << " done" << endl; } break; case 's': cl->addModifier(Rcl::SearchDataClause::SDCM_NOSYNS); break; case 'S': break; case '.':case '0':case '1':case '2':case '3':case '4': case '5':case '6':case '7':case '8':case '9': { int n = 0; float factor = 1.0; if (sscanf(&(quals[i]), "%f %n", &factor, &n)) { if (factor != 1.0) { cl->setWeight(factor); } } if (n > 0) i += n - 1; } default: break; } } } // specialstartchars are special only at the beginning of a token // (e.g. 
doctor-who is a term, not 2 terms separated by '-') static const string specialstartchars("-"); // specialinchars are special everywhere except inside a quoted string static const string specialinchars(":=<>()"); // Called with the first dquote already read static int parseString(WasaParserDriver *d, yy::parser::semantic_type *yylval) { string* value = new string(); d->qualifiers().clear(); int c; while ((c = d->GETCHAR())) { switch (c) { case '\\': /* Escape: get next char */ c = d->GETCHAR(); if (c == 0) { value->push_back(c); goto out; } value->push_back(c); break; case '"': /* End of string. Look for qualifiers */ while ((c = d->GETCHAR()) && (isalnum(c) || c == '.')) d->qualifiers().push_back(c); d->UNGETCHAR(c); goto out; default: value->push_back(c); } } out: //cerr << "GOT QUOTED ["<<value<<"] quals [" << d->qualifiers() << "]" << endl; yylval->str = value; return yy::parser::token::QUOTED; } int yylex(yy::parser::semantic_type *yylval, yy::parser::location_type *, WasaParserDriver *d) { if (!d->qualifiers().empty()) { yylval->str = new string(); yylval->str->swap(d->qualifiers()); return yy::parser::token::QUALIFIERS; } int c; /* Skip white space. */ while ((c = d->GETCHAR()) && isspace(c)) continue; if (c == 0) return 0; if (specialstartchars.find_first_of(c) != string::npos) { //cerr << "yylex: return " << c << endl; return c; } // field-term relations, and ranges switch (c) { case '=': return yy::parser::token::EQUALS; case ':': return yy::parser::token::CONTAINS; case '<': { int c1 = d->GETCHAR(); if (c1 == '=') { return yy::parser::token::SMALLEREQ; } else { d->UNGETCHAR(c1); return yy::parser::token::SMALLER; } } case '.': { int c1 = d->GETCHAR(); if (c1 == '.') { return yy::parser::token::RANGE; } else { d->UNGETCHAR(c1); break; } } case '>': { int c1 = d->GETCHAR(); if (c1 == '=') { return yy::parser::token::GREATEREQ; } else { d->UNGETCHAR(c1); return yy::parser::token::GREATER; } } case '(': case ')': return c; } if (c == '"') return parseString(d, yylval); d->UNGETCHAR(c); // Other chars start a term or field name or reserved word string* word = new string(); while ((c = d->GETCHAR())) { if (isspace(c)) { //cerr << "Word broken by whitespace" << endl; break; } else if (specialinchars.find_first_of(c) != string::npos) { //cerr << "Word broken by special char" << endl; d->UNGETCHAR(c); break; } else if (c == '.') { int c1 = d->GETCHAR(); if (c1 == '.') { d->UNGETCHAR(c1); d->UNGETCHAR(c); break; } else { d->UNGETCHAR(c1); word->push_back(c); } } else if (c == 0) { //cerr << "Word broken by EOF" << endl; break; } else { word->push_back(c); } } if (!word->compare("AND") || !word->compare("&&")) { delete word; return yy::parser::token::AND; } else if (!word->compare("OR") || !word->compare("||")) { delete word; return yy::parser::token::OR; } // cerr << "Got word [" << word << "]" << endl; yylval->str = word; return yy::parser::token::WORD; } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/filtseq.h�����������������������������������������������������������������������0000644�0001750�0001750�00000003153�13533651561�013003� 
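// (Editorial illustration of the wasa query lexer and qualify() above; the
// query string is an invented example, not taken from the Recoll sources.)
// For an input such as:
//     author:dockes "exact phrase"o5pC mtime:2018..2019
// yylex() would return, in order: WORD("author") CONTAINS WORD("dockes"),
// then QUOTED("exact phrase") followed by QUALIFIERS("o5pC") on the next
// call (parseString() stashes the trailing alphanumeric qualifier chars in
// the driver), then WORD("mtime") CONTAINS WORD("2018") RANGE WORD("2019"),
// the '..' being split off because the word scanner pushes both dots back.
// The QUOTED/QUALIFIERS pair reduces through the qualquote rule, and
// qualify() then interprets "o5pC": 'o' reads the 5 via qualGetInt() and
// sets the proximity slack, 'p' turns the phrase into a SCLT_NEAR clause
// (keeping the slack of 5 since it is already non-zero), and 'C' adds the
// SDCM_CASESENS modifier.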
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _FILTSEQ_H_INCLUDED_ #define _FILTSEQ_H_INCLUDED_ #include "autoconfig.h" #include <vector> #include <string> #include <memory> #include "docseq.h" class RclConfig; /** * A filtered sequence is created from another one by selecting entries * according to the given criteria. */ class DocSeqFiltered : public DocSeqModifier { public: DocSeqFiltered(RclConfig *conf, std::shared_ptr<DocSequence> iseq, DocSeqFiltSpec &filtspec); virtual ~DocSeqFiltered() {} virtual bool canFilter() {return true;} virtual bool setFiltSpec(const DocSeqFiltSpec &filtspec); virtual bool getDoc(int num, Rcl::Doc &doc, std::string *sh = 0); virtual int getResCnt() {return m_seq->getResCnt();} private: RclConfig *m_config; DocSeqFiltSpec m_spec; std::vector<int> m_dbindices; }; #endif /* _FILTSEQ_H_INCLUDED_ */ ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseqdocs.h��������������������������������������������������������������������0000644�0001750�0001750�00000003540�13533651561�013463� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2013 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _DOCSEQDOCS_H_INCLUDED_ #define _DOCSEQDOCS_H_INCLUDED_ #include <memory> #include "docseq.h" #include "rcldoc.h" namespace Rcl { class Db; } /** A DocSequence that's just built from a bunch of docs */ class DocSequenceDocs : public DocSequence { public: DocSequenceDocs(std::shared_ptr<Rcl::Db> d, const std::vector<Rcl::Doc> docs, const string &t) : DocSequence(t), m_db(d), m_docs(docs) { } virtual ~DocSequenceDocs() { } virtual bool getDoc(int num, Rcl::Doc &doc, string *sh = 0) { if (sh) *sh = string(); if (num < 0 || num >= int(m_docs.size())) return false; doc = m_docs[num]; return true; } virtual int getResCnt() { return m_docs.size(); } virtual string getDescription() { return m_description; } void setDescription(const string& desc) { m_description = desc; } protected: virtual std::shared_ptr<Rcl::Db> getDb() { return m_db; } private: std::shared_ptr<Rcl::Db> m_db; string m_description; std::vector<Rcl::Doc> m_docs; }; #endif /* _DOCSEQ_H_INCLUDED_ */ ����������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/xadump.cpp����������������������������������������������������������������������0000644�0001750�0001750�00000022142�13533651561�013164� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include <stdio.h> #include <stdlib.h> #include <signal.h> #include <strings.h> #include <iostream> #include <string> #include <vector> #include "pathut.h" #ifndef NO_NAMESPACES using namespace std; #endif /* NO_NAMESPACES */ #include "utf8iter.h" #include "xapian.h" static string thisprog; static string usage = " -d <dbdir> \n" "-e <output encoding>\n" " -i docid -D : get document data for docid\n" " -i docid -X : delete document docid\n" " -i docid -T : term list for doc docid\n" " -i docid -r : reconstructed text for docid\n" " -t term -E : term existence test\n" " -t term -F : retrieve term frequency data for given term\n" " -t term -P : retrieve postings for term\n" " -T : list all terms\n" " -f : precede each term in the list with its occurrence counts\n" " -n : raw data (no [])\n" " -l : don't list prefixed terms\n" " -x : separate each output char with a space\n" " -s : special mode to dump recoll stem db\n" " -q term [term ...] : perform AND query\n" " \n\n" ; static void Usage(void) { cerr << thisprog << ": usage:\n" << usage; exit(1); } static int op_flags; #define OPT_D 0x1 #define OPT_E 0x2 #define OPT_F 0x4 #define OPT_P 0x8 #define OPT_T 0x10 #define OPT_X 0x20 #define OPT_d 0x80 #define OPT_e 0x100 #define OPT_f 0x200 #define OPT_i 0x400 #define OPT_n 0x800 #define OPT_q 0x1000 #define OPT_t 0x4000 #define OPT_x 0x8000 #define OPT_l 0x10000 #define OPT_r 0x20000 // Compute an exploded version of string, inserting a space between each char. // (no character combining possible) static string detailstring(const string& in) { if (!(op_flags & OPT_x)) return in; string out; Utf8Iter it(in); for (; !it.eof(); it++) { it.appendchartostring(out); out += ' '; } // Strip last space if (!out.empty()) out.resize(out.size()-1); return out; } Xapian::Database *db; static void cleanup() { delete db; } static void sigcleanup(int sig) { fprintf(stderr, "sigcleanup\n"); cleanup(); exit(1); } bool o_index_stripchars; inline bool has_prefix(const string& trm) { if (o_index_stripchars) { return trm.size() && 'A' <= trm[0] && trm[0] <= 'Z'; } else { return trm.size() > 0 && trm[0] == ':'; } } void wholedoc(Xapian::Database* db, int docid) { vector<string> buf; Xapian::TermIterator term; for (term = db->termlist_begin(docid); term != db->termlist_end(docid); term++) { Xapian::PositionIterator pos; for (pos = db->positionlist_begin(docid, *term); pos != db->positionlist_end(docid, *term); pos++) { if (buf.size() < *pos) buf.resize(2*((*pos)+1)); buf[(*pos)] = detailstring(*term); } } for (vector<string>::iterator it = buf.begin(); it != buf.end(); it++) { if (!it->empty()) cout << *it << " "; } } int main(int argc, char **argv) { string dbdir = path_cat(path_home(), ".recoll/xapiandb"); string outencoding = "ISO8859-1"; int docid = 1; string aterm; thisprog = argv[0]; argc--; argv++; while (argc > 0 && **argv == '-') { (*argv)++; if (!(**argv)) /* Cas du "adb - core" */ Usage(); while (**argv) switch (*(*argv)++) { case 'D': op_flags |= OPT_D; break; case 'd': op_flags |= OPT_d; if (argc < 2) Usage(); dbdir = *(++argv); argc--; goto b1; case 'E': op_flags |= OPT_E; break; case 'e': op_flags |= OPT_d; if (argc < 2) Usage(); outencoding = *(++argv); argc--; goto b1; case 'F': op_flags |= OPT_F; break; case 'f': op_flags |= OPT_f; break; case 'i': op_flags |= OPT_i; if (argc < 2) Usage(); if (sscanf(*(++argv), "%d", &docid) != 1) Usage(); argc--; goto b1; case 'l': op_flags |= OPT_l; break; case 'n': op_flags |= OPT_n; break; case 'P': op_flags |= OPT_P; break; case 'q': 
op_flags |= OPT_q; break; case 'r': case 'b': op_flags |= OPT_r; break; case 'T': op_flags |= OPT_T; break; case 't': op_flags |= OPT_t; if (argc < 2) Usage(); aterm = *(++argv); argc--; goto b1; case 'X': op_flags |= OPT_X; break; case 'x': op_flags |= OPT_x; break; default: Usage(); break; } b1: argc--; argv++; } vector<string> qterms; if (op_flags & OPT_q) { fprintf(stderr, "q argc %d\n", argc); if (argc < 1) Usage(); while (argc > 0) { qterms.push_back(*argv++); argc--; } } if (argc != 0) Usage(); atexit(cleanup); if (signal(SIGHUP, SIG_IGN) != SIG_IGN) signal(SIGHUP, sigcleanup); if (signal(SIGINT, SIG_IGN) != SIG_IGN) signal(SIGINT, sigcleanup); if (signal(SIGQUIT, SIG_IGN) != SIG_IGN) signal(SIGQUIT, sigcleanup); if (signal(SIGTERM, SIG_IGN) != SIG_IGN) signal(SIGTERM, sigcleanup); try { db = new Xapian::Database(dbdir); cout << "DB: ndocs " << db->get_doccount() << " lastdocid " << db->get_lastdocid() << " avglength " << db->get_avlength() << endl; // If we have terms with a leading ':' it's a new style, // unstripped index { Xapian::TermIterator term = db->allterms_begin(":"); if (term == db->allterms_end()) o_index_stripchars = true; else o_index_stripchars = false; cout<<"DB: terms are "<<(o_index_stripchars?"stripped":"raw")<<endl; } if (op_flags & OPT_T) { Xapian::TermIterator term; string printable; string op = (op_flags & OPT_n) ? string(): "["; string cl = (op_flags & OPT_n) ? string(): "]"; if (op_flags & OPT_i) { for (term = db->termlist_begin(docid); term != db->termlist_end(docid);term++) { const string& s = *term; if ((op_flags&OPT_l) && has_prefix(s)) continue; cout << op << detailstring(s) << cl << endl; } } else { for (term = db->allterms_begin(); term != db->allterms_end();term++) { const string& s = *term; if ((op_flags&OPT_l) && has_prefix(s)) continue; if (op_flags & OPT_f) cout << db->get_collection_freq(*term) << " " << term.get_termfreq() << " "; cout << op << detailstring(s) << cl << endl; } } } else if (op_flags & OPT_D) { Xapian::Document doc = db->get_document(docid); string data = doc.get_data(); cout << data << endl; } else if (op_flags & OPT_r) { wholedoc(db, docid); } else if (op_flags & OPT_X) { Xapian::Document doc = db->get_document(docid); string data = doc.get_data(); cout << data << endl; cout << "Really delete xapian document ?" 
<< endl; string rep; cin >> rep; if (!rep.empty() && (rep[0] == 'y' || rep[0] == 'Y')) { Xapian::WritableDatabase wdb(dbdir, Xapian::DB_OPEN); cout << "Deleting" << endl; wdb.delete_document(docid); } } else if (op_flags & OPT_P) { Xapian::PostingIterator doc; for (doc = db->postlist_begin(aterm); doc != db->postlist_end(aterm); doc++) { cout << *doc << "(" << doc.get_wdf() << ") : " ; Xapian::PositionIterator pos; for (pos = doc.positionlist_begin(); pos != doc.positionlist_end(); pos++) { cout << *pos << " " ; } cout << endl; } } else if (op_flags & OPT_F) { cout << "FreqFor " << aterm << " : " << db->get_termfreq(aterm) << endl; } else if (op_flags & OPT_E) { cout << "Exists [" << aterm << "] : " << db->term_exists(aterm) << endl; } else if (op_flags & OPT_q) { Xapian::Enquire enquire(*db); Xapian::Query query(Xapian::Query::OP_AND, qterms.begin(), qterms.end()); cout << "Performing query `" << query.get_description() << "'" << endl; enquire.set_query(query); Xapian::MSet matches = enquire.get_mset(0, 10); cout << "Estimated results: " << matches.get_matches_lower_bound() << endl; Xapian::MSetIterator i; for (i = matches.begin(); i != matches.end(); ++i) { cout << "Document ID " << *i << "\t"; cout << i.get_percent() << "% "; Xapian::Document doc = i.get_document(); cout << "[" << doc.get_data() << "]" << endl; } } } catch (const Xapian::Error &e) { cout << "Exception: " << e.get_msg() << endl; } catch (const string &s) { cout << "Exception: " << s << endl; } catch (const char *s) { cout << "Exception: " << s << endl; } catch (...) { cout << "Caught unknown exception" << endl; } exit(0); } ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/stack.hh������������������������������������������������������������������������0000644�0001750�0001750�00000006602�13347664027�012617� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������// A Bison parser, made by GNU Bison 3.0.4. // Stack handling for Bison parsers in C++ // Copyright (C) 2002-2015 Free Software Foundation, Inc. // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. 
// As a special exception, you may create a larger work that contains // part or all of the Bison parser skeleton and distribute that work // under terms of your choice, so long as that work isn't itself a // parser generator using the skeleton or a modified version thereof // as a parser skeleton. Alternatively, if you modify or redistribute // the parser skeleton itself, you may (at your option) remove this // special exception, which will cause the skeleton and the resulting // Bison output files to be licensed under the GNU General Public // License without this special exception. // This special exception was added by the Free Software Foundation in // version 2.2 of Bison. /** ** \file stack.hh ** Define the yy::stack class. */ #ifndef YY_YY_STACK_HH_INCLUDED # define YY_YY_STACK_HH_INCLUDED # include <vector> namespace yy { #line 46 "stack.hh" // stack.hh:132 template <class T, class S = std::vector<T> > class stack { public: // Hide our reversed order. typedef typename S::reverse_iterator iterator; typedef typename S::const_reverse_iterator const_iterator; stack () : seq_ () { seq_.reserve (200); } stack (unsigned int n) : seq_ (n) {} inline T& operator[] (unsigned int i) { return seq_[seq_.size () - 1 - i]; } inline const T& operator[] (unsigned int i) const { return seq_[seq_.size () - 1 - i]; } /// Steal the contents of \a t. /// /// Close to move-semantics. inline void push (T& t) { seq_.push_back (T()); operator[](0).move (t); } inline void pop (unsigned int n = 1) { for (; n; --n) seq_.pop_back (); } void clear () { seq_.clear (); } inline typename S::size_type size () const { return seq_.size (); } inline const_iterator begin () const { return seq_.rbegin (); } inline const_iterator end () const { return seq_.rend (); } private: stack (const stack&); stack& operator= (const stack&); /// The wrapped container. S seq_; }; /// Present a slice of the top of a stack. template <class T, class S = stack<T> > class slice { public: slice (const S& stack, unsigned int range) : stack_ (stack) , range_ (range) {} inline const T& operator [] (unsigned int i) const { return stack_[range_ - i]; } private: const S& stack_; unsigned int range_; }; } // yy #line 156 "stack.hh" // stack.hh:132 #endif // !YY_YY_STACK_HH_INCLUDED ������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/wasatorcl.h���������������������������������������������������������������������0000644�0001750�0001750�00000002412�13533651561�013330� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _WASATORCL_H_INCLUDED_ #define _WASATORCL_H_INCLUDED_ #include <string> namespace Rcl { class SearchData; } class RclConfig; extern Rcl::SearchData *wasaStringToRcl(const RclConfig *, const std::string& stemlang, const std::string& query, std::string &reason, const std::string& autosuffs = ""); #endif /* _WASATORCL_H_INCLUDED_ */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseqdb.cpp��������������������������������������������������������������������0000644�0001750�0001750�00000016153�13566424763�013467� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <math.h> #include <time.h> #include <list> #include "docseqdb.h" #include "rcldb.h" #include "log.h" #include "wasatorcl.h" using std::list; DocSequenceDb::DocSequenceDb(std::shared_ptr<Rcl::Db> db, std::shared_ptr<Rcl::Query> q, const string &t, std::shared_ptr<Rcl::SearchData> sdata) : DocSequence(t), m_db(db), m_q(q), m_sdata(sdata), m_fsdata(sdata), m_rescnt(-1), m_queryBuildAbstract(true), m_queryReplaceAbstract(false), m_isFiltered(false), m_isSorted(false), m_needSetQuery(false), m_lastSQStatus(true) { } void DocSequenceDb::getTerms(HighlightData& hld) { m_fsdata->getTerms(hld); } string DocSequenceDb::getDescription() { return m_fsdata->getDescription(); } bool DocSequenceDb::getDoc(int num, Rcl::Doc &doc, string *sh) { std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return false; if (sh) sh->erase(); return m_q->getDoc(num, doc); } int DocSequenceDb::getResCnt() { std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return false; if (m_rescnt < 0) { m_rescnt= m_q->getResCnt(); } return m_rescnt; } static const string cstr_mre("[...]"); // This one only gets called to fill-up the snippets window // We ignore most abstract/snippets preferences. bool DocSequenceDb::getAbstract(Rcl::Doc &doc, vector<Rcl::Snippet>& vpabs, int maxlen, bool sortbypage) { LOGDEB("DocSequenceDb::getAbstract/pair\n"); std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return false; // Have to put the limit somewhere. 
int ret = Rcl::ABSRES_ERROR; if (m_q->whatDb()) { ret = m_q->makeDocAbstract( doc, vpabs, maxlen, m_q->whatDb()->getAbsCtxLen() + 2, sortbypage); } LOGDEB("DocSequenceDb::getAbstract: got ret " << ret << " vpabs len " << vpabs.size() << "\n"); if (vpabs.empty()) { return true; } // If the list was probably truncated, indicate it. if (ret & Rcl::ABSRES_TRUNC) { vpabs.push_back(Rcl::Snippet(-1, cstr_mre)); } if (ret & Rcl::ABSRES_TERMMISS) { vpabs.insert(vpabs.begin(), Rcl::Snippet(-1, "(Words missing in snippets)")); } return true; } bool DocSequenceDb::getAbstract(Rcl::Doc &doc, vector<string>& vabs) { std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return false; if (m_q->whatDb() && m_queryBuildAbstract && (doc.syntabs || m_queryReplaceAbstract)) { m_q->makeDocAbstract(doc, vabs); } if (vabs.empty()) vabs.push_back(doc.meta[Rcl::Doc::keyabs]); return true; } int DocSequenceDb::getFirstMatchPage(Rcl::Doc &doc, string& term) { std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return false; if (m_q->whatDb()) { return m_q->getFirstMatchPage(doc, term); } return -1; } list<string> DocSequenceDb::expand(Rcl::Doc &doc) { std::unique_lock<std::mutex> locker(o_dblock); if (!setQuery()) return list<string>(); vector<string> v = m_q->expand(doc); return list<string>(v.begin(), v.end()); } string DocSequenceDb::title() { string qual; if (m_isFiltered && !m_isSorted) qual = string(" (") + o_filt_trans + string(")"); else if (!m_isFiltered && m_isSorted) qual = string(" (") + o_sort_trans + string(")"); else if (m_isFiltered && m_isSorted) qual = string(" (") + o_sort_trans + string(",") + o_filt_trans + string(")"); return DocSequence::title() + qual; } bool DocSequenceDb::setFiltSpec(const DocSeqFiltSpec &fs) { LOGDEB("DocSequenceDb::setFiltSpec\n"); std::unique_lock<std::mutex> locker(o_dblock); if (fs.isNotNull()) { // We build a search spec by adding a filtering layer to the base one. m_fsdata = std::shared_ptr<Rcl::SearchData>( new Rcl::SearchData(Rcl::SCLT_AND, m_sdata->getStemLang())); Rcl::SearchDataClauseSub *cl = new Rcl::SearchDataClauseSub(m_sdata); m_fsdata->addClause(cl); for (unsigned int i = 0; i < fs.crits.size(); i++) { switch (fs.crits[i]) { case DocSeqFiltSpec::DSFS_MIMETYPE: m_fsdata->addFiletype(fs.values[i]); break; case DocSeqFiltSpec::DSFS_QLANG: { if (!m_q) break; string reason; Rcl::SearchData *sd = wasaStringToRcl(m_q->whatDb()->getConf(), m_sdata->getStemLang(), fs.values[i], reason); if (sd) { Rcl::SearchDataClauseSub *cl1 = new Rcl::SearchDataClauseSub( std::shared_ptr<Rcl::SearchData>(sd)); m_fsdata->addClause(cl1); } } break; default: break; } } m_isFiltered = true; } else { m_fsdata = m_sdata; m_isFiltered = false; } m_needSetQuery = true; return true; } bool DocSequenceDb::setSortSpec(const DocSeqSortSpec &spec) { LOGDEB("DocSequenceDb::setSortSpec: fld [" << spec.field << "] " << (spec.desc ? 
"desc" : "asc") << "\n"); std::unique_lock<std::mutex> locker(o_dblock); if (spec.isNotNull()) { m_q->setSortBy(spec.field, !spec.desc); m_isSorted = true; } else { m_q->setSortBy(string(), true); m_isSorted = false; } m_needSetQuery = true; return true; } bool DocSequenceDb::setQuery() { if (!m_needSetQuery) return true; m_needSetQuery = false; m_rescnt = -1; m_lastSQStatus = m_q->setQuery(m_fsdata); if (!m_lastSQStatus) { m_reason = m_q->getReason(); LOGERR("DocSequenceDb::setQuery: rclquery::setQuery failed: " << m_reason << "\n"); } return m_lastSQStatus; } bool DocSequenceDb::docDups(const Rcl::Doc& doc, std::vector<Rcl::Doc>& dups) { if (m_q->whatDb()) { std::unique_lock<std::mutex> locker(o_dblock); return m_q->whatDb()->docDups(doc, dups); } else { return false; } } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/filtseq.cpp���������������������������������������������������������������������0000644�0001750�0001750�00000007245�13533651561�013344� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "log.h" #include "filtseq.h" #include "rclconfig.h" using std::string; static bool filter(const DocSeqFiltSpec& fs, const Rcl::Doc *x) { LOGDEB2(" Filter: ncrits " << (fs.crits.size()) << "\n" ); // Compare using each criterion in term. 
We're doing an or: // 1st ok ends for (unsigned int i = 0; i < fs.crits.size(); i++) { switch (fs.crits[i]) { case DocSeqFiltSpec::DSFS_MIMETYPE: LOGDEB2(" filter: MIMETYPE: me [" << (fs.values[i]) << "] doc [" << (x->mimetype) << "]\n" ); if (x->mimetype == fs.values[i]) return true; break; case DocSeqFiltSpec::DSFS_QLANG: { LOGDEB(" filter: QLANG [" << (fs.values[i]) << "]!!\n" ); } break; case DocSeqFiltSpec::DSFS_PASSALL: return true; } } // Did all comparisons return false; } DocSeqFiltered::DocSeqFiltered(RclConfig *conf, std::shared_ptr<DocSequence> iseq, DocSeqFiltSpec &filtspec) : DocSeqModifier(iseq), m_config(conf) { setFiltSpec(filtspec); } bool DocSeqFiltered::setFiltSpec(const DocSeqFiltSpec &filtspec) { LOGDEB0("DocSeqFiltered::setFiltSpec\n" ); for (unsigned int i = 0; i < filtspec.crits.size(); i++) { switch (filtspec.crits[i]) { case DocSeqFiltSpec::DSFS_MIMETYPE: m_spec.orCrit(filtspec.crits[i], filtspec.values[i]); break; case DocSeqFiltSpec::DSFS_QLANG: { // There are very few lang constructs that we can // interpret. The default config uses rclcat:value // only. That will be all for now... string val = filtspec.values[i]; if (val.find("rclcat:") == 0) { string catg = val.substr(7); vector<string> tps; m_config->getMimeCatTypes(catg, tps); for (vector<string>::const_iterator it = tps.begin(); it != tps.end(); it++) { LOGDEB2("Adding mime: [" << (it) << "]\n" ); m_spec.orCrit(DocSeqFiltSpec::DSFS_MIMETYPE, *it); } } } break; default: break; } } // If m_spec ends up empty, pass everything, better than filtering all. if (m_spec.crits.empty()) { m_spec.orCrit(DocSeqFiltSpec::DSFS_PASSALL, ""); } m_dbindices.clear(); return true; } bool DocSeqFiltered::getDoc(int idx, Rcl::Doc &doc, string *) { LOGDEB2("DocSeqFiltered::getDoc() fetching " << (idx) << "\n" ); if (idx >= (int)m_dbindices.size()) { // Have to fetch docs and filter until we get enough or // fail m_dbindices.reserve(idx+1); // First backend seq doc we fetch is the one after last stored int backend_idx = m_dbindices.size() > 0 ? 
m_dbindices.back() + 1 : 0; // Loop until we get enough docs Rcl::Doc tdoc; while (idx >= (int)m_dbindices.size()) { if (!m_seq->getDoc(backend_idx, tdoc)) return false; if (filter(m_spec, &tdoc)) { m_dbindices.push_back(backend_idx); } backend_idx++; } doc = tdoc; } else { // The corresponding backend indice is already known if (!m_seq->getDoc(m_dbindices[idx], doc)) return false; } return true; } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/query/docseqdb.h����������������������������������������������������������������������0000644�0001750�0001750�00000006154�13566424763�013134� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _DOCSEQDB_H_INCLUDED_ #define _DOCSEQDB_H_INCLUDED_ #include <memory> #include "docseq.h" #include "searchdata.h" #include "rclquery.h" /** A DocSequence from a Db query */ class DocSequenceDb : public DocSequence { public: DocSequenceDb(std::shared_ptr<Rcl::Db> db, std::shared_ptr<Rcl::Query> q, const std::string &t, std::shared_ptr<Rcl::SearchData> sdata); virtual ~DocSequenceDb() {} virtual bool getDoc(int num, Rcl::Doc &doc, std::string * = 0) override; virtual int getResCnt() override; virtual void getTerms(HighlightData& hld) override; // Called to fill-up the snippets window. 
Ignoers // buildabstract/replaceabstract and syntabslen virtual bool getAbstract(Rcl::Doc &doc, std::vector<Rcl::Snippet>&, int maxlen, bool sortbypage) override; virtual bool getAbstract(Rcl::Doc &doc, std::vector<std::string>&) override; virtual int getFirstMatchPage(Rcl::Doc&, std::string& term) override; virtual bool docDups(const Rcl::Doc& doc, std::vector<Rcl::Doc>& dups) override; virtual std::string getDescription() override; virtual std::list<std::string> expand(Rcl::Doc &doc) override; virtual bool canFilter() override {return true;} virtual bool setFiltSpec(const DocSeqFiltSpec &filtspec) override; virtual bool canSort() override {return true;} virtual bool setSortSpec(const DocSeqSortSpec &sortspec) override; virtual void setAbstractParams(bool qba, bool qra) { m_queryBuildAbstract = qba; m_queryReplaceAbstract = qra; } virtual bool snippetsCapable() override { return true; } virtual std::string title() override; protected: virtual std::shared_ptr<Rcl::Db> getDb() override { return m_db; } private: std::shared_ptr<Rcl::Db> m_db; std::shared_ptr<Rcl::Query> m_q; std::shared_ptr<Rcl::SearchData> m_sdata; std::shared_ptr<Rcl::SearchData> m_fsdata; // Filtered int m_rescnt; bool m_queryBuildAbstract; bool m_queryReplaceAbstract; bool m_isFiltered; bool m_isSorted; bool m_needSetQuery; // search data changed, need to reapply before fetch bool m_lastSQStatus; bool setQuery(); }; #endif /* _DOCSEQDB_H_INCLUDED_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/COPYING�������������������������������������������������������������������������������0000644�0001750�0001750�00000043121�13533651561�011050� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/compile�������������������������������������������������������������������������������0000755�0001750�0001750�00000016245�13570165161�011377� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#! /bin/sh # Wrapper for compilers which do not understand '-c -o'. scriptversion=2012-10-14.11; # UTC # Copyright (C) 1999-2014 Free Software Foundation, Inc. # Written by Tom Tromey <tromey@cygnus.com>. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # This file is maintained in Automake, please report # bugs to <bug-automake@gnu.org> or send patches to # <automake-patches@gnu.org>. nl=' ' # We need space, tab and new line, in precisely that order. Quoting is # there to prevent tools from complaining about whitespace usage. IFS=" "" $nl" file_conv= # func_file_conv build_file lazy # Convert a $build file to $host form and store it in $file # Currently only supports Windows hosts. If the determined conversion # type is listed in (the comma separated) LAZY, no conversion will # take place. 
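# Hypothetical usage sketch (an editorial addition, not part of the original
# Automake helper): on an MSYS/MinGW host, a call such as
#   func_file_conv /c/project/foo.c
# is expected to leave $file holding a Windows-style path that cl.exe can
# understand -- the mingw branch below pipes the name through `cmd //C echo`,
# presumably relying on the shell's path translation -- while a relative name
# like obj/foo.o does not match the absolute-path case pattern and is left
# untouched.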
func_file_conv () { file=$1 case $file in / | /[!/]*) # absolute file, and not a UNC file if test -z "$file_conv"; then # lazily determine how to convert abs files case `uname -s` in MINGW*) file_conv=mingw ;; CYGWIN*) file_conv=cygwin ;; *) file_conv=wine ;; esac fi case $file_conv/,$2, in *,$file_conv,*) ;; mingw/*) file=`cmd //C echo "$file " | sed -e 's/"\(.*\) " *$/\1/'` ;; cygwin/*) file=`cygpath -m "$file" || echo "$file"` ;; wine/*) file=`winepath -w "$file" || echo "$file"` ;; esac ;; esac } # func_cl_dashL linkdir # Make cl look for libraries in LINKDIR func_cl_dashL () { func_file_conv "$1" if test -z "$lib_path"; then lib_path=$file else lib_path="$lib_path;$file" fi linker_opts="$linker_opts -LIBPATH:$file" } # func_cl_dashl library # Do a library search-path lookup for cl func_cl_dashl () { lib=$1 found=no save_IFS=$IFS IFS=';' for dir in $lib_path $LIB do IFS=$save_IFS if $shared && test -f "$dir/$lib.dll.lib"; then found=yes lib=$dir/$lib.dll.lib break fi if test -f "$dir/$lib.lib"; then found=yes lib=$dir/$lib.lib break fi if test -f "$dir/lib$lib.a"; then found=yes lib=$dir/lib$lib.a break fi done IFS=$save_IFS if test "$found" != yes; then lib=$lib.lib fi } # func_cl_wrapper cl arg... # Adjust compile command to suit cl func_cl_wrapper () { # Assume a capable shell lib_path= shared=: linker_opts= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. eat=1 case $2 in *.o | *.[oO][bB][jJ]) func_file_conv "$2" set x "$@" -Fo"$file" shift ;; *) func_file_conv "$2" set x "$@" -Fe"$file" shift ;; esac ;; -I) eat=1 func_file_conv "$2" mingw set x "$@" -I"$file" shift ;; -I*) func_file_conv "${1#-I}" mingw set x "$@" -I"$file" shift ;; -l) eat=1 func_cl_dashl "$2" set x "$@" "$lib" shift ;; -l*) func_cl_dashl "${1#-l}" set x "$@" "$lib" shift ;; -L) eat=1 func_cl_dashL "$2" ;; -L*) func_cl_dashL "${1#-L}" ;; -static) shared=false ;; -Wl,*) arg=${1#-Wl,} save_ifs="$IFS"; IFS=',' for flag in $arg; do IFS="$save_ifs" linker_opts="$linker_opts $flag" done IFS="$save_ifs" ;; -Xlinker) eat=1 linker_opts="$linker_opts $2" ;; -*) set x "$@" "$1" shift ;; *.cc | *.CC | *.cxx | *.CXX | *.[cC]++) func_file_conv "$1" set x "$@" -Tp"$file" shift ;; *.c | *.cpp | *.CPP | *.lib | *.LIB | *.Lib | *.OBJ | *.obj | *.[oO]) func_file_conv "$1" mingw set x "$@" "$file" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -n "$linker_opts"; then linker_opts="-link$linker_opts" fi exec "$@" $linker_opts exit 1 } eat= case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: compile [--help] [--version] PROGRAM [ARGS] Wrapper for compilers which do not understand '-c -o'. Remove '-o dest.o' from ARGS, run PROGRAM with the remaining arguments, and rename the output as expected. If you are trying to build a whole package this is not the right script to run: please start by reading the file 'INSTALL'. Report bugs to <bug-automake@gnu.org>. EOF exit $? ;; -v | --v*) echo "compile $scriptversion" exit $? ;; cl | *[/\\]cl | cl.exe | *[/\\]cl.exe ) func_cl_wrapper "$@" # Doesn't return... ;; esac ofile= cfile= for arg do if test -n "$eat"; then eat= else case $1 in -o) # configure might choose to run compile as 'compile cc -o foo foo.c'. # So we strip '-o arg' only if arg is an object. 
eat=1 case $2 in *.o | *.obj) ofile=$2 ;; *) set x "$@" -o "$2" shift ;; esac ;; *.c) cfile=$1 set x "$@" "$1" shift ;; *) set x "$@" "$1" shift ;; esac fi shift done if test -z "$ofile" || test -z "$cfile"; then # If no '-o' option was seen then we might have been invoked from a # pattern rule where we don't need one. That is ok -- this is a # normal compilation that the losing compiler can handle. If no # '.c' file was seen then we are probably linking. That is also # ok. exec "$@" fi # Name of file we expect compiler to create. cofile=`echo "$cfile" | sed 's|^.*[\\/]||; s|^[a-zA-Z]:||; s/\.c$/.o/'` # Create the lock directory. # Note: use '[/\\:.-]' here to ensure that we don't use the same name # that we are using for the .o file. Also, base the name on the expected # object file name, since that is what matters with a parallel build. lockdir=`echo "$cofile" | sed -e 's|[/\\:.-]|_|g'`.d while true; do if mkdir "$lockdir" >/dev/null 2>&1; then break fi sleep 1 done # FIXME: race condition here if user kills between mkdir and trap. trap "rmdir '$lockdir'; exit 1" 1 2 15 # Run the compile. "$@" ret=$? if test -f "$cofile"; then test "$cofile" = "$ofile" || mv "$cofile" "$ofile" elif test -f "${cofile}bj"; then test "${cofile}bj" = "$ofile" || mv "${cofile}bj" "$ofile" fi rmdir "$lockdir" exit $ret # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/Makefile.am���������������������������������������������������������������������������0000644�0001750�0001750�00000046141�13570157345�012060� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������ # Conditionally enable building the small test drivers, but don't # distribute them, they are not generally useful if COND_TESTMAINS MAYBE_TESTMAINS = testmains endif SUBDIRS = . $(MAYBE_TESTMAINS) DIST_SUBDIRS = . CXXFLAGS ?= @CXXFLAGS@ LIBXAPIAN=@LIBXAPIAN@ XAPIANCXXFLAGS=@XAPIANCXXFLAGS@ XSLT_CFLAGS=@XSLT_CFLAGS@ XSLT_LINKADD=@XSLT_LINKADD@ LIBICONV=@LIBICONV@ INCICONV=@INCICONV@ LIBFAM = @LIBFAM@ RCLLIBVERSION=@RCLLIBVERSION@ X_CFLAGS=@X_CFLAGS@ X_PRE_LIBS=@X_PRE_LIBS@ X_LIBS=@X_LIBS@ X_EXTRA_LIBS=@X_EXTRA_LIBS@ X_LIBX11=@X_LIBX11@ DEFS=@DEFS@ COMMONCPPFLAGS = -I. 
\ -I$(top_srcdir)/aspell \ -I$(top_srcdir)/bincimapmime \ -I$(top_srcdir)/common \ -I$(top_srcdir)/index \ -I$(top_srcdir)/internfile \ -I$(top_srcdir)/rcldb \ -I$(top_srcdir)/unac \ -I$(top_srcdir)/utils \ -I$(top_srcdir)/xaposix \ -DBUILDING_RECOLL AM_CPPFLAGS = -Wall -Wno-unused -std=c++11 \ $(COMMONCPPFLAGS) \ $(INCICONV) \ $(XAPIANCXXFLAGS) \ $(XSLT_CFLAGS) \ $(X_CFLAGS) \ -DRECOLL_DATADIR=\"${pkgdatadir}\" \ -DREADFILE_ENABLE_ZLIB -DREADFILE_ENABLE_MINIZ -DREADFILE_ENABLE_MD5 \ -D_GNU_SOURCE \ $(DEFS) ACLOCAL_AMFLAGS = -I m4 if NOTHREADS LIBTHREADS= else LIBTHREADS= $(LIBSYSTHREADS) endif librcldir = $(libdir)/recoll librcl_LTLIBRARIES = librecoll.la librecoll_la_SOURCES = \ aspell/aspell-local.h \ aspell/rclaspell.cpp \ aspell/rclaspell.h \ bincimapmime/convert.cc \ bincimapmime/convert.h \ bincimapmime/mime-inputsource.h \ bincimapmime/mime-parsefull.cc \ bincimapmime/mime-parseonlyheader.cc \ bincimapmime/mime-printbody.cc \ bincimapmime/mime-utils.h \ bincimapmime/mime.cc \ bincimapmime/mime.h \ common/webstore.cpp \ common/webstore.h \ common/conf_post.h \ common/cstr.cpp \ common/cstr.h \ common/rclconfig.cpp \ common/rclconfig.h \ common/rclinit.cpp \ common/rclinit.h \ common/syngroups.cpp \ common/syngroups.h \ common/textsplit.cpp \ common/textsplit.h \ common/unacpp.cpp \ common/unacpp.h \ common/uproplist.h \ common/utf8fn.cpp \ common/utf8fn.h \ index/webqueue.cpp \ index/webqueue.h \ index/webqueuefetcher.cpp \ index/webqueuefetcher.h \ index/checkretryfailed.cpp \ index/checkretryfailed.h \ index/exefetcher.cpp \ index/exefetcher.h \ index/fetcher.cpp \ index/fetcher.h \ index/fsfetcher.cpp \ index/fsfetcher.h \ index/fsindexer.cpp \ index/fsindexer.h \ index/idxstatus.h \ index/idxstatus.cpp \ index/mimetype.cpp \ index/mimetype.h \ index/rclmon.h \ index/recollindex.h \ index/subtreelist.cpp \ index/subtreelist.h \ internfile/Filter.h \ internfile/extrameta.cpp \ internfile/extrameta.h \ internfile/htmlparse.cpp \ internfile/htmlparse.h \ internfile/indextext.h \ internfile/internfile.cpp \ internfile/internfile.h \ internfile/mh_exec.cpp \ internfile/mh_exec.h \ internfile/mh_execm.cpp \ internfile/mh_execm.h \ internfile/mh_html.cpp \ internfile/mh_html.h \ internfile/mh_mail.cpp \ internfile/mh_mail.h \ internfile/mh_mbox.cpp \ internfile/mh_mbox.h \ internfile/mh_null.h \ internfile/mh_symlink.h \ internfile/mh_text.cpp \ internfile/mh_text.h \ internfile/mh_unknown.h \ internfile/mh_xslt.cpp \ internfile/mh_xslt.h \ internfile/mimehandler.cpp \ internfile/mimehandler.h \ internfile/myhtmlparse.cpp \ internfile/myhtmlparse.h \ internfile/txtdcode.cpp \ internfile/uncomp.cpp \ internfile/uncomp.h \ query/docseq.cpp \ query/docseq.h \ query/docseqdb.cpp \ query/docseqdb.h \ query/docseqdocs.h \ query/docseqhist.cpp \ query/docseqhist.h \ query/dynconf.cpp \ query/dynconf.h \ query/filtseq.cpp \ query/filtseq.h \ query/plaintorich.cpp \ query/plaintorich.h \ query/recollq.cpp \ query/recollq.h \ query/reslistpager.cpp \ query/reslistpager.h \ query/sortseq.cpp \ query/sortseq.h \ query/wasaparse.ypp \ query/wasaparseaux.cpp \ query/wasaparserdriver.h \ query/wasatorcl.h \ rcldb/daterange.cpp \ rcldb/daterange.h \ rcldb/expansiondbs.cpp \ rcldb/expansiondbs.h \ rcldb/rclabstract.cpp \ rcldb/rclabsfromtext.cpp \ rcldb/rcldb.cpp \ rcldb/rcldb.h \ rcldb/rcldb_p.h \ rcldb/rcldoc.cpp \ rcldb/rcldoc.h \ rcldb/rcldups.cpp \ rcldb/rclquery.cpp \ rcldb/rclquery.h \ rcldb/rclquery_p.h \ rcldb/rclterms.cpp \ rcldb/rclvalues.cpp \ rcldb/rclvalues.h \ rcldb/searchdata.cpp \ 
rcldb/searchdata.h \ rcldb/searchdatatox.cpp \ rcldb/searchdataxml.cpp \ rcldb/stemdb.cpp \ rcldb/stemdb.h \ rcldb/stoplist.cpp \ rcldb/stoplist.h \ rcldb/synfamily.cpp \ rcldb/synfamily.h \ rcldb/termproc.h \ rcldb/xmacros.h \ unac/unac.cpp \ unac/unac.h \ unac/unac_version.h \ utils/appformime.cpp \ utils/appformime.h \ utils/base64.cpp \ utils/base64.h \ utils/cancelcheck.cpp \ utils/cancelcheck.h \ utils/chrono.h \ utils/chrono.cpp \ utils/circache.cpp \ utils/circache.h \ utils/closefrom.cpp \ utils/closefrom.h \ utils/conftree.cpp \ utils/conftree.h \ utils/copyfile.cpp \ utils/copyfile.h \ utils/cpuconf.cpp \ utils/cpuconf.h \ utils/dlib.cpp \ utils/dlib.h \ utils/ecrontab.cpp \ utils/ecrontab.h \ utils/execmd.cpp \ utils/execmd.h \ utils/fileudi.cpp \ utils/fileudi.h \ utils/fstreewalk.cpp \ utils/fstreewalk.h \ utils/hldata.h \ utils/hldata.cpp \ utils/idfile.cpp \ utils/idfile.h \ utils/listmem.cpp \ utils/listmem.h \ utils/log.cpp \ utils/log.h \ utils/md5.cpp \ utils/md5.h \ utils/md5ut.cpp \ utils/md5ut.h \ utils/mimeparse.cpp \ utils/mimeparse.h \ utils/miniz.cpp \ utils/miniz.h \ utils/netcon.cpp \ utils/netcon.h \ utils/pathut.cpp \ utils/pathut.h \ utils/pxattr.cpp \ utils/pxattr.h \ utils/rclionice.cpp \ utils/rclionice.h \ utils/rclutil.h \ utils/rclutil.cpp \ utils/readfile.cpp \ utils/readfile.h \ utils/smallut.cpp \ utils/smallut.h \ utils/strmatcher.cpp \ utils/strmatcher.h \ utils/transcode.cpp \ utils/transcode.h \ utils/utf8iter.cpp \ utils/utf8iter.h \ utils/wipedir.cpp \ utils/wipedir.h \ utils/workqueue.h \ utils/zlibut.cpp \ utils/zlibut.h \ xaposix/safefcntl.h \ xaposix/safesysstat.h \ xaposix/safesyswait.h \ xaposix/safeunistd.h BUILT_SOURCES = query/wasaparse.cpp AM_YFLAGS = -d # We use -release: the lib is only shared # between recoll programs from the same release. # -version-info $(VERSION_INFO) librecoll_la_LDFLAGS = -release $(VERSION) \ -Wl,--no-undefined -Wl,--warn-unresolved-symbols librecoll_la_LIBADD = $(XSLT_LINKADD) $(LIBXAPIAN) $(LIBICONV) $(LIBTHREADS) # There is probably a better way to do this. The KIO needs to be linked # with librecoll, but librecoll is installed into a non-standard place # (/usr/lib/recoll). Debian packaging has something against setting an # rpath on the kio (cause it's not the same package as the lib), so I don't # know how to link it dynamically. The other thing I don't know is how to # force automake to build a static lib with the PIC objects. So the # following target, which is only used from the KIO build, deletes any .a # and .so and rebuilds the .a with the pic objs (the kio build calls # configured --disable-static). # Of course this is very uncomfortably close to automake/libtool internals # and may not work on all systems. 
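# Illustrative only (editorial note, not upstream documentation): based on the
# comment above, the sequence driven from the KIO build would be roughly
#   ./configure --disable-static && make && make PicStatic
# after which .libs/librecoll.a has been rebuilt from the PIC objects, so the
# kio_recoll module can link it statically instead of relying on an rpath to
# the private /usr/lib/recoll directory.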
PicStatic: $(librecoll_la_OBJECTS) rm -f .libs/librecoll.a rm -f .libs/librecoll.so $(LIBTOOL) --tag=LD --mode=link gcc -g -O -o librecoll.la \ $(librecoll_la_OBJECTS) bin_PROGRAMS = recollindex if MAKECMDLINE bin_PROGRAMS += recollq endif if MAKEXADUMP bin_PROGRAMS += xadump endif recollindex_SOURCES = \ index/recollindex.cpp \ index/indexer.cpp \ index/indexer.h \ index/rclmonprc.cpp \ index/rclmonrcv.cpp \ utils/x11mon.cpp \ utils/x11mon.h recollindex_LDADD = librecoll.la $(X_LIBX11) recollq_SOURCES = query/recollqmain.cpp recollq_LDADD = librecoll.la xadump_SOURCES = query/xadump.cpp xadump_LDADD = librecoll.la $(LIBXAPIAN) $(LIBICONV) # Note: I'd prefer the generated query parser files not to be distributed # at all, but failed to achieve this EXTRA_DIST = \ bincimapmime/00README.recoll bincimapmime/AUTHORS bincimapmime/COPYING \ \ desktop/hotrecoll.py \ desktop/recoll.appdata.xml \ desktop/recollindex.desktop \ desktop/recoll_index_on_ac.sh \ desktop/recoll-searchgui.desktop \ desktop/recoll.png desktop/recoll.svg desktop/recoll.xcf \ \ doc/prog/Makefile doc/prog/Doxyfile doc/prog/filters.txt doc/prog/top.txt \ \ doc/user/usermanual.html doc/user/docbook-xsl.css doc/user/docbook.css \ doc/user/Makefile doc/user/recoll.conf.xml \ doc/user/custom.xsl doc/user/usermanual.xml \ \ filters/injectcommon.sh filters/recfiltcommon filters/rcltxtlines.py \ \ index/rclmon.sh \ \ kde/kioslave/kio_recoll/00README.txt \ kde/kioslave/kio_recoll/CMakeLists.txt \ kde/kioslave/kio_recoll/data/help.html \ kde/kioslave/kio_recoll/data/searchable.html \ kde/kioslave/kio_recoll/data/welcome.html \ kde/kioslave/kio_recoll/dirif.cpp \ kde/kioslave/kio_recoll/htmlif.cpp \ kde/kioslave/kio_recoll/kio_recoll.cpp \ kde/kioslave/kio_recoll/kio_recoll.h \ kde/kioslave/kio_recoll/recollf.protocol \ kde/kioslave/kio_recoll/recollnolist.protocol \ kde/kioslave/kio_recoll/recoll.protocol \ \ kde/kioslave/kio_recoll-kde4/00README.txt \ kde/kioslave/kio_recoll-kde4/CMakeLists.txt \ kde/kioslave/kio_recoll-kde4/data/help.html \ kde/kioslave/kio_recoll-kde4/data/searchable.html \ kde/kioslave/kio_recoll-kde4/data/welcome.html \ kde/kioslave/kio_recoll-kde4/dirif.cpp \ kde/kioslave/kio_recoll-kde4/htmlif.cpp \ kde/kioslave/kio_recoll-kde4/kio_recoll.cpp \ kde/kioslave/kio_recoll-kde4/kio_recoll.h \ kde/kioslave/kio_recoll-kde4/recollf.protocol \ kde/kioslave/kio_recoll-kde4/recollnolist.protocol \ kde/kioslave/kio_recoll-kde4/recoll.protocol \ \ query/location.hh query/position.hh query/stack.hh \ \ qtgui/advsearch.ui \ qtgui/advsearch_w.cpp \ qtgui/advsearch_w.h \ qtgui/advshist.cpp \ qtgui/advshist.h \ qtgui/confgui/confgui.cpp \ qtgui/confgui/confgui.h \ qtgui/confgui/confguiindex.cpp \ qtgui/confgui/confguiindex.h \ qtgui/crontool.cpp \ qtgui/crontool.h \ qtgui/crontool.ui \ qtgui/firstidx.h \ qtgui/firstidx.ui \ qtgui/fragbuts.cpp \ qtgui/fragbuts.h \ qtgui/guiutils.cpp \ qtgui/guiutils.h \ qtgui/i18n/*.qm qtgui/i18n/*.ts \ qtgui/idxsched.h \ qtgui/idxsched.ui \ qtgui/images/asearch.png \ qtgui/images/cancel.png \ qtgui/images/close.png \ qtgui/images/clock.png \ qtgui/images/code-block.png \ qtgui/images/down.png \ qtgui/images/firstpage.png \ qtgui/images/history.png \ qtgui/images/interro.png \ qtgui/images/nextpage.png \ qtgui/images/prevpage.png \ qtgui/images/recoll.icns \ qtgui/images/recoll.png \ qtgui/images/sortparms.png \ qtgui/images/spell.png \ qtgui/images/table.png \ qtgui/images/up.png \ qtgui/main.cpp \ qtgui/mtpics/License_sidux.txt \ qtgui/mtpics/README \ qtgui/mtpics/aptosid-book.png \ 
qtgui/mtpics/aptosid-manual-copyright.txt \ qtgui/mtpics/aptosid-manual.png \ qtgui/mtpics/archive.png \ qtgui/mtpics/book.png \ qtgui/mtpics/bookchap.png \ qtgui/mtpics/document.png \ qtgui/mtpics/drawing.png \ qtgui/mtpics/emblem-symbolic-link.png \ qtgui/mtpics/folder.png \ qtgui/mtpics/html.png \ qtgui/mtpics/image.png \ qtgui/mtpics/message.png \ qtgui/mtpics/mozilla_doc.png \ qtgui/mtpics/pdf.png \ qtgui/mtpics/pidgin.png \ qtgui/mtpics/postscript.png \ qtgui/mtpics/presentation.png \ qtgui/mtpics/sidux-book.png \ qtgui/mtpics/soffice.png \ qtgui/mtpics/source.png \ qtgui/mtpics/sownd.png \ qtgui/mtpics/spreadsheet.png \ qtgui/mtpics/text-x-python.png \ qtgui/mtpics/txt.png \ qtgui/mtpics/video.png \ qtgui/mtpics/wordprocessing.png \ qtgui/multisave.cpp \ qtgui/multisave.h \ qtgui/preview_load.cpp \ qtgui/preview_load.h \ qtgui/preview_plaintorich.cpp \ qtgui/preview_plaintorich.h \ qtgui/preview_w.cpp \ qtgui/preview_w.h \ qtgui/preview.ui \ qtgui/ptrans.ui \ qtgui/ptrans_w.cpp \ qtgui/ptrans_w.h \ qtgui/rclhelp.cpp \ qtgui/rclhelp.h \ qtgui/rclm_idx.cpp \ qtgui/rclm_preview.cpp \ qtgui/rclm_saveload.cpp \ qtgui/rclm_view.cpp \ qtgui/rclm_wins.cpp \ qtgui/rclmain.ui \ qtgui/rclmain_w.cpp \ qtgui/rclmain_w.h \ qtgui/rclzg.cpp \ qtgui/rclzg.h \ qtgui/recoll.h \ qtgui/recoll.pro.in \ qtgui/recoll.qrc \ qtgui/reslist.cpp \ qtgui/reslist.h \ qtgui/respopup.cpp \ qtgui/respopup.h \ qtgui/restable.cpp \ qtgui/restable.h \ qtgui/restable.ui \ qtgui/rtitool.cpp \ qtgui/rtitool.h \ qtgui/rtitool.ui \ qtgui/searchclause_w.cpp \ qtgui/searchclause_w.h \ qtgui/snippets.ui \ qtgui/snippets_w.cpp \ qtgui/snippets_w.h \ qtgui/specialindex.h \ qtgui/specialindex.ui \ qtgui/spell.ui \ qtgui/spell_w.cpp \ qtgui/spell_w.h \ qtgui/ssearch_w.cpp \ qtgui/ssearch_w.h \ qtgui/ssearchb.ui \ qtgui/systray.cpp \ qtgui/systray.h \ qtgui/ui_rclmain.h-4.5 \ qtgui/uiprefs.ui \ qtgui/uiprefs_w.cpp \ qtgui/uiprefs_w.h \ qtgui/viewaction.ui \ qtgui/viewaction_w.cpp \ qtgui/viewaction_w.h \ qtgui/webcache.ui \ qtgui/webcache.cpp \ qtgui/webcache.h \ qtgui/widgets/editdialog.h \ qtgui/widgets/editdialog.ui \ qtgui/widgets/listdialog.h \ qtgui/widgets/listdialog.ui \ qtgui/widgets/qxtconfirmationmessage.cpp \ qtgui/widgets/qxtconfirmationmessage.h \ qtgui/widgets/qxtglobal.h \ qtgui/xmltosd.cpp \ qtgui/xmltosd.h \ \ python/README.txt \ python/pychm/AUTHORS \ python/pychm/COPYING \ python/pychm/MANIFEST.in \ python/pychm/README-RECOLL.txt \ python/pychm/recollchm \ python/pychm/recollchm/__init__.py \ python/pychm/recollchm/chm.py \ python/pychm/recollchm/chmlib.py \ python/pychm/recollchm/extra.c \ python/pychm/recollchm/swig_chm.c \ python/pychm/recollchm/swig_chm.i \ python/pychm/setup.py.in \ python/recoll/Makefile \ python/recoll/pyrclextract.cpp \ python/recoll/pyrecoll.cpp \ python/recoll/pyrecoll.h \ python/recoll/recoll/__init__.py \ python/recoll/recoll/conftree.py \ python/recoll/recoll/rclconfig.py \ python/recoll/setup.py.in \ python/samples/docdups.py \ python/samples/mutt-recoll.py \ python/samples/rcldlkp.py \ python/samples/rclmbox.py \ python/samples/recollgui/Makefile \ python/samples/recollgui/qrecoll.py \ python/samples/recollgui/rclmain.ui \ python/samples/recollq.py \ python/samples/recollqsd.py \ \ \ sampleconf/fields sampleconf/fragbuts.xml sampleconf/mimeconf \ sampleconf/mimemap sampleconf/mimeview sampleconf/mimeview.mac \ sampleconf/recoll.conf sampleconf/recoll.qss \ \ testmains/Makefile.am \ \ unac/AUTHORS unac/COPYING unac/README unac/README.recoll unac/unac.c \ \ VERSION # EXTRA_DIST: The 
Php Code does not build anymore. No need to ship it until # someone fixes it: # php/00README.txt php/recoll/config.m4 php/recoll/make.sh # php/recoll/php_recoll.h php/recoll/recoll.cpp php/sample/shell.php OPTSFORPYTHON = $(shell test -f /etc/debian_version && echo --install-layout=deb) if MAKEPYTHON all-local:: recollpython install-exec-local:: recollpython-install clean-local:: recollpython-clean recollpython: librecoll.la (cd python/recoll; set -x; \ for v in 2 3;do test -n "`which python$${v}`" && \ libdir=$(libdir) python$${v} setup.py build; \ done \ ) recollpython-install: (cd python/recoll; set -x; \ for v in 2 3;do test -n "`which python$${v}`" && \ python$${v} setup.py install \ --prefix=${prefix} --root=$${DESTDIR:-/} $(OPTSFORPYTHON); \ done; \ ) recollpython-clean: rm -f python/recoll/*.pyc rm -rf python/pychm/build rm -rf python/pychm/recollchm.egg-info rm -rf python/pychm/setup.py rm -rf python/recoll/Recoll.egg-info rm -rf python/recoll/__pycache__ rm -rf python/recoll/build endif if MAKEPYTHONCHM all-local:: rclpychm install-exec-local:: rclpychm-install clean-local:: rclpychm-clean rclpychm: (cd python/pychm; set -x; \ for v in 2 3;do \ test -n "`which python$${v}`" && python$${v} setup.py build;\ done \ ) rclpychm-install: (cd python/pychm; set -x; \ for v in 2 3;do test -n "`which python$${v}`" && \ python$${v} setup.py install \ --prefix=${prefix} --root=$${DESTDIR:-/} $(OPTSFORPYTHON); \ done \ ) rclpychm-clean: rm -rf python/pychm/build rm -rf python/pychm/dist/* endif if MAKEQT all-local:: recollqt recollqt: librecoll.la (cd $(QTGUI); ${QMAKE} PREFIX=${prefix} recoll.pro) $(MAKE) -C $(QTGUI) LFLAGS="$(LDFLAGS)" prefix=$(prefix) \ exec_prefix=$(exec_prefix) libdir=$(libdir) clean-local:: recollqt-clean recollqt-clean: -$(MAKE) -C $(QTGUI) clean install-exec-local:: recollqt-install recollqt-install: $(MAKE) -C $(QTGUI) LFLAGS="$(LDFLAGS)" INSTALL_ROOT=$(DESTDIR) \ prefix=$(prefix) exec_prefix=$(exec_prefix) libdir=$(libdir) \ install endif defconfdir = $(pkgdatadir)/examples defconf_DATA = \ desktop/recollindex.desktop \ index/rclmon.sh \ sampleconf/fragbuts.xml \ sampleconf/fields \ sampleconf/recoll.conf \ sampleconf/mimeconf \ sampleconf/recoll.qss \ sampleconf/mimemap \ sampleconf/mimeview filterdir = $(pkgdatadir)/filters dist_filter_DATA = \ desktop/hotrecoll.py \ filters/abiword.xsl \ filters/fb2.xsl \ filters/gnumeric.xsl \ filters/msodump.zip \ filters/okular-note.xsl \ filters/opendoc-body.xsl \ filters/opendoc-flat.xsl \ filters/opendoc-meta.xsl \ filters/openxml-xls-body.xsl \ filters/openxml-word-body.xsl \ filters/openxml-meta.xsl \ filters/ppt-dump.py \ filters/rcl7z \ filters/rclabw.py \ filters/rclaptosidman \ filters/rclaudio \ filters/rclbasehandler.py \ filters/rclbibtex.sh \ filters/rclcheckneedretry.sh \ filters/rclchm \ filters/rcldia \ filters/rcldjvu.py \ filters/rcldoc.py \ filters/rcldvi \ filters/rclepub \ filters/rclepub1 \ filters/rclexec1.py \ filters/rclexecm.py \ filters/rclfb2.py \ filters/rclgaim \ filters/rclgenxslt.py \ filters/rclgnm.py \ filters/rclics \ filters/rclimg \ filters/rclimg.py \ filters/rclinfo \ filters/rclkar \ filters/rclkwd \ filters/rcllatinclass.py \ filters/rcllatinstops.zip \ filters/rcllyx \ filters/rclman \ filters/rclmidi.py \ filters/rclokulnote.py \ filters/rclopxml.py \ filters/rclpdf.py \ filters/rclppt.py \ filters/rclps \ filters/rclpst.py \ filters/rclpurple \ filters/rclpython \ filters/rclrar \ filters/rclrtf.py \ filters/rclscribus \ filters/rclshowinfo \ filters/rclsoff-flat.py \ 
filters/rclsoff.py \ filters/rclsvg.py \ filters/rcltar \ filters/rcltex \ filters/rcltext.py \ filters/rcluncomp \ filters/rcluncomp.py \ filters/rclwar \ filters/rclxls.py \ filters/rclxml.py \ filters/rclxmp.py \ filters/rclxslt.py \ filters/rclzip \ filters/recoll-we-move-files.py \ filters/recollepub.zip \ filters/svg.xsl \ filters/xls-dump.py \ filters/xlsxmltocsv.py \ filters/xml.xsl \ python/recoll/recoll/conftree.py \ python/recoll/recoll/rclconfig.py install-data-hook: (cd $(DESTDIR)/$(filterdir); \ chmod a+x rcl* ppt-dump.py xls-dump.py xlsxmltocsv.py hotrecoll.py; \ chmod a+x recoll-we-move-files.py ../examples/rclmon.sh; \ chmod 0644 msodump.zip recollepub.zip rclexecm.py rcllatinstops.zip \ rclconfig.py conftree.py rclmidi.py rclexec1.py rcluncomp.py rclxslt.py) if MAKEUSERDOC rdocdir = $(pkgdatadir)/doc rdoc_DATA = doc/user/usermanual.html doc/user/docbook-xsl.css doc/user/usermanual.html: doc/user/usermanual.xml mkdir -p doc/user test -f doc/user/Makefile || \ cp -p $(top_srcdir)/doc/user/Makefile doc/user $(MAKE) -C doc/user VPATH=$(VPATH):$(VPATH)/doc/user usermanual.html endif dist_man1_MANS = doc/man/recoll.1 doc/man/recollq.1 \ doc/man/recollindex.1 doc/man/xadump.1 dist_man5_MANS = doc/man/recoll.conf.5 dist-hook: (cd $(top_srcdir); find . \ \( -name '*.pyc' -o -name '#*' -o -name '*~' \) -delete) test -z "`git status -s | grep -v '??' | grep -v Makefile.am`" vers=`echo $(VERSION) | sed -e 's/~/_/g'`;\ git tag -a RECOLL-$$vers -m "Release $$vers tagged" �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/VERSION�������������������������������������������������������������������������������0000644�0001750�0001750�00000000007�13570165145�011060� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������1.26.3 �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/��������������������������������������������������������������������������������0000755�0001750�0001750�00000000000�13570165407�011162� 
5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/synfamily.cpp�������������������������������������������������������������������0000644�0001750�0001750�00000021202�13533651561�013616� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2012-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <iostream> #include <algorithm> #include <memory> #include "log.h" #include "cstr.h" #include "xmacros.h" #include "synfamily.h" #include "smallut.h" using namespace std; namespace Rcl { bool XapWritableSynFamily::createMember(const string& membername) { string ermsg; try { m_wdb.add_synonym(memberskey(), membername); } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapSynFamily::createMember: error: " << ermsg << "\n"); return false; } return true; } bool XapWritableSynFamily::deleteMember(const string& membername) { string key = entryprefix(membername); for (Xapian::TermIterator xit = m_wdb.synonym_keys_begin(key); xit != m_wdb.synonym_keys_end(key); xit++) { m_wdb.clear_synonyms(*xit); } m_wdb.remove_synonym(memberskey(), membername); return true; } bool XapSynFamily::getMembers(vector<string>& members) { string key = memberskey(); string ermsg; try { for (Xapian::TermIterator xit = m_rdb.synonyms_begin(key); xit != m_rdb.synonyms_end(key); xit++) { members.push_back(*xit); } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapSynFamily::getMembers: xapian error " << ermsg << "\n"); return false; } return true; } bool XapSynFamily::listMap(const string& membername) { string key = entryprefix(membername); string ermsg; try { for (Xapian::TermIterator kit = m_rdb.synonym_keys_begin(key); kit != m_rdb.synonym_keys_end(key); kit++) { cout << "[" << *kit << "] -> "; for (Xapian::TermIterator xit = m_rdb.synonyms_begin(*kit); xit != m_rdb.synonyms_end(*kit); xit++) { cout << *xit << " "; } cout << endl; } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapSynFamily::listMap: xapian error " << ermsg << "\n"); return false; } vector<string>members; getMembers(members); cout << "All family members: "; for (vector<string>::const_iterator it = members.begin(); it != members.end(); it++) { cout << *it << " "; } cout << endl; return true; } bool XapSynFamily::synExpand(const 
string& member, const string& term, vector<string>& result) { LOGDEB("XapSynFamily::synExpand:(" << m_prefix1 << ") " << term << " for " << member << "\n"); string key = entryprefix(member) + term; string ermsg; try { for (Xapian::TermIterator xit = m_rdb.synonyms_begin(key); xit != m_rdb.synonyms_end(key); xit++) { LOGDEB2(" Pushing " << *xit << "\n"); result.push_back(*xit); } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("synFamily::synExpand: error for member [" << member << "] term [" << term << "]\n"); result.push_back(term); return false; } // If the input term is not in the list, add it if (find(result.begin(), result.end(), term) == result.end()) { result.push_back(term); } return true; } bool XapComputableSynFamMember::synExpand(const string& term, vector<string>& result, SynTermTrans *filtertrans) { string root = (*m_trans)(term); string filter_root; if (filtertrans) filter_root = (*filtertrans)(term); string key = m_prefix + root; LOGDEB("XapCompSynFamMbr::synExpand([" << m_prefix << "]): term [" << term << "] root [" << root << "] m_trans: " << m_trans->name() << " filter: " << (filtertrans ? filtertrans->name() : "none") << "\n"); string ermsg; try { for (Xapian::TermIterator xit = m_family.getdb().synonyms_begin(key); xit != m_family.getdb().synonyms_end(key); xit++) { LOGDEB("XapCompSynFamMbr::synExpand: testing " << *xit << endl); if (!filtertrans || (*filtertrans)(*xit) == filter_root) { LOGDEB2(" Pushing " << *xit << "\n"); result.push_back(*xit); } } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapSynDb::synExpand: error for term [" << term << "] (key " << key << ")\n"); result.push_back(term); return false; } // If the input term and root are not in the list, add them if (find(result.begin(), result.end(), term) == result.end()) { LOGDEB2(" Pushing " << term << "\n"); result.push_back(term); } if (root != term && find(result.begin(), result.end(), root) == result.end()) { if (!filtertrans || (*filtertrans)(root) == filter_root) { LOGDEB2(" Pushing " << root << "\n"); result.push_back(root); } } LOGDEB("XapCompSynFamMbr::synExpand([" << m_prefix << "]): term [" << term << "] -> [" << stringsToString(result) << "]\n"); return true; } bool XapComputableSynFamMember::synKeyExpand(StrMatcher* inexp, vector<string>& result, SynTermTrans *filtertrans) { LOGDEB("XapCompSynFam::synKeyExpand: [" << inexp->exp() << "]\n"); // If set, compute filtering term (e.g.: only case-folded) std::shared_ptr<StrMatcher> filter_exp; if (filtertrans) { filter_exp = std::shared_ptr<StrMatcher>(inexp->clone()); filter_exp->setExp((*filtertrans)(inexp->exp())); } // Transform input into our key format (e.g.: case-folded + diac-stripped), // and prepend prefix inexp->setExp(m_prefix + (*m_trans)(inexp->exp())); // Find the initial section before any special chars for skipping the keys string::size_type es = inexp->baseprefixlen(); string is = inexp->exp().substr(0, es); string::size_type preflen = m_prefix.size(); LOGDEB2("XapCompSynFam::synKeyExpand: init section: [" << is << "]\n"); string ermsg; try { for (Xapian::TermIterator xit = m_family.getdb().synonym_keys_begin(is); xit != m_family.getdb().synonym_keys_end(is); xit++) { LOGDEB2(" Checking1 [" << *xit << "] against [" << inexp->exp() << "]\n"); if (!inexp->match(*xit)) continue; // Push all the synonyms if they match the secondary filter for (Xapian::TermIterator xit1 = m_family.getdb().synonyms_begin(*xit); xit1 != m_family.getdb().synonyms_end(*xit); xit1++) { string term = *xit1; if (filter_exp) { string term1 = 
(*filtertrans)(term); LOGDEB2(" Testing [" << term1 << "] against [" << filter_exp->exp() << "]\n"); if (!filter_exp->match(term1)) { continue; } } LOGDEB2("XapCompSynFam::keyWildExpand: [" << *xit1 << "]\n"); result.push_back(*xit1); } // Same with key itself string term = (*xit).substr(preflen); if (filter_exp) { string term1 = (*filtertrans)(term); LOGDEB2(" Testing [" << term1 << "] against [" << filter_exp->exp() << "]\n"); if (!filter_exp->match(term1)) { continue; } } LOGDEB2("XapCompSynFam::keyWildExpand: [" << term << "]\n"); result.push_back(term); } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapCompSynFam::synKeyExpand: xapian: [" << ermsg << "]\n"); return false; } LOGDEB1("XapCompSynFam::synKeyExpand: final: [" << stringsToString(result) << "]\n"); return true; } } // Namespace Rcl ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclquery.h����������������������������������������������������������������������0000644�0001750�0001750�00000011141�13567755676�013142� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2008 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _rclquery_h_included_ #define _rclquery_h_included_ #include <string> #include <vector> #include <memory> #include "searchdata.h" #ifndef NO_NAMESPACES namespace Rcl { #endif class Db; class Doc; enum abstract_result { ABSRES_ERROR = 0, ABSRES_OK = 1, ABSRES_TRUNC = 2, ABSRES_TERMMISS = 4 }; // Snippet entry for makeDocAbstract class Snippet { public: Snippet(int page, const std::string& snip) : page(page), snippet(snip) { } Snippet& setTerm(const std::string& trm) { term = trm; return *this; } int page; std::string term; std::string snippet; }; /** * An Rcl::Query is a question (SearchData) applied to a * database. Handles access to the results. Somewhat equivalent to a * cursor in an rdb. * */ class Query { public: Query(Db *db); ~Query(); /** Get explanation about last error */ std::string getReason() const { return m_reason; } /** Choose sort order. 
Must be called before setQuery */ void setSortBy(const std::string& fld, bool ascending = true); const std::string& getSortBy() const { return m_sortField; } bool getSortAscending() const { return m_sortAscending; } /** Return or filter results with identical content checksum */ void setCollapseDuplicates(bool on) { m_collapseDuplicates = on; } /** Accept data describing the search and query the index. This can * be called repeatedly on the same object which gets reinitialized each * time. */ bool setQuery(std::shared_ptr<SearchData> q); /** Get results count for current query. * * @param useestimate Use get_matches_estimated() if true, else * get_matches_lower_bound() * @param checkatleast checkatleast parameter to get_mset(). Use -1 for * full scan. */ int getResCnt(int checkatleast=1000, bool useestimate=false); /** Get document at rank i in current query results. */ bool getDoc(int i, Doc &doc, bool fetchtext = false); /** Get possibly expanded list of query terms */ bool getQueryTerms(std::vector<std::string>& terms); /** Build synthetic abstract for document, extracting chunks relevant for * the input query. This uses index data only (no access to the file) */ // Abstract returned as one string bool makeDocAbstract(const Doc &doc, std::string& abstract); // Returned as a snippets vector bool makeDocAbstract(const Doc &doc, std::vector<std::string>& abstract); // Returned as a vector of pair<page,snippet> page is 0 if unknown int makeDocAbstract(const Doc &doc, std::vector<Snippet>& abst, int maxoccs= -1, int ctxwords = -1, bool sortbypage=false); /** Retrieve page number for first match for "significant" query term * @param term returns the chosen term */ int getFirstMatchPage(const Doc &doc, std::string& term); /** Retrieve a reference to the searchData we are using */ std::shared_ptr<SearchData> getSD() { return m_sd; } /** Expand query to look for documents like the one passed in */ std::vector<std::string> expand(const Doc &doc); /** Return the Db we're set for */ Db *whatDb() const { return m_db; } /* make this public for access from embedded Db::Native */ class Native; Native *m_nq; private: std::string m_reason; // Error explanation Db *m_db; void *m_sorter; std::string m_sortField; bool m_sortAscending; bool m_collapseDuplicates; int m_resCnt; std::shared_ptr<SearchData> m_sd; int m_snipMaxPosWalk; /* Copyconst and assignement private and forbidden */ Query(const Query &) {} Query & operator=(const Query &) {return *this;}; }; #ifndef NO_NAMESPACES } #endif // NO_NAMESPACES #endif /* _rclquery_h_included_ */ �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclvalues.cpp�������������������������������������������������������������������0000644�0001750�0001750�00000005063�13533651561�013612� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2018 J.F.Dockes * This program is 
free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <string> #include "xapian.h" #include "rclconfig.h" #include "smallut.h" #include "log.h" #include "unacpp.h" using namespace std; namespace Rcl { void add_field_value(Xapian::Document& xdoc, const FieldTraits& ft, const string& data) { string ndata; switch (ft.valuetype) { case FieldTraits::STR: if (o_index_stripchars) { if (!unacmaybefold(data, ndata, "UTF-8", UNACOP_UNACFOLD)) { LOGDEB("Rcl::add_field_value: unac failed for ["<<data<< "]\n"); ndata = data; } } else { ndata = data; } break; case FieldTraits::INT: { ndata = data; int len = ft.valuelen ? ft.valuelen : 10; leftzeropad(ndata, len); } } LOGDEB0("Rcl::add_field_value: slot " << ft.valueslot << " [" << ndata << "]\n"); xdoc.add_value(ft.valueslot, ndata); } string convert_field_value(const FieldTraits& ft, const string& data) { string ndata(data); switch (ft.valuetype) { case FieldTraits::STR: break; case FieldTraits::INT: { if (ndata.empty()) break; // Apply suffixes char c = ndata.back(); string zeroes; switch(c) { case 'k':case 'K': zeroes = "000";break; case 'm':case 'M': zeroes = "000000";break; case 'g':case 'G': zeroes = "000000000";break; case 't':case 'T': zeroes = "000000000000";break; default: break; } if (!zeroes.empty()) { ndata.pop_back(); ndata += zeroes; } int len = ft.valuelen ? ft.valuelen : 10; leftzeropad(ndata, len); } } return ndata; } } �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldups.cpp���������������������������������������������������������������������0000644�0001750�0001750�00000006154�13533651561�013270� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2013 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ //////////////////////////////////////////////////////////////////// #include "autoconfig.h" #include <string> using namespace std; #include <xapian.h> #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "xmacros.h" #include "md5ut.h" #include "searchdata.h" #include "rclquery.h" namespace Rcl { /** Retrieve the dups of a given document. The input has to be a query result * because we use the xdocid. We get the md5 from this, then the dups */ bool Db::docDups(const Doc& idoc, vector<Doc>& odocs) { if (m_ndb == 0) { LOGERR("Db::docDups: no db\n" ); return false; } if (idoc.xdocid == 0) { LOGERR("Db::docDups: null xdocid in input doc\n" ); return false; } // Get the xapian doc Xapian::Document xdoc; XAPTRY(xdoc = m_ndb->xrdb.get_document(Xapian::docid(idoc.xdocid)), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::docDups: xapian error: " << (m_reason) << "\n" ); return false; } // Get the md5 string digest; XAPTRY(digest = xdoc.get_value(VALUE_MD5), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::docDups: xapian error: " << (m_reason) << "\n" ); return false; } if (digest.empty()) { LOGDEB("Db::docDups: doc has no md5\n" ); return false; } string md5; MD5HexPrint(digest, md5); SearchData *sdp = new SearchData(); std::shared_ptr<SearchData> sd(sdp); SearchDataClauseSimple *sdc = new SearchDataClauseSimple(SCLT_AND, md5, "rclmd5"); sdc->addModifier(SearchDataClause::SDCM_CASESENS); sdc->addModifier(SearchDataClause::SDCM_DIACSENS); sd->addClause(sdc); Query query(this); query.setCollapseDuplicates(0); if (!query.setQuery(sd)) { LOGERR("Db::docDups: setQuery failed\n" ); return false; } int cnt = query.getResCnt(); for (int i = 0; i < cnt; i++) { Doc doc; if (!query.getDoc(i, doc)) { LOGERR("Db::docDups: getDoc failed at " << (i) << " (cnt " << (cnt) << ")\n" ); return false; } odocs.push_back(doc); } return true; } #if 0 { vector<Doc> dups; bool ret; LOGDEB("DOCDUPS\n" ); ret = m_db->docDups(doc, dups); if (!ret) { LOGDEB("docDups failed\n" ); } else if (dups.size() == 1) { LOGDEB("No dups\n" ); } else { for (unsigned int i = 0; i < dups.size(); i++) { LOGDEB("Dup: " << (dups[i].url) << "\n" ); } } } #endif } ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclquery_p.h��������������������������������������������������������������������0000644�0001750�0001750�00000007503�13566424763�013455� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the 
Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _rclquery_p_h_included_ #define _rclquery_p_h_included_ #include <map> #include <vector> #include <string> #include <unordered_set> #include <xapian.h> #include "rclquery.h" class Chrono; namespace Rcl { class Query::Native { public: // The query I belong to Query *m_q; // query descriptor: terms and subqueries joined by operators // (or/and etc...) Xapian::Query xquery; // Open query descriptor. Xapian::Enquire *xenquire; // Partial result set Xapian::MSet xmset; // Term frequencies for current query. See makeAbstract, setQuery std::map<std::string, double> termfreqs; Native(Query *q) : m_q(q), xenquire(0) { } ~Native() { clear(); } void clear() { delete xenquire; xenquire = 0; termfreqs.clear(); } /** Return a list of terms which matched for a specific result document */ bool getMatchTerms(unsigned long xdocid, std::vector<std::string>& terms); int makeAbstract(Xapian::docid id, std::vector<Snippet>&, int maxoccs, int ctxwords, bool sortbypage); int getFirstMatchPage(Xapian::docid docid, std::string& term); void setDbWideQTermsFreqs(); double qualityTerms(Xapian::docid docid, const std::vector<std::string>& terms, std::multimap<double, std::vector<std::string> >& byQ); void abstractPopulateQTerm( Xapian::Database& xrdb, Xapian::docid docid, const string& qterm, int qtrmwrdcnt, int ctxwords, unsigned int maxgrpoccs, unsigned int maxtotaloccs, std::map<unsigned int, std::string>& sparseDoc, std::unordered_set<unsigned int>& searchTermPositions, unsigned int& maxpos, unsigned int& totaloccs, unsigned int& grpoccs, int& ret ); void abstractPopulateContextTerms( Xapian::Database& xrdb, Xapian::docid docid, unsigned int maxpos, std::map<unsigned int, std::string>& sparseDoc, int& ret ); void abstractCreateSnippetsVector( Db::Native *ndb, std::map<unsigned int, std::string>& sparseDoc, std::unordered_set<unsigned int>& searchTermPositions, std::vector<int>& vpbreaks, std::vector<Snippet>& vabs); int abstractFromIndex( Rcl::Db::Native *ndb, Xapian::docid docid, const std::vector<std::string>& matchTerms, const std::multimap<double, std::vector<std::string>> byQ, double totalweight, int ctxwords, unsigned int maxtotaloccs, std::vector<Snippet>& vabs, Chrono& chron ); int abstractFromText( Rcl::Db::Native *ndb, Xapian::docid docid, const std::vector<std::string>& matchTerms, const std::multimap<double, std::vector<std::string>> byQ, double totalweight, int ctxwords, unsigned int maxtotaloccs, vector<Snippet>& vabs, Chrono& chron, bool sortbypage ); }; } #endif /* _rclquery_p_h_included_ */ ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/daterange.cpp�������������������������������������������������������������������0000644�0001750�0001750�00000006357�13303776057�013556� 
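/* A minimal usage sketch for the Rcl::Query interface declared above,
 * modelled on the pattern used by Db::docDups() above: build a SearchData
 * holding a single clause, hand it to a Query, then walk the result list.
 * The clause text, the empty field argument and the listMatches() wrapper
 * are illustrative only; "db" is assumed to be an already open Rcl::Db.
 */
#include <iostream>
#include <memory>

#include "rcldb.h"
#include "rclquery.h"
#include "searchdata.h"

static bool listMatches(Rcl::Db& db)
{
    // One AND clause, no specific field restriction.
    auto sd = std::make_shared<Rcl::SearchData>();
    sd->addClause(new Rcl::SearchDataClauseSimple(Rcl::SCLT_AND, "xapian", ""));

    Rcl::Query query(&db);
    if (!query.setQuery(sd)) {
        std::cerr << "setQuery failed: " << query.getReason() << "\n";
        return false;
    }
    int cnt = query.getResCnt();
    for (int i = 0; i < cnt; i++) {
        Rcl::Doc doc;
        if (!query.getDoc(i, doc))
            break;
        // doc.url, doc.mimetype, etc. are now available.
        std::cout << doc.url << "\n";
    }
    return true;
}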
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* The dates-to-query routine is is lifted quasi-verbatim but * modified from xapian-omega:date.cc. Copyright info: * * Copyright 1999,2000,2001 BrightStation PLC * Copyright 2001 James Aylett * Copyright 2001,2002 Ananova Ltd * Copyright 2002 Intercede 1749 Ltd * Copyright 2002,2003,2006 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #include "autoconfig.h" #include <stdio.h> #include <vector> using namespace std; #include <xapian.h> #include "log.h" #include "rclconfig.h" namespace Rcl { static inline void bufprefix(char *buf, char c) { if (o_index_stripchars) { buf[0] = c; } else { buf[0] = ':'; buf[1] = c; buf[2] = ':'; } } static inline int bpoffs() { return o_index_stripchars ? 1 : 3; } Xapian::Query date_range_filter(int y1, int m1, int d1, int y2, int m2, int d2) { // Xapian uses a smallbuf and snprintf. Can't be bothered, we're // doing at most 3 %d's ! char buf[200]; vector<Xapian::Query> v; // Deal with days till the end of the first month if any bufprefix(buf, 'D'); sprintf(buf + bpoffs(), "%04d%02d", y1, m1); int d_last = monthdays(m1, y1); int d_end = d_last; if (y1 == y2 && m1 == m2 && d2 < d_last) { d_end = d2; } if (d1 > 1 || d_end < d_last) { for ( ; d1 <= d_end ; d1++) { sprintf(buf + 6 + bpoffs(), "%02d", d1); v.push_back(Xapian::Query(buf)); } } else { bufprefix(buf, 'M'); v.push_back(Xapian::Query(buf)); } if (y1 == y2 && m1 == m2) { return Xapian::Query(Xapian::Query::OP_OR, v.begin(), v.end()); } // Months till the end of first year int m_last = (y1 < y2) ? 
12 : m2 - 1; bufprefix(buf, 'M'); while (++m1 <= m_last) { sprintf(buf + 4 + bpoffs(), "%02d", m1); v.push_back(Xapian::Query(buf)); } // Years inbetween and first months of the last year if (y1 < y2) { bufprefix(buf, 'Y'); while (++y1 < y2) { sprintf(buf + bpoffs(), "%04d", y1); v.push_back(Xapian::Query(buf)); } bufprefix(buf, 'M'); sprintf(buf + bpoffs(), "%04d", y2); for (m1 = 1; m1 < m2; m1++) { sprintf(buf + 4 + bpoffs(), "%02d", m1); v.push_back(Xapian::Query(buf)); } } // Last month sprintf(buf + 4 + bpoffs(), "%02d", m2); // Deal with any final partial month if (d2 < monthdays(m2, y2)) { bufprefix(buf, 'D'); for (d1 = 1 ; d1 <= d2; d1++) { sprintf(buf + 6 + bpoffs(), "%02d", d1); v.push_back(Xapian::Query(buf)); } } else { bufprefix(buf, 'M'); v.push_back(Xapian::Query(buf)); } return Xapian::Query(Xapian::Query::OP_OR, v.begin(), v.end()); } } ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclterms.cpp��������������������������������������������������������������������0000644�0001750�0001750�00000045527�13567765436�013474� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ //////////////////////////////////////////////////////////////////// /** Things dealing with walking the terms lists and expansion dbs */ #include "autoconfig.h" #include <string> #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "stemdb.h" #include "expansiondbs.h" #include "strmatcher.h" using namespace std; namespace Rcl { // File name wild card expansion. This is a specialisation ot termMatch bool Db::filenameWildExp(const string& fnexp, vector<string>& names, int max) { string pattern = fnexp; names.clear(); // If pattern is not capitalized, not quoted (quoted pattern can't // get here currently anyway), and has no wildcards, we add * at // each end: match any substring if (pattern[0] == '"' && pattern[pattern.size()-1] == '"') { pattern = pattern.substr(1, pattern.size() -2); } else if (pattern.find_first_of(cstr_minwilds) == string::npos && !unaciscapital(pattern)) { pattern = "*" + pattern + "*"; } // else let it be LOGDEB("Rcl::Db::filenameWildExp: pattern: [" << pattern << "]\n"); // We inconditionnally lowercase and strip the pattern, as is done // during indexing. 
This seems to be the only sane possible // approach with file names and wild cards. termMatch does // stripping conditionally on indexstripchars. string pat1; if (unacmaybefold(pattern, pat1, "UTF-8", UNACOP_UNACFOLD)) { pattern.swap(pat1); } TermMatchResult result; if (!idxTermMatch(ET_WILD, string(), pattern, result, max, unsplitFilenameFieldName)) return false; for (const auto& entry : result.entries) { names.push_back(entry.term); } if (names.empty()) { // Build an impossible query: we know its impossible because we // control the prefixes! names.push_back(wrap_prefix("XNONE") + "NoMatchingTerms"); } return true; } // Walk the Y terms and return min/max bool Db::maxYearSpan(int *minyear, int *maxyear) { LOGDEB("Rcl::Db:maxYearSpan\n"); *minyear = 1000000; *maxyear = -1000000; TermMatchResult result; if (!idxTermMatch(ET_WILD, string(), "*", result, -1, "xapyear")) { LOGINFO("Rcl::Db:maxYearSpan: termMatch failed\n"); return false; } for (const auto& entry : result.entries) { if (!entry.term.empty()) { int year = atoi(strip_prefix(entry.term).c_str()); if (year < *minyear) *minyear = year; if (year > *maxyear) *maxyear = year; } } return true; } bool Db::getAllDbMimeTypes(std::vector<std::string>& exp) { Rcl::TermMatchResult res; if (!idxTermMatch(Rcl::Db::ET_WILD, "", "*", res, -1, "mtype")) { return false; } for (const auto& entry : res.entries) { exp.push_back(Rcl::strip_prefix(entry.term)); } return true; } class TermMatchCmpByWcf { public: int operator()(const TermMatchEntry& l, const TermMatchEntry& r) { return r.wcf - l.wcf < 0; } }; class TermMatchCmpByTerm { public: int operator()(const TermMatchEntry& l, const TermMatchEntry& r) { return l.term.compare(r.term) > 0; } }; class TermMatchTermEqual { public: int operator()(const TermMatchEntry& l, const TermMatchEntry& r) { return !l.term.compare(r.term); } }; static const char *tmtptostr(int typ) { switch (typ) { case Db::ET_WILD: return "wildcard"; case Db::ET_REGEXP: return "regexp"; case Db::ET_STEM: return "stem"; case Db::ET_NONE: default: return "none"; } } // Find all index terms that match an input along different expansion modes: // wildcard, regular expression, or stemming. Depending on flags we perform // case and/or diacritics expansion (this can be the only thing requested). // If the "field" parameter is set, we return a list of appropriately // prefixed terms (which are going to be used to build a Xapian // query). // This routine performs case/diacritics/stemming expansion against // the auxiliary tables, and possibly calls idxTermMatch() for work // using the main index terms (filtering, retrieving stats, expansion // in some cases). bool Db::termMatch(int typ_sens, const string &lang, const string &_term, TermMatchResult& res, int max, const string& field, vector<string>* multiwords) { int matchtyp = matchTypeTp(typ_sens); if (!m_ndb || !m_ndb->m_isopen) return false; Xapian::Database xrdb = m_ndb->xrdb; bool diac_sensitive = (typ_sens & ET_DIACSENS) != 0; bool case_sensitive = (typ_sens & ET_CASESENS) != 0; // Path elements (used for dir: filtering) are special because // they are not unaccented or lowercased even if the index is // otherwise stripped. 
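// Illustrative caller-side use of these sensitivity flags (values are only
// an example):
//     TermMatchResult res;
//     db.termMatch(Rcl::Db::ET_WILD, "", "theatre*", res, 100,
//                  "title", nullptr);
// expands the wildcard against the "title" field while remaining insensitive
// to case and diacritics. OR-ing ET_CASESENS and/or ET_DIACSENS into the
// first parameter requests sensitivity instead, and the language parameter
// only matters for ET_STEM expansion.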
bool pathelt = (typ_sens & ET_PATHELT) != 0; LOGDEB0("Db::TermMatch: typ " << tmtptostr(matchtyp) << " diacsens " << diac_sensitive << " casesens " << case_sensitive << " pathelt " << pathelt << " lang [" << lang << "] term [" << _term << "] max " << max << " field [" << field << "] stripped " << o_index_stripchars << " init res.size " << res.entries.size() << "\n"); // If index is stripped, no case or diac expansion can be needed: // for the processing inside this routine, everything looks like // we're all-sensitive: no use of expansion db. // Also, convert input to lowercase and strip its accents. string term = _term; if (o_index_stripchars) { diac_sensitive = case_sensitive = true; if (!pathelt && !unacmaybefold(_term, term, "UTF-8", UNACOP_UNACFOLD)) { LOGERR("Db::termMatch: unac failed for [" << _term << "]\n"); return false; } } // The case/diac expansion db SynTermTransUnac unacfoldtrans(UNACOP_UNACFOLD); XapComputableSynFamMember synac(xrdb, synFamDiCa, "all", &unacfoldtrans); if (matchtyp == ET_WILD || matchtyp == ET_REGEXP) { std::shared_ptr<StrMatcher> matcher; if (matchtyp == ET_WILD) { matcher = std::shared_ptr<StrMatcher>(new StrWildMatcher(term)); } else { matcher = std::shared_ptr<StrMatcher>(new StrRegexpMatcher(term)); } if (!diac_sensitive || !case_sensitive) { // Perform case/diac expansion on the exp as appropriate and // expand the result. vector<string> exp; if (diac_sensitive) { // Expand for diacritics and case, filtering for same diacritics SynTermTransUnac foldtrans(UNACOP_FOLD); synac.synKeyExpand(matcher.get(), exp, &foldtrans); } else if (case_sensitive) { // Expand for diacritics and case, filtering for same case SynTermTransUnac unactrans(UNACOP_UNAC); synac.synKeyExpand(matcher.get(), exp, &unactrans); } else { // Expand for diacritics and case, no filtering synac.synKeyExpand(matcher.get(), exp); } // Retrieve additional info and filter against the index itself for (const auto& term : exp) { idxTermMatch(ET_NONE, "", term, res, max, field); } // And also expand the original expression against the // main index: for the common case where the expression // had no case/diac expansion (no entry in the exp db if // the original term is lowercase and without accents). idxTermMatch(typ_sens, lang, term, res, max, field); } else { idxTermMatch(typ_sens, lang, term, res, max, field); } } else { // Expansion is STEM or NONE (which may still need synonyms // and case/diac exp) vector<string> lexp; if (diac_sensitive && case_sensitive) { // No case/diac expansion lexp.push_back(term); } else if (diac_sensitive) { // Expand for accents and case, filtering for same accents, SynTermTransUnac foldtrans(UNACOP_FOLD); synac.synExpand(term, lexp, &foldtrans); } else if (case_sensitive) { // Expand for accents and case, filtering for same case SynTermTransUnac unactrans(UNACOP_UNAC); synac.synExpand(term, lexp, &unactrans); } else { // We are neither accent- nor case- sensitive and may need stem // expansion or not. Expand for accents and case synac.synExpand(term, lexp); } if (matchtyp == ET_STEM || (typ_sens & ET_SYNEXP)) { // Note: if any of the above conds is true, we are insensitive to // diacs and case (enforced in searchdatatox:termexpand // Need stem expansion. Lowercase the result of accent and case // expansion for input to stemdb. 
for (auto& term : lexp) { string lower; unacmaybefold(term, lower, "UTF-8", UNACOP_FOLD); term.swap(lower); } sort(lexp.begin(), lexp.end()); lexp.erase(unique(lexp.begin(), lexp.end()), lexp.end()); if (matchtyp == ET_STEM) { StemDb sdb(xrdb); vector<string> exp1; for (const auto& term : lexp) { sdb.stemExpand(lang, term, exp1); } exp1.swap(lexp); sort(lexp.begin(), lexp.end()); lexp.erase(unique(lexp.begin(), lexp.end()), lexp.end()); LOGDEB("Db::TermMatch: stemexp: " << stringsToString(lexp) << "\n"); } if (m_syngroups.ok() && (typ_sens & ET_SYNEXP)) { LOGDEB("Db::TermMatch: got syngroups\n"); vector<string> exp1(lexp); for (const auto& term : lexp) { vector<string> sg = m_syngroups.getgroup(term); if (!sg.empty()) { LOGDEB("Db::TermMatch: syngroups out: " << term << " -> " << stringsToString(sg) << "\n"); for (const auto& synonym : sg) { if (synonym.find_first_of(" ") != string::npos) { if (multiwords) { multiwords->push_back(synonym); } } else { exp1.push_back(synonym); } } } } lexp.swap(exp1); sort(lexp.begin(), lexp.end()); lexp.erase(unique(lexp.begin(), lexp.end()), lexp.end()); } // Expand the resulting list for case and diacritics (all // stemdb content is case-folded) vector<string> exp1; for (const auto& term: lexp) { synac.synExpand(term, exp1); } exp1.swap(lexp); sort(lexp.begin(), lexp.end()); lexp.erase(unique(lexp.begin(), lexp.end()), lexp.end()); } // Filter the result against the index and get the stats, // possibly add prefixes. LOGDEB("Db::TermMatch: final lexp before idx filter: " << stringsToString(lexp) << "\n"); for (const auto& term : lexp) { idxTermMatch(Rcl::Db::ET_WILD, "", term, res, max, field); } } TermMatchCmpByTerm tcmp; sort(res.entries.begin(), res.entries.end(), tcmp); TermMatchTermEqual teq; vector<TermMatchEntry>::iterator uit = unique(res.entries.begin(), res.entries.end(), teq); res.entries.resize(uit - res.entries.begin()); TermMatchCmpByWcf wcmp; sort(res.entries.begin(), res.entries.end(), wcmp); if (max > 0) { // Would need a small max and big stem expansion... res.entries.resize(MIN(res.entries.size(), (unsigned int)max)); } return true; } bool Db::Native::idxTermMatch_p( int typ, const string &lang, const string &root, std::function<bool(const string& term, Xapian::termcount colfreq, Xapian::doccount termfreq)> client, const string& prefix) { Xapian::Database xdb = xrdb; std::shared_ptr<StrMatcher> matcher; if (typ == ET_REGEXP) { matcher = std::shared_ptr<StrMatcher>(new StrRegexpMatcher(root)); if (!matcher->ok()) { LOGERR("termMatch: regcomp failed: " << matcher->getreason()); return false; } } else if (typ == ET_WILD) { matcher = std::shared_ptr<StrMatcher>(new StrWildMatcher(root)); } // Find the initial section before any special char string::size_type es = string::npos; if (matcher) { es = matcher->baseprefixlen(); } // Initial section: the part of the prefix+expr before the // first wildcard character. We only scan the part of the // index where this matches string is; if (es == string::npos) { is = prefix + root; } else if (es == 0) { is = prefix; } else { is = prefix + root.substr(0, es); } LOGDEB2("termMatch: initsec: [" << is << "]\n"); for (int tries = 0; tries < 2; tries++) { try { Xapian::TermIterator it = xdb.allterms_begin(); if (!is.empty()) it.skip_to(is.c_str()); for (; it != xdb.allterms_end(); it++) { const string ixterm{*it}; // If we're beyond the terms matching the initial // section, end if (!is.empty() && ixterm.find(is) != 0) break; // Else try to match the term. 
The matcher content // is without prefix, so we remove this if any. We // just checked that the index term did begin with // the prefix. string term; if (!prefix.empty()) { term = ixterm.substr(prefix.length()); } else { if (has_prefix(ixterm)) { continue; } term = ixterm; } if (matcher && !matcher->match(term)) continue; if (!client(ixterm, xdb.get_collection_freq(ixterm), it.get_termfreq())) { break; } } m_rcldb->m_reason.erase(); break; } catch (const Xapian::DatabaseModifiedError &e) { m_rcldb->m_reason = e.get_msg(); xdb.reopen(); continue; } XCATCHERROR(m_rcldb->m_reason); break; } if (!m_rcldb->m_reason.empty()) { LOGERR("termMatch: " << m_rcldb->m_reason << "\n"); return false; } return true; } // Second phase of wildcard/regexp term expansion after case/diac // expansion: expand against main index terms bool Db::idxTermMatch(int typ_sens, const string &lang, const string &root, TermMatchResult& res, int max, const string& field) { int typ = matchTypeTp(typ_sens); LOGDEB1("Db::idxTermMatch: typ " << tmtptostr(typ) << " lang [" << lang << "] term [" << root << "] max " << max << " field [" << field << "] init res.size " << res.entries.size() << "\n"); if (typ == ET_STEM) { LOGFATAL("RCLDB: internal error: idxTermMatch called with ET_STEM\n"); abort(); } string prefix; if (!field.empty()) { const FieldTraits *ftp = 0; if (!fieldToTraits(field, &ftp, true) || ftp->pfx.empty()) { LOGDEB("Db::termMatch: field is not indexed (no prefix): [" << field << "]\n"); } else { prefix = wrap_prefix(ftp->pfx); } } res.prefix = prefix; int rcnt = 0; bool ret = m_ndb->idxTermMatch_p( typ, lang, root, [&res, &rcnt, max](const string& term, Xapian::termcount cf, Xapian::doccount tf) { res.entries.push_back(TermMatchEntry(term, cf, tf)); // The problem with truncating here is that this is done // alphabetically and we may not keep the most frequent // terms. OTOH, not doing it may stall the program if // we are walking the whole term list. We compromise // by cutting at 2*max if (max > 0 && ++rcnt >= 2*max) return false; return true; }, prefix); return ret; } /** Term list walking. */ class TermIter { public: Xapian::TermIterator it; Xapian::Database db; }; TermIter *Db::termWalkOpen() { if (!m_ndb || !m_ndb->m_isopen) return 0; TermIter *tit = new TermIter; if (tit) { tit->db = m_ndb->xrdb; XAPTRY(tit->it = tit->db.allterms_begin(), tit->db, m_reason); if (!m_reason.empty()) { LOGERR("Db::termWalkOpen: xapian error: " << m_reason << "\n"); return 0; } } return tit; } bool Db::termWalkNext(TermIter *tit, string &term) { XAPTRY( if (tit && tit->it != tit->db.allterms_end()) { term = *(tit->it)++; return true; } , tit->db, m_reason); if (!m_reason.empty()) { LOGERR("Db::termWalkOpen: xapian error: " << m_reason << "\n"); } return false; } void Db::termWalkClose(TermIter *tit) { try { delete tit; } catch (...) 
{} } bool Db::termExists(const string& word) { if (!m_ndb || !m_ndb->m_isopen) return 0; XAPTRY(if (!m_ndb->xrdb.term_exists(word)) return false, m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::termWalkOpen: xapian error: " << m_reason << "\n"); return false; } return true; } bool Db::stemDiffers(const string& lang, const string& word, const string& base) { Xapian::Stem stemmer(lang); if (!stemmer(word).compare(stemmer(base))) { LOGDEB2("Rcl::Db::stemDiffers: same for " << word << " and " << base << "\n"); return false; } return true; } } // End namespace Rcl �������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/daterange.h���������������������������������������������������������������������0000644�0001750�0001750�00000000356�13303776057�013214� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef _DATERANGE_H_INCLUDED_ #define _DATERANGE_H_INCLUDED_ #include <xapian.h> namespace Rcl { extern Xapian::Query date_range_filter(int y1, int m1, int d1, int y2, int m2, int d2); } #endif /* _DATERANGE_H_INCLUDED_ */ ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/expansiondbs.cpp����������������������������������������������������������������0000644�0001750�0001750�00000011746�13533651561�014314� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "expansiondbs.h" #include <memory> #include <string> #include "log.h" #include "utf8iter.h" #include "smallut.h" #include "chrono.h" #include "textsplit.h" #include "xmacros.h" #include "rcldb.h" #include "stemdb.h" using namespace std; namespace Rcl { /** * Create all expansion dbs used to transform user input term to widen a query * We use Xapian synonyms subsets to store the expansions. 
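 * As a purely illustrative example of what gets stored: on a raw
 * (non-stripped) index the case/diacritics family ends up mapping a stripped
 * key such as "etude" to whichever accented/cased variants actually occur in
 * the index ("Etude", "étude", ...), and each per-language stemming family
 * maps a stem such as "floor" to the index terms sharing it ("floors",
 * "flooring", ...).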
*/ bool createExpansionDbs(Xapian::WritableDatabase& wdb, const vector<string>& langs) { LOGDEB("StemDb::createExpansionDbs: languages: " <<stringsToString(langs) << "\n"); Chrono cron; // Erase and recreate all the expansion groups // If langs is empty and we don't need casediac expansion, then no need to // walk the big list if (langs.empty()) { if (o_index_stripchars) return true; } // Walk the list of all terms, and stem/unac each. string ermsg; try { // Stem dbs vector<XapWritableComputableSynFamMember> stemdbs; // Note: tried to make this to work with stack-allocated objects, couldn't. // Looks like a bug in copy constructors somewhere, can't guess where vector<std::shared_ptr<SynTermTransStem> > stemmers; for (unsigned int i = 0; i < langs.size(); i++) { stemmers.push_back(std::shared_ptr<SynTermTransStem> (new SynTermTransStem(langs[i]))); stemdbs.push_back( XapWritableComputableSynFamMember(wdb, synFamStem, langs[i], stemmers.back().get())); stemdbs.back().recreate(); } // Unaccented stem dbs vector<XapWritableComputableSynFamMember> unacstemdbs; // We can reuse the same stemmer pointers, the objects are stateless. if (!o_index_stripchars) { for (unsigned int i = 0; i < langs.size(); i++) { unacstemdbs.push_back( XapWritableComputableSynFamMember(wdb, synFamStemUnac, langs[i], stemmers.back().get())); unacstemdbs.back().recreate(); } } SynTermTransUnac transunac(UNACOP_UNACFOLD); XapWritableComputableSynFamMember diacasedb(wdb, synFamDiCa, "all", &transunac); if (!o_index_stripchars) diacasedb.recreate(); Xapian::TermIterator it = wdb.allterms_begin(); // We'd want to skip to the first non-prefixed term, but this is a bit // complicated, so we just jump over most of the prefixed term and then // skip the rest one by one. it.skip_to(wrap_prefix("Z")); for ( ;it != wdb.allterms_end(); it++) { const string term{*it}; if (has_prefix(term)) continue; // Detect and skip CJK terms. Utf8Iter utfit(term); if (utfit.eof()) // Empty term?? Seems to happen. continue; if (TextSplit::isCJK(*utfit)) { // LOGDEB("stemskipped: Skipping CJK\n"); continue; } string lower = term; // If the index is raw, compute the case-folded term which // is the input to the stem db, and add a synonym from the // stripped term to the cased and accented one, for accent // and case expansion at query time if (!o_index_stripchars) { unacmaybefold(term, lower, "UTF-8", UNACOP_FOLD); diacasedb.addSynonym(term); } // Dont' apply stemming to terms which don't look like // natural language words. if (!Db::isSpellingCandidate(term)) { LOGDEB1("createExpansionDbs: skipped: [" << term << "]\n"); continue; } // Create stemming synonym for every language. The input is the // lowercase accented term for (unsigned int i = 0; i < langs.size(); i++) { stemdbs[i].addSynonym(lower); } // For a raw index, also maybe create a stem expansion for // the unaccented term. 
While this may be incorrect, it is // also necessary for searching in a diacritic-unsensitive // way on a raw index if (!o_index_stripchars) { string unac; unacmaybefold(lower, unac, "UTF-8", UNACOP_UNAC); if (unac != lower) { for (unsigned int i = 0; i < langs.size(); i++) { unacstemdbs[i].addSynonym(unac); } } } } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::createStemDb: map build failed: " << ermsg << "\n"); return false; } LOGDEB("StemDb::createExpansionDbs: done: " << cron.secs() << " S\n"); return true; } } ��������������������������recoll-1.26.3/rcldb/termproc.h����������������������������������������������������������������������0000644�0001750�0001750�00000023454�13533651561�013116� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2011 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _TERMPROC_H_INCLUDED_ #define _TERMPROC_H_INCLUDED_ #include <vector> #include <string> #include "textsplit.h" #include "stoplist.h" #include "smallut.h" #include "utf8iter.h" namespace Rcl { /** * Termproc objects take term tokens as input and do something * with them: transform to lowercase, filter out stop words, generate n-grams, * finally index or generate search clauses, etc. They are chained and can * be arranged to form different pipelines depending on the desired processing * steps: for example, optional stoplist or commongram processing. * * Shared processing steps are defined in this file. The first and last steps * are usually defined in the specific module. * - The front TermProc is typically chained from a TextSplit object * which generates the original terms, and calls takeword() from its * own takeword() method. * - The last TermProc does something with the finalized terms, e.g. adds * them to the index. */ /** * The base class takes care of chaining: all derived classes call its * takeword() and flush() methods to ensure that terms go through the pipe. */ class TermProc { public: TermProc(TermProc* next) : m_next(next) {} virtual ~TermProc() {} virtual bool takeword(const string &term, int pos, int bs, int be) { if (m_next) return m_next->takeword(term, pos, bs, be); else return true; } // newpage() is like takeword(), but for page breaks. 
virtual void newpage(int pos) { if (m_next) m_next->newpage(pos); } virtual bool flush() { if (m_next) return m_next->flush(); else return true; } private: TermProc *m_next; /* Copyconst and assignment private and forbidden */ TermProc(const TermProc &) {} TermProc& operator=(const TermProc &) { return *this; }; }; /** * Helper specialized TextSplit class, feeds the pipeline: * - The takeword() method calls a TermProc->takeword(). * - The text_to_words() method also takes care of flushing. * Both methods can be further specialized by the user (they should then call * the base methods when they've done the local processing). */ class TextSplitP : public TextSplit { public: TextSplitP(TermProc *prc, Flags flags = Flags(TXTS_NONE)) : TextSplit(flags), m_prc(prc) {} virtual bool text_to_words(const string &in) { bool ret = TextSplit::text_to_words(in); if (m_prc && !m_prc->flush()) return false; return ret; } virtual bool takeword(const string& term, int pos, int bs, int be) { if (m_prc) return m_prc->takeword(term, pos, bs, be); else return true; } virtual void newpage(int pos) { if (m_prc) return m_prc->newpage(pos); } private: TermProc *m_prc; }; /** Unaccent and lowercase term. If the index is * not case/diac-sensitive, this is usually the first step in the pipeline */ class TermProcPrep : public TermProc { public: TermProcPrep(TermProc *nxt) : TermProc(nxt), m_totalterms(0), m_unacerrors(0) { } virtual bool takeword(const string& itrm, int pos, int bs, int be) { m_totalterms++; string otrm; if (!unacmaybefold(itrm, otrm, "UTF-8", UNACOP_UNACFOLD)) { LOGDEB("splitter::takeword: unac [" << itrm << "] failed\n"); m_unacerrors++; // We don't generate a fatal error because of a bad term, // but one has to put the limit somewhere if (m_unacerrors > 500 && (double(m_totalterms) / double(m_unacerrors)) < 2.0) { // More than 1 error for every other term LOGERR("splitter::takeword: too many unac errors " << m_unacerrors << "/" << m_totalterms << "\n"); return false; } return true; } if (otrm.empty()) { // It may happen in some weird cases that the output from // unac is empty (if the word actually consisted entirely // of diacritics ...) The consequence is that a phrase // search won't work without addional slack. return true; } // We should have a Japanese stemmer to handle this, but for // experimenting, let's do it here: remove 'prolounged sound // mark' and its halfwidth variant from the end of terms. if ((unsigned int)otrm[0] > 127) { Utf8Iter it(otrm); if (TextSplit::isKATAKANA(*it)) { Utf8Iter itprev = it; while (*it != (unsigned int)-1) { itprev = it; it++; } if (*itprev == 0x30fc || *itprev == 0xff70) { otrm = otrm.substr(0, itprev.getBpos()); } } } if (otrm.empty()) { return true; } // It may also occur that unac introduces spaces in the string // (when removing isolated accents, may happen for Greek // for example). This is a pathological situation. We // index all the resulting terms at the same pos because // the surrounding code is not designed to handle a pos // change in here. This means that phrase searches and // snippets will be wrong, but at least searching for the // terms will work. 
bool hasspace = false; for (string::const_iterator it = otrm.begin();it < otrm.end();it++) { if (*it == ' ') { hasspace=true; break; } } if (hasspace) { std::vector<std::string> terms; stringToTokens(otrm, terms, " ", true); for (std::vector<std::string>::const_iterator it = terms.begin(); it < terms.end(); it++) { if (!TermProc::takeword(*it, pos, bs, be)) { return false; } } return true; } else { return TermProc::takeword(otrm, pos, bs, be); } } virtual bool flush() { m_totalterms = m_unacerrors = 0; return TermProc::flush(); } private: int m_totalterms; int m_unacerrors; }; /** Compare to stop words list and discard if match found */ class TermProcStop : public TermProc { public: TermProcStop(TermProc *nxt, const Rcl::StopList& stops) : TermProc(nxt), m_stops(stops) { } virtual bool takeword(const string& term, int pos, int bs, int be) { if (m_stops.isStop(term)) { return true; } return TermProc::takeword(term, pos, bs, be); } private: const Rcl::StopList& m_stops; }; /** Handle common-gram generation: combine frequent terms with neighbours to * shorten the positions lists for phrase searches. * NOTE: This does not currently work because of bad interaction with the * spans (ie john@domain.com) generation in textsplit. Not used, kept for * testing only */ class TermProcCommongrams : public TermProc { public: TermProcCommongrams(TermProc *nxt, const Rcl::StopList& stops) : TermProc(nxt), m_stops(stops), m_onlygrams(false) { } virtual bool takeword(const string& term, int pos, int bs, int be) { LOGDEB1("TermProcCom::takeword: pos " << (pos) << " " << (bs) << " " << (be) << " [" << (term) << "]\n" ); bool isstop = m_stops.isStop(term); bool twogramemit = false; if (!m_prevterm.empty() && (m_prevstop || isstop)) { // create 2-gram. space unnecessary but improves // the readability of queries string twogram; twogram.swap(m_prevterm); twogram.append(1, ' '); twogram += term; // When emitting a complex term we set the bps to 0. This may // be used by our clients if (!TermProc::takeword(twogram, m_prevpos, 0, 0)) return false; twogramemit = true; #if 0 if (m_stops.isStop(twogram)) { firstword = twogram; isstop = false; } #endif } m_prevterm = term; m_prevstop = isstop; m_prevpos = pos; m_prevsent = false; m_prevbs = bs; m_prevbe = be; // If flags allow, emit the bare term at the current pos. 
if (!m_onlygrams || (!isstop && !twogramemit)) { if (!TermProc::takeword(term, pos, bs, be)) return false; m_prevsent = true; } return true; } virtual bool flush() { if (!m_prevsent && !m_prevterm.empty()) if (!TermProc::takeword(m_prevterm, m_prevpos, m_prevbs, m_prevbe)) return false; m_prevterm.clear(); m_prevsent = true; return TermProc::flush(); } void onlygrams(bool on) { m_onlygrams = on; } private: // The stoplist we're using const Rcl::StopList& m_stops; // Remembered data for the last processed term string m_prevterm; bool m_prevstop; int m_prevpos; int m_prevbs; int m_prevbe; bool m_prevsent; // If this is set, we only emit longest grams bool m_onlygrams; }; } // End namespace Rcl #endif /* _TERMPROC_H_INCLUDED_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/expansiondbs.h������������������������������������������������������������������0000644�0001750�0001750�00000004206�13533651561�013752� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _EXPANSIONDBS_H_INCLUDED_ #define _EXPANSIONDBS_H_INCLUDED_ #include <string> #include <vector> #include <xapian.h> #include "unacpp.h" #include "synfamily.h" /** Specialization and overall creation code for the term expansion mechanism * defined in synfamily.h */ namespace Rcl { /** A Capitals/Diacritics removal functor for using with * XapComputableSynFamMember. The input term transformation always uses * UNACFOLD. Post-expansion filtering uses either UNAC or FOLD */ class SynTermTransUnac : public SynTermTrans { public: /** Constructor * @param op defines if we remove diacritics, case or both */ SynTermTransUnac(UnacOp op) : m_op(op) { } virtual std::string name() { std::string nm("Unac: "); if (m_op & UNACOP_UNAC) nm += "UNAC "; if (m_op & UNACOP_FOLD) nm += "FOLD "; return nm; } virtual std::string operator()(const std::string& in) { string out; unacmaybefold(in, out, "UTF-8", m_op); LOGDEB2("SynTermTransUnac(" << (int(m_op)) << "): in [" << (in) << "] out [" << (out) << "]\n" ); return out; } UnacOp m_op; }; /** Walk the Xapian term list and create all the expansion dbs in one go. 
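 * A minimal call sketch (names are illustrative): an indexing pass that holds
 * a Xapian::WritableDatabase wdb and is configured for English and French
 * stemming would run, once the document terms are in place:
 *     createExpansionDbs(wdb, std::vector<std::string>{"english", "french"});
 * which recreates the per-language stemming families (and, for a raw index,
 * the unaccented-stemming and case/diacritics ones) inside the same database.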
*/ extern bool createExpansionDbs(Xapian::WritableDatabase& wdb, const std::vector<std::string>& langs); } #endif /* _EXPANSIONDBS_H_INCLUDED_ */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/searchdata.cpp������������������������������������������������������������������0000644�0001750�0001750�00000027371�13533651561�013717� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ // Handle translation from rcl's SearchData structures to Xapian Queries #include "autoconfig.h" #include <stdio.h> #include <string> #include <vector> #include <algorithm> #include <sstream> #include <iostream> using namespace std; #include "xapian.h" #include "cstr.h" #include "rcldb.h" #include "rcldb_p.h" #include "searchdata.h" #include "log.h" #include "smallut.h" #include "textsplit.h" #include "unacpp.h" #include "utf8iter.h" #include "stoplist.h" #include "rclconfig.h" #include "termproc.h" #include "synfamily.h" #include "stemdb.h" #include "expansiondbs.h" #include "base64.h" #include "daterange.h" namespace Rcl { typedef vector<SearchDataClause *>::iterator qlist_it_t; typedef vector<SearchDataClause *>::const_iterator qlist_cit_t; void SearchData::commoninit() { m_haveDates = false; m_maxSize = size_t(-1); m_minSize = size_t(-1); m_haveWildCards = false; m_autodiacsens = false; m_autocasesens = true; m_maxexp = 10000; m_maxcl = 100000; m_softmaxexpand = -1; } SearchData::~SearchData() { LOGDEB0("SearchData::~SearchData\n" ); for (qlist_it_t it = m_query.begin(); it != m_query.end(); it++) delete *it; } // This is called by the GUI simple search if the option is set: add // (OR) phrase to a query (if it is simple enough) so that results // where the search terms are close and in order will come up on top. // We remove very common terms from the query to avoid performance issues. 
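// Illustration (not part of the original source): a hedged sketch of how a
// caller could use the automatic phrase described above. The SearchData
// object is assumed to already hold the simple AND clauses entered by the
// user, 'db' is an open index, and the 2% frequency threshold is purely
// illustrative:
//
//     if (sd.maybeAddAutoPhrase(db, 0.02)) {
//         // A SCLT_PHRASE clause built from the non-frequent query words is
//         // now stored in m_autophrase; it is OR'ed into the final query so
//         // that documents where the terms appear close and in order rank
//         // higher.
//     }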
bool SearchData::maybeAddAutoPhrase(Rcl::Db& db, double freqThreshold) { LOGDEB0("SearchData::maybeAddAutoPhrase()\n" ); // cerr << "BEFORE SIMPLIFY\n"; dump(cerr); simplify(); // cerr << "AFTER SIMPLIFY\n"; dump(cerr); if (!m_query.size()) { LOGDEB2("SearchData::maybeAddAutoPhrase: empty query\n" ); return false; } string field; vector<string> words; // Walk the clause list. If this is not an AND list, we find any // non simple clause or different field names, bail out. for (qlist_it_t it = m_query.begin(); it != m_query.end(); it++) { SClType tp = (*it)->m_tp; if (tp != SCLT_AND) { LOGDEB2("SearchData::maybeAddAutoPhrase: wrong tp " << (tp) << "\n" ); return false; } SearchDataClauseSimple *clp = dynamic_cast<SearchDataClauseSimple*>(*it); if (clp == 0) { LOGDEB2("SearchData::maybeAddAutoPhrase: dyncast failed\n" ); return false; } if (it == m_query.begin()) { field = clp->getfield(); } else { if (clp->getfield().compare(field)) { LOGDEB2("SearchData::maybeAddAutoPhrase: diff. fields\n" ); return false; } } // If there are wildcards or quotes in there, bail out if (clp->gettext().find_first_of("\"*[?") != string::npos) { LOGDEB2("SearchData::maybeAddAutoPhrase: wildcards\n" ); return false; } // Do a simple word-split here, not the full-blown // textsplit. Spans of stopwords should not be trimmed later // in this function, they will be properly split when the // phrase gets processed by toNativeQuery() later on. vector<string> wl; stringToStrings(clp->gettext(), wl); words.insert(words.end(), wl.begin(), wl.end()); } // Trim the word list by eliminating very frequent terms // (increasing the slack as we do it): int slack = 0; int doccnt = db.docCnt(); if (!doccnt) doccnt = 1; string swords; for (vector<string>::iterator it = words.begin(); it != words.end(); it++) { double freq = double(db.termDocCnt(*it)) / doccnt; if (freq < freqThreshold) { if (!swords.empty()) swords.append(1, ' '); swords += *it; } else { LOGDEB0("SearchData::Autophrase: [" << *it << "] too frequent (" << (100 * freq) << " %" << ")\n" ); slack++; } } // We can't make a phrase with a single word :) int nwords = TextSplit::countWords(swords); if (nwords <= 1) { LOGDEB2("SearchData::maybeAddAutoPhrase: ended with 1 word\n" ); return false; } // Increase the slack: we want to be a little more laxist than for // an actual user-entered phrase slack += 1 + nwords / 3; m_autophrase = std::shared_ptr<SearchDataClauseDist>( new SearchDataClauseDist(SCLT_PHRASE, swords, slack, field)); return true; } // Add clause to current list. OR lists cant have EXCL clauses. bool SearchData::addClause(SearchDataClause* cl) { if (m_tp == SCLT_OR && cl->getexclude()) { LOGERR("SearchData::addClause: cant add EXCL to OR list\n" ); m_reason = "No Negative (AND_NOT) clauses allowed in OR queries"; return false; } cl->setParent(this); m_haveWildCards = m_haveWildCards || cl->m_haveWildCards; m_query.push_back(cl); return true; } // Am I a file name only search ? This is to turn off term highlighting. // There can't be a subclause in a filename search: no possible need to recurse bool SearchData::fileNameOnly() { for (qlist_it_t it = m_query.begin(); it != m_query.end(); it++) if (!(*it)->isFileName()) return false; return true; } // The query language creates a lot of subqueries. See if we can merge them. void SearchData::simplify() { for (unsigned int i = 0; i < m_query.size(); i++) { if (m_query[i]->m_tp != SCLT_SUB) continue; //C[est ce dyncast qui crashe?? 
SearchDataClauseSub *clsubp = dynamic_cast<SearchDataClauseSub*>(m_query[i]); if (clsubp == 0) { // ?? continue; } if (clsubp->getSub()->m_tp != m_tp) continue; clsubp->getSub()->simplify(); // If this subquery has special attributes, it's not a // candidate for collapsing, except if it has no clauses, because // then, we just pick the attributes. if (!clsubp->getSub()->m_filetypes.empty() || !clsubp->getSub()->m_nfiletypes.empty() || clsubp->getSub()->m_haveDates || clsubp->getSub()->m_maxSize != size_t(-1) || clsubp->getSub()->m_minSize != size_t(-1) || clsubp->getSub()->m_haveWildCards) { if (!clsubp->getSub()->m_query.empty()) continue; m_filetypes.insert(m_filetypes.end(), clsubp->getSub()->m_filetypes.begin(), clsubp->getSub()->m_filetypes.end()); m_nfiletypes.insert(m_nfiletypes.end(), clsubp->getSub()->m_nfiletypes.begin(), clsubp->getSub()->m_nfiletypes.end()); if (clsubp->getSub()->m_haveDates && !m_haveDates) { m_dates = clsubp->getSub()->m_dates; } if (m_maxSize == size_t(-1)) m_maxSize = clsubp->getSub()->m_maxSize; if (m_minSize == size_t(-1)) m_minSize = clsubp->getSub()->m_minSize; m_haveWildCards = m_haveWildCards || clsubp->getSub()->m_haveWildCards; // And then let the clauses processing go on, there are // none anyway, we will just delete the subquery. } bool allsametp = true; for (qlist_it_t it1 = clsubp->getSub()->m_query.begin(); it1 != clsubp->getSub()->m_query.end(); it1++) { // We want all AND or OR clause, and same as our conjunction if (((*it1)->getTp() != SCLT_AND && (*it1)->getTp() != SCLT_OR) || (*it1)->getTp() != m_tp) { allsametp = false; break; } } if (!allsametp) continue; // All ok: delete the clause_sub, and insert the queries from // its searchdata in its place m_query.erase(m_query.begin() + i); m_query.insert(m_query.begin() + i, clsubp->getSub()->m_query.begin(), clsubp->getSub()->m_query.end()); for (unsigned int j = i; j < i + clsubp->getSub()->m_query.size(); j++) { m_query[j]->setParent(this); } i += int(clsubp->getSub()->m_query.size()) - 1; // We don't want the clauses to be deleted when the parent is, as we // know own them. 
clsubp->getSub()->m_query.clear(); delete clsubp; } } // Extract terms and groups for highlighting void SearchData::getTerms(HighlightData &hld) const { for (qlist_cit_t it = m_query.begin(); it != m_query.end(); it++) { if (!((*it)->getmodifiers() & SearchDataClause::SDCM_NOTERMS) && !(*it)->getexclude()) { (*it)->getTerms(hld); } } return; } static const char * tpToString(SClType t) { switch (t) { case SCLT_AND: return "AND"; case SCLT_OR: return "OR"; case SCLT_FILENAME: return "FILENAME"; case SCLT_PHRASE: return "PHRASE"; case SCLT_NEAR: return "NEAR"; case SCLT_PATH: return "PATH"; case SCLT_SUB: return "SUB"; default: return "UNKNOWN"; } } static string dumptabs; void SearchData::dump(ostream& o) const { o << dumptabs << "SearchData: " << tpToString(m_tp) << " qs " << int(m_query.size()) << " ft " << m_filetypes.size() << " nft " << m_nfiletypes.size() << " hd " << m_haveDates << " maxs " << int(m_maxSize) << " mins " << int(m_minSize) << " wc " << m_haveWildCards << "\n"; for (std::vector<SearchDataClause*>::const_iterator it = m_query.begin(); it != m_query.end(); it++) { o << dumptabs; (*it)->dump(o); o << "\n"; } // o << dumptabs << "\n"; } void SearchDataClause::dump(ostream& o) const { o << "SearchDataClause??"; } void SearchDataClauseSimple::dump(ostream& o) const { o << "ClauseSimple: " << tpToString(m_tp) << " "; if (m_exclude) o << "- "; o << "[" ; if (!m_field.empty()) o << m_field << " : "; o << m_text << "]"; } void SearchDataClauseFilename::dump(ostream& o) const { o << "ClauseFN: "; if (m_exclude) o << " - "; o << "[" << m_text << "]"; } void SearchDataClausePath::dump(ostream& o) const { o << "ClausePath: "; if (m_exclude) o << " - "; o << "[" << m_text << "]"; } void SearchDataClauseRange::dump(ostream& o) const { o << "ClauseRange: "; if (m_exclude) o << " - "; o << "[" << gettext() << "]"; } void SearchDataClauseDist::dump(ostream& o) const { if (m_tp == SCLT_NEAR) o << "ClauseDist: NEAR "; else o << "ClauseDist: PHRA "; if (m_exclude) o << " - "; o << "["; if (!m_field.empty()) o << m_field << " : "; o << m_text << "]"; } void SearchDataClauseSub::dump(ostream& o) const { o << "ClauseSub {\n"; dumptabs += '\t'; m_sub->dump(o); dumptabs.erase(dumptabs.size()- 1); o << dumptabs << "}"; } } // Namespace Rcl �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/synfamily.h���������������������������������������������������������������������0000644�0001750�0001750�00000015735�13533651561�013301� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SYNFAMILY_H_INCLUDED_ #define _SYNFAMILY_H_INCLUDED_ /** * The Xapian synonyms mechanism can be used for many things beyond actual * synonyms, anything that would turn a string into a group of equivalents. * Unfortunately, it has only one keyspace. * This class partitions the Xapian synonyms keyspace by using prefixes and * can provide different applications each with a family of keyspaces. * Two characters are reserved by the class and should not be used inside * either family or member names: ':' and ';' * A synonym key for family "stemdb", member "french", key "somestem" * looks like: * :stemdb:french:somestem -> somestem expansions * A special entry is used to list all the members for a family, e.g.: * :stemdb;members -> french, english ... */ #include <string> #include <vector> #include <xapian.h> #include "log.h" #include "xmacros.h" #include "strmatcher.h" namespace Rcl { class XapSynFamily { public: /** * Construct from readable xapian database and family name (ie: Stm) */ XapSynFamily(Xapian::Database xdb, const std::string& familyname) : m_rdb(xdb) { m_prefix1 = std::string(":") + familyname; } /** Retrieve all members of this family (e.g: french english german...) */ virtual bool getMembers(std::vector<std::string>&); /** debug: list map for one member to stdout */ virtual bool listMap(const std::string& fam); /** Expand term to list of synonyms for given member */ bool synExpand(const std::string& membername, const std::string& term, std::vector<std::string>& result); // The prefix shared by all synonym entries inside a family member virtual std::string entryprefix(const std::string& member) { return m_prefix1 + ":" + member + ":"; } // The key for the "list of members" entry virtual std::string memberskey() { return m_prefix1 + ";" + "members"; } Xapian::Database& getdb() { return m_rdb; } protected: Xapian::Database m_rdb; std::string m_prefix1; }; /** Modify ops for a synonyms family * * A method to add a synonym entry inside a given member would make sense, * but would not be used presently as all these ops go through * ComputableSynFamMember objects */ class XapWritableSynFamily : public XapSynFamily { public: /** Construct with Xapian db open for r/w */ XapWritableSynFamily(Xapian::WritableDatabase db, const std::string& familyname) : XapSynFamily(db, familyname), m_wdb(db) { } /** Delete all entries for one member (e.g. french), and remove from list * of members */ virtual bool deleteMember(const std::string& membername); /** Add to list of members. Idempotent, does not affect actual expansions */ virtual bool createMember(const std::string& membername); Xapian::WritableDatabase getdb() {return m_wdb;} protected: Xapian::WritableDatabase m_wdb; }; /** A functor which transforms a string */ class SynTermTrans { public: virtual std::string operator()(const std::string&) = 0; virtual std::string name() { return "SynTermTrans: unknown";} }; /** A member (set of root-synonyms associations) of a SynFamily for * which the root is computable from the input term. * The objects use a functor member to compute the term root on input * (e.g. 
compute the term sterm or casefold it */ class XapComputableSynFamMember { public: XapComputableSynFamMember(Xapian::Database xdb, std::string familyname, std::string membername, SynTermTrans* trans) : m_family(xdb, familyname), m_membername(membername), m_trans(trans), m_prefix(m_family.entryprefix(m_membername)) { } /** Expand a term to its list of synonyms. If filtertrans is set we * keep only the results which transform to the same value as the input * This is used for example for filtering the result of case+diac * expansion when only either case or diac expansion is desired. */ bool synExpand(const std::string& term, std::vector<std::string>& result, SynTermTrans *filtertrans = 0); /** Same with also wildcard/regexp expansion of entry against the keys. * The input matcher will be modified to fit our key format. */ bool synKeyExpand(StrMatcher* in, std::vector<std::string>& result, SynTermTrans *filtertrans = 0); private: XapSynFamily m_family; std::string m_membername; SynTermTrans *m_trans; std::string m_prefix; }; /** Computable term root SynFamily member, modify ops */ class XapWritableComputableSynFamMember { public: XapWritableComputableSynFamMember( Xapian::WritableDatabase xdb, std::string familyname, std::string membername, SynTermTrans* trans) : m_family(xdb, familyname), m_membername(membername), m_trans(trans), m_prefix(m_family.entryprefix(m_membername)) { } virtual bool addSynonym(const std::string& term) { LOGDEB2("addSynonym:me " << (this) << " term [" << (term) << "] m_trans " << (m_trans) << "\n" ); std::string transformed = (*m_trans)(term); LOGDEB2("addSynonym: transformed [" << (transformed) << "]\n" ); if (transformed == term) return true; std::string ermsg; try { m_family.getdb().add_synonym(m_prefix + transformed, term); } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("XapWritableComputableSynFamMember::addSynonym: xapian error " << (ermsg) << "\n" ); return false; } return true; } void clear() { m_family.deleteMember(m_membername); } void recreate() { clear(); m_family.createMember(m_membername); } private: XapWritableSynFamily m_family; std::string m_membername; SynTermTrans *m_trans; std::string m_prefix; }; // // Prefixes are centrally defined here to avoid collisions // // Lowercase accented stem to expansion. Family member name: language static const std::string synFamStem("Stm"); // Lowercase unaccented stem to expansion. Family member name: language static const std::string synFamStemUnac("StU"); // Lowercase unaccented term to case and accent variations. Only one // member, named "all". This set is used for separate case/diac // expansion by post-filtering the results of dual expansion. 
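// Illustration (not part of the original header): a hedged usage sketch for
// the "DCa" family declared just below. Keys follow the layout documented in
// the XapSynFamily comment above; the entry keyed on the unaccented,
// lowercased form of a term holds the variants seen at index time, e.g.:
//     :DCa:all:resume -> Resume, RESUME, résumé, Résumé, ...
// Expanding through a FOLD post-filter keeps only the case variants of the
// input (diacritics must match it), while a UNAC filter keeps only the
// diacritics variants (case must match). SynTermTransUnac is the transform
// defined in expansiondbs.h, and 'xdb' stands for an open Xapian::Database:
//
//     SynTermTransUnac unacfold(UNACOP_UNACFOLD); // computes the entry key
//     SynTermTransUnac foldonly(UNACOP_FOLD);     // post-filter: case only
//     XapComputableSynFamMember dca(xdb, synFamDiCa, "all", &unacfold);
//     std::vector<std::string> variants;
//     dca.synExpand("resume", variants, &foldonly);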
static const std::string synFamDiCa("DCa"); } // end namespace Rcl #endif /* _SYNFAMILY_H_INCLUDED_ */ �����������������������������������recoll-1.26.3/rcldb/rclabsfromtext.cpp��������������������������������������������������������������0000644�0001750�0001750�00000042705�13566424763�014665� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2017 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <math.h> #include <unordered_map> #include <deque> #include <algorithm> #include <regex> #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "rclquery.h" #include "rclquery_p.h" #include "textsplit.h" #include "hldata.h" #include "chrono.h" #include "unacpp.h" #include "zlibut.h" using namespace std; // #define DEBUGABSTRACT #ifdef DEBUGABSTRACT #define LOGABS LOGDEB #else #define LOGABS LOGDEB2 #endif // We now let plaintorich do the highlight tags insertions which is // wasteful because we have most of the information (but the perf hit // is small because it's only called on the output fragments, not on // the whole text). The highlight zone computation code has been left // around just in case I change my mind. #undef COMPUTE_HLZONES namespace Rcl { //// Fragment cleanup // Chars we turn to spaces in the Snippets static const string cstr_nc("\n\r\x0c\\"); // Things that we don't want to repeat in a displayed snippet. // e.g. > > > > > > static const string punctcls("[-<>._+,#*=|]"); static const string punctRE = "(" + punctcls + " *)(" + punctcls + " *)+"; static std::regex fixfrag_re(punctRE); static const string punctRep{"$2"}; static string fixfrag(const string& infrag) { return std::regex_replace(neutchars(infrag, cstr_nc), fixfrag_re, punctRep); } // Fragment descriptor. A fragment is a text area with one or several // matched terms and some context. It is ranked according to the // matched term weights and the near/phrase matches get a boost. struct MatchFragment { // Start/End byte offsets of fragment in the document text int start; int stop; // Weight for this fragment (bigger better) double coef; #ifdef COMPUTE_HLZONES // Highlight areas (each is one or several contiguous match // terms). Because a fragment extends around a match, there // can be several contiguous or separate matches in a given // fragment. vector<pair<int,int>> hlzones; #endif // Position of the first matched term (for page number computations) unsigned int hitpos; // "best term" for this match (e.g. 
for use as ext app search term) string term; MatchFragment(int sta, int sto, double c, #ifdef COMPUTE_HLZONES vector<pair<int,int>>& hl, #endif unsigned int pos, string& trm) : start(sta), stop(sto), coef(c), hitpos(pos) { #ifdef COMPUTE_HLZONES hlzones.swap(hl); #endif term.swap(trm); } }; // Text splitter for finding the match areas in the document text. class TextSplitABS : public TextSplit { public: TextSplitABS(const vector<string>& matchTerms, const HighlightData& hdata, unordered_map<string, double>& wordcoefs, unsigned int ctxwords, Flags flags, unsigned int maxterms) : TextSplit(flags), m_terms(matchTerms.begin(), matchTerms.end()), m_hdata(hdata), m_wordcoefs(wordcoefs), m_ctxwords(ctxwords), maxtermcount(maxterms) { // Take note of the group (phrase/near) terms because we need // to compute the position lists for them. for (const auto& tg : hdata.index_term_groups) { if (tg.kind != HighlightData::TermGroup::TGK_TERM) { for (const auto& group : tg.orgroups) { for (const auto& term: group) { m_gterms.insert(term); } } } } } // Accept a word and its position. If the word is a matched term, // add/update fragment definition. virtual bool takeword(const std::string& term, int pos, int bts, int bte) { LOGDEB2("takeword: " << term << endl); // Limit time taken with monster documents. The resulting // abstract will be incorrect or inexistant, but this is // better than taking forever (the default cutoff value comes // from the snippetMaxPosWalk configuration parameter, and is // 10E6) if (maxtermcount && termcount++ > maxtermcount) { LOGINF("Rclabsfromtext: stopping because maxtermcount reached: "<< maxtermcount << endl); retflags |= ABSRES_TRUNC; return false; } // Also limit the number of fragments (just in case safety) if (m_fragments.size() > maxtermcount / 100) { LOGINF("Rclabsfromtext: stopping because maxfragments reached: "<< maxtermcount/100 << endl); retflags |= ABSRES_TRUNC; return false; } // Remember recent past m_prevterms.push_back(pair<int,int>(bts,bte)); if (m_prevterms.size() > m_ctxwords+1) { m_prevterms.pop_front(); } string dumb; if (o_index_stripchars) { if (!unacmaybefold(term, dumb, "UTF-8", UNACOP_UNACFOLD)) { LOGINFO("abstract: unac failed for [" << term << "]\n"); return true; } } else { dumb = term; } if (m_terms.find(dumb) != m_terms.end()) { // This word is a search term. Extend or create fragment LOGDEB2("match: [" << dumb << "] current: " << m_curfrag.first << ", " << m_curfrag.second << " remain " << m_remainingWords << endl); double coef = m_wordcoefs[dumb]; if (!m_remainingWords) { // No current fragment. Start one m_curhitpos = baseTextPosition + pos; m_curfrag.first = m_prevterms.front().first; m_curfrag.second = m_prevterms.back().second; #ifdef COMPUTE_HLZONES m_curhlzones.push_back(pair<int,int>(bts, bte)); #endif m_curterm = term; m_curtermcoef = coef; } else { LOGDEB2("Extending current fragment: " << m_remainingWords << " -> " << m_ctxwords << endl); m_extcount++; #ifdef COMPUTE_HLZONES if (m_prevwordhit) { m_curhlzones.back().second = bte; } else { m_curhlzones.push_back(pair<int,int>(bts, bte)); } #endif if (coef > m_curtermcoef) { m_curterm = term; m_curtermcoef = coef; } } #ifdef COMPUTE_HLZONES m_prevwordhit = true; #endif m_curfragcoef += coef; m_remainingWords = m_ctxwords + 1; if (m_extcount > 5) { // Limit expansion of contiguous fragments (this is to // avoid common terms in search causing long // heavyweight meaningless fragments. Also, limit length). 
m_remainingWords = 1; m_extcount = 0; } // If the term is part of a near/phrase group, update its // positions list if (m_gterms.find(dumb) != m_gterms.end()) { // Term group (phrase/near) handling m_plists[dumb].push_back(pos); m_gpostobytes[pos] = pair<int,int>(bts, bte); LOGDEB2("Recorded bpos for " << pos << ": " << bts << " " << bte << "\n"); } } #ifdef COMPUTE_HLZONES else { // Not a matched term m_prevwordhit = false; } #endif if (m_remainingWords) { // Fragment currently open. Time to close ? m_remainingWords--; m_curfrag.second = bte; if (m_remainingWords == 0) { // We used to not push weak fragments if we had a lot // already. This can cause problems if the fragments // we drop are actually group fragments (which have // not got their boost yet). The right cut value is // difficult to determine, because the absolute values // of the coefs depend on many things (index size, // etc.) The old test was if (m_totalcoef < 5.0 || // m_curfragcoef >= 1.0) We now just avoid creating a // monster by testing the current fragments count at // the top of the function m_fragments.push_back(MatchFragment(m_curfrag.first, m_curfrag.second, m_curfragcoef, #ifdef COMPUTE_HLZONES m_curhlzones, #endif m_curhitpos, m_curterm )); m_totalcoef += m_curfragcoef; m_curfragcoef = 0.0; m_curtermcoef = 0.0; } } return true; } const vector<MatchFragment>& getFragments() { return m_fragments; } // After the text is split: use the group terms positions lists to // find the group matches. void updgroups() { LOGDEB("TextSplitABS: stored total " << m_fragments.size() << " fragments" << endl); vector<GroupMatchEntry> tboffs; // Look for matches to PHRASE and NEAR term groups and finalize // the matched regions list (sort it by increasing start then // decreasing length). We process all groups as NEAR (ignore order). for (unsigned int i = 0; i < m_hdata.index_term_groups.size(); i++) { if (m_hdata.index_term_groups[i].kind != HighlightData::TermGroup::TGK_TERM) { matchGroup(m_hdata, i, m_plists, m_gpostobytes, tboffs); } } // Sort the fragments by increasing start and decreasing width std::sort(m_fragments.begin(), m_fragments.end(), [](const MatchFragment& a, const MatchFragment& b) -> bool { if (a.start != b.start) return a.start < b.start; return a.stop - a.start > b.stop - a.stop; } ); // Sort the group regions by increasing start and decreasing width. std::sort(tboffs.begin(), tboffs.end(), [](const GroupMatchEntry& a, const GroupMatchEntry& b) -> bool { if (a.offs.first != b.offs.first) return a.offs.first < b.offs.first; return a.offs.second > b.offs.second; } ); // Give a boost to fragments which contain a group match // (phrase/near), they are dear to the user's heart. Lists are // sorted, so we never go back in the fragment list (can // always start the search where we previously stopped). 
if (m_fragments.empty()) { return; } auto fragit = m_fragments.begin(); for (const auto& grpmatch : tboffs) { LOGDEB2("LOOKING FOR FRAGMENT: group: " << grpmatch.offs.first << "-" << grpmatch.offs.second << " curfrag " << fragit->start << "-" << fragit->stop << endl); while (fragit->stop < grpmatch.offs.first) { fragit++; if (fragit == m_fragments.end()) { return; } } if (fragit->start <= grpmatch.offs.first && fragit->stop >= grpmatch.offs.second) { // grp in frag fragit->coef += 10.0; } } return; } int getretflags() { return retflags; } private: // Past terms because we need to go back for context before a hit deque<pair<int,int>> m_prevterms; // Data about the fragment we are building pair<int,int> m_curfrag{0,0}; double m_curfragcoef{0.0}; unsigned int m_remainingWords{0}; unsigned int m_extcount{0}; #ifdef COMPUTE_HLZONES vector<pair<int,int>> m_curhlzones; bool m_prevwordhit{false}; #endif // Current sum of fragment weights double m_totalcoef{0.0}; // Position of 1st term match (for page number computations) unsigned int m_curhitpos{0}; // "best" term string m_curterm; double m_curtermcoef{0.0}; // Group terms, extracted from m_hdata unordered_set<string> m_gterms; // group/near terms word positions. unordered_map<string, vector<int> > m_plists; unordered_map<int, pair<int, int> > m_gpostobytes; // Input unordered_set<string> m_terms; const HighlightData& m_hdata; unordered_map<string, double>& m_wordcoefs; unsigned int m_ctxwords; // Result: begin and end byte positions of query terms/groups in text vector<MatchFragment> m_fragments; unsigned int termcount{0}; unsigned int maxtermcount{0}; int retflags{0}; }; int Query::Native::abstractFromText( Rcl::Db::Native *ndb, Xapian::docid docid, const vector<string>& matchTerms, const multimap<double, vector<string>> byQ, double totalweight, int ctxwords, unsigned int maxtotaloccs, vector<Snippet>& vabs, Chrono& chron, bool sortbypage ) { (void)chron; LOGABS("abstractFromText: entry: " << chron.millis() << "mS\n"); string rawtext; if (!ndb->getRawText(docid, rawtext)) { LOGDEB0("abstractFromText: can't fetch text\n"); return ABSRES_ERROR; } LOGABS("abstractFromText: got raw text: size " << rawtext.size() << " " << chron.millis() << "mS\n"); #if 0 && ! (XAPIAN_MAJOR_VERSION <= 1 && XAPIAN_MINOR_VERSION <= 2) && \ (defined(RAWTEXT_IN_DATA)) // Tryout the Xapian internal method. string snippet = xmset.snippet(rawtext); LOGDEB("SNIPPET: [" << snippet << "] END SNIPPET\n"); #endif // We need the q coefs for individual terms unordered_map<string, double> wordcoefs; for (const auto& mment : byQ) { for (const auto& word : mment.second) { wordcoefs[word] = mment.first; } } // Note: getTerms() was already called by qualityTerms, so this is // a bit wasteful. I guess that the performance impact is // negligible though. To be checked ? We need the highlightdata for the // phrase/near groups. 
HighlightData hld; if (m_q->m_sd) { m_q->m_sd->getTerms(hld); } LOGABS("abstractFromText: getterms: " << chron.millis() << "mS\n"); TextSplitABS splitter(matchTerms, hld, wordcoefs, ctxwords, TextSplit::TXTS_ONLYSPANS, m_q->m_snipMaxPosWalk); splitter.text_to_words(rawtext); LOGABS("abstractFromText: text_to_words: " << chron.millis() << "mS\n"); splitter.updgroups(); // Sort the fragments by decreasing weight const vector<MatchFragment>& res1 = splitter.getFragments(); vector<MatchFragment> result(res1.begin(), res1.end()); if (sortbypage) { std::sort(result.begin(), result.end(), [](const MatchFragment& a, const MatchFragment& b) -> bool { return a.hitpos < b.hitpos; } ); } else { std::sort(result.begin(), result.end(), [](const MatchFragment& a, const MatchFragment& b) -> bool { return a.coef > b.coef; } ); } vector<int> vpbreaks; ndb->getPagePositions(docid, vpbreaks); // Build the output snippets array by merging the fragments, their // main term and the page positions. unsigned int count = 0; for (const auto& entry : result) { string frag( fixfrag(rawtext.substr(entry.start, entry.stop - entry.start))); #ifdef COMPUTE_HLZONES // This would need to be modified to take tag parameters // instead of the const strings static const string starthit("<span style='color: blue;'>"); static const string endhit("</span>"); size_t inslen = 0; for (const auto& hlzone: entry.hlzones) { frag.replace(hlzone.first - entry.start + inslen, 0, starthit); inslen += starthit.size(); frag.replace(hlzone.second - entry.start + inslen, 0, endhit); inslen += endhit.size(); } #endif int page = 0; if (vpbreaks.size() > 1) { page = ndb->getPageNumberForPosition(vpbreaks, entry.hitpos); if (page < 0) page = 0; } LOGDEB0("=== FRAGMENT: p. " << page << " Coef: " << entry.coef << ": " << frag << endl); vabs.push_back(Snippet(page, frag).setTerm(entry.term)); if (count++ >= maxtotaloccs) break; } return ABSRES_OK | splitter.getretflags(); } } �����������������������������������������������������������recoll-1.26.3/rcldb/searchdataxml.cpp���������������������������������������������������������������0000644�0001750�0001750�00000011206�13533651561�014426� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ // Handle translation from rcl's SearchData structures to XML. 
Used for // complex search history storage in the GUI #include "autoconfig.h" #include <stdio.h> #include <string> #include <vector> #include <sstream> #include "searchdata.h" #include "log.h" #include "base64.h" using namespace std; namespace Rcl { static string tpToString(SClType tp) { switch (tp) { case SCLT_AND: return "AND"; case SCLT_OR: return "OR"; case SCLT_FILENAME: return "FN"; case SCLT_PHRASE: return "PH"; case SCLT_NEAR: return "NE"; case SCLT_RANGE: return "RG"; case SCLT_SUB: return "SU"; // Unsupported actually default: return "UN"; } } string SearchData::asXML() { LOGDEB("SearchData::asXML\n" ); ostringstream os; // Searchdata os << "<SD>" << endl; // Clause list os << "<CL>" << endl; // List conjunction: default is AND, else print it. if (m_tp != SCLT_AND) os << "<CLT>" << tpToString(m_tp) << "</CLT>" << endl; for (unsigned int i = 0; i < m_query.size(); i++) { SearchDataClause *c = m_query[i]; if (c->getTp() == SCLT_SUB) { LOGERR("SearchData::asXML: can't do subclauses !\n" ); continue; } if (c->getTp() == SCLT_PATH) { // Keep these apart, for compat with the older history format. NEG // is ignored here, we have 2 different tags instead. SearchDataClausePath *cl = dynamic_cast<SearchDataClausePath*>(c); if (cl->getexclude()) { os << "<ND>" << base64_encode(cl->gettext()) << "</ND>" << endl; } else { os << "<YD>" << base64_encode(cl->gettext()) << "</YD>" << endl; } continue; } else { os << "<C>" << endl; if (c->getexclude()) os << "<NEG/>" << endl; if (c->getTp() != SCLT_AND) { os << "<CT>" << tpToString(c->getTp()) << "</CT>" << endl; } if (c->getTp() == SCLT_FILENAME) { SearchDataClauseFilename *cl = dynamic_cast<SearchDataClauseFilename*>(c); os << "<T>" << base64_encode(cl->gettext()) << "</T>" << endl; } else { SearchDataClauseSimple *cl = dynamic_cast<SearchDataClauseSimple*>(c); if (!cl->getfield().empty()) { os << "<F>" << base64_encode(cl->getfield()) << "</F>" << endl; } os << "<T>" << base64_encode(cl->gettext()) << "</T>" << endl; if (cl->getTp() == SCLT_RANGE) { SearchDataClauseRange *clr = dynamic_cast<SearchDataClauseRange*>(cl); const string& t = clr->gettext2(); if (!t.empty()) { os << "<T2>" << base64_encode(clr->gettext2()) << "</T2>" << endl; } } if (cl->getTp() == SCLT_NEAR || cl->getTp() == SCLT_PHRASE) { SearchDataClauseDist *cld = dynamic_cast<SearchDataClauseDist*>(cl); os << "<S>" << cld->getslack() << "</S>" << endl; } } os << "</C>" << endl; } } os << "</CL>" << endl; if (m_haveDates) { if (m_dates.y1 > 0) { os << "<DMI>" << "<D>" << m_dates.d1 << "</D>" << "<M>" << m_dates.m1 << "</M>" << "<Y>" << m_dates.y1 << "</Y>" << "</DMI>" << endl; } if (m_dates.y2 > 0) { os << "<DMA>" << "<D>" << m_dates.d2 << "</D>" << "<M>" << m_dates.m2 << "</M>" << "<Y>" << m_dates.y2 << "</Y>" << "</DMA>" << endl; } } if (m_minSize != size_t(-1)) { os << "<MIS>" << m_minSize << "</MIS>" << endl; } if (m_maxSize != size_t(-1)) { os << "<MAS>" << m_maxSize << "</MAS>" << endl; } if (!m_filetypes.empty()) { os << "<ST>"; for (vector<string>::iterator it = m_filetypes.begin(); it != m_filetypes.end(); it++) { os << *it << " "; } os << "</ST>" << endl; } if (!m_nfiletypes.empty()) { os << "<IT>"; for (vector<string>::iterator it = m_nfiletypes.begin(); it != m_nfiletypes.end(); it++) { os << *it << " "; } os << "</IT>" << endl; } os << "</SD>"; return os.str(); } } 
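// Illustration (not part of the original source): approximate output of
// SearchData::asXML() for an AND query holding one field-restricted simple
// clause and one NEAR clause with a slack of 2. The <F>, <T> and <T2>
// element contents are base64-encoded by the code above; they are shown in
// clear here for readability:
//
//   <SD>
//   <CL>
//   <C>
//   <F>author</F>
//   <T>dockes</T>
//   </C>
//   <C>
//   <CT>NE</CT>
//   <T>open source</T>
//   <S>2</S>
//   </C>
//   </CL>
//   </SD>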
������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/xmacros.h�����������������������������������������������������������������������0000644�0001750�0001750�00000004011�13567765362�012737� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _xmacros_h_included_ #define _xmacros_h_included_ // Generic Xapian exception catching code. We do this quite often, // and I have no idea how to do this except for a macro #define XCATCHERROR(MSG) \ catch (const Xapian::Error &e) { \ MSG = e.get_msg(); \ if (MSG.empty()) MSG = "Empty error message"; \ } catch (const std::string &s) { \ MSG = s; \ if (MSG.empty()) MSG = "Empty error message"; \ } catch (const char *s) { \ MSG = s; \ if (MSG.empty()) MSG = "Empty error message"; \ } catch (...) 
{ \ MSG = "Caught unknown xapian exception"; \ } #define XAPTRY(STMTTOTRY, XAPDB, ERSTR) \ for (int tries = 0; tries < 2; tries++) { \ try { \ STMTTOTRY; \ ERSTR.erase(); \ break; \ } catch (const Xapian::DatabaseModifiedError &e) { \ ERSTR = e.get_msg(); \ XAPDB.reopen(); \ continue; \ } XCATCHERROR(ERSTR); \ break; \ } #endif �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldoc.h������������������������������������������������������������������������0000644�0001750�0001750�00000024154�13533651561�012527� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RCLDOC_H_INCLUDED_ #define _RCLDOC_H_INCLUDED_ #include <string> #include <unordered_map> #include <vector> #include "smallut.h" namespace Rcl { /** * Dumb holder for document attributes and data. * * This is used both for indexing, where fields are filled-up by the * indexer prior to adding to the index, and for querying, where * fields are filled from data stored in the index. Not all fields are * in use at both index and query times, and not all field data is * stored at index time. */ class Doc { public: //////////////////////////////////////////////////////////// // The following fields are stored into the document data record (so they // can be accessed after a query without fetching the actual document). // We indicate the routine that sets them up during indexing // Binary or url-encoded url. No transcoding: this is used to access files // Index: computed by Db::add caller. // Query: from doc data. std::string url; // When we do path translation for documents from external indexes, we // save the original path: std::string idxurl; // And the originating db. 0 is base, 1 first external etc. int idxi{0}; // Internal path for multi-doc files. Ascii // Set by FsIndexer::processone std::string ipath; // Mime type. Set by FileInterner::internfile std::string mimetype; // File modification time as decimal ascii unix time // Set by FsIndexer::processone std::string fmtime; // Data reference date (same format). 
Ie: mail date // Possibly set by mimetype-specific handler // Filter::metaData["modificationdate"] std::string dmtime; // Charset we transcoded the 'text' field from (in case we want back) // Possibly set by handler std::string origcharset; // A map for textual metadata like, author, keywords, abstract, // title. The entries are possibly set by the mimetype-specific // handler. If a fieldname-to-prefix translation exists, the // terms in the value will be indexed with a prefix. // Only some predefined fields are stored in the data record: // "title", "keywords", "abstract", "author", but if a field name is // in the "stored" configuration list, it will be stored too. std::unordered_map<std::string, std::string> meta; // Attribute for the "abstract" entry. true if it is just the top // of doc, not a native document attribute. Not stored directly, but // as an indicative prefix at the beginning of the abstract (ugly hack) bool syntabs{false}; // File size. This is the size of the compressed file or of the // external containing archive. // Index: Set by caller prior to Db::Add. // Query: Set from data record std::string pcbytes; // Document size, ie, size of the .odt or .xls. // Index: Set in internfile from the filter stack // Query: set from data record std::string fbytes; // Doc text size. // Index: from text.length(). // Query: set by rcldb from index data record std::string dbytes; // Doc signature. Used for up to date checks. // Index: set by Db::Add caller. Query: set from doc data. // This is opaque to rcldb, and could just as well be ctime, size, // ctime+size, md5, whatever. std::string sig; ///////////////////////////////////////////////// // The following fields don't go to the db record, so they can't // be retrieved at query time // Main document text. This is plaintext utf-8 text to be split // and indexed std::string text; ///////////////////////////////////////////////// // Misc stuff int pc{0}; // relevancy percentage, used by sortseq, convenience unsigned long xdocid{0}; // Opaque: rcldb doc identifier. // Page breaks were stored during indexing. bool haspages{false}; // Has children, either as content of file-level container or // ipath descendants. bool haschildren{false}; // During indexing: only fields from extended attributes were set, no // doc content. Allows for faster reindexing of existing doc bool onlyxattr{false}; /////////////////////////////////////////////////////////////////// void erase() { url.erase(); idxurl.erase(); idxi = 0; ipath.erase(); mimetype.erase(); fmtime.erase(); dmtime.erase(); origcharset.erase(); meta.clear(); syntabs = false; pcbytes.erase(); fbytes.erase(); dbytes.erase(); sig.erase(); text.erase(); pc = 0; xdocid = 0; haspages = false; haschildren = false; onlyxattr = false; } // Copy ensuring no shared string data, for threading issues. void copyto(Doc *d) const; Doc() { } /** Get value for named field. If value pointer is 0, just test existence */ bool getmeta(const std::string& nm, std::string *value = 0) const { const auto it = meta.find(nm); if (it != meta.end()) { if (value) *value = it->second; return true; } else { return false; } } /** Nocopy getvalue. sets pointer to entry value if exists */ bool peekmeta(const std::string& nm, const std::string **value = 0) const { const auto it = meta.find(nm); if (it != meta.end()) { if (value) *value = &(it->second); return true; } else { return false; } } // Create entry or append text to existing entry. 
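// Illustration (not part of the original header): hedged usage sketch for
// the meta accessors around this point, getmeta() above and addmeta() just
// below. As implemented below, adding a second, different value for an
// existing key appends it after " - ":
//
//     Rcl::Doc doc;
//     doc.addmeta("author", "J. Smith");
//     doc.addmeta("author", "A. Jones"); // meta["author"] == "J. Smith - A. Jones"
//     std::string author;
//     if (doc.getmeta("author", &author)) {
//         // author == "J. Smith - A. Jones"
//     }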
bool addmeta(const std::string& nm, const std::string& value) { auto mit = meta.find(nm); if (mit == meta.end()) { meta[nm] = value; } else if (mit->second.empty()) { mit->second = value; } else { // It may happen that the same attr exists several times // in the internfile stack. Avoid duplicating values. if (mit->second != value) mit->second += std::string(" - ") + value; } return true; } /* Is this document stored as a regular filesystem file ? * (as opposed to e.g. a webcache file), not a subdoc, */ bool isFsFile() { std::string backend; getmeta(keybcknd, &backend); if (!backend.empty() && backend.compare("FS")) return false; return true; } void dump(bool dotext=false) const; //////////////////////////////////////////////////////////////// // The official names for recoll native fields when used in a text // context (ie: the python interface duplicates some of the fixed // fields in the meta array, these are the names used). Defined in // rcldoc.cpp. Fields stored in the meta[] array (ie, title, // author), _must_ use these canonical values, not aliases. This is // enforced in internfile.cpp and misc other bits of metadata-gathering // code static const std::string keyurl; // url // childurl. This is set when working with the parent of the result, to hold // the child of interest url, typically to highlight a directory entry static const std::string keychildurl; // file name. This is set for filesystem-level containers or // documents, and not inherited by subdocuments (which can get a // keyfn anyway from, e.g, an attachment filename value). Subdocs // used to inherit the file name, but this was undesirable (you // usually don't want to see all subdocs when searching for the // file name). Instead the container file name is now set in the // document record but not indexed (see next entry). static const std::string keyfn; // Container file name. This is set for all subdocuments of a // given top level container. It is not indexed by default but // stored in the document record keyfn field if this is still // empty when we create it, for display purposes. static const std::string keytcfn; static const std::string keyipt; // ipath static const std::string keytp; // mime type static const std::string keyfmt; // file mtime static const std::string keydmt; // document mtime static const std::string keymt; // mtime dmtime if set else fmtime static const std::string keyoc; // original charset static const std::string keypcs; // document outer container size static const std::string keyfs; // document size static const std::string keyds; // document text size static const std::string keysz; // dbytes if set else fbytes else pcbytes static const std::string keysig; // sig static const std::string keyrr; // relevancy rating static const std::string keycc; // Collapse count static const std::string keyabs; // abstract static const std::string keyau; // author static const std::string keytt; // title static const std::string keykw; // keywords static const std::string keymd5; // file md5 checksum static const std::string keybcknd; // backend type when not from the fs // udi back from index. Only set by Rcl::Query::getdoc(). static const std::string keyudi; static const std::string keyapptg; // apptag. 
Set from localfields (fs only) static const std::string keybght; // beagle hit type ("beagleHitType") }; extern bool docsToPaths(std::vector<Doc> &docs,std::vector<std::string> &paths); } #endif /* _RCLDOC_H_INCLUDED_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclvalues.h���������������������������������������������������������������������0000644�0001750�0001750�00000002231�13533651561�013251� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef _RCLVALUES_H_INCLUDED_ #define _RCLVALUES_H_INCLUDED_ /* Copyright (C) 2004-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <string> namespace Rcl { extern void add_field_value(Xapian::Document& xdoc, const FieldTraits& ft, const std::string& data); extern std::string convert_field_value(const FieldTraits& ft, const std::string& data); } #endif /* _RCLVALUES_H_INCLUDED_ */ �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/stoplist.cpp��������������������������������������������������������������������0000644�0001750�0001750�00000005362�13533651561�013475� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef TEST_STOPLIST #include "log.h" #include "readfile.h" #include "unacpp.h" #include "smallut.h" #include "stoplist.h" #ifndef NO_NAMESPACES namespace Rcl { #endif bool StopList::setFile(const string &filename) { m_stops.clear(); string stoptext, reason; if (!file_to_string(filename, stoptext, &reason)) { LOGDEB0("StopList::StopList: file_to_string(" << (filename) << ") failed: " << (reason) << "\n" ); return false; } set<string> stops; stringToStrings(stoptext, stops); for (set<string>::iterator it = stops.begin(); it != stops.end(); it++) { string dterm; unacmaybefold(*it, dterm, "UTF-8", UNACOP_UNACFOLD); m_stops.insert(dterm); } return true; } // Most sites will have an empty stop list. We try to optimize the // empty set case as much as possible. empty() is probably sligtly faster than // find() in this case. bool StopList::isStop(const string &term) const { return m_stops.empty() ? false : m_stops.find(term) != m_stops.end(); } #ifndef NO_NAMESPACES } #endif #else // TEST_STOPLIST #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <string.h> #include <string> #include <iostream> #include "stoplist.h" using namespace std; using namespace Rcl; static char *thisprog; static char usage [] = "trstoplist stopstermsfile\n\n" ; static void Usage(void) { fprintf(stderr, "%s: usage:\n%s", thisprog, usage); exit(1); } const string tstwords[] = { "the", "is", "xweird", "autre", "autre double", "mot1", "mot double", }; const int tstsz = sizeof(tstwords) / sizeof(string); int main(int argc, char **argv) { int count = 10; thisprog = argv[0]; argc--; argv++; if (argc != 1) Usage(); string filename = argv[0]; argc--; StopList sl(filename); for (int i = 0; i < tstsz; i++) { const string &tst = tstwords[i]; cout << "[" << tst << "] " << (sl.isStop(tst) ? "in stop list" : "not in stop list") << endl; } exit(0); } #endif // TEST_STOPLIST ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rclabstract.cpp�����������������������������������������������������������������0000644�0001750�0001750�00000060411�13566424763�014124� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2017 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <math.h> #include <map> #include <unordered_map> #include <deque> #include <algorithm> #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "rclquery.h" #include "rclquery_p.h" #include "textsplit.h" #include "searchdata.h" #include "utf8iter.h" #include "hldata.h" #include "chrono.h" using namespace std; namespace Rcl { static Chrono chron; // This is used as a marker inside the abstract frag lists, but // normally doesn't remain in final output (which is built with a // custom sep. by our caller). static const string cstr_ellipsis("..."); static const string emptys; // This is used to mark positions overlapped by a multi-word match term static const string occupiedmarker("?"); #define DEBUGABSTRACT #ifdef DEBUGABSTRACT #define LOGABS LOGDEB #else #define LOGABS LOGDEB2 #endif // Unprefix terms. Actually it's not completely clear if we should // remove prefixes and keep all terms or prune the prefixed // ones. There is no good way to be sure what will provide the best // result in general. static const bool prune_prefixed_terms = true; static void noPrefixList(const vector<string>& in, vector<string>& out) { for (const auto& term : in) { if (prune_prefixed_terms) { if (has_prefix(term)) continue; } out.push_back(strip_prefix(term)); } sort(out.begin(), out.end()); vector<string>::iterator it = unique(out.begin(), out.end()); out.resize(it - out.begin()); } bool Query::Native::getMatchTerms(unsigned long xdocid, vector<string>& terms) { if (!xenquire) { LOGERR("Query::getMatchTerms: no query opened\n"); return false; } terms.clear(); Xapian::TermIterator it; Xapian::docid id = Xapian::docid(xdocid); vector<string> iterms; XAPTRY(iterms.insert(iterms.begin(), xenquire->get_matching_terms_begin(id), xenquire->get_matching_terms_end(id)), m_q->m_db->m_ndb->xrdb, m_q->m_reason); if (!m_q->m_reason.empty()) { LOGERR("getMatchTerms: xapian error: " << m_q->m_reason << "\n"); return false; } noPrefixList(iterms, terms); return true; } // Retrieve db-wide frequencies for the query terms and store them in // the query object. This is done at most once for a query, and the data is used // while computing abstracts for the different result documents. void Query::Native::setDbWideQTermsFreqs() { // Do it once only for a given query. if (!termfreqs.empty()) return; vector<string> qterms; { vector<string> iqterms; m_q->getQueryTerms(iqterms); noPrefixList(iqterms, qterms); } LOGDEB("Query terms: " << stringsToString(qterms) << endl); Xapian::Database &xrdb = m_q->m_db->m_ndb->xrdb; double doccnt = xrdb.get_doccount(); if (doccnt == 0) doccnt = 1; for (const auto& term : qterms) { termfreqs[term] = xrdb.get_termfreq(term) / doccnt; LOGABS("setDbWideQTermFreqs: [" << term << "] db freq " << termfreqs[term] << "\n"); } } // Compute matched terms quality coefficients for a matched document by // retrieving the Within Document Frequencies and multiplying by // overal term frequency, then using log-based thresholds. // 2012: it's not too clear to me why exactly we do the log thresholds thing. 
// Preferring terms which are rare in either or both of the db and the
// document seems reasonable though.
// To avoid setting a high quality for a low frequency expansion of a
// common stem, which seems wrong, we group the terms by
// root, compute a frequency for the group from the sum of member
// occurrences, and let the frequency for each group member be the
// aggregated frequency.
double Query::Native::qualityTerms(Xapian::docid docid,
                                   const vector<string>& terms,
                                   multimap<double, vector<string> >& byQ)
{
    LOGABS("qualityTerms: entry " << chron.millis() << "mS\n");
    setDbWideQTermsFreqs();
    LOGABS("qualityTerms: setDbWide..: " << chron.millis() << "mS\n");
    map<string, double> termQcoefs;
    double totalweight = 0;
    Xapian::Database &xrdb = m_q->m_db->m_ndb->xrdb;
    double doclen = xrdb.get_doclength(docid);
    if (doclen == 0)
        doclen = 1;
    HighlightData hld;
    if (m_q->m_sd) {
        m_q->m_sd->getTerms(hld);
    }
    LOGABS("qualityTerms: m_sd->getTerms(): " << chron.millis() << "mS\n");

    // Group the input terms by the user term they were possibly
    // expanded from (by stemming)
    map<string, vector<string> > byRoot;
    for (const auto& term: terms) {
        const auto eit = hld.terms.find(term);
        if (eit != hld.terms.end()) {
            byRoot[eit->second].push_back(term);
        } else {
            LOGDEB0("qualityTerms: [" << term << "] not found in hld\n");
            byRoot[term].push_back(term);
        }
    }

#ifdef DEBUGABSTRACT
    {
        LOGABS("qualityTerms: hld: " << hld.toString() << "\n");
        string byRootstr;
        for (const auto& entry : byRoot) {
            byRootstr.append("[").append(entry.first).append("]->");
            for (const auto& term : entry.second) {
                byRootstr.append("[").append(term).append("] ");
            }
            byRootstr.append("\n");
        }
        LOGABS("qualityTerms: uterms to terms: " << chron.millis() << "mS " <<
               byRootstr << endl);
    }
#endif

    // Compute in-document and global frequencies for the groups. We
    // used to call termlist_begin() for each term. This was very slow
    // on big documents and long term lists. We now compute a sorted
    // list of terms (with pointers back to their root through a map),
    // and just call skip_to repeatedly
    vector<string> allterms;
    unordered_map<string, string> toRoot;
    for (const auto& group : byRoot) {
        for (const auto& term : group.second) {
            allterms.push_back(term);
            toRoot[term] = group.first;
        }
    }
    sort(allterms.begin(), allterms.end());
    allterms.erase(unique(allterms.begin(), allterms.end()), allterms.end());
    map<string, double> grpwdfs;
    map<string, double> grptfreqs;
    Xapian::TermIterator xtermit = xrdb.termlist_begin(docid);
    for (const auto& term : allterms) {
        const string& root = toRoot[term];
        xtermit.skip_to(term);
        if (xtermit != xrdb.termlist_end(docid) && *xtermit == term) {
            // Aggregate per root: add up if the group was already seen,
            // else initialize it.
            if (grpwdfs.find(root) != grpwdfs.end()) {
                grpwdfs[root] += xtermit.get_wdf() / doclen;
                grptfreqs[root] += termfreqs[term];
            } else {
                grpwdfs[root] = xtermit.get_wdf() / doclen;
                grptfreqs[root] = termfreqs[term];
            }
        } else {
            LOGDEB("qualityTerms: term not found in doc term list: " <<
                   term << endl);
        }
    }
    LOGABS("qualityTerms: freqs compute: " << chron.millis() << "mS\n");

    // Build a sorted by quality container for the groups
    for (const auto& group : byRoot) {
        double q = (grpwdfs[group.first]) * grptfreqs[group.first];
        q = -log10(q);
        if (q < 3) {
            q = 0.05;
        } else if (q < 4) {
            q = 0.3;
        } else if (q < 5) {
            q = 0.7;
        } else if (q < 6) {
            q = 0.8;
        } else {
            q = 1;
        }
        totalweight += q;
        byQ.insert(pair<double, vector<string> >(q, group.second));
    }

#ifdef DEBUGABSTRACT
    for (auto mit= byQ.rbegin(); mit != byQ.rend(); mit++) {
        LOGABS("qualityTerms: coef: " << mit->first << " group: " <<
               stringsToString(mit->second) << endl);
    }
#endif
    return totalweight;
}

// Return page number for first match of "significant" term.
int Query::Native::getFirstMatchPage(Xapian::docid docid, string& term)
{
    LOGDEB("Query::Native::getFirstMatchPage\n");
    chron.restart();
    if (!m_q || !m_q->m_db || !m_q->m_db->m_ndb || !m_q->m_db->m_ndb->m_isopen) {
        LOGERR("Query::getFirstMatchPage: no db\n");
        return -1;
    }
    Rcl::Db::Native *ndb(m_q->m_db->m_ndb);
    Xapian::Database& xrdb(ndb->xrdb);
    vector<string> terms;
    getMatchTerms(docid, terms);
    if (terms.empty()) {
        LOGDEB("getFirstMatchPage: empty match term list (field match?)\n");
        return -1;
    }
    vector<int> pagepos;
    ndb->getPagePositions(docid, pagepos);
    if (pagepos.empty())
        return -1;
    setDbWideQTermsFreqs();

    // We try to use a page which matches the "best" term. Get a sorted list
    multimap<double, vector<string> > byQ;
    qualityTerms(docid, terms, byQ);

    for (auto mit = byQ.rbegin(); mit != byQ.rend(); mit++) {
        for (vector<string>::const_iterator qit = mit->second.begin();
             qit != mit->second.end(); qit++) {
            string qterm = *qit;
            Xapian::PositionIterator pos;
            string emptys;
            try {
                for (pos = xrdb.positionlist_begin(docid, qterm);
                     pos != xrdb.positionlist_end(docid, qterm); pos++) {
                    int pagenum = ndb->getPageNumberForPosition(pagepos, *pos);
                    if (pagenum > 0) {
                        term = qterm;
                        return pagenum;
                    }
                }
            } catch (...) {
                // Term does not occur. No problem.
            }
        }
    }
    return -1;
}

// Creating the abstract from index position data: populate the sparse
// array with the positions for a given query term, and mark the
// neighboring positions.
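// Illustrative sketch (not from the original sources; the term and the
// position P, which must be above baseTextPosition, are made up, and both
// ctxwords and getAbsCtxLen() are assumed to be 2): a single-word query
// term "xapian" matched at position P leaves the sparse array as
//   P-2 -> ""        empty slot, to be filled by a context term later
//   P-1 -> ""
//   P   -> "xapian"  also recorded in searchTermPositions
//   P+1 -> ""
//   P+2 -> ""
//   P+3 -> "..."     ellipsis, possibly replaced by an overlapping extract
// A two-word match term would additionally mark P+1 with the
// occupiedmarker ("?") so that no context term overwrites it.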
void Query::Native::abstractPopulateQTerm( Xapian::Database& xrdb, Xapian::docid docid, const string& qterm, int qtrmwrdcnt, int ctxwords, unsigned int maxgrpoccs, unsigned int maxtotaloccs, map<unsigned int, string>& sparseDoc, unordered_set<unsigned int>& searchTermPositions, unsigned int& maxpos, unsigned int& totaloccs, unsigned int& grpoccs, int& ret ) { Xapian::PositionIterator pos; // Walk the position list for this term. for (pos = xrdb.positionlist_begin(docid, qterm); pos != xrdb.positionlist_end(docid, qterm); pos++) { int ipos = *pos; if (ipos < int(baseTextPosition)) // Not in text body continue; LOGABS("makeAbstract: [" << qterm << "] at pos " << ipos << " grpoccs " << grpoccs << " maxgrpoccs " << maxgrpoccs << "\n"); totaloccs++; grpoccs++; // Add adjacent slots to the set to populate at next // step by inserting empty strings. Special provisions // for adding ellipsis and for positions overlapped by // the match term. unsigned int sta = MAX(int(baseTextPosition), ipos - ctxwords); unsigned int sto = ipos + qtrmwrdcnt-1 + m_q->m_db->getAbsCtxLen(); for (unsigned int ii = sta; ii <= sto; ii++) { if (ii == (unsigned int)ipos) { sparseDoc[ii] = qterm; searchTermPositions.insert(ii); if (ii > maxpos) maxpos = ii; } else if (ii > (unsigned int)ipos && ii < (unsigned int)ipos + qtrmwrdcnt) { // Position for another word of the multi-word term sparseDoc[ii] = occupiedmarker; } else if (!sparseDoc[ii].compare(cstr_ellipsis)) { // For an empty slot, the test above has a side // effect of inserting an empty string which // is what we want. Do it also if it was an ellipsis sparseDoc[ii] = emptys; } } // Add ellipsis at the end. This may be replaced later by // an overlapping extract. Take care not to replace an // empty string here, we really want an empty slot, // use find() if (sparseDoc.find(sto+1) == sparseDoc.end()) { sparseDoc[sto+1] = cstr_ellipsis; } // Group done ? if (grpoccs >= maxgrpoccs) { ret |= ABSRES_TRUNC; LOGABS("Db::makeAbstract: max group occs cutoff\n"); break; } // Global done ? if (totaloccs >= maxtotaloccs) { ret |= ABSRES_TRUNC; LOGABS("Db::makeAbstract: max occurrences cutoff\n"); break; } } } // Creating the abstract from index position data: after the query // terms have been inserted at their place in the sparse array, and // the neighboring positions marked, populate the neighbours: for each // term in the document, walk its position list and populate slots // around the query terms. We arbitrarily truncate the list to avoid // taking forever. If we do cutoff, the abstract may be inconsistant // (missing words, potentially altering meaning), which is bad. 
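// Continuing the sketch above (hypothetical data, not from the original
// sources): after this pass the empty slots around P might have become
//   P-2 -> "searching"  P-1 -> "with"  P -> "xapian"
//   P+1 -> "is"         P+2 -> "fast"
// which the extraction phase below joins into the snippet
// "searching with xapian is fast" (the "..." slot only marks the snippet
// boundary). If the m_snipMaxPosWalk cutoff triggers first, some slots
// stay empty and ABSRES_TERMMISS is set in the return flags.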
void Query::Native::abstractPopulateContextTerms( Xapian::Database& xrdb, Xapian::docid docid, unsigned int maxpos, map<unsigned int, string>& sparseDoc, int& ret ) { Xapian::TermIterator term; int cutoff = m_q->m_snipMaxPosWalk; for (term = xrdb.termlist_begin(docid); term != xrdb.termlist_end(docid); term++) { // Ignore prefixed terms if (has_prefix(*term)) continue; if (m_q->m_snipMaxPosWalk > 0 && cutoff-- < 0) { ret |= ABSRES_TERMMISS; LOGDEB0("makeAbstract: max term count cutoff " << m_q->m_snipMaxPosWalk << "\n"); break; } map<unsigned int, string>::iterator vit; Xapian::PositionIterator pos; for (pos = xrdb.positionlist_begin(docid, *term); pos != xrdb.positionlist_end(docid, *term); pos++) { if (m_q->m_snipMaxPosWalk > 0 && cutoff-- < 0) { ret |= ABSRES_TERMMISS; LOGDEB0("makeAbstract: max term count cutoff " << m_q->m_snipMaxPosWalk << "\n"); break; } // If we are beyond the max possible position, stop // for this term if (*pos > maxpos) { break; } if ((vit = sparseDoc.find(*pos)) != sparseDoc.end()) { // Don't replace a term: the terms list is in // alphabetic order, and we may have several terms // at the same position, we want to keep only the // first one (ie: dockes and dockes@wanadoo.fr) if (vit->second.empty()) { LOGDEB2("makeAbstract: populating: [" << *term << "] at " << *pos << "\n"); sparseDoc[*pos] = *term; } } } } } // Creating the abstract from position data: final phase: extract the // snippets from the sparse array. void Query::Native::abstractCreateSnippetsVector( Rcl::Db::Native *ndb, map<unsigned int, string>& sparseDoc, unordered_set<unsigned int>& searchTermPositions, vector<int>& vpbreaks, vector<Snippet>& vabs) { vabs.clear(); string chunk; bool incjk = false; int page = 0; string term; for (const auto& ent : sparseDoc) { LOGDEB2("Abtract:output "<< ent.first <<" -> [" <<ent.second <<"]\n"); if (!occupiedmarker.compare(ent.second)) { LOGDEB("Abstract: qtrm position not filled ??\n"); continue; } if (chunk.empty() && !vpbreaks.empty()) { page = ndb->getPageNumberForPosition(vpbreaks, ent.first); if (page < 0) page = 0; term.clear(); } Utf8Iter uit(ent.second); bool newcjk = false; if (TextSplit::isCJK(*uit)) newcjk = true; if (!incjk || (incjk && !newcjk)) chunk += " "; incjk = newcjk; if (searchTermPositions.find(ent.first) != searchTermPositions.end()) term = ent.second; if (ent.second == cstr_ellipsis) { vabs.push_back(Snippet(page, chunk).setTerm(term)); chunk.clear(); } else { if (ent.second.compare(end_of_field_term) && ent.second.compare(start_of_field_term)) chunk += ent.second; } } if (!chunk.empty()) vabs.push_back(Snippet(page, chunk).setTerm(term)); } // Creating the abstract from index position data: top level routine int Query::Native::abstractFromIndex( Rcl::Db::Native *ndb, Xapian::docid docid, const vector<string>& matchTerms, const multimap<double, vector<string>> byQ, double totalweight, int ctxwords, unsigned int maxtotaloccs, vector<Snippet>& vabs, Chrono& chron ) { Xapian::Database& xrdb(ndb->xrdb); int ret = ABSRES_OK; // The terms 'array' that we partially populate with the document // terms, at their positions around the search terms positions: map<unsigned int, string> sparseDoc; // Also remember apart the search term positions so that we can list // them with their snippets. std::unordered_set<unsigned int> searchTermPositions; // Remember max position. Used to stop walking positions lists while // populating the adjacent slots. unsigned int maxpos = 0; // Total number of occurences for all terms. 
We stop when we have too much unsigned int totaloccs = 0; // First pass to populate the sparse document: we walk the term // groups, beginning with the better ones, and insert each term at // its position. We also insert empty strings at the surrounding // positions. These are markers showing where we should insert // data during the next pass. for (auto mit = byQ.rbegin(); mit != byQ.rend(); mit++) { unsigned int maxgrpoccs; double q; if (byQ.size() == 1) { maxgrpoccs = maxtotaloccs; q = 1.0; } else { // We give more slots to the better term groups q = mit->first / totalweight; maxgrpoccs = int(ceil(maxtotaloccs * q)); } unsigned int grpoccs = 0; // For each term in user term expansion group for (const auto& qterm : mit->second) { // Enough for this group ? if (grpoccs >= maxgrpoccs) break; LOGABS("makeAbstract: [" << qterm << "] " << maxgrpoccs << " max grp occs (coef " << q << ")\n"); // The match term may span several words (more than one position) int qtrmwrdcnt = TextSplit::countWords(qterm, TextSplit::TXTS_NOSPANS); // Populate positions for this query term. // There may be query terms not in this doc. This raises an // exception when requesting the position list, we catch it ?? // Not clear how this can happen because we are walking the // match list returned by Xapian. Maybe something with the // fields? try { abstractPopulateQTerm(xrdb, docid, qterm, qtrmwrdcnt, ctxwords, maxgrpoccs,maxtotaloccs, sparseDoc, searchTermPositions, maxpos, totaloccs, grpoccs, ret); } catch (...) { // Term does not occur. No problem. } if (totaloccs >= maxtotaloccs) { ret |= ABSRES_TRUNC; LOGABS("Db::makeAbstract: max1 occurrences cutoff\n"); break; } } } maxpos += ctxwords + 1; LOGABS("makeAbstract:" << chron.millis() << "mS:chosen number of positions " << totaloccs << "\n"); // This can happen if there are term occurences in the keywords // etc. but not elsewhere ? if (totaloccs == 0) { LOGDEB("makeAbstract: no occurrences\n"); return ABSRES_OK; } abstractPopulateContextTerms(xrdb, docid, maxpos, sparseDoc, ret); LOGABS("makeAbstract:" << chron.millis() << "mS: all term poslist read\n"); vector<int> vpbreaks; ndb->getPagePositions(docid, vpbreaks); LOGABS("makeAbstract:" << chron.millis() << "mS: extracting. Got " << vpbreaks.size() << " pages\n"); // Finally build the abstract by walking the map (in order of position) abstractCreateSnippetsVector(ndb, sparseDoc, searchTermPositions, vpbreaks, vabs); LOGABS("makeAbtract: done in " << chron.millis() << " mS\n"); return ret; } // Build a document abstract by extracting text chunks around the // query terms. This can either uses the index position lists, or the // stored document text, with very different implementations. // // DatabaseModified and other general exceptions are catched and // possibly retried by our caller. // // @param[out] vabs the abstract is returned as a vector of snippets. int Query::Native::makeAbstract(Xapian::docid docid, vector<Snippet>& vabs, int imaxoccs, int ictxwords, bool sortbypage) { chron.restart(); LOGDEB("makeAbstract: docid " << docid << " imaxoccs " << imaxoccs << " ictxwords " << ictxwords << " sort by page " << sortbypage << "\n"); // The (unprefixed) terms matched by this document vector<string> matchedTerms; getMatchTerms(docid, matchedTerms); if (matchedTerms.empty()) { LOGDEB("makeAbstract:" << chron.millis() << "mS:Empty term list\n"); return ABSRES_ERROR; } LOGDEB("Match terms: " << stringsToString(matchedTerms) << endl); // Retrieve the term frequencies for the query terms. 
This is // actually computed only once for a query, and for all terms in // the query (not only the matches for this doc) setDbWideQTermsFreqs(); // Build a sorted by quality container for the match terms We are // going to try and show text around the less common search terms. // Terms issued from an original one by stem expansion are // aggregated by the qualityTerms() routine (this is what we call // 'term groups' in the following: index terms expanded from the // same user term). multimap<double, vector<string>> byQ; double totalweight = qualityTerms(docid, matchedTerms, byQ); LOGABS("makeAbstract:" << chron.millis() << "mS: computed Qcoefs.\n"); // This can't happen, but would crash us if (totalweight == 0.0) { LOGERR("makeAbstract:"<<chron.millis()<<"mS: totalweight == 0.0 !\n"); return ABSRES_ERROR; } Rcl::Db::Native *ndb(m_q->m_db->m_ndb); // Total number of slots we populate. The 7 is taken as // average word size. It was a mistake to have the user max // abstract size parameter in characters, we basically only deal // with words. We used to limit the character size at the end, but // this damaged our careful selection of terms const unsigned int maxtotaloccs = imaxoccs > 0 ? imaxoccs : m_q->m_db->getAbsLen() /(7 * (m_q->m_db->getAbsCtxLen() + 1)); int ctxwords = ictxwords == -1 ? m_q->m_db->getAbsCtxLen() : ictxwords; LOGABS("makeAbstract:" << chron.millis() << "mS: mxttloccs " << maxtotaloccs << " ctxwords " << ctxwords << "\n"); if (ndb->m_storetext) { return abstractFromText(ndb, docid, matchedTerms, byQ, totalweight, ctxwords, maxtotaloccs, vabs, chron, sortbypage); } else { return abstractFromIndex(ndb, docid, matchedTerms, byQ, totalweight, ctxwords, maxtotaloccs, vabs, chron); } } } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldb.cpp�����������������������������������������������������������������������0000644�0001750�0001750�00000244764�13567765436�012733� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include <stdio.h> #include <cstring> #include <exception> #include "safeunistd.h" #include <math.h> #include <time.h> #include <string> #include <vector> #include <algorithm> #include <sstream> #include <iostream> using namespace std; #include "xapian.h" #include "rclconfig.h" #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "stemdb.h" #include "textsplit.h" #include "transcode.h" #include "unacpp.h" #include "conftree.h" #include "pathut.h" #include "rclutil.h" #include "smallut.h" #include "chrono.h" #include "searchdata.h" #include "rclquery.h" #include "rclquery_p.h" #include "rclvalues.h" #include "md5ut.h" #include "cancelcheck.h" #include "termproc.h" #include "expansiondbs.h" #include "rclinit.h" #include "internfile.h" #include "utf8fn.h" #include "wipedir.h" #ifdef RCL_USE_ASPELL #include "rclaspell.h" #endif #include "zlibut.h" #ifndef XAPIAN_AT_LEAST // Added in Xapian 1.4.2. Define it here for older versions #define XAPIAN_AT_LEAST(A,B,C) \ (XAPIAN_MAJOR_VERSION > (A) || \ (XAPIAN_MAJOR_VERSION == (A) && \ (XAPIAN_MINOR_VERSION > (B) || \ (XAPIAN_MINOR_VERSION == (B) && XAPIAN_REVISION >= (C))))) #endif // Recoll index format version is stored in user metadata. When this change, // we can't open the db and will have to reindex. static const string cstr_RCL_IDX_VERSION_KEY("RCL_IDX_VERSION_KEY"); static const string cstr_RCL_IDX_VERSION("1"); static const string cstr_RCL_IDX_DESCRIPTOR_KEY("RCL_IDX_DESCRIPTOR_KEY"); static const string cstr_mbreaks("rclmbreaks"); namespace Rcl { // Some prefixes that we could get from the fields file, but are not going // to ever change. static const string fileext_prefix = "XE"; const string mimetype_prefix = "T"; static const string xapday_prefix = "D"; static const string xapmonth_prefix = "M"; static const string xapyear_prefix = "Y"; const string pathelt_prefix = "XP"; static const string udi_prefix("Q"); static const string parent_prefix("F"); // Special terms to mark begin/end of field (for anchored searches), and // page breaks string start_of_field_term; string end_of_field_term; const string page_break_term = "XXPG/"; // Special term to mark documents with children. const string has_children_term("XXC/"); // Field name for the unsplit file name. Has to exist in the field file // because of usage in termmatch() const string unsplitFilenameFieldName = "rclUnsplitFN"; static const string unsplitfilename_prefix = "XSFS"; // Empty string md5s static const string cstr_md5empty("d41d8cd98f00b204e9800998ecf8427e"); static const int MB = 1024 * 1024; string version_string(){ return string("Recoll ") + string(PACKAGE_VERSION) + string(" + Xapian ") + string(Xapian::version_string()); } // Synthetic abstract marker (to discriminate from abstract actually // found in document) static const string cstr_syntAbs("?!#@"); // Compute the unique term used to link documents to their origin. // "Q" + external udi static inline string make_uniterm(const string& udi) { string uniterm(wrap_prefix(udi_prefix)); uniterm.append(udi); return uniterm; } // Compute parent term used to link documents to their parent document (if any) // "F" + parent external udi static inline string make_parentterm(const string& udi) { // I prefer to be in possible conflict with omega than with // user-defined fields (Xxxx) that we also allow. 
"F" is currently // not used by omega (2008-07) string pterm(wrap_prefix(parent_prefix)); pterm.append(udi); return pterm; } Db::Native::Native(Db *db) : m_rcldb(db), m_isopen(false), m_iswritable(false), m_noversionwrite(false) #ifdef IDX_THREADS , m_wqueue("DbUpd", m_rcldb->m_config->getThrConf(RclConfig::ThrDbWrite).first), m_totalworkns(0LL), m_havewriteq(false) #endif // IDX_THREADS { LOGDEB1("Native::Native: me " << this << "\n"); } Db::Native::~Native() { LOGDEB1("Native::~Native: me " << this << "\n"); #ifdef IDX_THREADS if (m_havewriteq) { void *status = m_wqueue.setTerminateAndWait(); if (status) { LOGDEB1("Native::~Native: worker status " << status << "\n"); } } #endif // IDX_THREADS } #ifdef IDX_THREADS void *DbUpdWorker(void* vdbp) { recoll_threadinit(); Db::Native *ndbp = (Db::Native *)vdbp; WorkQueue<DbUpdTask*> *tqp = &(ndbp->m_wqueue); DbUpdTask *tsk = 0; for (;;) { size_t qsz = -1; if (!tqp->take(&tsk, &qsz)) { tqp->workerExit(); return (void*)1; } bool status = false; switch (tsk->op) { case DbUpdTask::AddOrUpdate: LOGDEB("DbUpdWorker: got add/update task, ql " << qsz << "\n"); status = ndbp->addOrUpdateWrite( tsk->udi, tsk->uniterm, tsk->doc, tsk->txtlen, tsk->rawztext); break; case DbUpdTask::Delete: LOGDEB("DbUpdWorker: got delete task, ql " << qsz << "\n"); status = ndbp->purgeFileWrite(false, tsk->udi, tsk->uniterm); break; case DbUpdTask::PurgeOrphans: LOGDEB("DbUpdWorker: got orphans purge task, ql " << qsz << "\n"); status = ndbp->purgeFileWrite(true, tsk->udi, tsk->uniterm); break; default: LOGERR("DbUpdWorker: unknown op " << tsk->op << " !!\n"); break; } if (!status) { LOGERR("DbUpdWorker: xxWrite failed\n"); tqp->workerExit(); delete tsk; return (void*)0; } delete tsk; } } void Db::Native::maybeStartThreads() { m_havewriteq = false; const RclConfig *cnf = m_rcldb->m_config; int writeqlen = cnf->getThrConf(RclConfig::ThrDbWrite).first; int writethreads = cnf->getThrConf(RclConfig::ThrDbWrite).second; if (writethreads > 1) { LOGINFO("RclDb: write threads count was forced down to 1\n"); writethreads = 1; } if (writeqlen >= 0 && writethreads > 0) { if (!m_wqueue.start(writethreads, DbUpdWorker, this)) { LOGERR("Db::Db: Worker start failed\n"); return; } m_havewriteq = true; } LOGDEB("RclDb:: threads: haveWriteQ " << m_havewriteq << ", wqlen " << writeqlen << " wqts " << writethreads << "\n"); } #endif // IDX_THREADS void Db::Native::openWrite(const string& dir, Db::OpenMode mode) { int action = (mode == Db::DbUpd) ? Xapian::DB_CREATE_OR_OPEN : Xapian::DB_CREATE_OR_OVERWRITE; #ifdef _WIN32 // On Windows, Xapian is quite bad at erasing partial db which can // occur because of open file deletion errors. if (mode == DbTrunc) { if (path_exists(path_cat(dir, "iamchert"))) { wipedir(dir); unlink(dir.c_str()); } } #endif if (path_exists(dir)) { // Existing index. xwdb = Xapian::WritableDatabase(dir, action); if (action == Xapian::DB_CREATE_OR_OVERWRITE || xwdb.get_doccount() == 0) { // New or empty index. Set the "store text" option // according to configuration. The metadata record will be // written further down. m_storetext = o_index_storedoctext; LOGDEB("Db:: index " << (m_storetext?"stores":"does not store") << " document text\n"); } else { // Existing non empty. Get the option from the index. storesDocText(xwdb); } } else { // New index. If possible, and depending on config, use a stub // to force using Chert. No sense in doing this if we are // storing the text anyway. #if XAPIAN_AT_LEAST(1,3,0) && XAPIAN_HAS_CHERT_BACKEND // Xapian with Glass and Chert support. 
If storedoctext is // specified in the configuration, use the default backend // (Glass), else force Chert. There might be reasons why // someone would want to use Chert and store text anyway, but // it's an exotic case, and things are complicated enough // already. if (o_index_storedoctext) { xwdb = Xapian::WritableDatabase(dir, action); m_storetext = true; } else { // Force Chert format, don't store the text. string stub = path_cat(m_rcldb->m_config->getConfDir(), "xapian.stub"); FILE *fp = fopen(stub.c_str(), "w"); if (nullptr == fp) { throw(string("Can't create ") + stub); } fprintf(fp, "chert %s\n", dir.c_str()); fclose(fp); xwdb = Xapian::WritableDatabase(stub, action); m_storetext = false; } LOGINF("Rcl::Db::openWrite: new index will " << (m_storetext?"":"not ") << "store document text\n"); #else // Old Xapian (chert only) or much newer (no chert). Use the // default index backend and let the user decide of the // abstract generation method. The configured default is to // store the text. xwdb = Xapian::WritableDatabase(dir, action); m_storetext = o_index_storedoctext; #endif } // If the index is empty, write the data format version, // and the storetext option value inside the index descriptor (new // with recoll 1.24, maybe we'll have other stuff to store in // there in the future). if (xwdb.get_doccount() == 0) { string desc = string("storetext=") + (m_storetext ? "1" : "0") + "\n"; xwdb.set_metadata(cstr_RCL_IDX_DESCRIPTOR_KEY, desc); xwdb.set_metadata(cstr_RCL_IDX_VERSION_KEY, cstr_RCL_IDX_VERSION); } m_iswritable = true; #ifdef IDX_THREADS maybeStartThreads(); #endif } void Db::Native::storesDocText(Xapian::Database& db) { string desc = db.get_metadata(cstr_RCL_IDX_DESCRIPTOR_KEY); ConfSimple cf(desc, 1); string val; m_storetext = false; if (cf.get("storetext", val) && stringToBool(val)) { m_storetext = true; } LOGDEB("Db:: index " << (m_storetext?"stores":"does not store") << " document text\n"); } void Db::Native::openRead(const string& dir) { m_iswritable = false; xrdb = Xapian::Database(dir); storesDocText(xrdb); } /* See comment in class declaration: return all subdocuments of a * document given by its unique id. */ bool Db::Native::subDocs(const string &udi, int idxi, vector<Xapian::docid>& docids) { LOGDEB2("subDocs: [" << uniterm << "]\n"); string pterm = make_parentterm(udi); vector<Xapian::docid> candidates; XAPTRY(docids.clear(); candidates.insert(candidates.begin(), xrdb.postlist_begin(pterm), xrdb.postlist_end(pterm)), xrdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGERR("Rcl::Db::subDocs: " << m_rcldb->m_reason << "\n"); return false; } else { for (unsigned int i = 0; i < candidates.size(); i++) { if (whatDbIdx(candidates[i]) == (size_t)idxi) { docids.push_back(candidates[i]); } } LOGDEB0("Db::Native::subDocs: returning " << docids.size() << " ids\n"); return true; } } bool Db::Native::xdocToUdi(Xapian::Document& xdoc, string &udi) { Xapian::TermIterator xit; XAPTRY(xit = xdoc.termlist_begin(); xit.skip_to(wrap_prefix(udi_prefix)), xrdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGERR("xdocToUdi: xapian error: " << m_rcldb->m_reason << "\n"); return false; } if (xit != xdoc.termlist_end()) { udi = *xit; if (!udi.empty()) { udi = udi.substr(wrap_prefix(udi_prefix).size()); return true; } } return false; } // Clear term from document if its frequency is 0. 
This should // probably be done by Xapian when the freq goes to 0 when removing a // posting, but we have to do it ourselves bool Db::Native::clearDocTermIfWdf0(Xapian::Document& xdoc, const string& term) { LOGDEB1("Db::clearDocTermIfWdf0: [" << term << "]\n"); // Find the term Xapian::TermIterator xit; XAPTRY(xit = xdoc.termlist_begin(); xit.skip_to(term);, xrdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGERR("Db::clearDocTerm...: [" << term << "] skip failed: " << m_rcldb->m_reason << "\n"); return false; } if (xit == xdoc.termlist_end() || term.compare(*xit)) { LOGDEB0("Db::clearDocTermIFWdf0: term [" << term << "] not found. xit: [" << (xit == xdoc.termlist_end() ? "EOL": *xit) << "]\n"); return false; } // Clear the term if its frequency is 0 if (xit.get_wdf() == 0) { LOGDEB1("Db::clearDocTermIfWdf0: clearing [" << term << "]\n"); XAPTRY(xdoc.remove_term(term), xwdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGDEB0("Db::clearDocTermIfWdf0: failed [" << term << "]: " << m_rcldb->m_reason << "\n"); } } return true; } // Holder for term + pos struct DocPosting { DocPosting(string t, Xapian::termpos ps) : term(t), pos(ps) {} string term; Xapian::termpos pos; }; // Clear all terms for given field for given document. // The terms to be cleared are all those with the appropriate // prefix. We also remove the postings for the unprefixed terms (that // is, we undo what we did when indexing). bool Db::Native::clearField(Xapian::Document& xdoc, const string& pfx, Xapian::termcount wdfdec) { LOGDEB1("Db::clearField: clearing prefix [" << pfx << "] for docid " << xdoc.get_docid() << "\n"); vector<DocPosting> eraselist; string wrapd = wrap_prefix(pfx); m_rcldb->m_reason.clear(); for (int tries = 0; tries < 2; tries++) { try { Xapian::TermIterator xit; xit = xdoc.termlist_begin(); xit.skip_to(wrapd); while (xit != xdoc.termlist_end() && !(*xit).compare(0, wrapd.size(), wrapd)) { LOGDEB1("Db::clearfield: erasing for [" << *xit << "]\n"); Xapian::PositionIterator posit; for (posit = xit.positionlist_begin(); posit != xit.positionlist_end(); posit++) { eraselist.push_back(DocPosting(*xit, *posit)); eraselist.push_back(DocPosting(strip_prefix(*xit), *posit)); } xit++; } } catch (const Xapian::DatabaseModifiedError &e) { m_rcldb->m_reason = e.get_msg(); xrdb.reopen(); continue; } XCATCHERROR(m_rcldb->m_reason); break; } if (!m_rcldb->m_reason.empty()) { LOGERR("Db::clearField: failed building erase list: " << m_rcldb->m_reason << "\n"); return false; } // Now remove the found positions, and the terms if the wdf is 0 for (vector<DocPosting>::const_iterator it = eraselist.begin(); it != eraselist.end(); it++) { LOGDEB1("Db::clearField: remove posting: [" << it->term << "] pos [" << it->pos << "]\n"); XAPTRY(xdoc.remove_posting(it->term, it->pos, wdfdec);, xwdb,m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { // Not that this normally fails for non-prefixed XXST and // ND, don't make a fuss LOGDEB1("Db::clearFiedl: remove_posting failed for [" << it->term << "]," << it->pos << ": " << m_rcldb->m_reason << "\n"); } clearDocTermIfWdf0(xdoc, it->term); } return true; } // Check if doc given by udi is indexed by term bool Db::Native::hasTerm(const string& udi, int idxi, const string& term) { LOGDEB2("Native::hasTerm: udi [" << udi << "] term [" << term << "]\n"); Xapian::Document xdoc; if (getDoc(udi, idxi, xdoc)) { Xapian::TermIterator xit; XAPTRY(xit = xdoc.termlist_begin(); xit.skip_to(term);, xrdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { 
LOGERR("Rcl::Native::hasTerm: " << m_rcldb->m_reason << "\n"); return false; } if (xit != xdoc.termlist_end() && !term.compare(*xit)) { return true; } } return false; } // Retrieve Xapian document, given udi. There may be several identical udis // if we are using multiple indexes. Xapian::docid Db::Native::getDoc(const string& udi, int idxi, Xapian::Document& xdoc) { string uniterm = make_uniterm(udi); for (int tries = 0; tries < 2; tries++) { try { Xapian::PostingIterator docid; for (docid = xrdb.postlist_begin(uniterm); docid != xrdb.postlist_end(uniterm); docid++) { xdoc = xrdb.get_document(*docid); if (whatDbIdx(*docid) == (size_t)idxi) return *docid; } // Udi not in Db. return 0; } catch (const Xapian::DatabaseModifiedError &e) { m_rcldb->m_reason = e.get_msg(); xrdb.reopen(); continue; } XCATCHERROR(m_rcldb->m_reason); break; } LOGERR("Db::Native::getDoc: Xapian error: " << m_rcldb->m_reason << "\n"); return 0; } // Turn data record from db into document fields bool Db::Native::dbDataToRclDoc(Xapian::docid docid, std::string &data, Doc &doc, bool fetchtext) { LOGDEB2("Db::dbDataToRclDoc: data:\n" << data << "\n"); ConfSimple parms(data); if (!parms.ok()) return false; doc.xdocid = docid; doc.haspages = hasPages(docid); // Compute what index this comes from, and check for path translations string dbdir = m_rcldb->m_basedir; doc.idxi = 0; if (!m_rcldb->m_extraDbs.empty()) { int idxi = int(whatDbIdx(docid)); // idxi is in [0, extraDbs.size()]. 0 is for the main index, // idxi-1 indexes into the additional dbs array. if (idxi) { dbdir = m_rcldb->m_extraDbs[idxi - 1]; doc.idxi = idxi; } } parms.get(Doc::keyurl, doc.idxurl); doc.url = doc.idxurl; m_rcldb->m_config->urlrewrite(dbdir, doc.url); if (!doc.url.compare(doc.idxurl)) doc.idxurl.clear(); // Special cases: parms.get(Doc::keytp, doc.mimetype); parms.get(Doc::keyfmt, doc.fmtime); parms.get(Doc::keydmt, doc.dmtime); parms.get(Doc::keyoc, doc.origcharset); parms.get(cstr_caption, doc.meta[Doc::keytt]); parms.get(Doc::keyabs, doc.meta[Doc::keyabs]); // Possibly remove synthetic abstract indicator (if it's there, we // used to index the beginning of the text as abstract). doc.syntabs = false; if (doc.meta[Doc::keyabs].find(cstr_syntAbs) == 0) { doc.meta[Doc::keyabs] = doc.meta[Doc::keyabs].substr(cstr_syntAbs.length()); doc.syntabs = true; } parms.get(Doc::keyipt, doc.ipath); parms.get(Doc::keypcs, doc.pcbytes); parms.get(Doc::keyfs, doc.fbytes); parms.get(Doc::keyds, doc.dbytes); parms.get(Doc::keysig, doc.sig); // Normal key/value pairs: vector<string> keys = parms.getNames(string()); for (vector<string>::const_iterator it = keys.begin(); it != keys.end(); it++) { if (doc.meta.find(*it) == doc.meta.end()) parms.get(*it, doc.meta[*it]); } doc.meta[Doc::keyurl] = doc.url; doc.meta[Doc::keymt] = doc.dmtime.empty() ? 
doc.fmtime : doc.dmtime; if (fetchtext) { getRawText(docid, doc.text); } return true; } bool Db::Native::hasPages(Xapian::docid docid) { string ermsg; Xapian::PositionIterator pos; XAPTRY(pos = xrdb.positionlist_begin(docid, page_break_term); if (pos != xrdb.positionlist_end(docid, page_break_term)) { return true; }, xrdb, ermsg); if (!ermsg.empty()) { LOGERR("Db::Native::hasPages: xapian error: " << ermsg << "\n"); } return false; } // Return the positions list for the page break term bool Db::Native::getPagePositions(Xapian::docid docid, vector<int>& vpos) { vpos.clear(); // Need to retrieve the document record to check for multiple page breaks // that we store there for lack of better place map<int, int> mbreaksmap; try { Xapian::Document xdoc = xrdb.get_document(docid); string data = xdoc.get_data(); Doc doc; string mbreaks; if (dbDataToRclDoc(docid, data, doc) && doc.getmeta(cstr_mbreaks, &mbreaks)) { vector<string> values; stringToTokens(mbreaks, values, ","); for (unsigned int i = 0; i < values.size() - 1; i += 2) { int pos = atoi(values[i].c_str()) + baseTextPosition; int incr = atoi(values[i+1].c_str()); mbreaksmap[pos] = incr; } } } catch (...) { } string qterm = page_break_term; Xapian::PositionIterator pos; try { for (pos = xrdb.positionlist_begin(docid, qterm); pos != xrdb.positionlist_end(docid, qterm); pos++) { int ipos = *pos; if (ipos < int(baseTextPosition)) { LOGDEB("getPagePositions: got page position " << ipos << " not in body\n"); // Not in text body. Strange... continue; } map<int, int>::iterator it = mbreaksmap.find(ipos); if (it != mbreaksmap.end()) { LOGDEB1("getPagePositions: found multibreak at " << ipos << " incr " << it->second << "\n"); for (int i = 0 ; i < it->second; i++) vpos.push_back(ipos); } vpos.push_back(ipos); } } catch (...) { // Term does not occur. No problem. } return true; } int Db::Native::getPageNumberForPosition(const vector<int>& pbreaks, int pos) { if (pos < int(baseTextPosition)) // Not in text body return -1; vector<int>::const_iterator it = upper_bound(pbreaks.begin(), pbreaks.end(), pos); return int(it - pbreaks.begin() + 1); } bool Db::Native::getRawText(Xapian::docid docid_combined, string& rawtext) { if (!m_storetext) { LOGDEB("Db::Native::getRawText: document text not stored in index\n"); return false; } // Xapian get_metadata only works on a single index (else of // course, unicity of keys can't be ensured). When using multiple // indexes, we need to open the right one. size_t dbidx = whatDbIdx(docid_combined); Xapian::docid docid = whatDbDocid(docid_combined); string reason; if (dbidx != 0) { Xapian::Database db(m_rcldb->m_extraDbs[dbidx-1]); XAPTRY(rawtext = db.get_metadata(rawtextMetaKey(docid)), db, reason); } else { XAPTRY(rawtext = xrdb.get_metadata(rawtextMetaKey(docid)), xrdb, reason); } if (!reason.empty()) { LOGERR("Rcl::Db::getRawText: could not get value: " << reason << endl); return false; } if (rawtext.empty()) { return true; } ZLibUtBuf cbuf; inflateToBuf(rawtext.c_str(), rawtext.size(), cbuf); rawtext.assign(cbuf.getBuf(), cbuf.getCnt()); return true; } // Note: we're passed a Xapian::Document* because Xapian // reference-counting is not mt-safe. We take ownership and need // to delete it before returning. 
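// Hedged illustration of the hand-off described above; the names xdp,
// textlen and ztext are invented for the example and are not the actual
// call-site variables:
#if 0
    Xapian::Document *xdp = new Xapian::Document;
    // ... fill in postings, values and the data record ...
    // From here on addOrUpdateWrite() (or the DbUpdWorker thread when the
    // write queue is enabled) owns xdp and deletes it on every return path,
    // through the std::unique_ptr taken just below:
    addOrUpdateWrite(udi, make_uniterm(udi), xdp, textlen, ztext);
#endif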
bool Db::Native::addOrUpdateWrite( const string& udi, const string& uniterm, Xapian::Document *newdocument_ptr, size_t textlen, const string& rawztext) { #ifdef IDX_THREADS Chrono chron; std::unique_lock<std::mutex> lock(m_mutex); #endif std::unique_ptr<Xapian::Document> doc_cleaner(newdocument_ptr); // Check file system full every mbyte of indexed text. It's a bit wasteful // to do this after having prepared the document, but it needs to be in // the single-threaded section. if (m_rcldb->m_maxFsOccupPc > 0 && (m_rcldb->m_occFirstCheck || (m_rcldb->m_curtxtsz - m_rcldb->m_occtxtsz) / MB >= 1)) { LOGDEB("Db::add: checking file system usage\n"); int pc; m_rcldb->m_occFirstCheck = 0; if (fsocc(m_rcldb->m_basedir, &pc) && pc >= m_rcldb->m_maxFsOccupPc) { LOGERR("Db::add: stop indexing: file system " << pc << " %" << " full > max " << m_rcldb->m_maxFsOccupPc << " %" << "\n"); return false; } m_rcldb->m_occtxtsz = m_rcldb->m_curtxtsz; } const char *fnc = udi.c_str(); string ermsg; // Add db entry or update existing entry: Xapian::docid did = 0; try { did = xwdb.replace_document(uniterm, *newdocument_ptr); if (did < m_rcldb->updated.size()) { // This is necessary because only the file-level docs are tested // by needUpdate(), so the subdocs existence flags are only set // here. m_rcldb->updated[did] = true; LOGINFO("Db::add: docid " << did << " updated [" << fnc << "]\n"); } else { LOGINFO("Db::add: docid " << did << " added [" << fnc << "]\n"); } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::add: replace_document failed: " << ermsg << "\n"); ermsg.erase(); // FIXME: is this ever actually needed? try { xwdb.add_document(*newdocument_ptr); LOGDEB("Db::add: " << fnc << " added (failed re-seek for duplicate)\n"); } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::add: add_document failed: " << ermsg << "\n"); return false; } } XAPTRY(xwdb.set_metadata(rawtextMetaKey(did), rawztext), xwdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGERR("Db::addOrUpdate: set_metadata error: " << m_rcldb->m_reason << "\n"); // This only affects snippets, so let's say not fatal } // Test if we're over the flush threshold (limit memory usage): bool ret = m_rcldb->maybeflush(textlen); #ifdef IDX_THREADS m_totalworkns += chron.nanos(); #endif return ret; } bool Db::Native::purgeFileWrite(bool orphansOnly, const string& udi, const string& uniterm) { #if defined(IDX_THREADS) // We need a mutex even if we have a write queue (so we can only // be called by a single thread) to protect about multiple acces // to xrdb from subDocs() which is also called from needupdate() // (called from outside the write thread ! 
std::unique_lock<std::mutex> lock(m_mutex); #endif // IDX_THREADS string ermsg; try { Xapian::PostingIterator docid = xwdb.postlist_begin(uniterm); if (docid == xwdb.postlist_end(uniterm)) { return true; } if (m_rcldb->m_flushMb > 0) { Xapian::termcount trms = xwdb.get_doclength(*docid); m_rcldb->maybeflush(trms * 5); } string sig; if (orphansOnly) { Xapian::Document doc = xwdb.get_document(*docid); sig = doc.get_value(VALUE_SIG); if (sig.empty()) { LOGINFO("purgeFileWrite: got empty sig\n"); return false; } } else { LOGDEB("purgeFile: delete docid " << *docid << "\n"); deleteDocument(*docid); } vector<Xapian::docid> docids; subDocs(udi, 0, docids); LOGDEB("purgeFile: subdocs cnt " << docids.size() << "\n"); for (vector<Xapian::docid>::iterator it = docids.begin(); it != docids.end(); it++) { if (m_rcldb->m_flushMb > 0) { Xapian::termcount trms = xwdb.get_doclength(*it); m_rcldb->maybeflush(trms * 5); } string subdocsig; if (orphansOnly) { Xapian::Document doc = xwdb.get_document(*it); subdocsig = doc.get_value(VALUE_SIG); if (subdocsig.empty()) { LOGINFO("purgeFileWrite: got empty sig for subdoc??\n"); continue; } } if (!orphansOnly || sig != subdocsig) { LOGDEB("Db::purgeFile: delete subdoc " << *it << "\n"); deleteDocument(*it); } } return true; } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::purgeFileWrite: " << ermsg << "\n"); } return false; } /* Rcl::Db methods ///////////////////////////////// */ bool Db::o_inPlaceReset; Db::Db(const RclConfig *cfp) : m_ndb(0), m_mode(Db::DbRO), m_curtxtsz(0), m_flushtxtsz(0), m_occtxtsz(0), m_occFirstCheck(1), m_idxMetaStoredLen(150), m_idxAbsTruncLen(250), m_synthAbsLen(250), m_synthAbsWordCtxLen(4), m_flushMb(-1), m_maxFsOccupPc(0) { m_config = new RclConfig(*cfp); if (start_of_field_term.empty()) { if (o_index_stripchars) { start_of_field_term = "XXST"; end_of_field_term = "XXND"; } else { start_of_field_term = "XXST/"; end_of_field_term = "XXND/"; } } m_ndb = new Native(this); if (m_config) { m_config->getConfParam("maxfsoccuppc", &m_maxFsOccupPc); m_config->getConfParam("idxflushmb", &m_flushMb); m_config->getConfParam("idxmetastoredlen", &m_idxMetaStoredLen); m_config->getConfParam("idxtexttruncatelen", &m_idxTextTruncateLen); } } Db::~Db() { LOGDEB2("Db::~Db\n"); if (m_ndb == 0) return; LOGDEB("Db::~Db: isopen " << m_ndb->m_isopen << " m_iswritable " << m_ndb->m_iswritable << "\n"); i_close(true); #ifdef RCL_USE_ASPELL delete m_aspell; #endif delete m_config; } vector<string> Db::getStemmerNames() { vector<string> res; stringToStrings(Xapian::Stem::get_available_languages(), res); return res; } bool Db::open(OpenMode mode, OpenError *error) { if (error) *error = DbOpenMainDb; if (m_ndb == 0 || m_config == 0) { m_reason = "Null configuration or Xapian Db"; return false; } LOGDEB("Db::open: m_isopen " << m_ndb->m_isopen << " m_iswritable " << m_ndb->m_iswritable << " mode " << mode << "\n"); if (m_ndb->m_isopen) { // We used to return an error here but I see no reason to if (!close()) return false; } if (!m_config->getStopfile().empty()) m_stops.setFile(m_config->getStopfile()); string dir = m_config->getDbDir(); string ermsg; try { switch (mode) { case DbUpd: case DbTrunc: m_ndb->openWrite(dir, mode); updated = vector<bool>(m_ndb->xwdb.get_lastdocid() + 1, false); // We used to open a readonly object in addition to the // r/w one because some operations were faster when // performed through a Database: no forced flushes on // allterms_begin(), used in subDocs(). 
This issue has // been gone for a long time (now: Xapian 1.2) and the // separate objects seem to trigger other Xapian issues, // so the query db is now a clone of the update one. m_ndb->xrdb = m_ndb->xwdb; LOGDEB("Db::open: lastdocid: " <<m_ndb->xwdb.get_lastdocid()<<"\n"); break; case DbRO: default: m_ndb->openRead(dir); for (auto& db : m_extraDbs) { if (error) *error = DbOpenExtraDb; LOGDEB("Db::Open: adding query db [" << &db << "]\n"); // An error here used to be non-fatal (1.13 and older) // but I can't see why m_ndb->xrdb.add_database(Xapian::Database(db)); } break; } if (error) *error = DbOpenMainDb; // Check index format version. Must not try to check a just created or // truncated db if (mode != DbTrunc && m_ndb->xrdb.get_doccount() > 0) { string version = m_ndb->xrdb.get_metadata(cstr_RCL_IDX_VERSION_KEY); if (version.compare(cstr_RCL_IDX_VERSION)) { m_ndb->m_noversionwrite = true; LOGERR("Rcl::Db::open: file index [" << version << "], software [" << cstr_RCL_IDX_VERSION << "]\n"); throw Xapian::DatabaseError("Recoll index version mismatch", "", ""); } } m_mode = mode; m_ndb->m_isopen = true; m_basedir = dir; if (error) *error = DbOpenNoError; return true; } XCATCHERROR(ermsg); m_reason = ermsg; LOGERR("Db::open: exception while opening [" <<dir<< "]: " << ermsg << "\n"); return false; } bool Db::storesDocText() { if (!m_ndb || !m_ndb->m_isopen) { LOGERR("Db::storesDocText: called on non-opened db\n"); return false; } return m_ndb->m_storetext; } bool Db::getDocRawText(Doc& doc) { if (!m_ndb || !m_ndb->m_isopen) { LOGERR("Db::getDocRawText: called on non-opened db\n"); return false; } return m_ndb->getRawText(doc.xdocid, doc.text); } // Note: xapian has no close call, we delete and recreate the db bool Db::close() { LOGDEB1("Db::close()\n"); return i_close(false); } bool Db::i_close(bool final) { if (m_ndb == 0) return false; LOGDEB("Db::i_close(" << final << "): m_isopen " << m_ndb->m_isopen << " m_iswritable " << m_ndb->m_iswritable << "\n"); if (m_ndb->m_isopen == false && !final) return true; string ermsg; try { bool w = m_ndb->m_iswritable; if (w) { #ifdef IDX_THREADS waitUpdIdle(); #endif if (!m_ndb->m_noversionwrite) m_ndb->xwdb.set_metadata(cstr_RCL_IDX_VERSION_KEY, cstr_RCL_IDX_VERSION); LOGDEB("Rcl::Db:close: xapian will close. 
May take some time\n"); } deleteZ(m_ndb); if (w) LOGDEB("Rcl::Db:close() xapian close done.\n"); if (final) { return true; } m_ndb = new Native(this); if (m_ndb) { return true; } LOGERR("Rcl::Db::close(): cant recreate db object\n"); return false; } XCATCHERROR(ermsg); LOGERR("Db:close: exception while deleting db: " << ermsg << "\n"); return false; } // Reopen the db with a changed list of additional dbs bool Db::adjustdbs() { if (m_mode != DbRO) { LOGERR("Db::adjustdbs: mode not RO\n"); return false; } if (m_ndb && m_ndb->m_isopen) { if (!close()) return false; if (!open(m_mode)) { return false; } } return true; } int Db::docCnt() { int res = -1; if (!m_ndb || !m_ndb->m_isopen) return -1; XAPTRY(res = m_ndb->xrdb.get_doccount(), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::docCnt: got error: " << m_reason << "\n"); return -1; } return res; } int Db::termDocCnt(const string& _term) { int res = -1; if (!m_ndb || !m_ndb->m_isopen) return -1; string term = _term; if (o_index_stripchars) if (!unacmaybefold(_term, term, "UTF-8", UNACOP_UNACFOLD)) { LOGINFO("Db::termDocCnt: unac failed for [" << _term << "]\n"); return 0; } if (m_stops.isStop(term)) { LOGDEB1("Db::termDocCnt [" << term << "] in stop list\n"); return 0; } XAPTRY(res = m_ndb->xrdb.get_termfreq(term), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::termDocCnt: got error: " << m_reason << "\n"); return -1; } return res; } bool Db::addQueryDb(const string &_dir) { string dir = _dir; LOGDEB0("Db::addQueryDb: ndb " << m_ndb << " iswritable " << ((m_ndb)?m_ndb->m_iswritable:0) << " db [" << dir << "]\n"); if (!m_ndb) return false; if (m_ndb->m_iswritable) return false; dir = path_canon(dir); if (find(m_extraDbs.begin(), m_extraDbs.end(), dir) == m_extraDbs.end()) { m_extraDbs.push_back(dir); } return adjustdbs(); } bool Db::rmQueryDb(const string &dir) { if (!m_ndb) return false; if (m_ndb->m_iswritable) return false; if (dir.empty()) { m_extraDbs.clear(); } else { vector<string>::iterator it = find(m_extraDbs.begin(), m_extraDbs.end(), dir); if (it != m_extraDbs.end()) { m_extraDbs.erase(it); } } return adjustdbs(); } // Determining what index a doc result comes from is based on the // modulo of the docid against the db count. 
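// Worked example (illustrative numbers only): with two extra query
// databases the combined docids cycle through the indexes, so
//   combined docid 7 -> whatDbIdx() == 0 (main),    whatDbDocid() == 3
//   combined docid 8 -> whatDbIdx() == 1 (extra 1), whatDbDocid() == 3
//   combined docid 9 -> whatDbIdx() == 2 (extra 2), whatDbDocid() == 3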
Ref: // http://trac.xapian.org/wiki/FAQ/MultiDatabaseDocumentID bool Db::fromMainIndex(const Doc& doc) { return m_ndb->whatDbIdx(doc.xdocid) == 0; } std::string Db::whatIndexForResultDoc(const Doc& doc) { size_t idx = m_ndb->whatDbIdx(doc.xdocid); if (idx == (size_t)-1) { LOGERR("whatIndexForResultDoc: whatDbIdx returned -1 for " << doc.xdocid << endl); return string(); } // idx is [0..m_extraDbs.size()] 0 is for the main index, else // idx-1 indexes into m_extraDbs if (idx == 0) { return m_basedir; } else { return m_extraDbs[idx-1]; } } size_t Db::Native::whatDbIdx(Xapian::docid id) { LOGDEB1("Db::whatDbIdx: xdocid " << id << ", " << m_rcldb->m_extraDbs.size() << " extraDbs\n"); if (id == 0) return (size_t)-1; if (m_rcldb->m_extraDbs.size() == 0) return 0; return (id - 1) % (m_rcldb->m_extraDbs.size() + 1); } // Return the docid inside the non-combined index Xapian::docid Db::Native::whatDbDocid(Xapian::docid docid_combined) { if (m_rcldb->m_extraDbs.size() == 0) return docid_combined; return (docid_combined - 1) / (m_rcldb->m_extraDbs.size() + 1) + 1; } bool Db::testDbDir(const string &dir, bool *stripped_p) { string aerr; bool mstripped = true; LOGDEB("Db::testDbDir: [" << dir << "]\n"); try { Xapian::Database db(dir); // If the prefix for mimetype is wrapped, it's an unstripped // index. T has been in use in recoll since the beginning and // all documents have a T field (possibly empty). Xapian::TermIterator term = db.allterms_begin(":T:"); if (term == db.allterms_end()) { mstripped = true; } else { mstripped = false; } LOGDEB("testDbDir: " << dir << " is a " << (mstripped ? "stripped" : "raw") << " index\n"); } XCATCHERROR(aerr); if (!aerr.empty()) { LOGERR("Db::Open: error while trying to open database from [" << dir << "]: " << aerr << "\n"); return false; } if (stripped_p) *stripped_p = mstripped; return true; } bool Db::isopen() { if (m_ndb == 0) return false; return m_ndb->m_isopen; } // Try to translate field specification into field prefix. bool Db::fieldToTraits(const string& fld, const FieldTraits **ftpp, bool isquery) { if (m_config && m_config->getFieldTraits(fld, ftpp, isquery)) return true; *ftpp = 0; return false; } // The splitter breaks text into words and adds postings to the Xapian // document. We use a single object to split all of the document // fields and position jumps to separate fields class TextSplitDb : public TextSplitP { public: Xapian::Document &doc; // Xapian document // Base for document section. Gets large increment when we change // sections, to avoid cross-section proximity matches. Xapian::termpos basepos; // Current relative position. This is the remembered value from // the splitter callback. The term position is reset for each call // to text_to_words(), so that the last value of curpos is the // section size (last relative term position), and this is what // gets added to basepos in addition to the inter-section increment // to compute the first position of the next section. Xapian::termpos curpos; Xapian::WritableDatabase& wdb; TextSplitDb(Xapian::WritableDatabase& _wdb, Xapian::Document &d, TermProc *prc) : TextSplitP(prc), doc(d), basepos(1), curpos(0), wdb(_wdb) {} // Reimplement text_to_words to insert the begin and end anchor terms. virtual bool text_to_words(const string &in) { string ermsg; try { // Index the possibly prefixed start term. 
doc.add_posting(ft.pfx + start_of_field_term, basepos, ft.wdfinc); ++basepos; } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db: xapian add_posting error " << ermsg << "\n"); goto out; } if (!TextSplitP::text_to_words(in)) { LOGDEB("TextSplitDb: TextSplit::text_to_words failed\n"); goto out; } try { // Index the possibly prefixed end term. doc.add_posting(ft.pfx + end_of_field_term, basepos + curpos + 1, ft.wdfinc); ++basepos; } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db: xapian add_posting error " << ermsg << "\n"); goto out; } out: basepos += curpos + 100; return true; } void setTraits(const FieldTraits& ftp) { ft = ftp; if (!ft.pfx.empty()) ft.pfx = wrap_prefix(ft.pfx); } friend class TermProcIdx; private: FieldTraits ft; }; class TermProcIdx : public TermProc { public: TermProcIdx() : TermProc(0), m_ts(0), m_lastpagepos(0), m_pageincr(0) {} void setTSD(TextSplitDb *ts) {m_ts = ts;} bool takeword(const std::string &term, int pos, int, int) { // Compute absolute position (pos is relative to current segment), // and remember relative. m_ts->curpos = pos; pos += m_ts->basepos; // Don't try to add empty term Xapian doesnt like it... Safety check // this should not happen. if (term.empty()) return true; string ermsg; try { // Index without prefix, using the field-specific weighting LOGDEB1("Emitting term at " << pos << " : [" << term << "]\n"); if (!m_ts->ft.pfxonly) m_ts->doc.add_posting(term, pos, m_ts->ft.wdfinc); #ifdef TESTING_XAPIAN_SPELL if (Db::isSpellingCandidate(term, false)) { m_ts->wdb.add_spelling(term); } #endif // Index the prefixed term. if (!m_ts->ft.pfx.empty()) { m_ts->doc.add_posting(m_ts->ft.pfx + term, pos, m_ts->ft.wdfinc); } return true; } XCATCHERROR(ermsg); LOGERR("Db: xapian add_posting error " << ermsg << "\n"); return false; } void newpage(int pos) { pos += m_ts->basepos; if (pos < int(baseTextPosition)) { LOGDEB("newpage: not in body: " << pos << "\n"); return; } m_ts->doc.add_posting(m_ts->ft.pfx + page_break_term, pos); if (pos == m_lastpagepos) { m_pageincr++; LOGDEB2("newpage: same pos, pageincr " << m_pageincr << " lastpagepos " << m_lastpagepos << "\n"); } else { LOGDEB2("newpage: pos change, pageincr " << m_pageincr << " lastpagepos " << m_lastpagepos << "\n"); if (m_pageincr > 0) { // Remember the multiple page break at this position unsigned int relpos = m_lastpagepos - baseTextPosition; LOGDEB2("Remembering multiple page break. Relpos " << relpos << " cnt " << m_pageincr << "\n"); m_pageincrvec.push_back(pair<int, int>(relpos, m_pageincr)); } m_pageincr = 0; } m_lastpagepos = pos; } virtual bool flush() { if (m_pageincr > 0) { unsigned int relpos = m_lastpagepos - baseTextPosition; LOGDEB2("Remembering multiple page break. Position " << relpos << " cnt " << m_pageincr << "\n"); m_pageincrvec.push_back(pair<int, int>(relpos, m_pageincr)); m_pageincr = 0; } return TermProc::flush(); } TextSplitDb *m_ts; // Auxiliary page breaks data for positions with multiple page breaks. int m_lastpagepos; // increment of page breaks at same pos. Normally 0, 1.. 
when several // breaks at the same pos int m_pageincr; vector <pair<int, int> > m_pageincrvec; }; // At the moment, we normally use the Xapian speller for Katakana and // aspell for everything else bool Db::getSpellingSuggestions(const string& word, vector<string>& suggs) { LOGDEB("Db::getSpellingSuggestions:[" << word << "]\n"); suggs.clear(); if (nullptr == m_ndb) { return false; } string term = word; if (isSpellingCandidate(term, true)) { // Term is candidate for aspell processing #ifdef RCL_USE_ASPELL bool noaspell = false; m_config->getConfParam("noaspell", &noaspell); if (noaspell) { return false; } if (nullptr == m_aspell) { m_aspell = new Aspell(m_config); if (m_aspell) { string reason; m_aspell->init(reason); if (!m_aspell->ok()) { LOGDEB("Aspell speller init failed: " << reason << endl); delete m_aspell; m_aspell = 0; } } } if (nullptr == m_aspell) { LOGERR("Db::getSpellingSuggestions: aspell not initialized\n"); return false; } list<string> asuggs; string reason; if (!m_aspell->suggest(*this, term, asuggs, reason)) { LOGERR("Db::getSpellingSuggestions: aspell failed: " << reason << "\n"); return false; } suggs = vector<string>(asuggs.begin(), asuggs.end()); #endif } else { #ifdef TESTING_XAPIAN_SPELL // Was not aspell candidate (e.g.: katakana). Maybe use Xapian // speller? if (isSpellingCandidate(term, false)) { if (!o_index_stripchars) { if (!unacmaybefold(word, term, "UTF-8", UNACOP_UNACFOLD)) { LOGINFO("Db::getSpelling: unac failed for [" << word << "]\n"); return false; } } string sugg = m_ndb->xrdb.get_spelling_suggestion(term); if (!sugg.empty()) { suggs.push_back(sugg); } } #endif } return true; } // Let our user set the parameters for abstract processing void Db::setAbstractParams(int idxtrunc, int syntlen, int syntctxlen) { LOGDEB1("Db::setAbstractParams: trunc " << idxtrunc << " syntlen " << syntlen << " ctxlen " << syntctxlen << "\n"); if (idxtrunc >= 0) m_idxAbsTruncLen = idxtrunc; if (syntlen > 0) m_synthAbsLen = syntlen; if (syntctxlen > 0) m_synthAbsWordCtxLen = syntctxlen; } bool Db::setSynGroupsFile(const string& fn) { return m_syngroups.setfile(fn); } static const string cstr_nc("\n\r\x0c\\"); #define RECORD_APPEND(R, NM, VAL) {R += NM + "=" + VAL + "\n";} // Add document in internal form to the database: index the terms in // the title abstract and body and add special terms for file name, // date, mime type etc. , create the document data record (more // metadata), and update database bool Db::addOrUpdate(const string &udi, const string &parent_udi, Doc &doc) { LOGDEB("Db::add: udi [" << udi << "] parent [" << parent_udi << "]\n"); if (m_ndb == 0) return false; // This document is potentially going to be passed to the index // update thread. The reference counters are not mt-safe, so we // need to do this through a pointer. The reference is just there // to avoid changing too much code (the previous version passed a copy). Xapian::Document *newdocument_ptr = new Xapian::Document; Xapian::Document &newdocument(*newdocument_ptr); // The term processing pipeline: TermProcIdx tpidx; TermProc *nxt = &tpidx; TermProcStop tpstop(nxt, m_stops);nxt = &tpstop; //TermProcCommongrams tpcommon(nxt, m_stops); nxt = &tpcommon; TermProcPrep tpprep(nxt); if (o_index_stripchars) nxt = &tpprep; TextSplitDb splitter(m_ndb->xwdb, newdocument, nxt); tpidx.setTSD(&splitter); // Udi unique term: this is used for file existence/uptodate // checks, and unique id for the replace_document() call. 
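    // Illustrative use of the uniterm: needUpdate() walks the posting
    // list of this term to locate an already indexed version of the
    // document, and the final write path (addOrUpdateWrite) hands it to
    // Xapian's replace_document() so that re-indexing the same udi
    // overwrites the existing entry instead of creating a duplicate.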
string uniterm = make_uniterm(udi); string rawztext; // Doc compressed text if (doc.onlyxattr) { // Only updating an existing doc with new extended attributes // data. Need to read the old doc and its data record // first. This is so different from the normal processing that // it uses a fully separate code path (with some duplication // unfortunately) if (!m_ndb->docToXdocXattrOnly(&splitter, udi, doc, newdocument)) { delete newdocument_ptr; return false; } } else { if (m_idxTextTruncateLen > 0) { doc.text = truncate_to_word(doc.text, m_idxTextTruncateLen); } // If the ipath is like a path, index the last element. This is // for compound documents like zip and chm for which the filter // uses the file path as ipath. if (!doc.ipath.empty() && doc.ipath.find_first_not_of("0123456789") != string::npos) { string utf8ipathlast; // There is no way in hell we could have an idea of the // charset here, so let's hope it's ascii or utf-8. We call // transcode to strip the bad chars and pray if (transcode(path_getsimple(doc.ipath), utf8ipathlast, "UTF-8", "UTF-8")) { splitter.text_to_words(utf8ipathlast); } } // Split and index the path from the url for path-based filtering { string path = url_gpathS(doc.url); #ifdef _WIN32 // Windows file names are case-insensitive, so we // translate to UTF-8 and lowercase string upath = compute_utf8fn(m_config, path, false); unacmaybefold(upath, path, "UTF-8", UNACOP_FOLD); #endif vector<string> vpath; stringToTokens(path, vpath, "/"); // If vpath is not /, the last elt is the file/dir name, not a // part of the path. if (vpath.size()) vpath.resize(vpath.size()-1); splitter.curpos = 0; newdocument.add_posting(wrap_prefix(pathelt_prefix), splitter.basepos + splitter.curpos++); for (vector<string>::iterator it = vpath.begin(); it != vpath.end(); it++){ if (it->length() > 230) { // Just truncate it. May still be useful because // of wildcards *it = it->substr(0, 230); } newdocument.add_posting(wrap_prefix(pathelt_prefix) + *it, splitter.basepos + splitter.curpos++); } splitter.basepos += splitter.curpos + 100; } // Index textual metadata. These are all indexed as text with // positions, as we may want to do phrase searches with them (this // makes no sense for keywords by the way). // // The order has no importance, and we set a position gap of 100 // between fields to avoid false proximity matches. for (const auto& entry: doc.meta) { if (entry.second.empty()) { continue; } const FieldTraits *ftp{nullptr}; fieldToTraits(entry.first, &ftp); if (ftp && ftp->valueslot) { LOGDEB("Adding value: for field " << entry.first << " slot " << ftp->valueslot << endl); add_field_value(newdocument, *ftp, entry.second); } // There was an old comment here about not testing for // empty prefix, and we indeed did not test. I don't think // that it makes sense any more (and was in disagreement // with the LOG message. Really now: no prefix: no // indexing. 
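            // Illustrative example (field/prefix names depend on the
            // "fields" configuration): a metadata entry such as "author"
            // that is mapped to a prefix in the configuration gets split
            // and indexed below with that wrapped prefix and its
            // configured wdf increment; an entry with no prefix defined
            // only triggers the "no indexing" LOGDEB0 message.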
if (ftp && !ftp->pfx.empty()) { LOGDEB0("Db::add: field [" << entry.first << "] pfx [" << ftp->pfx << "] inc " << ftp->wdfinc << ": [" << entry.second << "]\n"); splitter.setTraits(*ftp); if (!splitter.text_to_words(entry.second)) { LOGDEB("Db::addOrUpdate: split failed for " << entry.first << "\n"); } } else { LOGDEB0("Db::add: no prefix for field [" << entry.first << "], no indexing\n"); } } // Reset to no prefix and default params splitter.setTraits(FieldTraits()); if (splitter.curpos < baseTextPosition) splitter.basepos = baseTextPosition; // Split and index body text LOGDEB2("Db::add: split body: [" << doc.text << "]\n"); #ifdef TEXTSPLIT_STATS splitter.resetStats(); #endif if (!splitter.text_to_words(doc.text)) { LOGDEB("Db::addOrUpdate: split failed for main text\n"); } else { if (m_ndb->m_storetext) { ZLibUtBuf buf; deflateToBuf(doc.text.c_str(), doc.text.size(), buf); rawztext.assign(buf.getBuf(), buf.getCnt()); } } #ifdef TEXTSPLIT_STATS // Reject bad data. unrecognized base64 text is characterized by // high avg word length and high variation (because there are // word-splitters like +/ inside the data). TextSplit::Stats::Values v = splitter.getStats(); // v.avglen > 15 && v.sigma > 12 if (v.count > 200 && (v.avglen > 10 && v.sigma / v.avglen > 0.8)) { LOGINFO("RclDb::addOrUpdate: rejecting doc for bad stats count " << v.count << " avglen " << v.avglen << " sigma " << v.sigma << " url [" << doc.url << "] ipath [" << doc.ipath << "] text " << doc.text << "\n"); delete newdocument_ptr; return true; } #endif ////// Special terms for other metadata. No positions for these. // Mime type newdocument.add_boolean_term(wrap_prefix(mimetype_prefix) + doc.mimetype); // Simple file name indexed unsplit for specific "file name" // searches. This is not the same as a filename: clause inside the // query language. // We also add a term for the filename extension if any. string utf8fn; if (doc.getmeta(Doc::keyfn, &utf8fn) && !utf8fn.empty()) { string fn; if (unacmaybefold(utf8fn, fn, "UTF-8", UNACOP_UNACFOLD)) { // We should truncate after extracting the extension, // but this is a pathological case anyway if (fn.size() > 230) utf8truncate(fn, 230); string::size_type pos = fn.rfind('.'); if (pos != string::npos && pos != fn.length() - 1) { newdocument.add_boolean_term(wrap_prefix(fileext_prefix) + fn.substr(pos + 1)); } newdocument.add_term(wrap_prefix(unsplitfilename_prefix) + fn,0); } } newdocument.add_boolean_term(uniterm); // Parent term. This is used to find all descendents, mostly // to delete them when the parent goes away if (!parent_udi.empty()) { newdocument.add_boolean_term(make_parentterm(parent_udi)); } // Fields used for selecting by date. Note that this only // works for years AD 0-9999 (no crash elsewhere, but things // won't work). time_t mtime = atoll(doc.dmtime.empty() ? doc.fmtime.c_str() : doc.dmtime.c_str()); struct tm tmb; localtime_r(&mtime, &tmb); char buf[50]; // It's actually 9, but use 50 to suppress warnings. snprintf(buf, 50, "%04d%02d%02d", tmb.tm_year+1900, tmb.tm_mon + 1, tmb.tm_mday); // Date (YYYYMMDD) newdocument.add_boolean_term(wrap_prefix(xapday_prefix) + string(buf)); // Month (YYYYMM) buf[6] = '\0'; newdocument.add_boolean_term(wrap_prefix(xapmonth_prefix) + string(buf)); // Year (YYYY) buf[4] = '\0'; newdocument.add_boolean_term(wrap_prefix(xapyear_prefix) + string(buf)); ////////////////////////////////////////////////////////////////// // Document data record. 
omindex has the following nl separated fields: // - url // - sample // - caption (title limited to 100 chars) // - mime type // // The title, author, abstract and keywords fields are special, // they always get stored in the document data // record. Configurable other fields can be, too. // // We truncate stored fields abstract, title and keywords to // reasonable lengths and suppress newlines (so that the data // record can keep a simple syntax) string record; RECORD_APPEND(record, Doc::keyurl, doc.url); RECORD_APPEND(record, Doc::keytp, doc.mimetype); // We left-zero-pad the times so that they are lexico-sortable leftzeropad(doc.fmtime, 11); RECORD_APPEND(record, Doc::keyfmt, doc.fmtime); if (!doc.dmtime.empty()) { leftzeropad(doc.dmtime, 11); RECORD_APPEND(record, Doc::keydmt, doc.dmtime); } RECORD_APPEND(record, Doc::keyoc, doc.origcharset); if (doc.fbytes.empty()) doc.fbytes = doc.pcbytes; if (!doc.fbytes.empty()) { RECORD_APPEND(record, Doc::keyfs, doc.fbytes); leftzeropad(doc.fbytes, 12); newdocument.add_value(VALUE_SIZE, doc.fbytes); } if (doc.haschildren) { newdocument.add_boolean_term(has_children_term); } if (!doc.pcbytes.empty()) RECORD_APPEND(record, Doc::keypcs, doc.pcbytes); char sizebuf[30]; sprintf(sizebuf, "%u", (unsigned int)doc.text.length()); RECORD_APPEND(record, Doc::keyds, sizebuf); // Note that we add the signature both as a value and in the data record if (!doc.sig.empty()) { RECORD_APPEND(record, Doc::keysig, doc.sig); newdocument.add_value(VALUE_SIG, doc.sig); } if (!doc.ipath.empty()) RECORD_APPEND(record, Doc::keyipt, doc.ipath); // Fields from the Meta array. Handle title specially because it has a // different name inside the data record (history...) string& ttref = doc.meta[Doc::keytt]; ttref = neutchars(truncate_to_word(ttref, m_idxMetaStoredLen), cstr_nc); if (!ttref.empty()) { RECORD_APPEND(record, cstr_caption, ttref); ttref.clear(); } // If abstract is empty, we make up one with the beginning of the // document. This is then not indexed, but part of the doc data so // that we can return it to a query without having to decode the // original file. // Note that the map accesses by operator[] create empty entries if they // don't exist yet. if (m_idxAbsTruncLen > 0) { string& absref = doc.meta[Doc::keyabs]; trimstring(absref, " \t\r\n"); if (absref.empty()) { if (!doc.text.empty()) absref = cstr_syntAbs + neutchars(truncate_to_word(doc.text, m_idxAbsTruncLen), cstr_nc); } else { absref = neutchars(truncate_to_word(absref, m_idxAbsTruncLen), cstr_nc); } // Do the append here to avoid the different truncation done // in the regular "stored" loop if (!absref.empty()) { RECORD_APPEND(record, Doc::keyabs, absref); absref.clear(); } } // Append all regular "stored" meta fields const set<string>& stored = m_config->getStoredFields(); for (set<string>::const_iterator it = stored.begin(); it != stored.end(); it++) { string nm = m_config->fieldCanon(*it); if (!doc.meta[nm].empty()) { string value = neutchars(truncate_to_word(doc.meta[nm], m_idxMetaStoredLen), cstr_nc); RECORD_APPEND(record, nm, value); } } // At this point, if the document "filename" field was empty, // try to store the "container file name" value. This is done // after indexing because we don't want search matches on // this, but the filename is often useful for display // purposes. 
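    // Illustrative sample of the record built so far (values are made
    // up; the exact set of lines depends on the document and the
    // configured stored fields):
    //   url=file:///home/me/docs/report.pdf
    //   mtype=application/pdf
    //   fmtime=01419804132
    //   origcharset=UTF-8
    //   fbytes=54321
    //   dbytes=12000
    //   sig=543211419804132
    //   caption=Some document title
    //   abstract=...beginning of the document text...
    // The container file name handled just below may add a filename=
    // line as well, for display purposes only.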
const string *fnp = 0; if (!doc.peekmeta(Rcl::Doc::keyfn, &fnp) || fnp->empty()) { if (doc.peekmeta(Rcl::Doc::keytcfn, &fnp) && !fnp->empty()) { string value = neutchars(truncate_to_word(*fnp, m_idxMetaStoredLen), cstr_nc); RECORD_APPEND(record, Rcl::Doc::keyfn, value); } } // If empty pages (multiple break at same pos) were recorded, save // them (this is because we have no way to record them in the // Xapian list if (!tpidx.m_pageincrvec.empty()) { ostringstream multibreaks; for (unsigned int i = 0; i < tpidx.m_pageincrvec.size(); i++) { if (i != 0) multibreaks << ","; multibreaks << tpidx.m_pageincrvec[i].first << "," << tpidx.m_pageincrvec[i].second; } RECORD_APPEND(record, string(cstr_mbreaks), multibreaks.str()); } // If the file's md5 was computed, add value and term. // The value is optionally used for query result duplicate elimination, // and the term to find the duplicates. // We don't do this for empty docs. const string *md5; if (doc.peekmeta(Doc::keymd5, &md5) && !md5->empty() && md5->compare(cstr_md5empty)) { string digest; MD5HexScan(*md5, digest); newdocument.add_value(VALUE_MD5, digest); newdocument.add_boolean_term(wrap_prefix("XM") + *md5); } LOGDEB0("Rcl::Db::add: new doc record:\n" << record << "\n"); newdocument.set_data(record); } #ifdef IDX_THREADS if (m_ndb->m_havewriteq) { DbUpdTask *tp = new DbUpdTask( DbUpdTask::AddOrUpdate, udi, uniterm, newdocument_ptr, doc.text.length(), rawztext); if (!m_ndb->m_wqueue.put(tp)) { LOGERR("Db::addOrUpdate:Cant queue task\n"); delete newdocument_ptr; return false; } else { return true; } } #endif return m_ndb->addOrUpdateWrite(udi, uniterm, newdocument_ptr, doc.text.length(), rawztext); } bool Db::Native::docToXdocXattrOnly(TextSplitDb *splitter, const string &udi, Doc &doc, Xapian::Document& xdoc) { LOGDEB0("Db::docToXdocXattrOnly\n"); #ifdef IDX_THREADS std::unique_lock<std::mutex> lock(m_mutex); #endif // Read existing document and its data record if (getDoc(udi, 0, xdoc) == 0) { LOGERR("docToXdocXattrOnly: existing doc not found\n"); return false; } string data; XAPTRY(data = xdoc.get_data(), xrdb, m_rcldb->m_reason); if (!m_rcldb->m_reason.empty()) { LOGERR("Db::xattrOnly: got error: " << m_rcldb->m_reason << "\n"); return false; } // Clear the term lists for the incoming fields and index the new values map<string, string>::iterator meta_it; for (const auto& ent : doc.meta) { const FieldTraits *ftp; if (!m_rcldb->fieldToTraits(ent.first, &ftp) || ftp->pfx.empty()) { LOGDEB0("Db::xattrOnly: no prefix for field [" << ent.first << "], skipped\n"); continue; } // Clear the previous terms for the field clearField(xdoc, ftp->pfx, ftp->wdfinc); LOGDEB0("Db::xattrOnly: field [" << ent.first << "] pfx [" << ftp->pfx << "] inc " << ftp->wdfinc << ": [" << ent.second << "]\n"); splitter->setTraits(*ftp); if (!splitter->text_to_words(ent.second)) { LOGDEB("Db::xattrOnly: split failed for " << ent.first << "\n"); } } xdoc.add_value(VALUE_SIG, doc.sig); // Parse current data record into a dict for ease of processing ConfSimple datadic(data); if (!datadic.ok()) { LOGERR("db::docToXdocXattrOnly: failed turning data rec to dict\n"); return false; } // For each "stored" field, check if set in doc metadata and // update the value if it is const set<string>& stored = m_rcldb->m_config->getStoredFields(); for (set<string>::const_iterator it = stored.begin(); it != stored.end(); it++) { string nm = m_rcldb->m_config->fieldCanon(*it); if (doc.getmeta(nm, 0)) { string value = neutchars( truncate_to_word(doc.meta[nm], m_rcldb->m_idxMetaStoredLen), 
cstr_nc); datadic.set(nm, value, ""); } } // Recreate the record. We want to do this with the local RECORD_APPEND // method for consistency in format, instead of using ConfSimple print vector<string> names = datadic.getNames(""); data.clear(); for (vector<string>::const_iterator it = names.begin(); it != names.end(); it++) { string value; datadic.get(*it, value, ""); RECORD_APPEND(data, *it, value); } RECORD_APPEND(data, Doc::keysig, doc.sig); xdoc.set_data(data); return true; } #ifdef IDX_THREADS void Db::waitUpdIdle() { if (m_ndb->m_iswritable && m_ndb->m_havewriteq) { Chrono chron; m_ndb->m_wqueue.waitIdle(); // We flush here just for correct measurement of the thread work time string ermsg; try { m_ndb->xwdb.commit(); } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::waitUpdIdle: flush() failed: " << ermsg << "\n"); } m_ndb->m_totalworkns += chron.nanos(); LOGINFO("Db::waitUpdIdle: total xapian work " << lltodecstr(m_ndb->m_totalworkns/1000000) << " mS\n"); } } #endif // Flush when idxflushmbs is reached bool Db::maybeflush(int64_t moretext) { if (m_flushMb > 0) { m_curtxtsz += moretext; if ((m_curtxtsz - m_flushtxtsz) / MB >= m_flushMb) { LOGINF("Db::add/delete: txt size >= " << m_flushMb << " Mb, flushing\n"); return doFlush(); } } return true; } bool Db::doFlush() { if (!m_ndb) { LOGERR("Db::doFLush: no ndb??\n"); return false; } string ermsg; try { m_ndb->xwdb.commit(); } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::doFlush: flush() failed: " << ermsg << "\n"); return false; } m_flushtxtsz = m_curtxtsz; return true; } void Db::setExistingFlags(const string& udi, unsigned int docid) { if (m_mode == DbRO) return; if (docid == (unsigned int)-1) { LOGERR("Db::setExistingFlags: called with bogus docid !!\n"); return; } #ifdef IDX_THREADS std::unique_lock<std::mutex> lock(m_ndb->m_mutex); #endif i_setExistingFlags(udi, docid); } void Db::i_setExistingFlags(const string& udi, unsigned int docid) { // Set the up to date flag for the document and its // subdocs. needUpdate() can also be called at query time (for // preview up to date check), so no error if the updated bitmap is // of size 0 if (docid >= updated.size()) { if (updated.size()) LOGERR("needUpdate: existing docid beyond updated.size(). Udi [" << udi << "], docid " << docid << ", updated.size() " << updated.size() << "\n"); return; } else { updated[docid] = true; } // Set the existence flag for all the subdocs (if any) vector<Xapian::docid> docids; if (!m_ndb->subDocs(udi, 0, docids)) { LOGERR("Rcl::Db::needUpdate: can't get subdocs\n"); return; } for (auto docid : docids) { if (docid < updated.size()) { LOGDEB2("Db::needUpdate: docid " << docid << " set\n"); updated[docid] = true; } } } // Test if doc given by udi has changed since last indexed (test sigs) bool Db::needUpdate(const string &udi, const string& sig, unsigned int *docidp, string *osigp) { if (m_ndb == 0) return false; if (osigp) osigp->clear(); if (docidp) *docidp = 0; // If we are doing an in place or full reset, no need to test. if (o_inPlaceReset || m_mode == DbTrunc) { // For in place reset, pretend the doc existed, to enable // subdoc purge. The value is only used as a boolean in this case. 
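        // (Illustrative note: the -1 stored below is only that boolean
        // flag; setExistingFlags() rejects it as a bogus docid, so the
        // existence-flag code never uses it to address a real Xapian
        // document.)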
if (docidp && o_inPlaceReset) { *docidp = -1; } return true; } string uniterm = make_uniterm(udi); string ermsg; #ifdef IDX_THREADS // Need to protect against interaction with the doc update/insert // thread which also updates the existence map, and even multiple // accesses to the readonly Xapian::Database are not allowed // anyway std::unique_lock<std::mutex> lock(m_ndb->m_mutex); #endif // Try to find the document indexed by the uniterm. Xapian::PostingIterator docid; XAPTRY(docid = m_ndb->xrdb.postlist_begin(uniterm), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::needUpdate: xapian::postlist_begin failed: " << m_reason << "\n"); return false; } if (docid == m_ndb->xrdb.postlist_end(uniterm)) { // No document exists with this path: we do need update LOGDEB("Db::needUpdate:yes (new): [" << uniterm << "]\n"); return true; } Xapian::Document xdoc; XAPTRY(xdoc = m_ndb->xrdb.get_document(*docid), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::needUpdate: get_document error: " << m_reason << "\n"); return true; } if (docidp) { *docidp = *docid; } // Retrieve old file/doc signature from value string osig; XAPTRY(osig = xdoc.get_value(VALUE_SIG), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::needUpdate: get_value error: " << m_reason << "\n"); return true; } LOGDEB2("Db::needUpdate: oldsig [" << osig << "] new [" << sig << "]\n"); if (osigp) { *osigp = osig; } // Compare new/old sig if (sig != osig) { LOGDEB("Db::needUpdate:yes: olsig [" << osig << "] new [" << sig << "] [" << uniterm << "]\n"); // Db is not up to date. Let's index the file return true; } // Up to date. Set the existance flags in the map for the doc and // its subdocs. LOGDEB("Db::needUpdate:no: [" << uniterm << "]\n"); i_setExistingFlags(udi, *docid); return false; } // Return existing stem db languages vector<string> Db::getStemLangs() { LOGDEB("Db::getStemLang\n"); vector<string> langs; if (m_ndb == 0 || m_ndb->m_isopen == false) return langs; StemDb db(m_ndb->xrdb); db.getMembers(langs); return langs; } /** * Delete stem db for given language */ bool Db::deleteStemDb(const string& lang) { LOGDEB("Db::deleteStemDb(" << lang << ")\n"); if (m_ndb == 0 || m_ndb->m_isopen == false || !m_ndb->m_iswritable) return false; XapWritableSynFamily db(m_ndb->xwdb, synFamStem); return db.deleteMember(lang); } /** * Create database of stem to parents associations for a given language. * We walk the list of all terms, stem them, and create another Xapian db * with documents indexed by a single term (the stem), and with the list of * parent terms in the document data. */ bool Db::createStemDbs(const vector<string>& langs) { LOGDEB("Db::createStemDbs\n"); if (m_ndb == 0 || m_ndb->m_isopen == false || !m_ndb->m_iswritable) { LOGERR("createStemDb: db not open or not writable\n"); return false; } return createExpansionDbs(m_ndb->xwdb, langs); } /** * This is called at the end of an indexing session, to delete the * documents for files that are no longer there. This can ONLY be called * after a full file-system tree walk, else the file existence flags will * be wrong. 
*/ bool Db::purge() { LOGDEB("Db::purge\n"); if (m_ndb == 0) return false; LOGDEB("Db::purge: m_isopen " << m_ndb->m_isopen << " m_iswritable " << m_ndb->m_iswritable << "\n"); if (m_ndb->m_isopen == false || m_ndb->m_iswritable == false) return false; #ifdef IDX_THREADS // If we manage our own write queue, make sure it's drained and closed if (m_ndb->m_havewriteq) m_ndb->m_wqueue.setTerminateAndWait(); // else we need to lock out other top level threads. This is just // a precaution as they should have been waited for by the top // level actor at this point std::unique_lock<std::mutex> lock(m_ndb->m_mutex); #endif // IDX_THREADS // For xapian versions up to 1.0.1, deleting a non-existant // document would trigger an exception that would discard any // pending update. This could lose both previous added documents // or deletions. Adding the flush before the delete pass ensured // that any added document would go to the index. Kept here // because it doesn't really hurt. m_reason.clear(); try { m_ndb->xwdb.commit(); } XCATCHERROR(m_reason); if (!m_reason.empty()) { LOGERR("Db::purge: 1st flush failed: " << m_reason << "\n"); return false; } // Walk the document array and delete any xapian document whose // flag is not set (we did not see its source during indexing). int purgecount = 0; for (Xapian::docid docid = 1; docid < updated.size(); ++docid) { if (!updated[docid]) { if ((purgecount+1) % 100 == 0) { try { CancelCheck::instance().checkCancel(); } catch(CancelExcept) { LOGINFO("Db::purge: partially cancelled\n"); break; } } try { if (m_flushMb > 0) { // We use an average term length of 5 for // estimating the doc sizes which is probably not // accurate but gives rough consistency with what // we do for add/update. I should fetch the doc // size from the data record, but this would be // bad for performance. Xapian::termcount trms = m_ndb->xwdb.get_doclength(docid); maybeflush(trms * 5); } m_ndb->deleteDocument(docid); LOGDEB("Db::purge: deleted document #" << docid << "\n"); } catch (const Xapian::DocNotFoundError &) { LOGDEB0("Db::purge: document #" << docid << " not found\n"); } catch (const Xapian::Error &e) { LOGERR("Db::purge: document #" << docid << ": " << e.get_msg() << "\n"); } catch (...) { LOGERR("Db::purge: document #" << docid << ": unknown error\n"); } purgecount++; } } m_reason.clear(); try { m_ndb->xwdb.commit(); } XCATCHERROR(m_reason); if (!m_reason.empty()) { LOGERR("Db::purge: 2nd flush failed: " << m_reason << "\n"); return false; } return true; } // Test for doc existence. bool Db::docExists(const string& uniterm) { #ifdef IDX_THREADS // Need to protect read db against multiaccess. 
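    // (Illustrative usage: purgeFile() below builds the uniterm from the
    // udi and calls docExists() first, so that nothing is queued for
    // deletion when the document was never indexed.)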
std::unique_lock<std::mutex> lock(m_ndb->m_mutex); #endif string ermsg; try { Xapian::PostingIterator docid = m_ndb->xrdb.postlist_begin(uniterm); if (docid == m_ndb->xrdb.postlist_end(uniterm)) { return false; } else { return true; } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::docExists(" << uniterm << ") " << ermsg << "\n"); } return false; } /* Delete document(s) for given unique identifier (doc and descendents) */ bool Db::purgeFile(const string &udi, bool *existed) { LOGDEB("Db:purgeFile: [" << udi << "]\n"); if (m_ndb == 0 || !m_ndb->m_iswritable) return false; string uniterm = make_uniterm(udi); bool exists = docExists(uniterm); if (existed) *existed = exists; if (!exists) return true; #ifdef IDX_THREADS if (m_ndb->m_havewriteq) { string rztxt; DbUpdTask *tp = new DbUpdTask(DbUpdTask::Delete, udi, uniterm, 0, (size_t)-1, rztxt); if (!m_ndb->m_wqueue.put(tp)) { LOGERR("Db::purgeFile:Cant queue task\n"); return false; } else { return true; } } #endif /* We get there is IDX_THREADS is not defined or there is no queue */ return m_ndb->purgeFileWrite(false, udi, uniterm); } /* Delete subdocs with an out of date sig. We do this to purge obsolete subdocs during a partial update where no general purge will be done */ bool Db::purgeOrphans(const string &udi) { LOGDEB("Db:purgeOrphans: [" << udi << "]\n"); if (m_ndb == 0 || !m_ndb->m_iswritable) return false; string uniterm = make_uniterm(udi); #ifdef IDX_THREADS if (m_ndb->m_havewriteq) { string rztxt; DbUpdTask *tp = new DbUpdTask(DbUpdTask::PurgeOrphans, udi, uniterm, 0, (size_t)-1, rztxt); if (!m_ndb->m_wqueue.put(tp)) { LOGERR("Db::purgeFile:Cant queue task\n"); return false; } else { return true; } } #endif /* We get there is IDX_THREADS is not defined or there is no queue */ return m_ndb->purgeFileWrite(true, udi, uniterm); } bool Db::dbStats(DbStats& res, bool listfailed) { if (!m_ndb || !m_ndb->m_isopen) return false; Xapian::Database xdb = m_ndb->xrdb; XAPTRY(res.dbdoccount = xdb.get_doccount(); res.dbavgdoclen = xdb.get_avlength(); res.mindoclen = xdb.get_doclength_lower_bound(); res.maxdoclen = xdb.get_doclength_upper_bound(); , xdb, m_reason); if (!m_reason.empty()) return false; if (!listfailed) { return true; } // listfailed is set : look for failed docs string ermsg; try { for (unsigned int docid = 1; docid < xdb.get_lastdocid(); docid++) { try { Xapian::Document doc = xdb.get_document(docid); string sig = doc.get_value(VALUE_SIG); if (sig.empty() || sig[sig.size()-1] != '+') { continue; } string data = doc.get_data(); ConfSimple parms(data); if (!parms.ok()) { } else { string url, ipath; parms.get(Doc::keyipt, ipath); parms.get(Doc::keyurl, url); // Turn to local url or not? It seems to make more // sense to keep the original urls as seen by the // indexer. // m_config->urlrewrite(dbdir, url); if (!ipath.empty()) { url += " | " + ipath; } res.failedurls.push_back(url); } } catch (Xapian::DocNotFoundError) { continue; } } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("Db::dbStats: " << ermsg << "\n"); return false; } return true; } // Retrieve document defined by Unique doc identifier. This is used // by the GUI history feature and by open parent/getenclosing // ! The return value is always true except for fatal errors. 
Document // existence should be tested by looking at doc.pc bool Db::getDoc(const string &udi, const Doc& idxdoc, Doc &doc) { LOGDEB1("Db:getDoc: [" << udi << "]\n"); int idxi = idxdoc.idxi; return getDoc(udi, idxi, doc); } bool Db::getDoc(const string &udi, const std::string& dbdir, Doc &doc) { LOGDEB1("Db::getDoc(udi, dbdir): (" << udi << ", " << dbdir << ")\n"); int idxi = -1; if (dbdir.empty() || dbdir == m_basedir) { idxi = 0; } else { for (unsigned int i = 0; i < m_extraDbs.size(); i++) { if (dbdir == m_extraDbs[i]) { idxi = int(i + 1); break; } } } LOGDEB1("Db::getDoc(udi, dbdir): idxi: " << idxi << endl); if (idxi < 0) { LOGERR("Db::getDoc(udi, dbdir): dbdir not in current extra dbs\n"); return false; } return getDoc(udi, idxi, doc); } bool Db::getDoc(const string& udi, int idxi, Doc& doc) { // Initialize what we can in any case. If this is history, caller // will make partial display in case of error if (m_ndb == 0) return false; doc.meta[Rcl::Doc::keyrr] = "100%"; doc.pc = 100; Xapian::Document xdoc; Xapian::docid docid; if (idxi >= 0 && (docid = m_ndb->getDoc(udi, idxi, xdoc))) { string data = xdoc.get_data(); doc.meta[Rcl::Doc::keyudi] = udi; return m_ndb->dbDataToRclDoc(docid, data, doc); } else { // Document found in history no longer in the // database. We return true (because their might be // other ok docs further) but indicate the error with // pc = -1 doc.pc = -1; LOGINFO("Db:getDoc: no such doc in current index: [" << udi << "]\n"); return true; } } bool Db::hasSubDocs(const Doc &idoc) { if (m_ndb == 0) return false; string inudi; if (!idoc.getmeta(Doc::keyudi, &inudi) || inudi.empty()) { LOGERR("Db::hasSubDocs: no input udi or empty\n"); return false; } LOGDEB1("Db::hasSubDocs: idxi " << idoc.idxi << " inudi [" <<inudi << "]\n"); // Not sure why we perform both the subDocs() call and the test on // has_children. The former will return docs if the input is a // file-level document, but the latter should be true both in this // case and if the input is already a subdoc, so the first test // should be redundant. Does not hurt much in any case, to be // checked one day. vector<Xapian::docid> docids; if (!m_ndb->subDocs(inudi, idoc.idxi, docids)) { LOGDEB("Db::hasSubDocs: lower level subdocs failed\n"); return false; } if (!docids.empty()) return true; // Check if doc has an "has_children" term if (m_ndb->hasTerm(inudi, idoc.idxi, has_children_term)) return true; return false; } // Retrieve all subdocuments of a given one, which may not be a file-level // one (in which case, we have to retrieve this first, then filter the ipaths) bool Db::getSubDocs(const Doc &idoc, vector<Doc>& subdocs) { if (m_ndb == 0) return false; string inudi; if (!idoc.getmeta(Doc::keyudi, &inudi) || inudi.empty()) { LOGERR("Db::getSubDocs: no input udi or empty\n"); return false; } string rootudi; string ipath = idoc.ipath; LOGDEB0("Db::getSubDocs: idxi " << idoc.idxi << " inudi [" << inudi << "] ipath [" << ipath << "]\n"); if (ipath.empty()) { // File-level doc. 
Use it as root rootudi = inudi; } else { // See if we have a parent term Xapian::Document xdoc; if (!m_ndb->getDoc(inudi, idoc.idxi, xdoc)) { LOGERR("Db::getSubDocs: can't get Xapian document\n"); return false; } Xapian::TermIterator xit; XAPTRY(xit = xdoc.termlist_begin(); xit.skip_to(wrap_prefix(parent_prefix)), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::getSubDocs: xapian error: " << m_reason << "\n"); return false; } if (xit == xdoc.termlist_end()) { LOGERR("Db::getSubDocs: parent term not found\n"); return false; } rootudi = strip_prefix(*xit); } LOGDEB("Db::getSubDocs: root: [" << rootudi << "]\n"); // Retrieve all subdoc xapian ids for the root vector<Xapian::docid> docids; if (!m_ndb->subDocs(rootudi, idoc.idxi, docids)) { LOGDEB("Db::getSubDocs: lower level subdocs failed\n"); return false; } // Retrieve doc, filter, and build output list for (int tries = 0; tries < 2; tries++) { try { for (vector<Xapian::docid>::const_iterator it = docids.begin(); it != docids.end(); it++) { Xapian::Document xdoc = m_ndb->xrdb.get_document(*it); string data = xdoc.get_data(); string docudi; m_ndb->xdocToUdi(xdoc, docudi); Doc doc; doc.meta[Doc::keyudi] = docudi; doc.meta[Doc::keyrr] = "100%"; doc.pc = 100; if (!m_ndb->dbDataToRclDoc(*it, data, doc)) { LOGERR("Db::getSubDocs: doc conversion error\n"); return false; } if (ipath.empty() || FileInterner::ipathContains(ipath, doc.ipath)) { subdocs.push_back(doc); } } return true; } catch (const Xapian::DatabaseModifiedError &e) { m_reason = e.get_msg(); m_ndb->xrdb.reopen(); continue; } XCATCHERROR(m_reason); break; } LOGERR("Db::getSubDocs: Xapian error: " << m_reason << "\n"); return false; } bool Db::getContainerDoc(const Doc &idoc, Doc& ctdoc) { if (m_ndb == 0) return false; string inudi; if (!idoc.getmeta(Doc::keyudi, &inudi) || inudi.empty()) { LOGERR("Db::getContainerDoc: no input udi or empty\n"); return false; } string rootudi; string ipath = idoc.ipath; LOGDEB0("Db::getContainerDoc: idxi " << idoc.idxi << " inudi [" << inudi << "] ipath [" << ipath << "]\n"); if (ipath.empty()) { // File-level doc ?? ctdoc = idoc; return true; } // See if we have a parent term Xapian::Document xdoc; if (!m_ndb->getDoc(inudi, idoc.idxi, xdoc)) { LOGERR("Db::getContainerDoc: can't get Xapian document\n"); return false; } Xapian::TermIterator xit; XAPTRY(xit = xdoc.termlist_begin(); xit.skip_to(wrap_prefix(parent_prefix)), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::getContainerDoc: xapian error: " << m_reason << "\n"); return false; } if (xit == xdoc.termlist_end()) { LOGERR("Db::getContainerDoc: parent term not found\n"); return false; } rootudi = strip_prefix(*xit); if (!getDoc(rootudi, idoc.idxi, ctdoc)) { LOGERR("Db::getContainerDoc: can't get container document\n"); return false; } return true; } // Walk an UDI section (all UDIs beginning with input prefix), and // mark all docs and subdocs as existing. Caller beware: Makes sense // or not depending on the UDI structure for the data store. In practise, // used for absent FS mountable volumes. 
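// Illustrative scenario (paths are made up): if a volume indexed under
// /media/backup is not mounted during an indexing pass, calling this
// with the udi prefix corresponding to /media/backup marks all of its
// documents and subdocuments as existing, so that the final purge()
// pass does not delete them from the index.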
bool Db::udiTreeMarkExisting(const string& udi) { LOGDEB("Db::udiTreeMarkExisting: " << udi << endl); string wrapd = wrap_prefix(udi_prefix); string expr = udi + "*"; #ifdef IDX_THREADS std::unique_lock<std::mutex> lock(m_ndb->m_mutex); #endif bool ret = m_ndb->idxTermMatch_p( int(ET_WILD), cstr_null, expr, [this, &udi](const string& term, Xapian::termcount, Xapian::doccount) { Xapian::PostingIterator docid; XAPTRY(docid = m_ndb->xrdb.postlist_begin(term), m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("Db::udiTreeWalk: xapian::postlist_begin failed: " << m_reason << "\n"); return false; } if (docid == m_ndb->xrdb.postlist_end(term)) { LOGDEB("Db::udiTreeWalk:no doc for " << term << " ??\n"); return false; } i_setExistingFlags(udi, *docid); LOGDEB0("Db::udiTreeWalk: uniterm: " << term << endl); return true; }, wrapd); return ret; } } // End namespace Rcl ������������recoll-1.26.3/rcldb/rclquery.cpp��������������������������������������������������������������������0000644�0001750�0001750�00000036630�13567765436�013502� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2008 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include <stdlib.h> #include <string.h> #include <stdio.h> #include <vector> #include <sstream> #include "xapian.h" #include "cstr.h" #include "rclconfig.h" #include "log.h" #include "rcldb.h" #include "rcldb_p.h" #include "rclquery.h" #include "rclquery_p.h" #include "conftree.h" #include "smallut.h" #include "chrono.h" #include "searchdata.h" #include "unacpp.h" using namespace std; namespace Rcl { // This is used as a marker inside the abstract frag lists, but // normally doesn't remain in final output (which is built with a // custom sep. by our caller). static const string cstr_ellipsis("..."); // Field names inside the index data record may differ from the rcldoc ones // (esp.: caption / title) static const string& docfToDatf(const string& df) { if (!df.compare(Doc::keytt)) { return cstr_caption; } else if (!df.compare(Doc::keymt)) { return cstr_dmtime; } else { return df; } } // Sort helper class. As Xapian sorting is lexicographic, we do some // special processing for special fields like dates and sizes. User // custom field data will have to be processed before insertion to // achieve equivalent results. 
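// Illustrative example (values made up): for a size field, the raw value
// "1234" sorts before "999" lexicographically although it is numerically
// larger; QSorter left-zero-pads sizes to 12 digits ("000000001234" vs
// "000000000999") so that lexicographic and numeric order coincide.
// Times need no such treatment here because they are stored already
// left-zero-padded in the data record.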
#if XAPIAN_MAJOR_VERSION == 1 && XAPIAN_MINOR_VERSION < 2 class QSorter : public Xapian::Sorter #else class QSorter : public Xapian::KeyMaker #endif { public: QSorter(const string& f) : m_fld(docfToDatf(f) + "=") { m_ismtime = !m_fld.compare("dmtime="); if (m_ismtime) m_issize = false; else m_issize = !m_fld.compare("fbytes=") || !m_fld.compare("dbytes=") || !m_fld.compare("pcbytes="); } virtual std::string operator()(const Xapian::Document& xdoc) const { string data = xdoc.get_data(); // It would be simpler to do the record->Rcl::Doc thing, but // hand-doing this will be faster. It makes more assumptions // about the format than a ConfTree though: string::size_type i1, i2; i1 = data.find(m_fld); if (i1 == string::npos) { if (m_ismtime) { // Ugly: specialcase mtime as it's either dmtime or fmtime i1 = data.find("fmtime="); if (i1 == string::npos) { return string(); } } else { return string(); } } i1 += m_fld.length(); if (i1 >= data.length()) return string(); i2 = data.find_first_of("\n\r", i1); if (i2 == string::npos) return string(); string term = data.substr(i1, i2-i1); if (m_ismtime) { return term; } else if (m_issize) { // Left zeropad values for appropriate numeric sorting leftzeropad(term, 12); return term; } // Process data for better sorting. We should actually do the // unicode thing // (http://unicode.org/reports/tr10/#Introduction), but just // removing accents and majuscules will remove the most // glaring weirdnesses (or not, depending on your national // approach to collating...) string sortterm; // We're not even sure the term is utf8 here (ie: url) if (!unacmaybefold(term, sortterm, "UTF-8", UNACOP_UNACFOLD)) { sortterm = term; } // Also remove some common uninteresting starting characters i1 = sortterm.find_first_not_of(" \t\\\"'([*+,.#/"); if (i1 != 0 && i1 != string::npos) { sortterm = sortterm.substr(i1, sortterm.size()-i1); } LOGDEB2("QSorter: [" << term << "] -> [" << sortterm << "]\n"); return sortterm; } private: string m_fld; bool m_ismtime; bool m_issize; }; Query::Query(Db *db) : m_nq(new Native(this)), m_db(db), m_sorter(0), m_sortAscending(true), m_collapseDuplicates(false), m_resCnt(-1), m_snipMaxPosWalk(1000000) { if (db) db->getConf()->getConfParam("snippetMaxPosWalk", &m_snipMaxPosWalk); } Query::~Query() { deleteZ(m_nq); if (m_sorter) { delete (QSorter*)m_sorter; m_sorter = 0; } } void Query::setSortBy(const string& fld, bool ascending) { if (fld.empty()) { m_sortField.erase(); } else { m_sortField = m_db->getConf()->fieldQCanon(fld); m_sortAscending = ascending; } LOGDEB0("RclQuery::setSortBy: [" << m_sortField << "] " << (m_sortAscending ? 
"ascending" : "descending") << "\n"); } // Prepare query out of user search data bool Query::setQuery(std::shared_ptr<SearchData> sdata) { LOGDEB("Query::setQuery:\n"); if (!m_db || !m_nq) { LOGERR("Query::setQuery: not initialised!\n"); return false; } m_resCnt = -1; m_reason.erase(); m_nq->clear(); m_sd = sdata; Xapian::Query xq; if (!sdata->toNativeQuery(*m_db, &xq)) { m_reason += sdata->getReason(); return false; } m_nq->xquery = xq; string d; for (int tries = 0; tries < 2; tries++) { try { m_nq->xenquire = new Xapian::Enquire(m_db->m_ndb->xrdb); if (m_collapseDuplicates) { m_nq->xenquire->set_collapse_key(Rcl::VALUE_MD5); } else { m_nq->xenquire->set_collapse_key(Xapian::BAD_VALUENO); } m_nq->xenquire->set_docid_order(Xapian::Enquire::DONT_CARE); if (!m_sortField.empty() && stringlowercmp("relevancyrating", m_sortField)) { if (m_sorter) { delete (QSorter*)m_sorter; m_sorter = 0; } m_sorter = new QSorter(m_sortField); // It really seems there is a xapian bug about sort order, we // invert here. m_nq->xenquire->set_sort_by_key((QSorter*)m_sorter, !m_sortAscending); } m_nq->xenquire->set_query(m_nq->xquery); m_nq->xmset = Xapian::MSet(); // Get the query description and trim the "Xapian::Query" d = m_nq->xquery.get_description(); m_reason.erase(); break; } catch (const Xapian::DatabaseModifiedError &e) { m_reason = e.get_msg(); m_db->m_ndb->xrdb.reopen(); continue; } XCATCHERROR(m_reason); break; } if (!m_reason.empty()) { LOGDEB("Query::SetQuery: xapian error " << m_reason << "\n"); return false; } if (d.find("Xapian::Query") == 0) d.erase(0, strlen("Xapian::Query")); sdata->setDescription(d); m_sd = sdata; LOGDEB("Query::SetQuery: Q: " << sdata->getDescription() << "\n"); return true; } bool Query::getQueryTerms(vector<string>& terms) { if (!m_nq) return false; terms.clear(); Xapian::TermIterator it; string ermsg; try { for (it = m_nq->xquery.get_terms_begin(); it != m_nq->xquery.get_terms_end(); it++) { terms.push_back(*it); } } XCATCHERROR(ermsg); if (!ermsg.empty()) { LOGERR("getQueryTerms: xapian error: " << ermsg << "\n"); return false; } return true; } int Query::makeDocAbstract(const Doc &doc, vector<Snippet>& abstract, int maxoccs, int ctxwords, bool sortbypage) { LOGDEB("makeDocAbstract: maxoccs " << maxoccs << " ctxwords " << ctxwords << "\n"); if (!m_db || !m_db->m_ndb || !m_db->m_ndb->m_isopen || !m_nq) { LOGERR("Query::makeDocAbstract: no db or no nq\n"); return ABSRES_ERROR; } int ret = ABSRES_ERROR; XAPTRY(ret = m_nq->makeAbstract(doc.xdocid, abstract, maxoccs, ctxwords, sortbypage), m_db->m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGDEB("makeDocAbstract: makeAbstract: reason: " << m_reason << "\n"); return ABSRES_ERROR; } return ret; } bool Query::makeDocAbstract(const Doc &doc, vector<string>& abstract) { vector<Snippet> vpabs; if (!makeDocAbstract(doc, vpabs)) return false; for (vector<Snippet>::const_iterator it = vpabs.begin(); it != vpabs.end(); it++) { string chunk; if (it->page > 0) { ostringstream ss; ss << it->page; chunk += string(" [p ") + ss.str() + "] "; } chunk += it->snippet; abstract.push_back(chunk); } return true; } bool Query::makeDocAbstract(const Doc &doc, string& abstract) { vector<Snippet> vpabs; if (!makeDocAbstract(doc, vpabs)) return false; for (vector<Snippet>::const_iterator it = vpabs.begin(); it != vpabs.end(); it++) { abstract.append(it->snippet); abstract.append(cstr_ellipsis); } return m_reason.empty() ? 
true : false; } int Query::getFirstMatchPage(const Doc &doc, string& term) { LOGDEB1("Db::getFirstMatchPage\n");; if (!m_nq) { LOGERR("Query::getFirstMatchPage: no nq\n"); return false; } int pagenum = -1; XAPTRY(pagenum = m_nq->getFirstMatchPage(Xapian::docid(doc.xdocid), term), m_db->m_ndb->xrdb, m_reason); return m_reason.empty() ? pagenum : -1; } // Mset size static const int qquantum = 50; // Get estimated result count for query. Xapian actually does most of // the search job in there, this can be long int Query::getResCnt(int checkatleast, bool useestimate) { if (!m_db || !m_nq || !m_nq->xenquire) { LOGERR("Query::getResCnt: no query opened\n"); return -1; } LOGDEB0("Query::getResCnt: checkatleast " << checkatleast << " estimate " << useestimate << "\n"); if (m_resCnt >= 0) return m_resCnt; if (m_nq->xmset.size() <= 0) { Chrono chron; XAPTRY(if (checkatleast == -1) checkatleast = m_db->docCnt(); m_nq->xmset = m_nq->xenquire->get_mset(0, qquantum, checkatleast), m_db->m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("xenquire->get_mset: exception: " << m_reason << "\n"); return -1; } LOGDEB("Query::getResCnt: get_mset: " << chron.millis() << " mS\n"); } if (useestimate) { m_resCnt = m_nq->xmset.get_matches_estimated(); } else { m_resCnt = m_nq->xmset.get_matches_lower_bound(); } LOGDEB("Query::getResCnt: " << m_resCnt << "\n"); return m_resCnt; } // Get document at rank xapi in query results. We check if the // current mset has the doc, else ask for an other one. We use msets // of qquantum documents. // // Note that as stated by a Xapian developer, Enquire searches from // scratch each time get_mset() is called. So the better performance // on subsequent calls is probably only due to disk caching. bool Query::getDoc(int xapi, Doc &doc, bool fetchtext) { LOGDEB1("Query::getDoc: xapian enquire index " << xapi << "\n"); if (!m_nq || !m_nq->xenquire) { LOGERR("Query::getDoc: no query opened\n"); return false; } int first = m_nq->xmset.get_firstitem(); int last = first + m_nq->xmset.size() -1; if (!(xapi >= first && xapi <= last)) { LOGDEB("Fetching for first " << xapi << ", count " << qquantum << "\n"); XAPTRY(m_nq->xmset = m_nq->xenquire->get_mset(xapi, qquantum, (const Xapian::RSet *)0), m_db->m_ndb->xrdb, m_reason); if (!m_reason.empty()) { LOGERR("enquire->get_mset: exception: " << m_reason << "\n"); return false; } if (m_nq->xmset.empty()) { LOGDEB("enquire->get_mset: got empty result\n"); return false; } first = m_nq->xmset.get_firstitem(); last = first + m_nq->xmset.size() -1; } Xapian::Document xdoc; Xapian::docid docid = 0; int pc = 0; int collapsecount = 0; string data; string udi; m_reason.erase(); for (int xaptries=0; xaptries < 2; xaptries++) { try { xdoc = m_nq->xmset[xapi-first].get_document(); collapsecount = m_nq->xmset[xapi-first].get_collapse_count(); docid = *(m_nq->xmset[xapi-first]); pc = m_nq->xmset.convert_to_percent(m_nq->xmset[xapi-first]); data = xdoc.get_data(); m_reason.erase(); Chrono chron; m_db->m_ndb->xdocToUdi(xdoc, udi); LOGDEB2("Query::getDoc: " << chron.millis() << " ms for udi [" << udi << "], collapse count " << collapsecount << "\n"); break; } catch (Xapian::DatabaseModifiedError &error) { // retry or end of loop m_reason = error.get_msg(); continue; } XCATCHERROR(m_reason); break; } if (!m_reason.empty()) { LOGERR("Query::getDoc: " << m_reason << "\n"); return false; } doc.meta[Rcl::Doc::keyudi] = udi; doc.pc = pc; char buf[200]; if (collapsecount > 0) { sprintf(buf,"%3d%% (%d)", pc, collapsecount + 1); } else { sprintf(buf,"%3d%%", pc); } 
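    // Illustrative result (values made up): with pc == 87 and
    // collapsecount == 2, the relevancyrating string becomes " 87% (3)",
    // counting the result itself plus its two collapsed duplicates, and
    // the collapse count ("2") is stored separately just below.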
doc.meta[Doc::keyrr] = buf; if (collapsecount > 0) { sprintf(buf, "%d", collapsecount); doc.meta[Rcl::Doc::keycc] = buf; } // Parse xapian document's data and populate doc fields return m_db->m_ndb->dbDataToRclDoc(docid, data, doc, fetchtext); } vector<string> Query::expand(const Doc &doc) { LOGDEB("Rcl::Query::expand()\n"); vector<string> res; if (!m_nq || !m_nq->xenquire) { LOGERR("Query::expand: no query opened\n"); return res; } for (int tries = 0; tries < 2; tries++) { try { Xapian::RSet rset; rset.add_document(Xapian::docid(doc.xdocid)); // We don't exclude the original query terms. Xapian::ESet eset = m_nq->xenquire->get_eset(20, rset, false); LOGDEB("ESet terms:\n"); // We filter out the special terms for (Xapian::ESetIterator it = eset.begin(); it != eset.end(); it++) { LOGDEB(" [" << (*it) << "]\n"); if ((*it).empty() || has_prefix(*it)) continue; res.push_back(*it); if (res.size() >= 10) break; } m_reason.erase(); break; } catch (const Xapian::DatabaseModifiedError &e) { m_reason = e.get_msg(); m_db->m_ndb->xrdb.reopen(); continue; } XCATCHERROR(m_reason); break; } if (!m_reason.empty()) { LOGERR("Query::expand: xapian error " << m_reason << "\n"); res.clear(); } return res; } } ��������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/stemdb.h������������������������������������������������������������������������0000644�0001750�0001750�00000007314�13533651561�012536� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _STEMDB_H_INCLUDED_ #define _STEMDB_H_INCLUDED_ /** Stem database code * * Stem databases list stems and the set of index terms they expand to. They * are computed from index data by stemming each term and regrouping those * that stem to the same value. * * Stem databases are stored as separate Xapian databases, in * subdirectories of the index (e.g.: stem_french, stem_german2) * * The stem database is generated at the end of an indexing session by * walking the whole index term list, computing the stem for each * term, and building a stem->terms map. * * The map is then stored as a Xapian index where each stem is the * unique term indexing a document, and the list of expansions is stored * as the document data record. It would probably be possible to store * the expansions as the document term list instead (using a prefix to * distinguish the stem term). 
I tried this (chert, 08-2012) and the stem * db creation is very slightly slower than with the record approach, and * the result is 50% bigger. * * Another possible approach would be to update the stem map as we index. * This would probably be be less efficient for a full index pass because * each term would be seen and stemmed many times, but it might be * more efficient for an incremental pass with a limited number of * updated documents. For a small update, the stem building part often * dominates the indexing time. * * For future reference, I did try to store the map in a gdbm file and * the result is bigger and takes more time to create than the Xapian version. */ #include <vector> #include <string> #include <xapian.h> #include "synfamily.h" namespace Rcl { /* A stemming functor for using with XapComputableSynFamMember. * We could perform the change to lowercase in there too, as stemdb keys * must be lower case, but then the input conversion would be repeated for each * stemming language, which would be inefficient. So we let our caller make sure * that the input is lower-case */ class SynTermTransStem : public SynTermTrans { public: SynTermTransStem(const std::string& lang) : m_stemmer(lang), m_lang(lang) { } virtual ~SynTermTransStem() {} virtual std::string operator()(const std::string& in) { string out = m_stemmer(in); LOGDEB2("SynTermTransStem(" << (m_lang) << "): in [" << (in) << "] out [" << (out) << "]\n" ); return out; } Xapian::Stem m_stemmer; std::string m_lang; }; /** Stemdb is a bit special as a SynFamily as we may want to expand for one * or several members (languages) */ class StemDb : public XapSynFamily { public: StemDb(Xapian::Database& xdb) : XapSynFamily(xdb, synFamStem) { } /** Expand for a number of languages * @param langs space-separated set of languages * @param term term to expand */ bool stemExpand(const std::string& langs, const std::string& term, std::vector<std::string>& result); }; } #endif /* _STEMDB_H_INCLUDED_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldb_p.h�����������������������������������������������������������������������0000644�0001750�0001750�00000020352�13566714503�012664� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _rcldb_p_h_included_ #define _rcldb_p_h_included_ #include "autoconfig.h" #include <mutex> #include <functional> #include <xapian.h> #ifdef IDX_THREADS #include "workqueue.h" #endif // IDX_THREADS #include "xmacros.h" #include "log.h" namespace Rcl { class Query; #ifdef IDX_THREADS // Task for the index update thread. This can be // - add/update for a new / update documment // - delete for a deleted document // - purgeOrphans when a multidoc file is updated during a partial pass (no // general purge). We want to remove subDocs that possibly don't // exist anymore. We find them by their different sig // txtlen and doc are only valid for add/update else, len is (size_t)-1 and doc // is empty class DbUpdTask { public: enum Op {AddOrUpdate, Delete, PurgeOrphans}; // Note that udi and uniterm are strictly equivalent and are // passed both just to avoid recomputing uniterm which is // available on the caller site. // Take some care to avoid sharing string data (if string impl is cow) DbUpdTask(Op _op, const string& ud, const string& un, Xapian::Document *d, size_t tl, string& rztxt) : op(_op), udi(ud.begin(), ud.end()), uniterm(un.begin(), un.end()), doc(d), txtlen(tl) { rawztext.swap(rztxt); } // Udi and uniterm equivalently designate the doc Op op; string udi; string uniterm; Xapian::Document *doc; // txtlen is used to update the flush interval. It's -1 for a // purge because we actually don't know it, and the code fakes a // text length based on the term count. size_t txtlen; string rawztext; // Compressed doc text }; #endif // IDX_THREADS class TextSplitDb; // A class for data and methods that would have to expose // Xapian-specific stuff if they were in Rcl::Db. There could actually be // 2 different ones for indexing or query as there is not much in // common. class Db::Native { public: Db *m_rcldb; // Parent bool m_isopen; bool m_iswritable; bool m_noversionwrite; //Set if open failed because of version mismatch! bool m_storetext{false}; #ifdef IDX_THREADS WorkQueue<DbUpdTask*> m_wqueue; std::mutex m_mutex; long long m_totalworkns; bool m_havewriteq; void maybeStartThreads(); #endif // IDX_THREADS // Indexing Xapian::WritableDatabase xwdb; // Querying (active even if the wdb is too) Xapian::Database xrdb; Native(Db *db); ~Native(); #ifdef IDX_THREADS friend void *DbUpdWorker(void*); #endif // IDX_THREADS void openWrite(const std::string& dir, Db::OpenMode mode); void openRead(const string& dir); // Determine if an existing index is of the full-text-storing kind // by looking at the index metadata. Stores the result in m_storetext void storesDocText(Xapian::Database&); // Final steps of doc update, part which need to be single-threaded bool addOrUpdateWrite(const string& udi, const string& uniterm, Xapian::Document *doc, size_t txtlen , const string& rawztext); /** Delete all documents which are contained in the input document, * which must be a file-level one. * * @param onlyOrphans if true, only delete documents which have * not the same signature as the input. This is used to delete docs * which do not exist any more in the file after an update, for * example the tail messages after a folder truncation). If false, * delete all. * @param udi the parent document identifier. * @param uniterm equivalent to udi, passed just to avoid recomputing. 
*/ bool purgeFileWrite(bool onlyOrphans, const string& udi, const string& uniterm); bool getPagePositions(Xapian::docid docid, vector<int>& vpos); int getPageNumberForPosition(const vector<int>& pbreaks, int pos); bool dbDataToRclDoc(Xapian::docid docid, std::string &data, Doc &doc, bool fetchtext = false); size_t whatDbIdx(Xapian::docid id); Xapian::docid whatDbDocid(Xapian::docid); /** Retrieve Xapian::docid, given unique document identifier, * using the posting list for the derived term. * * @param udi the unique document identifier (opaque hashed path+ipath). * @param idxi the database index, at query time, when using external * databases. * @param[out] xdoc the xapian document. * @return 0 if not found */ Xapian::docid getDoc(const string& udi, int idxi, Xapian::Document& xdoc); /** Retrieve unique document identifier for given Xapian document, * using the document termlist */ bool xdocToUdi(Xapian::Document& xdoc, string &udi); /** Check if doc is indexed by term */ bool hasTerm(const string& udi, int idxi, const string& term); /** Update existing Xapian document for pure extended attrs change */ bool docToXdocXattrOnly(TextSplitDb *splitter, const string &udi, Doc &doc, Xapian::Document& xdoc); /** Remove all terms currently indexed for field defined by idx prefix */ bool clearField(Xapian::Document& xdoc, const string& pfx, Xapian::termcount wdfdec); /** Check if term wdf is 0 and remove term if so */ bool clearDocTermIfWdf0(Xapian::Document& xdoc, const string& term); /** Compute list of subdocuments for a given udi. We look for documents * indexed by a parent term matching the udi, the posting list for the * parentterm(udi) (As suggested by James Aylett) * * Note that this is not currently recursive: all subdocs are supposed * to be children of the file doc. * Ie: in a mail folder, all messages, attachments, attachments of * attached messages etc. must have the folder file document as * parent. * * Finer grain parent-child relationships are defined by the * indexer (rcldb user), using the ipath. * */ bool subDocs(const string &udi, int idxi, vector<Xapian::docid>& docids); /** Matcher */ bool idxTermMatch_p(int typ_sens,const string &lang,const std::string &term, std::function<bool(const std::string& term, Xapian::termcount colfreq, Xapian::doccount termfreq)> client, const string& field); /** Check if a page position list is defined */ bool hasPages(Xapian::docid id); std::string rawtextMetaKey(Xapian::docid did) { // Xapian's Olly Betts avises to use a key which will // sort the same as the docid (which we do), and to // use Xapian's pack.h:pack_uint_preserving_sort() which is // efficient but hard to read. I'd wager that this // does not make much of a difference. 10 ascii bytes // gives us 10 billion docs, which is enough (says I). char buf[30]; sprintf(buf, "%010d", did); return buf; } bool getRawText(Xapian::docid docid, string& rawtext); void deleteDocument(Xapian::docid docid) { string metareason; XAPTRY(xwdb.set_metadata(rawtextMetaKey(docid), string()), xwdb, metareason); if (!metareason.empty()) { LOGERR("deleteDocument: set_metadata error: " << metareason << "\n"); // not fatal } xwdb.delete_document(docid); } }; // This is the word position offset at which we index the body text // (abstract, keywords, etc.. 
are stored before this) static const unsigned int baseTextPosition = 100000; } #endif /* _rcldb_p_h_included_ */ ��������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldoc.cpp����������������������������������������������������������������������0000644�0001750�0001750�00000011304�13533651561�013053� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2007-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "rcldoc.h" #include "log.h" #include "rclutil.h" using namespace std; namespace Rcl { const string Doc::keyabs("abstract"); const string Doc::keyapptg("rclaptg"); const string Doc::keyau("author"); const string Doc::keybcknd("rclbes"); const string Doc::keybght("beagleHitType"); const string Doc::keycc("collapsecount"); const string Doc::keychildurl("childurl"); const string Doc::keydmt("dmtime"); const string Doc::keyds("dbytes"); const string Doc::keyfmt("fmtime"); const string Doc::keyfn("filename"); const string Doc::keytcfn("containerfilename"); const string Doc::keyfs("fbytes"); const string Doc::keyipt("ipath"); const string Doc::keykw("keywords"); const string Doc::keymd5("md5"); const string Doc::keymt("mtime"); const string Doc::keyoc("origcharset"); const string Doc::keypcs("pcbytes"); const string Doc::keyrr("relevancyrating"); const string Doc::keysig("sig"); const string Doc::keysz("size"); const string Doc::keytp("mtype"); const string Doc::keytt("title"); const string Doc::keyudi("rcludi"); const string Doc::keyurl("url"); void Doc::dump(bool dotext) const { LOGDEB("Rcl::Doc::dump: url: [" << url << "]\n"); LOGDEB("Rcl::Doc::dump: idxurl: [" << idxurl << "]\n"); LOGDEB("Rcl::Doc::dump: ipath: [" << ipath << "]\n"); LOGDEB("Rcl::Doc::dump: mimetype: [" << mimetype << "]\n"); LOGDEB("Rcl::Doc::dump: fmtime: [" << fmtime << "]\n"); LOGDEB("Rcl::Doc::dump: dmtime: [" << dmtime << "]\n"); LOGDEB("Rcl::Doc::dump: origcharset: [" << origcharset << "]\n"); LOGDEB("Rcl::Doc::dump: syntabs: [" << syntabs << "]\n"); LOGDEB("Rcl::Doc::dump: pcbytes: [" << pcbytes << "]\n"); LOGDEB("Rcl::Doc::dump: fbytes: [" << fbytes << "]\n"); LOGDEB("Rcl::Doc::dump: dbytes: [" << dbytes << "]\n"); LOGDEB("Rcl::Doc::dump: sig: [" << sig << "]\n"); LOGDEB("Rcl::Doc::dump: pc: [" << pc << "]\n"); LOGDEB("Rcl::Doc::dump: xdocid: [" << (unsigned 
long)xdocid << "]\n"); for (const auto& e : meta) { LOGDEB("Rcl::Doc::dump: meta[" << e.first <<"]->["<< e.second << "]\n"); } if (dotext) LOGDEB("Rcl::Doc::dump: text: \n[" << text << "]\n"); } // Copy ensuring no shared string data, for threading issues. void Doc::copyto(Doc *d) const { d->url.assign(url.begin(), url.end()); d->idxurl.assign(idxurl.begin(), idxurl.end()); d->idxi = idxi; d->ipath.assign(ipath.begin(), ipath.end()); d->mimetype.assign(mimetype.begin(), mimetype.end()); d->fmtime.assign(fmtime.begin(), fmtime.end()); d->dmtime.assign(dmtime.begin(), dmtime.end()); d->origcharset.assign(origcharset.begin(), origcharset.end()); map_ss_cp_noshr(meta, &d->meta); d->syntabs = syntabs; d->pcbytes.assign(pcbytes.begin(), pcbytes.end()); d->fbytes.assign(fbytes.begin(), fbytes.end()); d->dbytes.assign(dbytes.begin(), dbytes.end()); d->sig.assign(sig.begin(), sig.end()); d->text.assign(text.begin(), text.end()); d->pc = pc; d->xdocid = xdocid; d->haspages = haspages; d->haschildren = haschildren; d->onlyxattr = onlyxattr; } static const string cstr_fileu("file://"); bool docsToPaths(vector<Rcl::Doc> &docs, vector<string> &paths) { for (const auto& idoc : docs) { string backend; idoc.getmeta(Rcl::Doc::keybcknd, &backend); // This only makes sense for file system files: beagle docs are // always up to date because they can't be updated in the cache, // only added/removed. Same remark as made inside internfile, we // need a generic way to handle backends. if (!backend.empty() && backend.compare("FS")) continue; // Filesystem document. The url has to be like file:// if (idoc.url.find(cstr_fileu) != 0) { LOGERR("idx::docsToPaths: FS backend and non fs url: [" << idoc.url << "]\n"); continue; } paths.push_back(idoc.url.substr(7, string::npos)); } return true; } } ����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/stemdb.cpp����������������������������������������������������������������������0000644�0001750�0001750�00000005463�13533651561�013074� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ /** * Management of the auxiliary databases listing stems and their expansion * terms */ #include "autoconfig.h" #include "safeunistd.h" #include <algorithm> #include <map> #include <iostream> #include <string> using namespace std; #include <xapian.h> #include "stemdb.h" #include "log.h" #include "smallut.h" #include "synfamily.h" #include "unacpp.h" #include "rclconfig.h" namespace Rcl { /** * Expand for one or several languages */ bool StemDb::stemExpand(const std::string& langs, const std::string& _term, vector<string>& result) { vector<string> llangs; stringToStrings(langs, llangs); // The stemdb keys may have kept their diacritics or not but they // are always lower-case. It would be more logical for the term // transformers to perform before doing the stemming, but this // would be inefficient when there are several stemming languages string term; unacmaybefold(_term, term, "UTF-8", UNACOP_FOLD); for (vector<string>::const_iterator it = llangs.begin(); it != llangs.end(); it++) { SynTermTransStem stemmer(*it); XapComputableSynFamMember expander(getdb(), synFamStem, *it, &stemmer); (void)expander.synExpand(term, result); } if (!o_index_stripchars) { string unac; unacmaybefold(term, unac, "UTF-8", UNACOP_UNAC); // Expand the unaccented stem, using the unaccented stem // db. Because it's a different db, We need to do it even if // the input has no accent (unac == term) for (vector<string>::const_iterator it = llangs.begin(); it != llangs.end(); it++) { SynTermTransStem stemmer(*it); XapComputableSynFamMember expander(getdb(), synFamStemUnac, *it, &stemmer); (void)expander.synExpand(unac, result); } } if (result.empty()) result.push_back(term); sort(result.begin(), result.end()); vector<string>::iterator uit = unique(result.begin(), result.end()); result.resize(uit - result.begin()); LOGDEB1("stemExpand:" << (langs) << ": " << (term) << " -> " << (stringsToString(result)) << "\n" ); return true; } } �������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/rcldb.h�������������������������������������������������������������������������0000644�0001750�0001750�00000055641�13533651561�012354� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _DB_H_INCLUDED_ #define _DB_H_INCLUDED_ #include "autoconfig.h" #include <stdint.h> #include <string> #include <vector> #include <memory> #include "cstr.h" #include "rcldoc.h" #include "stoplist.h" #include "rclconfig.h" #include "utf8iter.h" #include "textsplit.h" #include "syngroups.h" using std::string; using std::vector; // rcldb defines an interface for a 'real' text database. The current // implementation uses xapian only, and xapian-related code is in rcldb.cpp // If support was added for other backend, the xapian code would be moved in // rclxapian.cpp, another file would be created for the new backend, and the // configuration/compile/link code would be adjusted to allow choosing. There // is no plan for supporting multiple different backends. // // In no case does this try to implement a useful virtualized text-db interface // The main goal is simplicity and good matching to usage inside the recoll // user interface. In other words, this is not exhaustive or well-designed or // reusable. // // Unique Document Identifier: uniquely identifies a document in its // source storage (file system or other). Used for up to date checks // etc. "udi". Our user is responsible for making sure it's not too // big, cause it's stored as a Xapian term (< 150 bytes would be // reasonable) class RclConfig; class Aspell; namespace Rcl { // Omega compatible values. We leave a hole for future omega values. Not sure // it makes any sense to keep any level of omega compat given that the index // is incompatible anyway. enum value_slot { // Omega-compatible values: VALUE_LASTMOD = 0, // 4 byte big endian value - seconds since 1970. VALUE_MD5 = 1, // 16 byte MD5 checksum of original document. VALUE_SIZE = 2, // sortable_serialise(<file size in bytes>) ////////// Recoll only: // Doc sig as chosen by app (ex: mtime+size VALUE_SIG = 10, }; class SearchData; class TermIter; class Query; /** Used for returning result lists for index terms matching some criteria */ class TermMatchEntry { public: TermMatchEntry() : wcf(0) {} TermMatchEntry(const string& t, int f, int d) : term(t), wcf(f), docs(d) {} TermMatchEntry(const string& t) : term(t), wcf(0) {} bool operator==(const TermMatchEntry &o) const { return term == o.term; } bool operator<(const TermMatchEntry &o) const { return term < o.term; } string term; int wcf; // Total count of occurrences within collection. int docs; // Number of documents countaining term. 
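    // Note: these counts are presumably filled from Xapian's collection
    // frequency and term/document frequency (cf. the colfreq and termfreq
    // parameters of Native::idxTermMatch_p() in rcldb_p.h).
    //
    // Illustrative use of an expansion result (not part of the original
    // source; "recol*" is a made-up pattern):
    //
    //   Rcl::TermMatchResult res;
    //   db.termMatch(Rcl::Db::ET_WILD, "", "recol*", res);
    //   for (const auto& e : res.entries)
    //       std::cout << e.term << " " << e.wcf << " " << e.docs << "\n";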
}; /** Term match result list header: statistics and global info */ class TermMatchResult { public: TermMatchResult() { clear(); } void clear() { entries.clear(); } // Term expansion vector<TermMatchEntry> entries; // If a field was specified, this is the corresponding index prefix string prefix; }; class DbStats { public: DbStats() :dbdoccount(0), dbavgdoclen(0), mindoclen(0), maxdoclen(0) {} // Index-wide stats unsigned int dbdoccount; double dbavgdoclen; size_t mindoclen; size_t maxdoclen; vector<string> failedurls; /* Only set if requested */ }; inline bool has_prefix(const string& trm) { if (o_index_stripchars) { return !trm.empty() && 'A' <= trm[0] && trm[0] <= 'Z'; } else { return !trm.empty() && trm[0] == ':'; } } inline string strip_prefix(const string& trm) { if (trm.empty()) return trm; string::size_type st = 0; if (o_index_stripchars) { st = trm.find_first_not_of("ABCDEFIJKLMNOPQRSTUVWXYZ"); if (st == string::npos) return string(); } else { if (has_prefix(trm)) { st = trm.find_last_of(":") + 1; } else { return trm; } } return trm.substr(st); } inline string wrap_prefix(const string& pfx) { if (o_index_stripchars) { return pfx; } else { return cstr_colon + pfx + cstr_colon; } } /** * Wrapper class for the native database. */ class Db { public: // A place for things we don't want visible here. class Native; friend class Native; /* General stuff (valid for query or update) ****************************/ Db(const RclConfig *cfp); ~Db(); enum OpenMode {DbRO, DbUpd, DbTrunc}; enum OpenError {DbOpenNoError, DbOpenMainDb, DbOpenExtraDb}; bool open(OpenMode mode, OpenError *error = 0); bool close(); bool isopen(); /** Get explanation about last error */ string getReason() const {return m_reason;} /** Return all possible stemmer names */ static vector<string> getStemmerNames(); /** Return existing stemming databases */ vector<string> getStemLangs(); /** Check if index stores the documents' texts. Only valid after open */ bool storesDocText(); /** Test word for spelling correction candidate: not too long, no * special chars... * @param with_aspell test for use with aspell, else for xapian speller */ static bool isSpellingCandidate(const string& term, bool with_aspell=true) { if (term.empty() || term.length() > 50 || has_prefix(term)) return false; Utf8Iter u8i(term); if (with_aspell) { // If spelling with aspell, neither katakana nor other cjk // scripts are candidates if (TextSplit::isCJK(*u8i) || TextSplit::isKATAKANA(*u8i)) return false; } else { #ifdef TESTING_XAPIAN_SPELL // The Xapian speller (purely proximity-based) can be used // for Katakana (when split as words which is not always // completely feasible because of separator-less // compounds). Currently we don't try to use the Xapian // speller with other scripts with which it would be usable // in the absence of aspell (it would indeed be better // than nothing with e.g. european languages). This would // require a few more config variables, maybe one day. 
if (!TextSplit::isKATAKANA(*u8i)) { return false; } #else return false; #endif } if (term.find_first_of(" !\"#$%&()*+,-./0123456789:;<=>?@[\\]^_`{|}~") != string::npos) return false; return true; } /** Return spelling suggestion */ bool getSpellingSuggestions(const string& word, std::vector<std::string>& suggs); /* The next two, only for searchdata, should be somehow hidden */ /* Return configured stop words */ const StopList& getStopList() const {return m_stops;} /* Field name to prefix translation (ie: author -> 'A') */ bool fieldToTraits(const string& fldname, const FieldTraits **ftpp, bool isquery = false); /* Update-related methods ******************************************/ /** Test if the db entry for the given udi is up to date. * * This is done by comparing the input and stored sigs. This is * used both when indexing and querying (before opening a document * using stale info). * * **This assumes that the udi pertains to the main index (idxi==0).** * * Side-effect when the db is writeable and the document up to * date: set the existence flag for the file document and all * subdocs if any (for later use by 'purge()') * * @param udi Unique Document Identifier (as chosen by indexer). * @param sig New signature (as computed by indexer). * @param xdocid[output] Non-zero if doc existed. Should be considered * as opaque, to be used for a possible later call to setExistingFlags() * Note that if inplaceReset is set, the return value is non-zero but not * an actual docid, it's only used as a flag in this case. * @param osig[output] old signature. */ bool needUpdate(const string &udi, const string& sig, unsigned int *xdocid = 0, std::string *osig = 0); /** Set the existance flags for the document and its eventual subdocuments * * This can be called by the indexer after needUpdate() has returned true, * if the indexer does not wish to actually re-index (e.g.: the doc is * known to cause errors). */ void setExistingFlags(const string& udi, unsigned int docid); /** Indicate if we are doing a systematic reindex. This complements needUpdate() return */ bool inFullReset() {return o_inPlaceReset || m_mode == DbTrunc;} /** Add or update document identified by unique identifier. * @param config Config object to use. Can be the same as the member config * or a clone, to avoid sharing when called in multithread context. * @param udi the Unique Document Identifier is opaque to us. * Maximum size 150 bytes. * @param parent_udi the UDI for the container document. In case of complex * embedding, this is not always the immediate parent but the UDI for * the container file (which may be a farther ancestor). It is * used for purging subdocuments when a file ceases to exist and * to set the existence flags of all subdocuments of a container * that is found to be up to date. In other words, the * parent_udi is the UDI for the ancestor of the document which * is subject to needUpdate() and physical existence tests (some * kind of file equivalent). Empty for top-level docs. Should * probably be renamed container_udi. * @param doc container for document data. Should have been filled as * much as possible depending on the document type. * ** doc will be modified in a destructive way ** */ bool addOrUpdate(const string &udi, const string &parent_udi, Doc &doc); #ifdef IDX_THREADS void waitUpdIdle(); #endif /** Delete document(s) for given UDI, including subdocs */ bool purgeFile(const string &udi, bool *existed = 0); /** Delete subdocs with an out of date sig. 
We do this to purge obsolete subdocs during a partial update where no general purge will be done */ bool purgeOrphans(const string &udi); /** Remove documents that no longer exist in the file system. This * depends on the update map, which is built during * indexing (needUpdate() / addOrUpdate()). * * This should only be called after a full walk of * the file system, else the update map will not be complete, and * many documents will be deleted that shouldn't, which is why this * has to be called externally, rcldb can't know if the indexing * pass was complete or partial. */ bool purge(); /** Create stem expansion database for given languages. */ bool createStemDbs(const std::vector<std::string> &langs); /** Delete stem expansion database for given language. */ bool deleteStemDb(const string &lang); /* Query-related methods ************************************/ /** Return total docs in db */ int docCnt(); /** Return count of docs which have an occurrence of term */ int termDocCnt(const string& term); /** Add extra Xapian database for querying. * @param dir must point to something which can be passed as parameter * to a Xapian::Database constructor (directory or stub). */ bool addQueryDb(const string &dir); /** Remove extra database. if dir == "", remove all. */ bool rmQueryDb(const string &dir); /** Check if document comes from the main index (this is used to decide if we can update the index for it */ bool fromMainIndex(const Doc& doc); /** Retrieve the stored doc text. This returns false if the index does not store raw text or other problems (discriminate with storesDocText(). On success, the data is stored in doc.text */ bool getDocRawText(Doc& doc); /** Retrieve an index designator for the document result. This is used * by the GUI document history feature for remembering where a * doc comes from and allowing later retrieval (if the ext index * is still active...). */ std::string whatIndexForResultDoc(const Doc& doc); /** Tell if directory seems to hold xapian db */ static bool testDbDir(const string &dir, bool *stripped = 0); /** Return the index terms that match the input string * Expansion is performed either with either wildcard or regexp processing * Stem expansion is performed if lang is not empty * * @param typ_sens defines the kind of expansion: none, wildcard, * regexp or stemming. "none" may still expand case, * diacritics and synonyms, depending on the casesens, diacsens and * synexp flags. * @param lang sets the stemming language(s). Can be a space-separated list * @param term is the term to expand * @param result is the main output * @param max defines the maximum result count * @param field if set, defines the field within with the expansion should * be performed. Only used for wildcards and regexps, stemming is * always global. If this is set, the resulting output terms * will be appropriately prefixed and the prefix value will be set * in the TermMatchResult header */ enum MatchType {ET_NONE=0, ET_WILD=1, ET_REGEXP=2, ET_STEM=3, ET_DIACSENS=8, ET_CASESENS=16, ET_SYNEXP=32, ET_PATHELT=64}; int matchTypeTp(int tp) { return tp & 7; } bool termMatch(int typ_sens, const string &lang, const string &term, TermMatchResult& result, int max = -1, const string& field = "", vector<string> *multiwords = 0); bool dbStats(DbStats& stats, bool listFailed); /** Return min and max years for doc mod times in db */ bool maxYearSpan(int *minyear, int *maxyear); /** Return all mime types in index. This can be different from the ones defined in the config because of 'file' command usage. 
Inserts the types at the end of the parameter */ bool getAllDbMimeTypes(std::vector<std::string>&); /** Wildcard expansion specific to file names. Internal/sdata use only */ bool filenameWildExp(const string& exp, vector<string>& names, int max); /** Set parameters for synthetic abstract generation */ void setAbstractParams(int idxTrunc, int synthLen, int syntCtxLen); int getAbsCtxLen() const { return m_synthAbsWordCtxLen; } int getAbsLen() const { return m_synthAbsLen; } /** Get document for given udi and db index * * Used to retrieve ancestor documents. * @param udi The unique document identifier. * @param idxdoc A document from the same database as an opaque way to pass * the database id (e.g.: when looking for parent in a multi-database * context). * @param[out] doc The output Recoll document. * @return True for success. */ bool getDoc(const string &udi, const Doc& idxdoc, Doc &doc); /** Get document for given udi and index directory. * * Used by the 'history' feature. This supposes that the extra db * is still active. * @param udi The unique document identifier. * @param dbdir The index directory, from storage, as returned by * whatIndexForResultDoc() at the time of the query. Can be * empty to mean "main index" (allows the history to avoid * storing the main dbdir value). * @param[out] doc The output Recoll document. * @return True for success. */ bool getDoc(const string &udi, const std::string& dbdir, Doc &doc); /** Test if documents has sub-documents. * * This can always be detected for file-level documents, using the * postlist for the parent term constructed with udi. * * For non file-level documents (e.g.: does an email inside an * mbox have attachments ?), detection is dependant on the filter * having set an appropriate flag at index time. Higher level code * can't detect it because the doc for the parent may have been * seen before any children. The flag is stored as a value in the * index. */ bool hasSubDocs(const Doc &idoc); /** Get subdocuments of given document. * * For file-level documents, these are all docs indexed by the * parent term built on idoc.udi. For embedded documents, the * parent doc is looked for, then its subdocs list is * filtered using the idoc ipath as a prefix. */ bool getSubDocs(const Doc& idoc, vector<Doc>& subdocs); /** Get container (top level file) document. * * If the input is not a subdocument, this returns a copy of the input. */ bool getContainerDoc(const Doc &idoc, Doc& ctdoc); /** Get duplicates (md5) of document */ bool docDups(const Doc& idoc, std::vector<Doc>& odocs); /* The following are mainly for the aspell module */ /** Whole term list walking. */ TermIter *termWalkOpen(); bool termWalkNext(TermIter *, string &term); void termWalkClose(TermIter *); /** Test term existence */ bool termExists(const string& term); /** Test if terms stem to different roots. */ bool stemDiffers(const string& lang, const string& term, const string& base); const RclConfig *getConf() {return m_config;} /** Activate the "in place reset" mode where all documents are considered as needing update. This is a global/per-process option, and can't be reset. It should be set at the start of the indexing pass. 2012-10: no idea why this is done this way... */ static void setInPlaceReset() {o_inPlaceReset = true;} /** Flush interval get/set. 
This is used by the first indexing pass to override the config value and flush more rapidly initially so that the user can quickly play with queries */ int getFlushMb() { return m_flushMb; } void setFlushMb(int mb) { m_flushMb = mb; } bool doFlush(); // Use empty fn for no synonyms bool setSynGroupsFile(const std::string& fn); // Mark all documents with an UDI having input as prefix as // existing. Only works if the UDIs for the store are // hierarchical of course. Used by FsIndexer to avoid purging // files for a topdir which is on a removable file system and // currently unmounted (topdir does not exist or is empty. bool udiTreeMarkExisting(const string& udi); /* This has to be public for access by embedded Query::Native */ Native *m_ndb; private: const RclConfig *m_config; string m_reason; // Error explanation // Xapian directories for additional databases to query vector<string> m_extraDbs; OpenMode m_mode; // File existence vector: this is filled during the indexing pass. Any // document whose bit is not set at the end is purged vector<bool> updated; // Text bytes indexed since beginning long long m_curtxtsz; // Text bytes at last flush long long m_flushtxtsz; // Text bytes at last fsoccup check long long m_occtxtsz; // First fs occup check ? int m_occFirstCheck; // Synonym groups. There is no strict reason that this has to be // an Rcl::Db member, as it is only used when building each It // could be a SearchData member, or even a parameter to // Query::setQuery(). Otoh, building the syngroups structure from // a file may be expensive and it's unlikely to change with every // query, so it makes sense to cache it, and Rcl::Db is not a bad // place for this. SynGroups m_syngroups; // Aspell object if needed Aspell *m_aspell = nullptr; /*************** * Parameters cached out of the configuration files. Logically const * after init */ // Stop terms: those don't get indexed. StopList m_stops; // Truncation length for stored meta fields int m_idxMetaStoredLen; // This is how long an abstract we keep or build from beginning of // text when indexing. It only has an influence on the size of the // db as we are free to shorten it again when displaying int m_idxAbsTruncLen; // Document text truncation length int m_idxTextTruncateLen{0}; // This is the size of the abstract that we synthetize out of query // term contexts at *query time* int m_synthAbsLen; // This is how many words (context size) we keep around query terms // when building the abstract int m_synthAbsWordCtxLen; // Flush threshold. Megabytes of text indexed before we flush. int m_flushMb; // Maximum file system occupation percentage int m_maxFsOccupPc; // Database directory string m_basedir; // When this is set, all documents are considered as needing a reindex. // This implements an alternative to just erasing the index before // beginning, with the advantage that, for small index formats updates, // between releases the index remains available while being recreated. 
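    // (Set through the static setInPlaceReset() above and tested by
    // inFullReset(); being a per-process flag, it is never cleared.)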
static bool o_inPlaceReset; /******* End logical constnesss */ #ifdef IDX_THREADS friend void *DbUpdWorker(void*); #endif // IDX_THREADS // Internal form of setExistingFlags: no locking void i_setExistingFlags(const string& udi, unsigned int docid); // Internal form of close, can be called during destruction bool i_close(bool final); // Reinitialize when adding/removing additional dbs bool adjustdbs(); bool idxTermMatch(int typ_sens, const string &lang, const string &term, TermMatchResult& result, int max = -1, const string& field = cstr_null); // Flush when idxflushmb is reached bool maybeflush(int64_t moretext); bool docExists(const string& uniterm); bool getDoc(const std::string& udi, int idxi, Doc& doc); /* Copyconst and assignement private and forbidden */ Db(const Db &) {} Db& operator=(const Db &) {return *this;}; }; // This has to go somewhere, and as it needs the Xapian version, this is // the most reasonable place. string version_string(); extern const string pathelt_prefix; extern const string mimetype_prefix; extern const string unsplitFilenameFieldName; extern string start_of_field_term; extern string end_of_field_term; } #endif /* _DB_H_INCLUDED_ */ �����������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/searchdata.h��������������������������������������������������������������������0000644�0001750�0001750�00000042172�13533651561�013360� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SEARCHDATA_H_INCLUDED_ #define _SEARCHDATA_H_INCLUDED_ /** * Structures to hold data coming almost directly from the gui * and handle its translation to Xapian queries. * This is not generic code, it reflects the choices made for the user * interface, and it also knows some specific of recoll's usage of Xapian * (ie: term prefixes) */ #include <string> #include <vector> #include <ostream> #include "rcldb.h" #include <memory> #include "smallut.h" #include "cstr.h" #include "hldata.h" class RclConfig; class AdvSearch; namespace Rcl { /** Search clause types */ enum SClType { SCLT_AND, SCLT_OR, SCLT_FILENAME, SCLT_PHRASE, SCLT_NEAR, SCLT_PATH, SCLT_RANGE, SCLT_SUB, }; class SearchDataClause; class SearchDataClauseDist; /** A SearchData object represents a Recoll user query, for translation into a Xapian query tree. This could probably better called a 'question'. This is a list of SearchDataClause objects combined through either OR or AND. 
Clauses either reflect user entry in a query field: some text, a clause type (AND/OR/NEAR etc.), possibly a distance, or are the result of parsing query language input. A clause can also point to another SearchData representing a subquery. The content of each clause when added may not be fully parsed yet (may come directly from a gui field). It will be parsed and may be translated to several queries in the Xapian sense, for exemple several terms and phrases as would result from ["this is a phrase" term1 term2] . This is why the clauses also have an AND/OR/... type. They are an intermediate form between the primary user input and the final Xapian::Query tree. For example, a phrase clause could be added either explicitly or using double quotes: {SCLT_PHRASE, [this is a phrase]} or as {SCLT_XXX, ["this is a phrase"]} */ class SearchData { public: SearchData(SClType tp, const string& stemlang) : m_tp(tp), m_stemlang(stemlang) { if (m_tp != SCLT_OR && m_tp != SCLT_AND) m_tp = SCLT_OR; commoninit(); } SearchData() : m_tp(SCLT_AND) { commoninit(); } ~SearchData(); /** Is there anything but a file name search in here ? */ bool fileNameOnly(); /** Do we have wildcards anywhere apart from filename searches ? */ bool haveWildCards() {return m_haveWildCards;} /** Translate to Xapian query. rcldb knows about the void* */ bool toNativeQuery(Rcl::Db &db, void *); /** We become the owner of cl and will delete it */ bool addClause(SearchDataClause* cl); /** If this is a simple query (one field only, no distance clauses), * add phrase made of query terms to query, so that docs containing the * user terms in order will have higher relevance. This must be called * before toNativeQuery(). * @param threshold: don't use terms more frequent than the value * (proportion of docs where they occur) */ bool maybeAddAutoPhrase(Rcl::Db &db, double threshold); const std::string& getStemLang() {return m_stemlang;} void setMinSize(size_t size) {m_minSize = size;} void setMaxSize(size_t size) {m_maxSize = size;} /** Set date span for filtering results */ void setDateSpan(DateInterval *dip) {m_dates = *dip; m_haveDates = true;} /** Add file type for filtering results */ void addFiletype(const std::string& ft) {m_filetypes.push_back(ft);} /** Add file type to not wanted list */ void remFiletype(const std::string& ft) {m_nfiletypes.push_back(ft);} /** Retrieve error description */ std::string getReason() {return m_reason;} /** Return term expansion data. Mostly used by caller for highlighting */ void getTerms(HighlightData& hldata) const; /** * Get/set the description field which is retrieved from xapian after * initializing the query. It is stored here for usage in the GUI. */ std::string getDescription() {return m_description;} void setDescription(const std::string& d) {m_description = d;} /** Return an XML version of the contents, for storage in search history by the GUI */ string asXML(); void setTp(SClType tp) { m_tp = tp; } SClType getTp() { return m_tp; } void setMaxExpand(int max) { m_softmaxexpand = max; } bool getAutoDiac() {return m_autodiacsens;} bool getAutoCase() {return m_autocasesens;} int getMaxExp() {return m_maxexp;} int getMaxCl() {return m_maxcl;} int getSoftMaxExp() {return m_softmaxexpand;} void dump(std::ostream& o) const; friend class ::AdvSearch; private: // Combine type. Only SCLT_AND or SCLT_OR here SClType m_tp; // The clauses std::vector<SearchDataClause*> m_query; // Restricted set of filetypes if not empty. 
std::vector<std::string> m_filetypes; // Excluded set of file types if not empty std::vector<std::string> m_nfiletypes; // Autophrase if set. Can't be part of the normal chain because // it uses OP_AND_MAYBE std::shared_ptr<SearchDataClauseDist> m_autophrase; // Special stuff produced by input which looks like a clause but means // something else (date and size specs) bool m_haveDates; DateInterval m_dates; // Restrict to date interval size_t m_maxSize; size_t m_minSize; // Printable expanded version of the complete query, retrieved/set // from rcldb after the Xapian::setQuery() call std::string m_description; // Error diag std::string m_reason; bool m_haveWildCards; std::string m_stemlang; // Parameters set at the start of ToNativeQuery because they need // an rclconfig. Actually this does not make sense and it would be // simpler to just pass an rclconfig to the constructor; bool m_autodiacsens; bool m_autocasesens; int m_maxexp; int m_maxcl; // Parameters which are not part of the main query data but may influence // translation in special cases. // Maximum TermMatch (e.g. wildcard) expansion. This is normally set // from the configuration with a high default, but may be set to a lower // value during "find-as-you-type" operations from the GUI int m_softmaxexpand; // Collapse bogus subqueries generated by the query parser, mostly // so that we can check if this is an autophrase candidate (else // Xapian will do it anyway) void simplify(); bool expandFileTypes(Rcl::Db &db, std::vector<std::string>& exptps); bool clausesToQuery(Rcl::Db &db, SClType tp, std::vector<SearchDataClause*>& query, string& reason, void *d); void commoninit(); /* Copyconst and assignment private and forbidden */ SearchData(const SearchData &) {} SearchData& operator=(const SearchData&) {return *this;}; }; class SearchDataClause { public: enum Modifier {SDCM_NONE=0, SDCM_NOSTEMMING=0x1, SDCM_ANCHORSTART=0x2, SDCM_ANCHOREND=0x4, SDCM_CASESENS=0x8, SDCM_DIACSENS=0x10, SDCM_NOTERMS=0x20, // Don't include terms for highlighting SDCM_NOSYNS = 0x40, // Don't perform synonym expansion // Aargh special case. pathelts are case/diac-sensitive // even in a stripped index SDCM_PATHELT = 0x80, }; enum Relation {REL_CONTAINS, REL_EQUALS, REL_LT, REL_LTE, REL_GT, REL_GTE}; SearchDataClause(SClType tp) : m_tp(tp), m_parentSearch(0), m_haveWildCards(0), m_modifiers(SDCM_NONE), m_weight(1.0), m_exclude(false), m_rel(REL_CONTAINS) {} virtual ~SearchDataClause() {} virtual bool toNativeQuery(Rcl::Db &db, void *) = 0; bool isFileName() const {return m_tp == SCLT_FILENAME ? true: false;} virtual std::string getReason() const {return m_reason;} virtual void getTerms(HighlightData&) const {} SClType getTp() const { return m_tp; } void setTp(SClType tp) { m_tp = tp; } void setParent(SearchData *p) { m_parentSearch = p; } string getStemLang() { return (m_modifiers & SDCM_NOSTEMMING) || m_parentSearch == 0 ? cstr_null : m_parentSearch->getStemLang(); } bool getAutoDiac() { return m_parentSearch ? m_parentSearch->getAutoDiac() : false; } bool getAutoCase() { return m_parentSearch ? m_parentSearch->getAutoCase() : true; } int getMaxExp() { return m_parentSearch ? m_parentSearch->getMaxExp() : 10000; } size_t getMaxCl() { return m_parentSearch ? m_parentSearch->getMaxCl() : 100000; } int getSoftMaxExp() { return m_parentSearch ? 
m_parentSearch->getSoftMaxExp() : -1; } virtual void addModifier(Modifier mod) { m_modifiers = m_modifiers | mod; } virtual unsigned int getmodifiers() { return m_modifiers; } virtual void setWeight(float w) { m_weight = w; } virtual bool getexclude() const { return m_exclude; } virtual void setexclude(bool onoff) { m_exclude = onoff; } virtual void setrel(Relation rel) { m_rel = rel; } virtual Relation getrel() { return m_rel; } virtual void dump(std::ostream& o) const; friend class SearchData; protected: std::string m_reason; SClType m_tp; SearchData *m_parentSearch; bool m_haveWildCards; unsigned int m_modifiers; float m_weight; bool m_exclude; Relation m_rel; }; /** * "Simple" data clause with user-entered query text. This can include * multiple phrases and words, but no specified distance. */ class TermProcQ; class SearchDataClauseSimple : public SearchDataClause { public: SearchDataClauseSimple(SClType tp, const std::string& txt, const std::string& fld = std::string()) : SearchDataClause(tp), m_text(txt), m_field(fld), m_curcl(0) { m_haveWildCards = (txt.find_first_of(cstr_minwilds) != std::string::npos); } SearchDataClauseSimple(const std::string& txt, SClType tp) : SearchDataClause(tp), m_text(txt), m_curcl(0) { m_haveWildCards = (txt.find_first_of(cstr_minwilds) != std::string::npos); } virtual ~SearchDataClauseSimple() {} /** Translate to Xapian query */ virtual bool toNativeQuery(Rcl::Db &, void *); virtual void getTerms(HighlightData& hldata) const { hldata.append(m_hldata); } virtual const std::string& gettext() const { return m_text; } virtual const std::string& getfield() const { return m_field; } virtual void setfield(const string& field) { m_field = field; } virtual void dump(std::ostream& o) const; protected: std::string m_text; // Raw user entry text. std::string m_field; // Field specification if any HighlightData m_hldata; // Current count of Xapian clauses, to check against expansion limit size_t m_curcl; bool processUserString(Rcl::Db &db, const string &iq, std::string &ermsg, void* pq, int slack = 0, bool useNear = false); bool expandTerm(Rcl::Db &db, std::string& ermsg, int mods, const std::string& term, std::vector<std::string>& exp, std::string& sterm, const std::string& prefix, std::vector<std::string>* multiwords = 0); // After splitting entry on whitespace: process non-phrase element void processSimpleSpan(Rcl::Db &db, string& ermsg, const string& span, int mods, void *pq); // Process phrase/near element void processPhraseOrNear(Rcl::Db &db, string& ermsg, TermProcQ *splitData, int mods, void *pq, bool useNear, int slack); }; class SearchDataClauseRange : public SearchDataClauseSimple { public: SearchDataClauseRange(const std::string& t1, const std::string& t2, const std::string& fld = std::string()) : SearchDataClauseSimple(SCLT_RANGE, t1, fld), m_t2(t2) {} // This is for 'upgrading' a clauseSimple with eq/gt/lt... rel to // a range. Either of t1 or t2 or both can be set to the original // text, which is why they are passed as separate parameters SearchDataClauseRange(const SearchDataClauseSimple& cl, const std::string& t1, const std::string& t2) : SearchDataClauseSimple(cl) { m_text = t1; m_t2 = t2; } virtual ~SearchDataClauseRange() {} virtual void dump(std::ostream& o) const; virtual const std::string& gettext2() const { return m_t2; } virtual bool toNativeQuery(Rcl::Db &db, void *); protected: std::string m_t2; }; /** * Filename search clause. This is special because term expansion is only * performed against the unsplit file name terms. 
* * There is a big advantage in expanding only against the * field, especially for file names, because this makes searches for * "*xx" much faster (no need to scan the whole main index). */ class SearchDataClauseFilename : public SearchDataClauseSimple { public: SearchDataClauseFilename(const std::string& txt) : SearchDataClauseSimple(txt, SCLT_FILENAME) { // File name searches don't count when looking for wild cards. m_haveWildCards = false; } virtual ~SearchDataClauseFilename() {} virtual bool toNativeQuery(Rcl::Db &, void *); virtual void dump(std::ostream& o) const; }; /** * Pathname filtering clause. This is special because of history: * - Pathname filtering used to be performed as a post-processing step * done with the url fields of doc data records. * - Then it was done as special phrase searchs on path elements prefixed * with XP. * Up to this point dir filtering data was stored as part of the searchdata * object, not in the SearchDataClause tree. Only one, then a list, * of clauses where stored, and they were always ANDed together. * * In order to allow for OR searching, dir clauses are now stored in a * specific SearchDataClause, but this is still special because the field has * non-standard phrase-like processing, reflected in index storage by * an empty element representing / (as "XP"). * * A future version should use a standard phrase with an anchor to the * start if the path starts with /. As this implies an index format * change but is no important enough to warrant it, this has to wait for * the next format change. */ class SearchDataClausePath : public SearchDataClauseSimple { public: SearchDataClausePath(const std::string& txt, bool excl = false) : SearchDataClauseSimple(SCLT_PATH, txt, "dir") { m_exclude = excl; m_haveWildCards = false; } virtual ~SearchDataClausePath() {} virtual bool toNativeQuery(Rcl::Db &, void *); virtual void dump(std::ostream& o) const; }; /** * A clause coming from a NEAR or PHRASE entry field. There is only one * std::string group, and a specified distance, which applies to it. 
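 *
 * Illustrative construction (not from the original sources): a NEAR
 * clause allowing up to 2 extra words between the terms, added to a
 * top-level AND query and translated to a Xapian query:
 *
 *     auto sd = std::make_shared<Rcl::SearchData>(Rcl::SCLT_AND, "english");
 *     // SearchData takes ownership of the clause pointer.
 *     sd->addClause(new Rcl::SearchDataClauseDist(
 *                       Rcl::SCLT_NEAR, "recoll manual", 2));
 *     Xapian::Query xq;
 *     sd->toNativeQuery(db, &xq);   // db: an open Rcl::Db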
*/ class SearchDataClauseDist : public SearchDataClauseSimple { public: SearchDataClauseDist(SClType tp, const std::string& txt, int slack, const std::string& fld = std::string()) : SearchDataClauseSimple(tp, txt, fld), m_slack(slack) {} virtual ~SearchDataClauseDist() {} virtual bool toNativeQuery(Rcl::Db &, void *); virtual int getslack() const { return m_slack; } virtual void setslack(int slack) { m_slack = slack; } virtual void dump(std::ostream& o) const; private: int m_slack; }; /** Subquery */ class SearchDataClauseSub : public SearchDataClause { public: SearchDataClauseSub(std::shared_ptr<SearchData> sub) : SearchDataClause(SCLT_SUB), m_sub(sub) {} virtual bool toNativeQuery(Rcl::Db &db, void *p) { bool ret = m_sub->toNativeQuery(db, p); if (!ret) m_reason = m_sub->getReason(); return ret; } virtual void getTerms(HighlightData& hldata) const { m_sub.get()->getTerms(hldata); } virtual std::shared_ptr<SearchData> getSub() { return m_sub; } virtual void dump(std::ostream& o) const; protected: std::shared_ptr<SearchData> m_sub; }; } // Namespace Rcl #endif /* _SEARCHDATA_H_INCLUDED_ */ ������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/stoplist.h����������������������������������������������������������������������0000644�0001750�0001750�00000003127�13533651561�013137� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _STOPLIST_H_INCLUDED_ #define _STOPLIST_H_INCLUDED_ #include <set> #include <string> #ifndef NO_NAMESPACES using std::set; using std::string; namespace Rcl { #endif /** * A StopList is just a bunch of strings read from a file. * * Some of the string may contain whitespace (that's for experimentation with * stop n-grams), so we take care of dquotes while reading the file. We also * lowercase and remove accents. The source file should be utf-8. 
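 *
 * Minimal illustrative use (the file name is hypothetical):
 *
 *     Rcl::StopList stops("/path/to/stopwords.txt");
 *     if (stops.hasStops() && stops.isStop("the")) {
 *         // "the" is a stop term and will not be indexed.
 *     }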
*/ class StopList { public: StopList() {} StopList(const string &filename) {setFile(filename);} virtual ~StopList() {} bool setFile(const string &filename); bool isStop(const string &term) const; bool hasStops() const {return !m_stops.empty();} private: set<string> m_stops; }; #ifndef NO_NAMESPACES } #endif #endif /* _STOPLIST_H_INCLUDED_ */ �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/rcldb/searchdatatox.cpp���������������������������������������������������������������0000644�0001750�0001750�00000110650�13566424763�014453� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������/* Copyright (C) 2006-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ // Handle translation from rcl's SearchData structures to Xapian Queries #include "autoconfig.h" #include <stdio.h> #include <string> #include <vector> #include <algorithm> #include <sstream> #include <iostream> using namespace std; #include "xapian.h" #include "cstr.h" #include "rcldb.h" #include "rcldb_p.h" #include "searchdata.h" #include "log.h" #include "smallut.h" #include "textsplit.h" #include "unacpp.h" #include "utf8iter.h" #include "stoplist.h" #include "rclconfig.h" #include "termproc.h" #include "synfamily.h" #include "stemdb.h" #include "expansiondbs.h" #include "base64.h" #include "daterange.h" #include "rclvalues.h" namespace Rcl { static const int original_term_wqf_booster = 10; // Expand doc categories and mime type wild card expressions // // Categories are expanded against the configuration, mimetypes // against the index. 
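// For instance (hypothetical values): a category such as "media" is
// replaced by the mime types listed for it in the configuration, while a
// pattern such as "text/*" is expanded with a case- and diacritics-
// sensitive wildcard match against the "mtype" field of the index, along
// the lines of:
//
//   TermMatchResult res;
//   db.termMatch(Db::ET_WILD | Db::ET_CASESENS | Db::ET_DIACSENS,
//                string(), "text/*", res, -1, "mtype");
//
// each matched term being stripped of its index prefix before use.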
bool SearchData::expandFileTypes(Db &db, vector<string>& tps) { const RclConfig *cfg = db.getConf(); if (!cfg) { LOGFATAL("Db::expandFileTypes: null configuration!!\n"); return false; } vector<string> exptps; for (vector<string>::iterator it = tps.begin(); it != tps.end(); it++) { if (cfg->isMimeCategory(*it)) { vector<string>tps; cfg->getMimeCatTypes(*it, tps); exptps.insert(exptps.end(), tps.begin(), tps.end()); } else { TermMatchResult res; string mt = stringtolower((const string&)*it); // We set casesens|diacsens to get an equivalent of ixTermMatch() db.termMatch(Db::ET_WILD|Db::ET_CASESENS|Db::ET_DIACSENS, string(), mt, res, -1, "mtype"); if (res.entries.empty()) { exptps.push_back(it->c_str()); } else { for (vector<TermMatchEntry>::const_iterator rit = res.entries.begin(); rit != res.entries.end(); rit++) { exptps.push_back(strip_prefix(rit->term)); } } } } sort(exptps.begin(), exptps.end()); exptps.erase(unique(exptps.begin(), exptps.end()), exptps.end()); tps = exptps; return true; } static const char *maxXapClauseMsg = "Maximum Xapian query size exceeded. Increase maxXapianClauses " "in the configuration. "; static const char *maxXapClauseCaseDiacMsg = "Or try to use case (C) or diacritics (D) sensitivity qualifiers, or less " "wildcards ?" ; // Walk the clauses list, translate each and add to top Xapian Query bool SearchData::clausesToQuery(Rcl::Db &db, SClType tp, vector<SearchDataClause*>& query, string& reason, void *d) { Xapian::Query xq; for (auto& clausep : query) { Xapian::Query nq; if (!clausep->toNativeQuery(db, &nq)) { LOGERR("SearchData::clausesToQuery: toNativeQuery failed: " << clausep->getReason() << "\n"); reason += clausep->getReason() + " "; return false; } if (nq.empty()) { LOGDEB("SearchData::clausesToQuery: skipping empty clause\n"); continue; } // If this structure is an AND list, must use AND_NOT for excl clauses. // Else this is an OR list, and there can't be excl clauses (checked by // addClause()) Xapian::Query::op op; if (tp == SCLT_AND) { if (clausep->getexclude()) { op = Xapian::Query::OP_AND_NOT; } else { op = Xapian::Query::OP_AND; } } else { op = Xapian::Query::OP_OR; } if (xq.empty()) { if (op == Xapian::Query::OP_AND_NOT) xq = Xapian::Query(op, Xapian::Query::MatchAll, nq); else xq = nq; } else { xq = Xapian::Query(op, xq, nq); } if (int(xq.get_length()) >= getMaxCl()) { LOGERR("" << maxXapClauseMsg << "\n"); m_reason += maxXapClauseMsg; if (!o_index_stripchars) m_reason += maxXapClauseCaseDiacMsg; return false; } } LOGDEB0("SearchData::clausesToQuery: got " << xq.get_length()<<" clauses\n"); if (xq.empty()) xq = Xapian::Query::MatchAll; *((Xapian::Query *)d) = xq; return true; } bool SearchData::toNativeQuery(Rcl::Db &db, void *d) { LOGDEB("SearchData::toNativeQuery: stemlang [" << m_stemlang << "]\n"); m_reason.erase(); db.getConf()->getConfParam("maxTermExpand", &m_maxexp); db.getConf()->getConfParam("maxXapianClauses", &m_maxcl); db.getConf()->getConfParam("autocasesens", &m_autocasesens); db.getConf()->getConfParam("autodiacsens", &m_autodiacsens); // Walk the clause list translating each in turn and building the // Xapian query tree Xapian::Query xq; if (!clausesToQuery(db, m_tp, m_query, m_reason, &xq)) { LOGERR("SearchData::toNativeQuery: clausesToQuery failed. 
reason: " << m_reason << "\n"); return false; } if (m_haveDates) { // If one of the extremities is unset, compute db extremas if (m_dates.y1 == 0 || m_dates.y2 == 0) { int minyear = 1970, maxyear = 2100; if (!db.maxYearSpan(&minyear, &maxyear)) { LOGERR("Can't retrieve index min/max dates\n"); //whatever, go on. } if (m_dates.y1 == 0) { m_dates.y1 = minyear; m_dates.m1 = 1; m_dates.d1 = 1; } if (m_dates.y2 == 0) { m_dates.y2 = maxyear; m_dates.m2 = 12; m_dates.d2 = 31; } } LOGDEB("Db::toNativeQuery: date interval: " << m_dates.y1 << "-" << m_dates.m1 << "-" << m_dates.d1 << "/" << m_dates.y2 << "-" << m_dates.m2 << "-" << m_dates.d2 << "\n"); Xapian::Query dq = date_range_filter(m_dates.y1, m_dates.m1, m_dates.d1, m_dates.y2, m_dates.m2, m_dates.d2); if (dq.empty()) { LOGINFO("Db::toNativeQuery: date filter is empty\n"); } // If no probabilistic query is provided then promote the daterange // filter to be THE query instead of filtering an empty query. if (xq.empty()) { LOGINFO("Db::toNativeQuery: proba query is empty\n"); xq = dq; } else { xq = Xapian::Query(Xapian::Query::OP_FILTER, xq, dq); } } if (m_minSize != size_t(-1) || m_maxSize != size_t(-1)) { Xapian::Query sq; string min = lltodecstr(m_minSize); string max = lltodecstr(m_maxSize); if (m_minSize == size_t(-1)) { string value(max); leftzeropad(value, 12); sq = Xapian::Query(Xapian::Query::OP_VALUE_LE, VALUE_SIZE, value); } else if (m_maxSize == size_t(-1)) { string value(min); leftzeropad(value, 12); sq = Xapian::Query(Xapian::Query::OP_VALUE_GE, VALUE_SIZE, value); } else { string minvalue(min); leftzeropad(minvalue, 12); string maxvalue(max); leftzeropad(maxvalue, 12); sq = Xapian::Query(Xapian::Query::OP_VALUE_RANGE, VALUE_SIZE, minvalue, maxvalue); } // If no probabilistic query is provided then promote the // filter to be THE query instead of filtering an empty query. if (xq.empty()) { LOGINFO("Db::toNativeQuery: proba query is empty\n"); xq = sq; } else { xq = Xapian::Query(Xapian::Query::OP_FILTER, xq, sq); } } // Add the autophrase if any if (m_autophrase) { Xapian::Query apq; if (m_autophrase->toNativeQuery(db, &apq)) { xq = xq.empty() ? apq : Xapian::Query(Xapian::Query::OP_AND_MAYBE, xq, apq); } } // Add the file type filtering clause if any if (!m_filetypes.empty()) { expandFileTypes(db, m_filetypes); Xapian::Query tq; for (vector<string>::iterator it = m_filetypes.begin(); it != m_filetypes.end(); it++) { string term = wrap_prefix(mimetype_prefix) + *it; LOGDEB0("Adding file type term: [" << term << "]\n"); tq = tq.empty() ? Xapian::Query(term) : Xapian::Query(Xapian::Query::OP_OR, tq, Xapian::Query(term)); } xq = xq.empty() ? tq : Xapian::Query(Xapian::Query::OP_FILTER, xq, tq); } // Add the neg file type filtering clause if any if (!m_nfiletypes.empty()) { expandFileTypes(db, m_nfiletypes); Xapian::Query tq; for (vector<string>::iterator it = m_nfiletypes.begin(); it != m_nfiletypes.end(); it++) { string term = wrap_prefix(mimetype_prefix) + *it; LOGDEB0("Adding negative file type term: [" << term << "]\n"); tq = tq.empty() ? Xapian::Query(term) : Xapian::Query(Xapian::Query::OP_OR, tq, Xapian::Query(term)); } xq = xq.empty() ? tq : Xapian::Query(Xapian::Query::OP_AND_NOT, xq, tq); } *((Xapian::Query *)d) = xq; return true; } // Splitter for breaking a user string into simple terms and // phrases. This is for parts of the user entry which would appear as // a single word because there is no white space inside, but are // actually multiple terms to rcldb (ie term1,term2). 
Still, most of // the time, the result of our splitting will be a single term. class TextSplitQ : public TextSplitP { public: TextSplitQ(Flags flags, TermProc *prc) : TextSplitP(prc, flags), m_nostemexp(false) { } bool takeword(const std::string &term, int pos, int bs, int be) { // Check if the first letter is a majuscule in which // case we do not want to do stem expansion. Need to do this // before unac of course... m_nostemexp = unaciscapital(term); return TextSplitP::takeword(term, pos, bs, be); } bool nostemexp() const { return m_nostemexp; } private: bool m_nostemexp; }; class TermProcQ : public TermProc { public: TermProcQ() : TermProc(0), m_alltermcount(0), m_lastpos(0), m_ts(0) {} // We need a ref to the splitter (only it knows about orig term // capitalization for controlling stemming. The ref can't be set // in the constructor because the splitter is not built yet when // we are born (chicken and egg). void setTSQ(const TextSplitQ *ts) { m_ts = ts; } bool takeword(const std::string &term, int pos, int bs, int be) { m_alltermcount++; if (m_lastpos < pos) m_lastpos = pos; bool noexpand = be ? m_ts->nostemexp() : true; LOGDEB1("TermProcQ::takeword: pushing [" << term << "] pos " << pos << " noexp " << noexpand << "\n"); if (m_terms[pos].size() < term.size()) { m_terms[pos] = term; m_nste[pos] = noexpand; } return true; } bool flush() { for (map<int, string>::const_iterator it = m_terms.begin(); it != m_terms.end(); it++) { m_vterms.push_back(it->second); m_vnostemexps.push_back(m_nste[it->first]); } return true; } int alltermcount() const { return m_alltermcount; } int lastpos() const { return m_lastpos; } const vector<string>& terms() { return m_vterms; } const vector<bool>& nostemexps() { return m_vnostemexps; } private: // Count of terms including stopwords: this is for adjusting // phrase/near slack int m_alltermcount; int m_lastpos; const TextSplitQ *m_ts; vector<string> m_vterms; vector<bool> m_vnostemexps; map<int, string> m_terms; map<int, bool> m_nste; }; static const vector<CharFlags> expandModStrings{ {SearchDataClause::SDCM_NOSTEMMING, "nostemming"}, {SearchDataClause::SDCM_ANCHORSTART, "anchorstart"}, {SearchDataClause::SDCM_ANCHOREND, "anchorend"}, {SearchDataClause::SDCM_CASESENS, "casesens"}, {SearchDataClause::SDCM_DIACSENS, "diacsens"}, {SearchDataClause::SDCM_NOTERMS, "noterms"}, {SearchDataClause::SDCM_NOSYNS, "nosyns"}, {SearchDataClause::SDCM_PATHELT, "pathelt"}, }; /** Expand term into term list, using appropriate mode: stem, wildcards, * diacritics... * * @param mods stem expansion, case and diacritics sensitivity control. * @param term input single word * @param oexp output expansion list * @param sterm output original input term if there were no wildcards * @param prefix field prefix in index. We could recompute it, but the caller * has it already. Used in the simple case where there is nothing to expand, * and we just return the prefixed term (else Db::termMatch deals with it). * @param multiwords it may happen that synonym processing results in multi-word * expansions which should be processed as phrases. 
*/ bool SearchDataClauseSimple::expandTerm(Rcl::Db &db, string& ermsg, int mods, const string& term, vector<string>& oexp, string &sterm, const string& prefix, vector<string>* multiwords ) { LOGDEB0("expandTerm: mods: [" << flagsToString(expandModStrings, mods) << "] fld [" << m_field << "] trm [" << term << "] lang [" << getStemLang() << "]\n"); sterm.clear(); oexp.clear(); if (term.empty()) return true; if (mods & SDCM_PATHELT) { // Path element are so special. Only wildcards, and they are // case-sensitive. mods |= SDCM_NOSTEMMING|SDCM_CASESENS|SDCM_DIACSENS|SDCM_NOSYNS; } bool maxexpissoft = false; int maxexpand = getSoftMaxExp(); if (maxexpand != -1) { maxexpissoft = true; } else { maxexpand = getMaxExp(); } bool haswild = term.find_first_of(cstr_minwilds) != string::npos; // If there are no wildcards, add term to the list of user-entered terms if (!haswild) { m_hldata.uterms.insert(term); sterm = term; } // No stem expansion if there are wildcards or if prevented by caller bool nostemexp = (mods & SDCM_NOSTEMMING) != 0; if (haswild || getStemLang().empty()) { LOGDEB2("expandTerm: found wildcards or stemlang empty: no exp\n"); nostemexp = true; } bool diac_sensitive = (mods & SDCM_DIACSENS) != 0; bool case_sensitive = (mods & SDCM_CASESENS) != 0; bool synonyms = (mods & SDCM_NOSYNS) == 0; bool pathelt = (mods & SDCM_PATHELT) != 0; // noexpansion can be modified further down by possible case/diac expansion bool noexpansion = nostemexp && !haswild && !synonyms; if (o_index_stripchars) { diac_sensitive = case_sensitive = false; } else { // If we are working with a raw index, apply the rules for case and // diacritics sensitivity. // If any character has a diacritic, we become // diacritic-sensitive. Note that the way that the test is // performed (conversion+comparison) will automatically ignore // accented characters which are actually a separate letter if (getAutoDiac() && unachasaccents(term)) { LOGDEB0("expandTerm: term has accents -> diac-sensitive\n"); diac_sensitive = true; } // If any character apart the first is uppercase, we become // case-sensitive. The first character is reserved for // turning off stemming. You need to use a query language // modifier to search for Floor in a case-sensitive way. Utf8Iter it(term); it++; if (getAutoCase() && unachasuppercase(term.substr(it.getBpos()))) { LOGDEB0("expandTerm: term has uppercase -> case-sensitive\n"); case_sensitive = true; } // If we are sensitive to case or diacritics turn stemming off if (diac_sensitive || case_sensitive) { LOGDEB0("expandTerm: diac or case sens set -> stemexpand and " "synonyms off\n"); nostemexp = true; synonyms = false; } if (!case_sensitive || !diac_sensitive) noexpansion = false; } if (!m_exclude && noexpansion) { oexp.push_back(prefix + term); m_hldata.terms[term] = term; LOGDEB("ExpandTerm: noexpansion: final: "<<stringsToString(oexp)<< "\n"); return true; } int termmatchsens = 0; if (case_sensitive) termmatchsens |= Db::ET_CASESENS; if (diac_sensitive) termmatchsens |= Db::ET_DIACSENS; if (synonyms) termmatchsens |= Db::ET_SYNEXP; if (pathelt) termmatchsens |= Db::ET_PATHELT; Db::MatchType mtyp = haswild ? Db::ET_WILD : nostemexp ? Db::ET_NONE : Db::ET_STEM; TermMatchResult res; if (!db.termMatch(mtyp | termmatchsens, getStemLang(), term, res, maxexpand, m_field, multiwords)) { // Let it go through } // Term match entries to vector of terms if (int(res.entries.size()) >= maxexpand && !maxexpissoft) { ermsg = "Maximum term expansion size exceeded." 
" Maybe use case/diacritics sensitivity or increase maxTermExpand."; return false; } for (const auto& entry : res.entries) { oexp.push_back(entry.term); } // If the term does not exist at all in the db, the return from // termMatch() is going to be empty, which is not what we want (we // would then compute an empty Xapian query) if (oexp.empty()) oexp.push_back(prefix + term); // Remember the uterm-to-expansion links if (!m_exclude) { for (const auto& entry : oexp) { m_hldata.terms[strip_prefix(entry)] = term; } } LOGDEB("ExpandTerm: final: " << stringsToString(oexp) << "\n"); return true; } static void prefix_vector(vector<string>& v, const string& prefix) { for (vector<string>::iterator it = v.begin(); it != v.end(); it++) { *it = prefix + *it; } } void SearchDataClauseSimple::processSimpleSpan( Rcl::Db &db, string& ermsg, const string& span, int mods, void *pq) { vector<Xapian::Query>& pqueries(*(vector<Xapian::Query>*)pq); LOGDEB0("StringToXapianQ::processSimpleSpan: [" << span << "] mods 0x" << (unsigned int)mods << "\n"); vector<string> exp; string sterm; // dumb version of user term string prefix; const FieldTraits *ftp; if (!m_field.empty() && db.fieldToTraits(m_field, &ftp, true)) { if (ftp->noterms) addModifier(SDCM_NOTERMS); // Don't add terms to highlight data prefix = wrap_prefix(ftp->pfx); } vector<string> multiwords; if (!expandTerm(db, ermsg, mods, span, exp, sterm, prefix, &multiwords)) return; // Set up the highlight data. No prefix should go in there if (!m_exclude) { for (const auto& term : exp) { HighlightData::TermGroup tg; tg.term = term.substr(prefix.size()); tg.grpsugidx = m_hldata.ugroups.size() - 1; m_hldata.index_term_groups.push_back(tg); } } // Push either term or OR of stem-expanded set Xapian::Query xq(Xapian::Query::OP_OR, exp.begin(), exp.end()); m_curcl += exp.size(); // If sterm (simplified original user term) is not null, give it a // relevance boost. We do this even if no expansion occurred (else // the non-expanded terms in a term list would end-up with even // less wqf). This does not happen if there are wildcards anywhere // in the search. // We normally boost the original term in the stem expansion list. Don't // do it if there are wildcards anywhere, this would skew the results. Also // no need to do it if there was no expansion. bool doBoostUserTerm = (m_parentSearch && !m_parentSearch->haveWildCards()) || (m_parentSearch == 0 && !m_haveWildCards); if (exp.size() > 1 && doBoostUserTerm && !sterm.empty()) { xq = Xapian::Query(Xapian::Query::OP_OR, xq, Xapian::Query(prefix+sterm, original_term_wqf_booster)); } // Push phrases for the multi-word expansions for (vector<string>::const_iterator mwp = multiwords.begin(); mwp != multiwords.end(); mwp++) { vector<string> phr; // We just do a basic split to keep things a bit simpler here // (no textsplit). This means though that no punctuation is // allowed in multi-word synonyms. 
stringToTokens(*mwp, phr); if (!prefix.empty()) prefix_vector(phr, prefix); xq = Xapian::Query(Xapian::Query::OP_OR, xq, Xapian::Query(Xapian::Query::OP_PHRASE, phr.begin(), phr.end())); m_curcl++; } pqueries.push_back(xq); } // User entry element had several terms: transform into a PHRASE or // NEAR xapian query, the elements of which can themselves be OR // queries if the terms get expanded by stemming or wildcards (we // don't do stemming for PHRASE though) void SearchDataClauseSimple::processPhraseOrNear(Rcl::Db &db, string& ermsg, TermProcQ *splitData, int mods, void *pq, bool useNear, int slack) { vector<Xapian::Query> &pqueries(*(vector<Xapian::Query>*)pq); Xapian::Query::op op = useNear ? Xapian::Query::OP_NEAR : Xapian::Query::OP_PHRASE; vector<Xapian::Query> orqueries; #ifdef XAPIAN_NEAR_EXPAND_SINGLE_BUF bool hadmultiple = false; #endif vector<vector<string> >groups; string prefix; const FieldTraits *ftp; if (!m_field.empty() && db.fieldToTraits(m_field, &ftp, true)) { prefix = wrap_prefix(ftp->pfx); } if (mods & Rcl::SearchDataClause::SDCM_ANCHORSTART) { orqueries.push_back(Xapian::Query(prefix + start_of_field_term)); slack++; } // Go through the list and perform stem/wildcard expansion for each element vector<bool>::const_iterator nxit = splitData->nostemexps().begin(); for (vector<string>::const_iterator it = splitData->terms().begin(); it != splitData->terms().end(); it++, nxit++) { LOGDEB0("ProcessPhrase: processing [" << *it << "]\n"); // Adjust when we do stem expansion. Not if disabled by // caller, not inside phrases, and some versions of xapian // will accept only one OR clause inside NEAR. bool nostemexp = *nxit || (op == Xapian::Query::OP_PHRASE) #ifdef XAPIAN_NEAR_EXPAND_SINGLE_BUF || hadmultiple #endif // single OR inside NEAR ; int lmods = mods; if (nostemexp) lmods |= SearchDataClause::SDCM_NOSTEMMING; string sterm; vector<string> exp; if (!expandTerm(db, ermsg, lmods, *it, exp, sterm, prefix)) return; LOGDEB0("ProcessPhraseOrNear: exp size " << exp.size() << ", exp: " << stringsToString(exp) << "\n"); // groups is used for highlighting, we don't want prefixes in there. vector<string> noprefs; for (vector<string>::const_iterator it = exp.begin(); it != exp.end(); it++) { noprefs.push_back(it->substr(prefix.size())); } groups.push_back(noprefs); orqueries.push_back(Xapian::Query(Xapian::Query::OP_OR, exp.begin(), exp.end())); m_curcl += exp.size(); if (m_curcl >= getMaxCl()) return; #ifdef XAPIAN_NEAR_EXPAND_SINGLE_BUF if (exp.size() > 1) hadmultiple = true; #endif } if (mods & Rcl::SearchDataClause::SDCM_ANCHOREND) { orqueries.push_back(Xapian::Query(prefix + end_of_field_term)); slack++; } // Generate an appropriate PHRASE/NEAR query with adjusted slack // For phrases, give a relevance boost like we do for original terms LOGDEB2("PHRASE/NEAR: alltermcount " << splitData->alltermcount() << " lastpos " << splitData->lastpos() << "\n"); Xapian::Query xq(op, orqueries.begin(), orqueries.end(), orqueries.size() + slack); if (op == Xapian::Query::OP_PHRASE) xq = Xapian::Query(Xapian::Query::OP_SCALE_WEIGHT, xq, original_term_wqf_booster); pqueries.push_back(xq); // Insert the search groups and slacks in the highlight data, with // a reference to the user entry that generated them: if (!m_exclude) { HighlightData::TermGroup tg; tg.orgroups = groups; tg.slack = slack; tg.grpsugidx = m_hldata.ugroups.size() - 1; tg.kind = (op == Xapian::Query::OP_PHRASE) ? 
HighlightData::TermGroup::TGK_PHRASE : HighlightData::TermGroup::TGK_NEAR; m_hldata.index_term_groups.push_back(tg); } } // Trim string beginning with ^ or ending with $ and convert to flags static int stringToMods(string& s) { int mods = 0; // Check for an anchored search trimstring(s); if (s.length() > 0 && s[0] == '^') { mods |= Rcl::SearchDataClause::SDCM_ANCHORSTART; s.erase(0, 1); } if (s.length() > 0 && s[s.length()-1] == '$') { mods |= Rcl::SearchDataClause::SDCM_ANCHOREND; s.erase(s.length()-1); } return mods; } /** * Turn user entry string (NOT query language) into a list of xapian queries. * We just separate words and phrases, and do wildcard and stem expansion, * * This is used to process data entered into an OR/AND/NEAR/PHRASE field of * the GUI (in the case of NEAR/PHRASE, clausedist adds dquotes to the user * entry). * * This appears awful, and it would seem that the split into * terms/phrases should be performed in the upper layer so that we * only receive pure term or near/phrase pure elements here, but in * fact there are things that would appear like terms to naive code, * and which will actually may be turned into phrases (ie: tom:jerry), * in a manner which intimately depends on the index implementation, * so that it makes sense to process this here. * * The final list contains one query for each term or phrase * - Elements corresponding to a stem-expanded part are an OP_OR * composition of the stem-expanded terms (or a single term query). * - Elements corresponding to phrase/near are an OP_PHRASE/NEAR * composition of the phrase terms (no stem expansion in this case) * @return the subquery count (either or'd stem-expanded terms or phrase word * count) */ bool SearchDataClauseSimple::processUserString(Rcl::Db &db, const string &iq, string &ermsg, void *pq, int slack, bool useNear) { vector<Xapian::Query> &pqueries(*(vector<Xapian::Query>*)pq); int mods = m_modifiers; LOGDEB("StringToXapianQ:pUS:: qstr [" << iq << "] fld [" << m_field << "] mods 0x"<<mods<<" slack " << slack << " near " << useNear <<"\n"); ermsg.erase(); m_curcl = 0; const StopList stops = db.getStopList(); // Simple whitespace-split input into user-level words and // double-quoted phrases: word1 word2 "this is a phrase". // // The text splitter may further still decide that the resulting // "words" are really phrases, this depends on separators: // [paul@dom.net] would still be a word (span), but [about:me] // will probably be handled as a phrase. vector<string> phrases; TextSplit::stringToStrings(iq, phrases); // Process each element: textsplit into terms, handle stem/wildcard // expansion and transform into an appropriate Xapian::Query try { for (vector<string>::iterator it = phrases.begin(); it != phrases.end(); it++) { LOGDEB0("strToXapianQ: phrase/word: [" << *it << "]\n"); // Anchoring modifiers int amods = stringToMods(*it); int terminc = amods != 0 ? 1 : 0; mods |= amods; // If there are multiple spans in this element, including // at least one composite, we have to increase the slack // else a phrase query including a span would fail. // Ex: "term0@term1 term2" is onlyspans-split as: // 0 term0@term1 0 12 // 2 term2 13 18 // The position of term2 is 2, not 1, so a phrase search // would fail. // We used to do word split, searching for // "term0 term1 term2" instead, which may have worse // performance, but will succeed. 
// We now adjust the phrase/near slack by comparing the term count // and the last position // The term processing pipeline: // split -> [unac/case ->] stops -> store terms TermProcQ tpq; TermProc *nxt = &tpq; TermProcStop tpstop(nxt, stops); nxt = &tpstop; //TermProcCommongrams tpcommon(nxt, stops); nxt = &tpcommon; //tpcommon.onlygrams(true); TermProcPrep tpprep(nxt); if (o_index_stripchars) nxt = &tpprep; TextSplitQ splitter(TextSplit::Flags(TextSplit::TXTS_ONLYSPANS | TextSplit::TXTS_KEEPWILD), nxt); tpq.setTSQ(&splitter); splitter.text_to_words(*it); slack += tpq.lastpos() - int(tpq.terms().size()) + 1; LOGDEB0("strToXapianQ: termcount: " << tpq.terms().size() << "\n"); switch (tpq.terms().size() + terminc) { case 0: continue;// ?? case 1: { int lmods = mods; if (tpq.nostemexps().front()) lmods |= SearchDataClause::SDCM_NOSTEMMING; if (!m_exclude) { m_hldata.ugroups.push_back(tpq.terms()); } processSimpleSpan(db, ermsg, tpq.terms().front(), lmods, &pqueries); } break; default: if (!m_exclude) { m_hldata.ugroups.push_back(tpq.terms()); } processPhraseOrNear(db, ermsg, &tpq, mods, &pqueries, useNear, slack); } if (m_curcl >= getMaxCl()) { ermsg = maxXapClauseMsg; if (!o_index_stripchars) ermsg += maxXapClauseCaseDiacMsg; break; } } } catch (const Xapian::Error &e) { ermsg = e.get_msg(); } catch (const string &s) { ermsg = s; } catch (const char *s) { ermsg = s; } catch (...) { ermsg = "Caught unknown exception"; } if (!ermsg.empty()) { LOGERR("stringToXapianQueries: " << ermsg << "\n"); return false; } return true; } // Translate a simple OR or AND search clause. bool SearchDataClauseSimple::toNativeQuery(Rcl::Db &db, void *p) { LOGDEB("SearchDataClauseSimple::toNativeQuery: fld [" << m_field << "] val [" << m_text << "] stemlang [" << getStemLang() << "]\n"); // Transform (in)equalities into a range query switch (getrel()) { case REL_EQUALS: { SearchDataClauseRange cl(*this, gettext(), gettext()); bool ret = cl.toNativeQuery(db, p); m_reason = cl.getReason(); return ret; } case REL_LT: case REL_LTE: { SearchDataClauseRange cl(*this, "", gettext()); bool ret = cl.toNativeQuery(db, p); m_reason = cl.getReason(); return ret; } case REL_GT: case REL_GTE: { SearchDataClauseRange cl(*this, gettext(), ""); bool ret = cl.toNativeQuery(db, p); m_reason = cl.getReason(); return ret; } default: break; } Xapian::Query *qp = (Xapian::Query *)p; *qp = Xapian::Query(); Xapian::Query::op op; switch (m_tp) { case SCLT_AND: op = Xapian::Query::OP_AND; break; case SCLT_OR: op = Xapian::Query::OP_OR; break; default: LOGERR("SearchDataClauseSimple: bad m_tp " << m_tp << "\n"); m_reason = "Internal error"; return false; } vector<Xapian::Query> pqueries; if (!processUserString(db, m_text, m_reason, &pqueries)) return false; if (pqueries.empty()) { LOGERR("SearchDataClauseSimple: resolved to null query\n"); m_reason = string("Resolved to null query. Term too long ? : [" + m_text + string("]")); return false; } *qp = Xapian::Query(op, pqueries.begin(), pqueries.end()); if (m_weight != 1.0) { *qp = Xapian::Query(Xapian::Query::OP_SCALE_WEIGHT, *qp, m_weight); } return true; } // Translate a range clause. This only works if a Xapian value slot // was attributed to the field. bool SearchDataClauseRange::toNativeQuery(Rcl::Db &db, void *p) { LOGDEB("SearchDataClauseRange::toNativeQuery: " << m_field << " :[" << m_text << ".." 
<< m_t2 << "]\n"); Xapian::Query *qp = (Xapian::Query *)p; *qp = Xapian::Query(); if (m_field.empty() || (m_text.empty() && m_t2.empty())) { m_reason = "Range clause needs a field and a value"; return false; } // Get the value number for the field from the configuration const FieldTraits *ftp; if (!db.fieldToTraits(m_field, &ftp, true)) { m_reason = string("field ") + m_field + " not found in configuration"; return false; } if (ftp->valueslot == 0) { m_reason = string("No value slot specified in configuration for field ") + m_field; return false; } LOGDEB("SearchDataClauseRange: value slot " << ftp->valueslot << endl); // Build Xapian VALUE query. string errstr; try { if (m_text.empty()) { *qp = Xapian::Query(Xapian::Query::OP_VALUE_LE, ftp->valueslot, convert_field_value(*ftp, m_t2)); } else if (m_t2.empty()) { *qp = Xapian::Query(Xapian::Query::OP_VALUE_GE, ftp->valueslot, convert_field_value(*ftp, m_text)); } else { *qp = Xapian::Query(Xapian::Query::OP_VALUE_RANGE, ftp->valueslot, convert_field_value(*ftp, m_text), convert_field_value(*ftp, m_t2)); } } XCATCHERROR(errstr); if (!errstr.empty()) { LOGERR("SearchDataClauseRange: range query creation failed for slot "<< ftp->valueslot << endl); m_reason = "Range query creation failed\n"; *qp = Xapian::Query(); return false; } return true; } // Translate a FILENAME search clause. This always comes // from a "filename" search from the gui or recollq. A query language // "filename:"-prefixed field will not go through here, but through // the generic field-processing code. // // We do not split the entry any more (used to do some crazy thing // about expanding multiple fragments in the past). We just take the // value blanks and all and expand this against the indexed unsplit // file names bool SearchDataClauseFilename::toNativeQuery(Rcl::Db &db, void *p) { Xapian::Query *qp = (Xapian::Query *)p; *qp = Xapian::Query(); int maxexp = getSoftMaxExp(); if (maxexp == -1) maxexp = getMaxExp(); vector<string> names; db.filenameWildExp(m_text, names, maxexp); *qp = Xapian::Query(Xapian::Query::OP_OR, names.begin(), names.end()); if (m_weight != 1.0) { *qp = Xapian::Query(Xapian::Query::OP_SCALE_WEIGHT, *qp, m_weight); } return true; } // Translate a dir: path filtering clause. See comments in .h bool SearchDataClausePath::toNativeQuery(Rcl::Db &db, void *p) { LOGDEB("SearchDataClausePath::toNativeQuery: [" << m_text << "]\n"); Xapian::Query *qp = (Xapian::Query *)p; *qp = Xapian::Query(); string ltext; #ifdef _WIN32 // Windows file names are case-insensitive, so we lowercase (same // as when indexing) unacmaybefold(m_text, ltext, "UTF-8", UNACOP_FOLD); #else ltext = m_text; #endif if (ltext.empty()) { LOGERR("SearchDataClausePath: empty path??\n"); m_reason = "Empty path ?"; return false; } vector<Xapian::Query> orqueries; if (path_isabsolute(ltext)) orqueries.push_back(Xapian::Query(wrap_prefix(pathelt_prefix))); else ltext = path_tildexpand(ltext); vector<string> vpath; stringToTokens(ltext, vpath, "/"); for (vector<string>::const_iterator pit = vpath.begin(); pit != vpath.end(); pit++){ string sterm; vector<string> exp; if (!expandTerm(db, m_reason, SDCM_PATHELT, *pit, exp, sterm, wrap_prefix(pathelt_prefix))) { return false; } LOGDEB0("SDataPath::toNative: exp size " << exp.size() << ". 
Exp: " << stringsToString(exp) << "\n"); if (exp.size() == 1) orqueries.push_back(Xapian::Query(exp[0])); else orqueries.push_back(Xapian::Query(Xapian::Query::OP_OR, exp.begin(), exp.end())); m_curcl += exp.size(); if (m_curcl >= getMaxCl()) return false; } *qp = Xapian::Query(Xapian::Query::OP_PHRASE, orqueries.begin(), orqueries.end()); if (m_weight != 1.0) { *qp = Xapian::Query(Xapian::Query::OP_SCALE_WEIGHT, *qp, m_weight); } return true; } // Translate NEAR or PHRASE clause. bool SearchDataClauseDist::toNativeQuery(Rcl::Db &db, void *p) { LOGDEB("SearchDataClauseDist::toNativeQuery\n"); Xapian::Query *qp = (Xapian::Query *)p; *qp = Xapian::Query(); vector<Xapian::Query> pqueries; // We produce a single phrase out of the user entry then use // stringToXapianQueries() to lowercase and simplify the phrase // terms etc. This will result into a single (complex) // Xapian::Query. if (m_text.find('\"') != string::npos) { m_text = neutchars(m_text, "\""); } string s = cstr_dquote + m_text + cstr_dquote; bool useNear = (m_tp == SCLT_NEAR); if (!processUserString(db, s, m_reason, &pqueries, m_slack, useNear)) return false; if (pqueries.empty()) { LOGERR("SearchDataClauseDist: resolved to null query\n"); m_reason = string("Resolved to null query. Term too long ? : [" + m_text + string("]")); return false; } *qp = *pqueries.begin(); if (m_weight != 1.0) { *qp = Xapian::Query(Xapian::Query::OP_SCALE_WEIGHT, *qp, m_weight); } return true; } } // Namespace Rcl ����������������������������������������������������������������������������������������recoll-1.26.3/kde/����������������������������������������������������������������������������������0000755�0001750�0001750�00000000000�13570165407�010637� 5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/kde/kioslave/�������������������������������������������������������������������������0000755�0001750�0001750�00000000000�13570165407�012454� 5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/kde/kioslave/kio_recoll-kde4/���������������������������������������������������������0000755�0001750�0001750�00000000000�13570165410�015415� 5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/kde/kioslave/kio_recoll-kde4/kio_recoll.h���������������������������������������������0000644�0001750�0001750�00000014247�13533651561�017646� 
�����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������#ifndef _RECOLL_H #define _RECOLL_H /* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <string> using std::string; #include <qglobal.h> #include <qstring.h> #include <kurl.h> #include <kio/global.h> #include <kio/slavebase.h> #include <kdeversion.h> #include "rclconfig.h" #include "rcldb.h" #include "reslistpager.h" #include "docseq.h" #include <memory> class RecollProtocol; /** Specialize the recoll html pager for the kind of links we use etc. */ class RecollKioPager : public ResListPager { public: RecollKioPager() : m_parent(0) {} void setParent(RecollProtocol *proto) {m_parent = proto;} virtual bool append(const string& data); virtual bool append(const string& data, int, const Rcl::Doc&) {return append(data);} virtual string detailsLink(); virtual const string &parFormat(); virtual string nextUrl(); virtual string prevUrl(); virtual string pageTop(); private: RecollProtocol *m_parent; }; class QueryDesc { public: QueryDesc() : opt("l"), page(0), isDetReq(false) {} QString query; QString opt; int page; bool isDetReq; bool sameQuery(const QueryDesc& o) const { return !opt.compare(o.opt) && !query.compare(o.query); } }; // Our virtual tree is a bit complicated. We need a class to analyse an URL // and tell what we should do with it class UrlIngester { public: UrlIngester(RecollProtocol *p, const KUrl& url); enum RootEntryType {UIRET_NONE, UIRET_ROOT, UIRET_HELP, UIRET_SEARCH}; bool isRootEntry(RootEntryType *tp) { if (m_type != UIMT_ROOTENTRY) return false; *tp = m_retType; return true; } bool isQuery(QueryDesc *q) { if (m_type != UIMT_QUERY) return false; *q = m_query; return true; } bool isResult(QueryDesc *q, int *num) { if (m_type != UIMT_QUERYRESULT) return false; *q = m_query; *num = m_resnum; return true; } bool isPreview(QueryDesc *q, int *num) { if (m_type != UIMT_PREVIEW) return false; *q = m_query; *num = m_resnum; return true; } bool endSlashQuery() {return m_slashend;} bool alwaysDir() {return m_alwaysdir;} private: RecollProtocol *m_parent; QueryDesc m_query; bool m_slashend; bool m_alwaysdir; RootEntryType m_retType; int m_resnum; enum MyType {UIMT_NONE, UIMT_ROOTENTRY, UIMT_QUERY, UIMT_QUERYRESULT, UIMT_PREVIEW}; MyType m_type; }; /** * A KIO slave to execute and display Recoll searches. * * Things are made a little complicated because KIO slaves can't hope * that their internal state will remain consistent with their user * application state: slaves die, are restarted, reused, at random * between requests. 
* In our case, this means that any request has to be processed * without reference to the last operation performed. Ie, if the * search parameters are not those from the last request, the search * must be restarted anew. This happens for example with different * searches in 2 konqueror screens: typically only one kio_slave will * be used. * The fact that we check if the search is the same as the last one, * to avoid restarting is an optimization, not the base mechanism * (contrary to what was initially assumed, and may have left a few * crumbs around). * * We have two modes of operation, one based on html forms and result * pages, which can potentially be developped to the full Recoll * functionality, and one based on a directory listing model, which * will always be more limited, but may be useful in some cases to * allow easy copying of files etc. Which one is in use is decided by * the form of the URL. */ class RecollProtocol : public KIO::SlaveBase { public: RecollProtocol(const QByteArray &pool, const QByteArray &app ); virtual ~RecollProtocol(); virtual void mimetype(const KUrl& url); virtual void get(const KUrl& url); // The directory mode is not available with KDE 4.0, I could find // no way to avoid crashing kdirmodel #if KDE_IS_VERSION(4,1,0) virtual void stat(const KUrl & url); virtual void listDir(const KUrl& url); #endif static RclConfig *o_rclconfig; friend class RecollKioPager; friend class UrlIngester; private: bool maybeOpenDb(string& reason); bool URLToQuery(const KUrl &url, QString& q, QString& opt, int *page=0); bool doSearch(const QueryDesc& qd); void searchPage(); void queryDetails(); string makeQueryUrl(int page, bool isdet = false); bool syncSearch(const QueryDesc& qd); void htmlDoSearch(const QueryDesc& qd); void showPreview(const Rcl::Doc& doc); bool isRecollResult(const KUrl &url, int *num, QString* q); bool m_initok; std::shared_ptr<Rcl::Db> m_rcldb; string m_reason; bool m_alwaysdir; string m_stemlang; // english by default else env[RECOLL_KIO_STEMLANG] // Search state: because of how the KIO slaves are used / reused, // we can't be sure that the next request will be for the same // search, and we need to check and restart one if the data // changes. This is very wasteful but hopefully won't happen too // much in actual use. One possible workaround for some scenarios // (one slave several konqueror windows) would be to have a small // cache of recent searches kept open. RecollKioPager m_pager; std::shared_ptr<DocSequence> m_source; // Note: page here is not used, current page always comes from m_pager. 
QueryDesc m_query; }; extern "C" {int kdemain(int, char**);} #endif // _RECOLL_H ���������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/kde/kioslave/kio_recoll-kde4/data/����������������������������������������������������0000755�0001750�0001750�00000000000�13570165410�016326� 5����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������recoll-1.26.3/kde/kioslave/kio_recoll-kde4/data/welcome.html����������������������������������������0000644�0001750�0001750�00000001452�13303776057�020602� �����������������������������������������������������������������������������������������������������������������������������������������������������������������������������0000000�0000000������������������������������������������������������������������������������������������������������������������������������������������������������������������������<html> <head> <meta http-equiv="Content-Type" content="text/html; charset=utf-8"> <title>Recoll Search

Recoll search

Query type:
Query language
All terms
Any term
File name
Enter search string:

recoll-1.26.3/kde/kioslave/kio_recoll-kde4/data/help.html0000644000175000017500000000751613303776057020106 00000000000000 Recoll Kio Slave Recoll search

Recoll kio slave

Use this module to perform Recoll searches from any program with a KIO interface.

The module can work in two modes:

  • HTML interface, close to a simplified Qt Recoll interface.
  • File manager interface, only available with KDE 4.1 and newer, which presents results as directory entries.

The module is still in its infancy, and you will undoubtedly encounter strange effects from time to time. If you have remarks or ideas about improving kio_recoll, or observe an interesting and reproducible misbehaviour, please report it.

kio_recoll is primarily designed and tested with Konqueror, and you will undoubtedly get even more surprising effects with other tools.

The HTML interface is currently much more usable; the directory interface is extremely quirky.

The module is particularly unhelpful with search hits inside email folders, which Konqueror has no way to access.

HTML interface

This works more or less like the Recoll Qt GUI, much simplified. The Recoll manual describes the queries that can be performed.

Most pages in the interface should be quite self-explanatory.

You normally enter this interface by entering "recoll:" or "recoll:/" in the Konqueror URL entry, and following the "search" link. You can also directly enter "recoll:/search.html".
In most circumstances, entering a link like recoll:/john smith will also yield an HTML result list.
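 
For reference, the URLs generated by the search form are of the following general shape, as handled by the UrlIngester class in kio_recoll.cpp: q carries the query string, qtp the query type and p the result page number (the values shown here are only an illustration):
 
    recoll://search/query?q=john+smith&qtp=l&p=0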

Compared to the Qt Recoll GUI, the nice point is that you can click or drag and drop the icons to access the results in the standard desktop way.

File manager interface

The path part of the URI is taken as a Recoll query language string and executed. The results are displayed as directory entries.

There are several ways to enter this interface:

  • Using "recollf" as protocol name instead of "recoll". This is probably the easiest option inside open dialogs.
  • Using an URL ending with a '/', ie:
    recoll:/red apples ext:html/
  • Users who want to use the file manager view most of the time can set the RECOLL_KIO_ALWAYS_DIR environment variable or the kio_always_dir recoll.conf variable to 1 (see the example just after this list). The HTML interface will then only be accessible through the search link in the top "recoll:" view.
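 
As a minimal sketch (assuming a POSIX shell, and the usual one "name = value" per line recoll.conf syntax):
 
    # In the shell environment, for instance in ~/.profile:
    export RECOLL_KIO_ALWAYS_DIR=1
    # Or, equivalently, in ~/.recoll/recoll.conf:
    kio_always_dir = 1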

No search result details (text samples, relevance, etc.) are available, but this interface allows multiple selections, copying, use inside any KDE open dialog, etc.

To avoid swamping the interface with thousands of results, the result count is limited to 100 by default. You can change this value by setting the kio_max_direntries parameter in your recoll configuration file (typically ~/.recoll/recoll.conf).
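 
For example, to raise the limit (the value shown is arbitrary; same configuration file syntax as above):
 
    kio_max_direntries = 500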

Because of limitations in the current KIO slave usage, the actual entry names are not those displayed but synthetic ones like "recollResultxxx". This has unfortunate side-effects when dragging/dropping the entries to some other application, or when using an open dialog (the opened file doesn't have the correct path to the original file).

Recoll Search

recoll-1.26.3/kde/kioslave/kio_recoll-kde4/data/searchable.html0000644000175000017500000000145013303776057021236 00000000000000 Recoll searchable HTML

A Recoll-searchable HTML page

This is a text sample in which links have been inserted for words, such as system installation, which can be searched for in the whole document set by using recoll.

A little bit of JavaScript magic can also make all words searchable (try double-clicking any word).

recoll-1.26.3/kde/kioslave/kio_recoll-kde4/recollnolist.protocol0000644000175000017500000000030013303776057021633 00000000000000[Protocol] exec=kio_recoll protocol=recoll input=none output=filesystem # Version for kde4.0: no "listing" entry reading=true defaultMimeType=text/html Icon=recoll Class=:local URIMode=rawuri recoll-1.26.3/kde/kioslave/kio_recoll-kde4/dirif.cpp0000644000175000017500000002513213533651561017147 00000000000000/* Copyright (C) 2008 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* * A lot of code in this file was copied from kio_beagle 0.4.0, * which is a GPL program. The authors listed are: * Debajyoti Bera * * KDE4 port: * Stephan Binner */ #include "autoconfig.h" #include #if KDE_IS_VERSION(4,1,0) // Couldn't get listDir() to work with kde 4.0, konqueror keeps // crashing because of kdirmodel, couldn't find a workaround (not // saying it's impossible)... #include #include #include #include #include #include "kio_recoll.h" #include "pathut.h" using namespace KIO; static const QString resultBaseName("recollResult"); // Check if the input URL is of the form that konqueror builds by // appending one of our result file names to the directory name (which // is the search string). If it is, extract return the result document // number. Possibly restart the search if the search string does not // match the current one bool RecollProtocol::isRecollResult(const KUrl &url, int *num, QString *q) { *num = -1; kDebug() << "url" << url; // Basic checks if (!url.host().isEmpty() || url.path().isEmpty() || (url.protocol().compare("recoll") && url.protocol().compare("recollf"))) return false; QString path = url.path(); if (!path.startsWith("/")) return false; // Look for the last '/' and check if it is followed by // resultBaseName (riiiight...) int slashpos = path.lastIndexOf("/"); if (slashpos == -1 || slashpos == 0 || slashpos == path.length() -1) return false; slashpos++; //kDebug() << "Comparing " << path.mid(slashpos, resultBaseName.length()) << // "and " << resultBaseName; if (path.mid(slashpos, resultBaseName.length()).compare(resultBaseName)) return false; // Extract the result number QString snum = path.mid(slashpos + resultBaseName.length()); sscanf(snum.toAscii(), "%d", num); if (*num == -1) return false; //kDebug() << "URL analysis ok, num:" << *num; // We do have something that ressembles a recoll result locator. 
Check if // this matches the current search, else have to run the requested one *q = path.mid(1, slashpos-2); return true; } // Translate rcldoc result into directory entry static const UDSEntry resultToUDSEntry(const Rcl::Doc& doc, int num) { UDSEntry entry; KUrl url(doc.url.c_str()); // kDebug() << doc.url.c_str(); entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, url.fileName()); char cnum[30];sprintf(cnum, "%04d", num); entry.insert(KIO::UDSEntry::UDS_NAME, resultBaseName + cnum); if (!doc.mimetype.compare("application/x-fsdirectory") || !doc.mimetype.compare("inode/directory")) { entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); entry.insert( KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); } else { entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, doc.mimetype.c_str()); entry.insert( KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); } entry.insert(KIO::UDSEntry::UDS_LOCAL_PATH, url.path()); // For local files, supply the usual file stat information struct stat info; if (lstat(url.path().toAscii(), &info) >= 0) { entry.insert( KIO::UDSEntry::UDS_SIZE, info.st_size); entry.insert( KIO::UDSEntry::UDS_ACCESS, info.st_mode); entry.insert( KIO::UDSEntry::UDS_MODIFICATION_TIME, info.st_mtime); entry.insert( KIO::UDSEntry::UDS_ACCESS_TIME, info.st_atime); entry.insert( KIO::UDSEntry::UDS_CREATION_TIME, info.st_ctime); } entry.insert(KIO::UDSEntry::UDS_TARGET_URL, doc.url.c_str()); return entry; } // From kio_beagle static void createRootEntry(KIO::UDSEntry& entry) { entry.clear(); entry.insert( KIO::UDSEntry::UDS_NAME, "."); entry.insert( KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); entry.insert( KIO::UDSEntry::UDS_ACCESS, 0700); entry.insert( KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); } // Points to html query screen static void createGoHomeEntry(KIO::UDSEntry& entry) { entry.clear(); entry.insert(KIO::UDSEntry::UDS_NAME, "search.html"); entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, "Recoll search (click me)"); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); entry.insert(KIO::UDSEntry::UDS_TARGET_URL, "recoll:///search.html"); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0500); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "text/html"); entry.insert(KIO::UDSEntry::UDS_ICON_NAME, "recoll"); } // Points to help file static void createGoHelpEntry(KIO::UDSEntry& entry) { QString location = KStandardDirs::locate("data", "kio_recoll/help.html"); entry.clear(); entry.insert(KIO::UDSEntry::UDS_NAME, "help"); entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, "Recoll help (click me first)"); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); entry.insert(KIO::UDSEntry::UDS_TARGET_URL, QString("file://") + location); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0500); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "text/html"); entry.insert(KIO::UDSEntry::UDS_ICON_NAME, "help"); } void RecollProtocol::stat(const KUrl & url) { kDebug() << url << endl ; UrlIngester ingest(this, url); KIO::UDSEntry entry; entry.insert(KIO::UDSEntry::UDS_TARGET_URL, url.url()); UrlIngester::RootEntryType rettp; QueryDesc qd; int num; if (ingest.isRootEntry(&rettp)) { switch(rettp) { case UrlIngester::UIRET_ROOT: createRootEntry(entry); break; case UrlIngester::UIRET_HELP: createGoHelpEntry(entry); break; case UrlIngester::UIRET_SEARCH: createGoHomeEntry(entry); break; default: error(ERR_DOES_NOT_EXIST, ""); break; } } else if (ingest.isResult(&qd, &num)) { if (syncSearch(qd)) { Rcl::Doc doc; if (num >= 0 && m_source && m_source->getDoc(num, doc)) { entry = resultToUDSEntry(doc, num); } else { error(ERR_DOES_NOT_EXIST, ""); } } else { // hopefully 
syncSearch() set the error? } } else if (ingest.isQuery(&qd)) { // ie "recoll:/some string" or "recoll:/some string/" // // We have a problem here. We'd like to let the user enter // either form and get an html or a dir contents result, // depending on the ending /. Otoh this makes the name space // inconsistent, because /toto can't be a file (the html // result page) while /toto/ would be a directory ? or can it // // Another approach would be to use different protocol names // to avoid any possibility of mixups if (m_alwaysdir || ingest.alwaysDir() || ingest.endSlashQuery()) { kDebug() << "Directory type"; entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0700); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); entry.insert(KIO::UDSEntry::UDS_NAME, qd.query); entry.insert( KIO::UDSEntry::UDS_MODIFICATION_TIME, time(0)); entry.insert( KIO::UDSEntry::UDS_CREATION_TIME, time(0)); } } statEntry(entry); finished(); } void RecollProtocol::listDir(const KUrl& url) { kDebug() << url << endl; UrlIngester ingest(this, url); UrlIngester::RootEntryType rettp; QueryDesc qd; if (ingest.isRootEntry(&rettp)) { switch(rettp) { case UrlIngester::UIRET_ROOT: { kDebug() << "list /" << endl; UDSEntryList entries; KIO::UDSEntry entry; createRootEntry(entry); entries.append(entry); createGoHomeEntry(entry); entries.append(entry); createGoHelpEntry(entry); entries.append(entry); listEntries(entries); finished(); } return; default: error(ERR_CANNOT_ENTER_DIRECTORY, ""); return; } } else if (ingest.isQuery(&qd)) { // At this point, it seems that when the request is from // konqueror autocompletion it comes with a / at the end, // which offers an opportunity to not perform it. if (ingest.endSlashQuery()) { kDebug() << "Ends With /" << endl; error(ERR_SLAVE_DEFINED, "Autocompletion search aborted"); return; } if (!syncSearch(qd)) { // syncSearch did the error thing return; } // Fallthrough to actually listing the directory } else { kDebug() << "Cant grok input url"; error(ERR_CANNOT_ENTER_DIRECTORY, ""); return; } static int maxentries = -1; if (maxentries == -1) { if (o_rclconfig) o_rclconfig->getConfParam("kio_max_direntries", &maxentries); if (maxentries == -1) maxentries = 10000; } static const int pagesize = 200; int pagebase = 0; while (pagebase < maxentries) { vector page; int pagelen = m_source->getSeqSlice(pagebase, pagesize, page); UDSEntry entry; if (pagelen < 0) { error(ERR_SLAVE_DEFINED, "Internal error"); listEntry(entry, true); break; } for (int i = 0; i < pagelen; i++) { listEntry(resultToUDSEntry(page[i].doc, i), false); } if (pagelen != pagesize) { listEntry(entry, true); break; } pagebase += pagelen; } finished(); } #else // <--- KDE 4.1+ #include #include "kio_recoll.h" bool RecollProtocol::isRecollResult(const KUrl &, int *, QString *) { return false; } #endif recoll-1.26.3/kde/kioslave/kio_recoll-kde4/recoll.protocol0000644000175000017500000000025613303776057020414 00000000000000[Protocol] exec=kio_recoll protocol=recoll input=none output=filesystem listing=Name,Type, URL reading=true defaultMimeType=text/html Icon=recoll Class=:local URIMode=rawuri recoll-1.26.3/kde/kioslave/kio_recoll-kde4/kio_recoll.cpp0000644000175000017500000002503113533651561020172 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later 
version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include using namespace std; #include #include #include #include #include #include #include #include "rclconfig.h" #include "rcldb.h" #include "rclinit.h" #include "pathut.h" #include "searchdata.h" #include "rclquery.h" #include "wasatorcl.h" #include "kio_recoll.h" #include "docseqdb.h" #include "readfile.h" #include "smallut.h" #include "textsplit.h" #include "guiutils.h" using namespace KIO; RclConfig *RecollProtocol::o_rclconfig; RecollProtocol::RecollProtocol(const QByteArray &pool, const QByteArray &app) : SlaveBase("recoll", pool, app), m_initok(false), m_alwaysdir(false) { kDebug() << endl; if (o_rclconfig == 0) { o_rclconfig = recollinit(0, 0, 0, m_reason); if (!o_rclconfig || !o_rclconfig->ok()) { m_reason = string("Configuration problem: ") + m_reason; return; } } if (o_rclconfig->getDbDir().empty()) { // Note: this will have to be replaced by a call to a // configuration building dialog for initial configuration? Or // do we assume that the QT GUO is always used for this ? m_reason = "No db directory in configuration ??"; return; } rwSettings(false); m_rcldb = std::shared_ptr(new Rcl::Db(o_rclconfig)); if (!m_rcldb) { m_reason = "Could not build database object. (out of memory ?)"; return; } // Decide if we allow switching between html and file manager // presentation by using an end slash or not. Can also be done dynamically // by switching proto names. const char *cp = getenv("RECOLL_KIO_ALWAYS_DIR"); if (cp) { m_alwaysdir = stringToBool(cp); } else { o_rclconfig->getConfParam("kio_always_dir", &m_alwaysdir); } cp = getenv("RECOLL_KIO_STEMLANG"); if (cp) { m_stemlang = cp; } else { m_stemlang = "english"; } m_pager.setParent(this); m_initok = true; return; } // There should be an object counter somewhere to delete the config when done. // Doesn't seem needed in the kio context. 
RecollProtocol::~RecollProtocol() { kDebug(); } bool RecollProtocol::maybeOpenDb(string &reason) { if (!m_rcldb) { reason = "Internal error: initialization error"; return false; } if (!m_rcldb->isopen() && !m_rcldb->open(Rcl::Db::DbRO)) { reason = "Could not open database in " + o_rclconfig->getDbDir(); return false; } return true; } // This is never called afaik void RecollProtocol::mimetype(const KUrl &url) { kDebug() << url << endl; mimeType("text/html"); finished(); } UrlIngester::UrlIngester(RecollProtocol *p, const KUrl& url) : m_parent(p), m_slashend(false), m_alwaysdir(false), m_retType(UIRET_NONE), m_resnum(0), m_type(UIMT_NONE) { kDebug() << "Url" << url; m_alwaysdir = !url.protocol().compare("recollf"); QString path = url.path(); if (url.host().isEmpty()) { if (path.isEmpty() || !path.compare("/")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_ROOT; return; } else if (!path.compare("/help.html")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_HELP; return; } else if (!path.compare("/search.html")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_SEARCH; // Retrieve the query value for preloading the form m_query.query = url.queryItem("q"); return; } else if (m_parent->isRecollResult(url, &m_resnum, &m_query.query)) { m_type = UIMT_QUERYRESULT; m_query.opt = "l"; m_query.page = 0; } else { // Have to think this is some search string m_type = UIMT_QUERY; m_query.query = url.path(); m_query.opt = "l"; m_query.page = 0; } } else { // Non empty host, url must be something like : // //search/query?q=query¶m=value... kDebug() << "host" << url.host() << "path" << url.path(); if (url.host().compare("search") || url.path().compare("/query")) { return; } m_type = UIMT_QUERY; // Decode the forms' arguments m_query.query = url.queryItem("q"); m_query.opt = url.queryItem("qtp"); if (m_query.opt.isEmpty()) { m_query.opt = "l"; } QString p = url.queryItem("p"); if (p.isEmpty()) { m_query.page = 0; } else { sscanf(p.toAscii(), "%d", &m_query.page); } p = url.queryItem("det"); m_query.isDetReq = !p.isEmpty(); p = url.queryItem("cmd"); if (!p.isEmpty() && !p.compare("pv")) { p = url.queryItem("dn"); if (!p.isEmpty()) { // Preview and no docnum ?? m_resnum = atoi((const char *)p.toUtf8()); // Result in page is 1+ m_resnum--; m_type = UIMT_PREVIEW; } } } if (m_query.query.startsWith("/")) m_query.query.remove(0,1); if (m_query.query.endsWith("/")) { kDebug() << "Ends with /"; m_slashend = true; m_query.query.chop(1); } else { m_slashend = false; } return; } bool RecollProtocol::syncSearch(const QueryDesc &qd) { kDebug(); if (!m_initok || !maybeOpenDb(m_reason)) { string reason = "RecollProtocol::listDir: Init error:" + m_reason; error(KIO::ERR_SLAVE_DEFINED, reason.c_str()); return false; } if (qd.sameQuery(m_query)) { return true; } // doSearch() calls error() if appropriate. return doSearch(qd); } // This is used by the html interface, but also by the directory one // when doing file copies for exemple. This is the central dispatcher // for requests, it has to know a little about both models. 
void RecollProtocol::get(const KUrl& url) { kDebug() << url << endl; if (!m_initok || !maybeOpenDb(m_reason)) { string reason = "Recoll: init error: " + m_reason; error(KIO::ERR_SLAVE_DEFINED, reason.c_str()); return; } UrlIngester ingest(this, url); UrlIngester::RootEntryType rettp; QueryDesc qd; int resnum; if (ingest.isRootEntry(&rettp)) { switch(rettp) { case UrlIngester::UIRET_HELP: { QString location = KStandardDirs::locate("data", "kio_recoll/help.html"); redirection(location); } goto out; default: searchPage(); goto out; } } else if (ingest.isResult(&qd, &resnum)) { // Url matched one generated by konqueror/Dolphin out of a // search directory listing: ie: // recoll:/some search string/recollResultxx // // This happens when the user drags/drop the result to another // app, or with the "open-with" right-click. Does not happen // if the entry itself is clicked (the UDS_URL is apparently // used in this case // // Redirect to the result document URL if (!syncSearch(qd)) { return; } Rcl::Doc doc; if (resnum >= 0 && m_source && m_source->getDoc(resnum, doc)) { mimeType(doc.mimetype.c_str()); redirection(KUrl::fromLocalFile((const char *)(doc.url.c_str()+7))); goto out; } } else if (ingest.isPreview(&qd, &resnum)) { if (!syncSearch(qd)) { return; } Rcl::Doc doc; if (resnum >= 0 && m_source && m_source->getDoc(resnum, doc)) { showPreview(doc); goto out; } } else if (ingest.isQuery(&qd)) { #if 0 // Do we need this ? if (host.isEmpty()) { char cpage[20];sprintf(cpage, "%d", page); QString nurl = QString::fromAscii("recoll://search/query?q=") + query + "&qtp=" + opt + "&p=" + cpage; redirection(KUrl(nurl)); goto out; } #endif // htmlDoSearch does the search syncing (needs to know about changes). htmlDoSearch(qd); goto out; } error(KIO::ERR_SLAVE_DEFINED, "Unrecognized URL or internal error"); out: finished(); } // Execute Recoll search, and set the docsource bool RecollProtocol::doSearch(const QueryDesc& qd) { kDebug() << "query" << qd.query << "opt" << qd.opt; m_query = qd; char opt = qd.opt.isEmpty() ? 'l' : qd.opt.toUtf8().at(0); string qs = (const char *)qd.query.toUtf8(); Rcl::SearchData *sd = 0; if (opt != 'l') { Rcl::SearchDataClause *clp = 0; if (opt == 'f') { clp = new Rcl::SearchDataClauseFilename(qs); } else { clp = new Rcl::SearchDataClauseSimple(opt == 'o' ? Rcl::SCLT_OR : Rcl::SCLT_AND, qs); } sd = new Rcl::SearchData(Rcl::SCLT_OR, m_stemlang); if (sd && clp) sd->addClause(clp); } else { sd = wasaStringToRcl(o_rclconfig, m_stemlang, qs, m_reason); } if (!sd) { m_reason = "Internal Error: cant build search"; error(KIO::ERR_SLAVE_DEFINED, m_reason.c_str()); return false; } std::shared_ptr sdata(sd); std::shared_ptrquery(new Rcl::Query(m_rcldb.get())); query->setCollapseDuplicates(prefs.collapseDuplicates); if (!query->setQuery(sdata)) { m_reason = "Query execute failed. Invalid query or syntax error?"; error(KIO::ERR_SLAVE_DEFINED, m_reason.c_str()); return false; } DocSequenceDb *src = new DocSequenceDb(m_rcldb, std::shared_ptr(query), "Query results", sdata); if (src == 0) { error(KIO::ERR_SLAVE_DEFINED, "Can't build result sequence"); return false; } m_source = std::shared_ptr(src); // Reset pager in all cases. Costs nothing, stays at page -1 initially // htmldosearch will fetch the first page if needed. m_pager.setDocSource(m_source); return true; } // Note: KDE_EXPORT is actually needed on Unix when building with // cmake. 
Says something like __attribute__(visibility(defautl)) // (cmake apparently sets all symbols to not exported) extern "C" {KDE_EXPORT int kdemain(int argc, char **argv);} int kdemain(int argc, char **argv) { #ifdef KDE_VERSION_3 KInstance instance("kio_recoll"); #else KComponentData instance("kio_recoll"); #endif kDebug() << "*** starting kio_recoll " << endl; if (argc != 4) { kDebug() << "Usage: kio_recoll proto dom-socket1 dom-socket2\n" << endl; exit(-1); } RecollProtocol slave(argv[2], argv[3]); slave.dispatchLoop(); kDebug() << "kio_recoll Done" << endl; return 0; } recoll-1.26.3/kde/kioslave/kio_recoll-kde4/recollf.protocol0000644000175000017500000000025713303776057020563 00000000000000[Protocol] exec=kio_recoll protocol=recollf input=none output=filesystem listing=Name,Type, URL reading=true defaultMimeType=text/html Icon=recoll Class=:local URIMode=rawuri recoll-1.26.3/kde/kioslave/kio_recoll-kde4/CMakeLists.txt0000644000175000017500000000517413533651561020112 00000000000000cmake_minimum_required(VERSION 2.6) project(kio_recoll) find_package(KDE4 REQUIRED) add_definitions(${QT_DEFINITIONS} ${KDE4_DEFINITIONS}) add_definitions(-DKDE_DEFAULT_DEBUG_AREA=7130 -DRECOLL_DATADIR=\\"${CMAKE_INSTALL_PREFIX}/share/recoll\\" -DLIBDIR=\\"${CMAKE_INSTALL_PREFIX}/lib\\" -DHAVE_CONFIG_H ) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${KDE4_ENABLE_EXCEPTIONS}") set(rcltop ${CMAKE_CURRENT_SOURCE_DIR}/../../../) # Execute recoll configuration to create autoconfig.h and version.h and # generate a PIC lib execute_process(COMMAND ${rcltop}/configure --disable-static --disable-qtgui --disable-x11mon --disable-python-chm --disable-python-module --prefix=${CMAKE_INSTALL_PREFIX} --mandir=${CMAKE_INSTALL_PREFIX}/share/man WORKING_DIRECTORY ${rcltop} ) link_directories(${rcltop}/.libs ${CMAKE_INSTALL_PREFIX}/lib) include_directories (${CMAKE_SOURCE_DIR} ${CMAKE_BINARY_DIR} ${KDE4_INCLUDES} ${rcltop}/aspell ${rcltop}/bincimapmime ${rcltop}/common ${rcltop}/index ${rcltop}/internfile ${rcltop}/query ${rcltop}/rcldb ${rcltop}/unac ${rcltop}/utils ${rcltop}/qtgui ) set(kio_recoll_SRCS kio_recoll.cpp htmlif.cpp dirif.cpp ${rcltop}/qtgui/guiutils.cpp) CHECK_LIBRARY_EXISTS(dl dlopen "" DLOPEN_IN_LIBDL) IF(DLOPEN_IN_LIBDL) LIST(APPEND EXTRA_LIBS dl) ENDIF(DLOPEN_IN_LIBDL) CHECK_LIBRARY_EXISTS(pthread pthread_sigmask "" PTHREAD_IN_LIBPTHREAD) IF(PTHREAD_IN_LIBPTHREAD) LIST(APPEND EXTRA_LIBS pthread) ENDIF(PTHREAD_IN_LIBPTHREAD) # Had the idea to add e.g. /usr/lib/recoll to the rpath so that the dyn lib # will be found at run time. But this does not seem to work with debian # which strips RPATH by default (I think there is a way for libs in app-specific # paths but I did not find it). Link with the .a instead. 
#SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib/recoll") kde4_add_plugin(kio_recoll ${kio_recoll_SRCS}) add_custom_target(rcllib COMMAND make PicStatic WORKING_DIRECTORY ${rcltop} ) add_dependencies(kio_recoll rcllib) target_link_libraries(kio_recoll recoll xapian xslt xml2 z ${EXTRA_LIBS} ${KDE4_KIO_LIBS}) install(TARGETS kio_recoll DESTINATION ${PLUGIN_INSTALL_DIR}) IF ("${KDE_VERSION_MAJOR}.${KDE_VERSION_MINOR}" GREATER 4.0) install(FILES recoll.protocol recollf.protocol DESTINATION ${SERVICES_INSTALL_DIR}) ELSE ("${KDE_VERSION_MAJOR}.${KDE_VERSION_MINOR}" GREATER 4.0) install(FILES recollnolist.protocol DESTINATION ${SERVICES_INSTALL_DIR} RENAME recoll.protocol) ENDIF ("${KDE_VERSION_MAJOR}.${KDE_VERSION_MINOR}" GREATER 4.0) install(FILES data/welcome.html data/help.html DESTINATION ${DATA_INSTALL_DIR}/kio_recoll) recoll-1.26.3/kde/kioslave/kio_recoll-kde4/00README.txt0000644000175000017500000000656413303776057017217 00000000000000Recoll KIO slave ================ An experiment with a recoll KIO slave. Caveat: I am only currently testing this with a production, but very recent, version of KDE 4.1, and I don't intend to really support older versions. The most usable aspects work under KDE 4.0 though. As a reference, my test system is an up to date (2009-01) Kubuntu 8.10. Usage ===== Depending on the protocol name used, the search results will be returned either as HTML pages (looking quite like a normal Recoll result list), or as directory entries. The HTML mode only works with Konqueror, not Dolphin. The directory mode is available with both browsers, and also application open dialog (ie Kate). The HTML mode is much more usable than the directory mode at this point More detailed help/explanations can be found a document accessible from the slave: To try things out, after building and installing, enter "recoll:/" in a Konqueror URL entry. Depending on the KDE version, this will bring you either to an HTML search form, or to a directory listing, where you should READ THE HELP FILE. Building and installing: ======================= Only tested with KDE 4.1 and later. The main Recoll installation shares its prefix with the KIO slave, which needs to use the KDE one. This means that, if KDE lives in /usr, Recoll must be configured with --prefix=/usr, not /usr/local. Else you'll have run-time problems, the slave will not be able to find the Recoll configuration. !!*Notice: You cannot share a build directory between recoll and kio_recoll because they use different configure options for the main lib, but build it in the same place. The main lib "configure" is run at "cmake" time for kio_recoll, the build is done at "make" time. Recipe: - Make sure the KDE4 core devel packages and cmake are installed. - Extract the Recoll source. - IF Recoll is not installed yet: configure recoll with --prefix=/usr (or wherever KDE lives), build and install Recoll. - In the Recoll source, go to kde/kioslave/recoll, then build and install the kio slave: mkdir builddir cd builddir cmake .. -DCMAKE_INSTALL_PREFIX=/usr -DQT_QMAKE_EXECUTABLE=/usr/bin/qmake-qt4 make sudo make install - You should have a look at where "make install" copies things, because misconfigured distribution, generating wrong targets, are frequent. Especially, you should check that kio_recoll.so is copied to the right place, meaning among the output of "kde4-config --path module". As an additional check, there should be many other kio_[xxx].so in there. 
Same for the protocol file, check that it's not alone in its directory (really, this sounds strange, but, to this point, I've seen more systems with broken cmake/KDE configs than correct ones). You need to build/update the index with recollindex, the KIO slave doesn't deal with indexing for now. Misc build problems: =================== KUBUNTU 8.10 (updated to 2008-27-11) ------------------------------------ cmake generates a bad dependancy on /build/buildd/kde4libs-4.1.2/obj-i486-linux-gnu/lib/libkdecore.so inside CMakeFiles/kio_recoll.dir/build.make Found no way to fix this. You need to edit the line and replace the /build/[...]/lib with /usr/lib. This manifests itself with the following error message: make[2]: *** No rule to make target `/build/buildd/kde4libs-4.1.2/obj-i486-linux-gnu/lib/libkdecore.so', needed by `lib/kio_recoll.so'. Stop. recoll-1.26.3/kde/kioslave/kio_recoll-kde4/htmlif.cpp0000644000175000017500000001763413533651561017345 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include using namespace std; #include #include #include "rclconfig.h" #include "rcldb.h" #include "rclinit.h" #include "pathut.h" #include "searchdata.h" #include "rclquery.h" #include "wasatorcl.h" #include "kio_recoll.h" #include "docseqdb.h" #include "readfile.h" #include "smallut.h" #include "plaintorich.h" #include "internfile.h" #include "wipedir.h" #include "hldata.h" using namespace KIO; bool RecollKioPager::append(const string& data) { if (!m_parent) return false; m_parent->data(QByteArray(data.c_str())); return true; } #include string RecollProtocol::makeQueryUrl(int page, bool isdet) { ostringstream str; str << "recoll://search/query?q=" << url_encode((const char*)m_query.query.toUtf8()) << "&qtp=" << (const char*)m_query.opt.toUtf8(); if (page >= 0) str << "&p=" << page; if (isdet) str << "&det=1"; return str.str(); } string RecollKioPager::detailsLink() { string chunk = string("makeQueryUrl(m_parent->m_pager.pageNumber(), true) + "\">" + "(show query)" + ""; return chunk; } static string parformat; const string& RecollKioPager::parFormat() { // Need to escape the % inside the query url string qurl = m_parent->makeQueryUrl(-1, false), escurl; for (string::size_type pos = 0; pos < qurl.length(); pos++) { switch(qurl.at(pos)) { case '%': escurl += "%%"; break; default: escurl += qurl.at(pos); } } ostringstream str; str << "" "%R %S " "Preview  " << "Open " << "%T
" "%M %D   %U  %i
" "%A %K"; return parformat = str.str(); } string RecollKioPager::pageTop() { string pt = "

m_query.query.toUtf8())); pt += "\">New Search"; return pt; // Would be nice to have but doesnt work because the query may be executed // by another kio instance which has no idea of the current page o #if 0 && KDE_IS_VERSION(4,1,0) "    m_query.query.toUtf8())) + "/\">Directory view (you may need to reload the page)" #endif } string RecollKioPager::nextUrl() { int pagenum = pageNumber(); if (pagenum < 0) pagenum = 0; else pagenum++; return m_parent->makeQueryUrl(pagenum); } string RecollKioPager::prevUrl() { int pagenum = pageNumber(); if (pagenum <= 0) pagenum = 0; else pagenum--; return m_parent->makeQueryUrl(pagenum); } static string welcomedata; void RecollProtocol::searchPage() { mimeType("text/html"); if (welcomedata.empty()) { QString location = KStandardDirs::locate("data", "kio_recoll/welcome.html"); string reason; if (location.isEmpty() || !file_to_string((const char *)location.toUtf8(), welcomedata, &reason)) { welcomedata = "Recoll Error" "

Could not locate Recoll welcome.html file: "; welcomedata += reason; welcomedata += "

"; } } string catgq; #if 0 // Catg filtering. A bit complicated to do because of the // stateless thing (one more thing to compare to check if same // query) right now. Would be easy by adding to the query // language, but not too useful in this case, so scrap it for now. list cats; if (o_rclconfig->getMimeCategories(cats) && !cats.empty()) { catgq = "

Filter on types: " "All"; for (list::iterator it = cats.begin(); it != cats.end();it++) { catgq += "\n" + *it ; } } #endif string tmp; map subs; subs['Q'] = (const char *)m_query.query.toUtf8(); subs['C'] = catgq; subs['S'] = ""; pcSubst(welcomedata, tmp, subs); data(tmp.c_str()); } void RecollProtocol::queryDetails() { mimeType("text/html"); QByteArray array; QTextStream os(&array, QIODevice::WriteOnly); os << "" << endl; os << "" << endl; os << "" << "Recoll query details" << "\n" << endl; os << "" << endl; os << "

Query details:

" << endl; os << "

" << m_pager.queryDescription().c_str() <<"

"<< endl; os << "

Return to results" << endl; os << "" << endl; data(array); } class PlainToRichKio : public PlainToRich { public: PlainToRichKio(const string& nm) : m_name(nm) { } virtual string header() { if (m_inputhtml) { return cstr_null; } else { return string("" ""). append(m_name). append("

");
	}
    }

    virtual string startMatch(unsigned int)
    {
	return string("");
    }

    virtual string endMatch() 
    {
	return string("");
    }

    const string &m_name;
};

void RecollProtocol::showPreview(const Rcl::Doc& idoc)
{
    FileInterner interner(idoc, o_rclconfig, FileInterner::FIF_forPreview);
    Rcl::Doc fdoc;
    string ipath = idoc.ipath;
    if (!interner.internfile(fdoc, ipath)) {
	error(KIO::ERR_SLAVE_DEFINED, "Cannot convert file to internal format");
	return;
    }
    if (!interner.get_html().empty()) {
	fdoc.text = interner.get_html();
	fdoc.mimetype = "text/html";
    }

    mimeType("text/html");

    string fname =  path_getsimple(fdoc.url).c_str();
    PlainToRichKio ptr(fname);
    ptr.set_inputhtml(!fdoc.mimetype.compare("text/html"));
    list<string> otextlist;
    HighlightData hdata;
    if (m_source)
	m_source->getTerms(hdata);
    ptr.plaintorich(fdoc.text, otextlist, hdata);

    QByteArray array;
    QTextStream os(&array, QIODevice::WriteOnly);
    for (list<string>::iterator it = otextlist.begin();
	 it != otextlist.end(); it++) {
	os << (*it).c_str();
    }
    os << "" << endl;
    data(array);
}

void RecollProtocol::htmlDoSearch(const QueryDesc& qd)
{
    kDebug() << "q" << qd.query << "option" << qd.opt << "page" << qd.page <<
	"isdet" << qd.isDetReq << endl;
 
    mimeType("text/html");

    if (!syncSearch(qd))
	return;
    // syncSearch/doSearch do the setDocSource when needed
    if (m_pager.pageNumber() < 0) {
	m_pager.resultPageNext();
    }
    if (qd.isDetReq) {
	queryDetails();
	return;
    }

    // Check / adjust page number
    if (qd.page > m_pager.pageNumber()) {
	int npages = qd.page - m_pager.pageNumber();
	for (int i = 0; i < npages; i++)
	    m_pager.resultPageNext();
    } else if (qd.page < m_pager.pageNumber()) {
	int npages = m_pager.pageNumber() - qd.page;
	for (int i = 0; i < npages; i++) 
	    m_pager.resultPageBack();
    }
    // Display
    m_pager.displayPage(o_rclconfig);
}
recoll-1.26.3/kde/kioslave/kio_recoll/0000755000175000017500000000000013570165410014570 500000000000000recoll-1.26.3/kde/kioslave/kio_recoll/kio_recoll.h0000644000175000017500000001467013533651561017021 00000000000000#ifndef _RECOLL_H
#define _RECOLL_H
/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include 

#include 
#include 

#include 

#include "rclconfig.h"
#include "rcldb.h"
#include "docseq.h"
#include "reslistpager.h"
#include 

class RecollProtocol;

/** Specialize the recoll html pager for the kind of links we use etc. */
class RecollKioPager : public ResListPager {
public:
    RecollKioPager() : m_parent(0) {}
    void setParent(RecollProtocol *proto) {
        m_parent = proto;
    }

    virtual bool append(const std::string& data);
    virtual bool append(const std::string& data, int, const Rcl::Doc&) {
        return append(data);
    }
    virtual std::string detailsLink();
    virtual const std::string& parFormat();
    virtual std::string nextUrl();
    virtual std::string prevUrl();
    virtual std::string pageTop();

private:
    RecollProtocol *m_parent;
};
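// Note (descriptive, see the sources in this directory): the RecollProtocol
// constructor calls m_pager.setParent(this); the pager then pushes the HTML
// it generates back through append(), which forwards it to the slave's
// data() output (RecollKioPager::append() in htmlif.cpp).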

class QueryDesc {
public:
    QueryDesc() : opt("l"), page(0), isDetReq(false) {}
    QString query;
    QString opt;
    int page;
    bool isDetReq;
    bool sameQuery(const QueryDesc& o) const {
        return !opt.compare(o.opt) && !query.compare(o.query);
    }
};

// Our virtual tree is a bit complicated. We need a class to analyse a URL
// and tell what we should do with it.
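// Roughly, the URL forms recognized here are the following (examples only,
// see the constructor in kio_recoll.cpp):
//   recoll:/                                   root entry
//   recoll:/help.html, recoll:/search.html     help page / search form
//   recoll:/some query string[/]               query (html or dir mode)
//   recoll:/some query/recollResult0004        result from a directory listing
//   recoll://search/query?q=...&qtp=l&p=0      html form submission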
class UrlIngester {
public:
    UrlIngester(RecollProtocol *p, const QUrl& url);
    enum RootEntryType {UIRET_NONE, UIRET_ROOT, UIRET_HELP, UIRET_SEARCH};
    bool isRootEntry(RootEntryType *tp) {
        if (m_type != UIMT_ROOTENTRY) {
            return false;
        }
        *tp = m_retType;
        return true;
    }
    bool isQuery(QueryDesc *q) {
        if (m_type != UIMT_QUERY) {
            return false;
        }
        *q = m_query;
        return true;
    }
    bool isResult(QueryDesc *q, int *num) {
        if (m_type != UIMT_QUERYRESULT) {
            return false;
        }
        *q = m_query;
        *num = m_resnum;
        return true;
    }
    bool isPreview(QueryDesc *q, int *num) {
        if (m_type != UIMT_PREVIEW) {
            return false;
        }
        *q = m_query;
        *num = m_resnum;
        return true;
    }
    bool endSlashQuery() {
        return m_slashend;
    }
    bool alwaysDir() {
        return m_alwaysdir;
    }

private:
    RecollProtocol *m_parent;
    QueryDesc       m_query;
    bool            m_slashend;
    bool            m_alwaysdir;
    RootEntryType   m_retType;
    int             m_resnum;
    enum MyType {UIMT_NONE, UIMT_ROOTENTRY, UIMT_QUERY, UIMT_QUERYRESULT,
                 UIMT_PREVIEW
                };
    MyType           m_type;
};

/**
 * A KIO slave to execute and display Recoll searches.
 *
 * Things are made a little complicated because KIO slaves can't hope
 * that their internal state will remain consistent with their user
 * application state: slaves die, are restarted, reused, at random
 * between requests.
 * In our case, this means that any request has to be processed
 * without reference to the last operation performed. I.e., if the
 * search parameters are not those from the last request, the search
 * must be restarted anew. This happens for example with different
 * searches in 2 konqueror screens: typically only one kio_slave will
 * be used.
 * The check that the search is the same as the last one, done to avoid
 * restarting it, is an optimization, not the base mechanism
 * (contrary to what was initially assumed, and may have left a few
 * crumbs around).
 *
 * We have two modes of operation, one based on html forms and result
 * pages, which can potentially be developed to the full Recoll
 * functionality, and one based on a directory listing model, which
 * will always be more limited, but may be useful in some cases to
 * allow easy copying of files etc. Which one is in use is decided by
 * the form of the URL.
 */
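// Sketch of the request flow (see get() in kio_recoll.cpp): the URL is fed
// to UrlIngester, then we either redirect (help page, result document), emit
// the search form (searchPage()), call showPreview(), or call
// htmlDoSearch(), which relies on syncSearch()/doSearch() to rebuild the
// Rcl::Query and DocSequenceDb whenever the incoming query differs from the
// cached m_query.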
class RecollProtocol : public KIO::SlaveBase {
public:
    RecollProtocol(const QByteArray& pool, const QByteArray& app);
    virtual ~RecollProtocol();
    virtual void mimetype(const QUrl& url);
    virtual void get(const QUrl& url);
    // The directory mode is not available with KDE 4.0: I could find
    // no way to avoid crashing kdirmodel.
    virtual void stat(const QUrl& url);
    virtual void listDir(const QUrl& url);

    static RclConfig  *o_rclconfig;

    friend class RecollKioPager;
    friend class UrlIngester;

private:
    bool maybeOpenDb(std::string& reason);
    bool URLToQuery(const QUrl& url, QString& q, QString& opt, int *page = 0);
    bool doSearch(const QueryDesc& qd);

    void searchPage();
    void queryDetails();
    std::string makeQueryUrl(int page, bool isdet = false);
    bool syncSearch(const QueryDesc& qd);
    void htmlDoSearch(const QueryDesc& qd);
    void showPreview(const Rcl::Doc& doc);
    bool isRecollResult(const QUrl& url, int *num, QString* q);

    bool        m_initok;
    std::shared_ptr<Rcl::Db> m_rcldb;
    std::string      m_reason;
    bool        m_alwaysdir;
    // English by default, else taken from env[RECOLL_KIO_STEMLANG]
    std::string      m_stemlang;

    // Search state: because of how the KIO slaves are used / reused,
    // we can't be sure that the next request will be for the same
    // search, and we need to check and restart one if the data
    // changes. This is very wasteful but hopefully won't happen too
    // much in actual use. One possible workaround for some scenarios
    // (one slave several konqueror windows) would be to have a small
    // cache of recent searches kept open.
    RecollKioPager m_pager;
    std::shared_ptr<DocSequence> m_source;
    // Note: the page field is not used here; the current page always comes from m_pager.
    QueryDesc      m_query;
};

extern "C" {
    __attribute__((visibility("default"))) int
    kdemain(int argc, char **argv);
}


#endif // _RECOLL_H
recoll-1.26.3/kde/kioslave/kio_recoll/data/0000755000175000017500000000000013570165410015501 500000000000000recoll-1.26.3/kde/kioslave/kio_recoll/data/welcome.html0000644000175000017500000000145213303776057017755 00000000000000

  
  Recoll Search



  

Recoll search

Query type:
Query language
All terms
Any term
File name
Enter search string:

recoll-1.26.3/kde/kioslave/kio_recoll/data/help.html0000644000175000017500000000755513303776057017264 00000000000000 Recoll Kio Slave Recoll search

Recoll kio slave

Use this module to perform Recoll searches from any program with a KIO interface.

The module can work in two modes:

  • Html interface, close to a simplified QT Recoll interface.
  • File manager interface, only with KDE 4.1 and newer, which presents results as directory entries.

With recent KDE versions (now: 2016), the file manager interface works in Dolphin, and both the file manager and the HTML interface work in Konqueror.

You will undoubtedly obtain strange effects from time to time. If you have any remarks or ideas about improving kio_recoll, or observe an interesting and reproducible sequence, please report it.

The module is particularly unhelpful with search hits inside email folders, which Konqueror or Dolphin have no way to access.

HTML interface

This works more or less like the Recoll QT GUI, much simplified. The Recoll manual describes the queries that can be performed.

Most pages in the interface should be quite self-explanatory.

You normally enter this interface by entering "recoll:" or "recoll:/" in the Konqueror URL entry, and following the "search" link. You can also directly enter "recoll:/search.html".
In most circumstances, entering a link like recoll:/john smith will also yield an HTML result list.

Compared to QT Recoll, the nice point is that you can click or drag/drop the icons to access the results in the standard desktop way.

File manager interface

The path part of the URI is taken as a Recoll query language string and executed. The results are displayed as directory entries.

There are several ways to enter this interface:

  • Dolphin will only present the file manager interface. Enter recoll:/some query or recoll: some query in the address bar. Note: single slash, not double; you will get something like "Protocol unknown" if you enter two slashes.
  • Konqueror: this supports both the file manager and HTML interfaces. You can force using the file manager interface in the following ways:
    • Using "recollf" as protocol name instead of "recoll". This is probably the easiest option inside open dialogs.
    • Using a URL ending with a '/', i.e.:
      recoll:/red apples ext:html/
    • If you would like to use the file manager view most of the time, you can set the RECOLL_KIO_ALWAYS_DIR environment variable or the kio_always_dir recoll.conf variable to 1. The HTML interface will then only be accessible through the search link in the top "recoll:" view.

No search result details (samples, relevance etc.) are available, but this interface allows multiple selections and copies, usage inside any KDE open dialog, etc.

To avoid swamping the interface with many thousands of results, the result count is limited to 10000 by default. You can change this value by setting the kio_max_direntries parameter in your recoll configuration file (typically ~/.recoll/recoll.conf).
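
For example, the following lines (values chosen only for illustration) could be added to the configuration file to turn on the directory view by default and raise the listing limit:
      kio_always_dir = 1
      kio_max_direntries = 20000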

Recoll Search

recoll-1.26.3/kde/kioslave/kio_recoll/data/searchable.html0000644000175000017500000000145013303776057020411 00000000000000 Recoll searchable HTML

A Recoll-searchable HTML page

This is a text sample in which links have been inserted for words, such as system installation, which can be searched for in the whole document set by using recoll.

Also, a little bit of JavaScript magic can make all words searchable (try double-clicking any word).

recoll-1.26.3/kde/kioslave/kio_recoll/recollnolist.protocol0000644000175000017500000000030013303776057021006 00000000000000[Protocol] exec=kio_recoll protocol=recoll input=none output=filesystem # Version for kde4.0: no "listing" entry reading=true defaultMimeType=text/html Icon=recoll Class=:local URIMode=rawuri recoll-1.26.3/kde/kioslave/kio_recoll/dirif.cpp0000644000175000017500000003074413533651561016327 00000000000000/* Copyright (C) 2008 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ /* * A lot of code in this file was copied from kio_beagle 0.4.0, * which is a GPL program. The authors listed are: * Debajyoti Bera * * KDE4 port: * Stephan Binner */ #include "autoconfig.h" // Couldn't get listDir() to work with kde 4.0, konqueror keeps // crashing because of kdirmodel, couldn't find a workaround (not // saying it's impossible)... #include #include #include #include #include "kio_recoll.h" #include "pathut.h" using namespace KIO; static const QString resultBaseName("recollResult"); // Check if the input URL is of the form that konqueror builds by // appending one of our result file names to the directory name (which // is the search string). If it is, extract and return the result // document number. Possibly restart the search if the search string // does not match the current one bool RecollProtocol::isRecollResult(const QUrl& url, int *num, QString *q) { *num = -1; qDebug() << "RecollProtocol::isRecollResult: url: " << url; // Basic checks if (!url.host().isEmpty() || url.path().isEmpty() || (url.scheme().compare("recoll") && url.scheme().compare("recollf"))) { qDebug() << "RecollProtocol::isRecollResult: no: url.host " << url.host() << " path " << url.path() << " scheme " << url.scheme(); return false; } QString path = url.path(); qDebug() << "RecollProtocol::isRecollResult: path: " << path; if (!path.startsWith("/")) { return false; } // Look for the last '/' and check if it is followed by // resultBaseName (riiiight...) int slashpos = path.lastIndexOf("/"); if (slashpos == -1 || slashpos == 0 || slashpos == path.length() - 1) { return false; } slashpos++; //qDebug() << "Comparing " << path.mid(slashpos, resultBaseName.length()) << // "and " << resultBaseName; if (path.mid(slashpos, resultBaseName.length()).compare(resultBaseName)) { return false; } // Extract the result number QString snum = path.mid(slashpos + resultBaseName.length()); sscanf(snum.toUtf8(), "%d", num); if (*num == -1) { return false; } //qDebug() << "URL analysis ok, num:" << *num; // We do have something that ressembles a recoll result locator. 
Check if // this matches the current search, else have to run the requested one *q = path.mid(1, slashpos - 2); return true; } // Translate rcldoc result into directory entry static const UDSEntry resultToUDSEntry(const Rcl::Doc& doc, int num) { UDSEntry entry; QUrl url(doc.url.c_str()); //qDebug() << doc.url.c_str(); /// Filename - as displayed in directory listings etc. /// "." has the usual special meaning of "current directory" /// UDS_NAME must always be set and never be empty, neither contain '/'. /// /// Note that KIO will append the UDS_NAME to the url of their /// parent directory, so all kioslaves must use that naming scheme /// ("url_of_parent/filename" will be the full url of that file). /// To customize the appearance of files without changing the url /// of the items, use UDS_DISPLAY_NAME. // // Use the result number to designate the file in case we are // asked to access it char cnum[30]; sprintf(cnum, "%04d", num); entry.insert(KIO::UDSEntry::UDS_NAME, resultBaseName + cnum); // Display the real file name entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, url.fileName()); /// A local file path if the ioslave display files sitting on the /// local filesystem (but in another hierarchy, e.g. settings:/ or /// remote:/) entry.insert(KIO::UDSEntry::UDS_LOCAL_PATH, url.path()); /// This file is a shortcut or mount, pointing to an /// URL in a different hierarchy /// @since 4.1 // We should probably set this only if the scheme is not 'file' (e.g. // from the web cache). entry.insert(KIO::UDSEntry::UDS_TARGET_URL, doc.url.c_str()); if (!doc.mimetype.compare("application/x-fsdirectory") || !doc.mimetype.compare("inode/directory")) { entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); } else { entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, doc.mimetype.c_str()); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); } // For local files, supply the usual file stat information struct stat info; if (lstat(url.path().toUtf8(), &info) >= 0) { entry.insert(KIO::UDSEntry::UDS_SIZE, info.st_size); entry.insert(KIO::UDSEntry::UDS_ACCESS, info.st_mode); entry.insert(KIO::UDSEntry::UDS_MODIFICATION_TIME, info.st_mtime); entry.insert(KIO::UDSEntry::UDS_ACCESS_TIME, info.st_atime); entry.insert(KIO::UDSEntry::UDS_CREATION_TIME, info.st_ctime); } return entry; } // From kio_beagle static void createRootEntry(KIO::UDSEntry& entry) { entry.clear(); entry.insert(KIO::UDSEntry::UDS_NAME, "."); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0700); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); } // Points to html query screen static void createGoHomeEntry(KIO::UDSEntry& entry) { entry.clear(); entry.insert(KIO::UDSEntry::UDS_NAME, "search.html"); entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, "Recoll search (click me)"); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); entry.insert(KIO::UDSEntry::UDS_TARGET_URL, "recoll:///search.html"); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0500); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "text/html"); entry.insert(KIO::UDSEntry::UDS_ICON_NAME, "recoll"); } // Points to help file static void createGoHelpEntry(KIO::UDSEntry& entry) { QString location = QStandardPaths::locate(QStandardPaths::GenericDataLocation, "kio_recoll/help.html"); entry.clear(); entry.insert(KIO::UDSEntry::UDS_NAME, "help"); entry.insert(KIO::UDSEntry::UDS_DISPLAY_NAME, "Recoll help (click me first)"); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFREG); 
entry.insert(KIO::UDSEntry::UDS_TARGET_URL, QString("file://") + location); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0500); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "text/html"); entry.insert(KIO::UDSEntry::UDS_ICON_NAME, "help"); } // As far as I can see we only ever get this on '/' so why all the code? void RecollProtocol::stat(const QUrl& url) { qDebug() << "RecollProtocol::stat:" << url; UrlIngester ingest(this, url); KIO::UDSEntry entry; // entry.insert(KIO::UDSEntry::UDS_TARGET_URL, url.url()); // entry.insert(KIO::UDSEntry::UDS_URL, url.url()); UrlIngester::RootEntryType rettp; QueryDesc qd; int num; if (ingest.isRootEntry(&rettp)) { qDebug() << "RecollProtocol::stat: root entry"; switch (rettp) { case UrlIngester::UIRET_ROOT: qDebug() << "RecollProtocol::stat: root"; createRootEntry(entry); break; case UrlIngester::UIRET_HELP: qDebug() << "RecollProtocol::stat: root help"; createGoHelpEntry(entry); break; case UrlIngester::UIRET_SEARCH: qDebug() << "RecollProtocol::stat: root search"; createGoHomeEntry(entry); break; default: qDebug() << "RecollProtocol::stat: ??"; error(ERR_DOES_NOT_EXIST, QString()); break; } } else if (ingest.isResult(&qd, &num)) { qDebug() << "RecollProtocol::stat: isresult"; if (syncSearch(qd)) { Rcl::Doc doc; if (num >= 0 && m_source && m_source->getDoc(num, doc)) { entry = resultToUDSEntry(doc, num); } else { error(ERR_DOES_NOT_EXIST, QString()); } } else { // hopefully syncSearch() set the error? } } else if (ingest.isQuery(&qd)) { qDebug() << "RecollProtocol::stat: isquery"; // ie "recoll:/some string" or "recoll:/some string/" // // We have a problem here. We'd like to let the user enter // either form and get an html or a dir contents result, // depending on the ending /. Otoh this makes the name space // inconsistent, because /toto can't be a file (the html // result page) while /toto/ would be a directory ? or can it // // Another approach would be to use different protocol names // to avoid any possibility of mixups if (m_alwaysdir || ingest.alwaysDir() || ingest.endSlashQuery()) { qDebug() << "RecollProtocol::stat: Directory type:"; // Need to check no / in there entry.insert(KIO::UDSEntry::UDS_NAME, qd.query); entry.insert(KIO::UDSEntry::UDS_ACCESS, 0700); entry.insert(KIO::UDSEntry::UDS_MODIFICATION_TIME, time(0)); entry.insert(KIO::UDSEntry::UDS_CREATION_TIME, time(0)); entry.insert(KIO::UDSEntry::UDS_FILE_TYPE, S_IFDIR); entry.insert(KIO::UDSEntry::UDS_MIME_TYPE, "inode/directory"); } } else { qDebug() << "RecollProtocol::stat: none of the above ??"; } statEntry(entry); finished(); } void RecollProtocol::listDir(const QUrl& url) { qDebug() << "RecollProtocol::listDir: url: " << url; UrlIngester ingest(this, url); UrlIngester::RootEntryType rettp; QueryDesc qd; if (ingest.isRootEntry(&rettp)) { switch (rettp) { case UrlIngester::UIRET_ROOT: { qDebug() << "RecollProtocol::listDir:list /"; UDSEntryList entries; KIO::UDSEntry entry; createRootEntry(entry); entries.append(entry); createGoHomeEntry(entry); entries.append(entry); createGoHelpEntry(entry); entries.append(entry); listEntries(entries); finished(); } return; default: error(ERR_CANNOT_ENTER_DIRECTORY, QString()); return; } } else if (ingest.isQuery(&qd)) { // At this point, it seems that when the request is from // konqueror autocompletion it comes with a / at the end, // which offers an opportunity to not perform it. 
if (ingest.endSlashQuery()) { qDebug() << "RecollProtocol::listDir: Ends With /"; error(ERR_SLAVE_DEFINED, QString::fromUtf8("Autocompletion search aborted")); return; } if (!syncSearch(qd)) { // syncSearch did the error thing return; } // Fallthrough to actually listing the directory } else { qDebug() << "RecollProtocol::listDir: Cant grok input url"; error(ERR_CANNOT_ENTER_DIRECTORY, QString()); return; } static int maxentries = -1; if (maxentries == -1) { if (o_rclconfig) { o_rclconfig->getConfParam("kio_max_direntries", &maxentries); } if (maxentries == -1) { maxentries = 10000; } } static const int pagesize = 200; int pagebase = 0; while (pagebase < maxentries) { vector page; int pagelen = m_source->getSeqSlice(pagebase, pagesize, page); UDSEntry entry; if (pagelen < 0) { error(ERR_SLAVE_DEFINED, QString::fromUtf8("Internal error")); break; } UDSEntryList entries; for (int i = 0; i < pagelen; i++) { entries.push_back(resultToUDSEntry(page[i].doc, i)); } listEntries(entries); if (pagelen != pagesize) { break; } pagebase += pagelen; } finished(); } recoll-1.26.3/kde/kioslave/kio_recoll/recoll.protocol0000644000175000017500000000025613303776057017567 00000000000000[Protocol] exec=kio_recoll protocol=recoll Icon=recoll input=none output=filesystem listing=Name,Type, URL reading=true Class=:local URIMode=rawuri defaultMimeType=text/html recoll-1.26.3/kde/kioslave/kio_recoll/kio_recoll.cpp0000644000175000017500000002744613533651561017361 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include #include #include #include #include #include "rclconfig.h" #include "rcldb.h" #include "rclinit.h" #include "pathut.h" #include "searchdata.h" #include "rclquery.h" #include "wasatorcl.h" #include "kio_recoll.h" #include "docseqdb.h" #include "readfile.h" #include "smallut.h" #include "textsplit.h" #include "guiutils.h" using namespace KIO; using namespace std; RclConfig *RecollProtocol::o_rclconfig; RecollProtocol::RecollProtocol(const QByteArray& pool, const QByteArray& app) : SlaveBase("recoll", pool, app), m_initok(false), m_alwaysdir(false) { qDebug() << "RecollProtocol::RecollProtocol()"; if (o_rclconfig == 0) { o_rclconfig = recollinit(0, 0, 0, m_reason); if (!o_rclconfig || !o_rclconfig->ok()) { m_reason = string("Configuration problem: ") + m_reason; return; } } if (o_rclconfig->getDbDir().empty()) { // Note: this will have to be replaced by a call to a // configuration building dialog for initial configuration? Or // do we assume that the QT GUO is always used for this ? m_reason = "No db directory in configuration ??"; return; } rwSettings(false); m_rcldb = std::shared_ptr(new Rcl::Db(o_rclconfig)); if (!m_rcldb) { m_reason = "Could not build database object. 
(out of memory ?)"; return; } // Decide if we allow switching between html and file manager // presentation by using an end slash or not. Can also be done dynamically // by switching proto names. const char *cp = getenv("RECOLL_KIO_ALWAYS_DIR"); if (cp) { m_alwaysdir = stringToBool(cp); } else { o_rclconfig->getConfParam("kio_always_dir", &m_alwaysdir); } cp = getenv("RECOLL_KIO_STEMLANG"); if (cp) { m_stemlang = cp; } else { m_stemlang = "english"; } m_pager.setParent(this); m_initok = true; return; } // There should be an object counter somewhere to delete the config when done. // Doesn't seem needed in the kio context. RecollProtocol::~RecollProtocol() { qDebug() << "RecollProtocol::~RecollProtocol()"; } bool RecollProtocol::maybeOpenDb(string& reason) { if (!m_rcldb) { reason = "Internal error: initialization error"; return false; } if (!m_rcldb->isopen() && !m_rcldb->open(Rcl::Db::DbRO)) { reason = "Could not open database in " + o_rclconfig->getDbDir(); return false; } return true; } // This is never called afaik void RecollProtocol::mimetype(const QUrl& url) { qDebug() << "RecollProtocol::mimetype: url: " << url; mimeType("text/html"); finished(); } UrlIngester::UrlIngester(RecollProtocol *p, const QUrl& url) : m_parent(p), m_slashend(false), m_alwaysdir(false), m_retType(UIRET_NONE), m_resnum(0), m_type(UIMT_NONE) { qDebug() << "UrlIngester::UrlIngester: Url: " << url; m_alwaysdir = !url.scheme().compare("recollf"); QString path = url.path(); if (url.host().isEmpty()) { if (path.isEmpty() || !path.compare("/")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_ROOT; return; } else if (!path.compare("/help.html")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_HELP; return; } else if (!path.compare("/search.html")) { m_type = UIMT_ROOTENTRY; m_retType = UIRET_SEARCH; QUrlQuery q(url); // Retrieve the query value for preloading the form m_query.query = q.queryItemValue("q"); return; } else if (m_parent->isRecollResult(url, &m_resnum, &m_query.query)) { m_type = UIMT_QUERYRESULT; m_query.opt = "l"; m_query.page = 0; } else { // Have to think this is some search string m_type = UIMT_QUERY; m_query.query = url.path(); m_query.opt = "l"; m_query.page = 0; } } else { // Non empty host, url must be something like : // //search/query?q=query¶m=value... qDebug() << "UrlIngester::UrlIngester: host " << url.host() << " path " << url.path(); if (url.host().compare("search") || url.path().compare("/query")) { return; } m_type = UIMT_QUERY; // Decode the forms' arguments // Retrieve the query value for preloading the form QUrlQuery q(url); m_query.query = q.queryItemValue("q"); m_query.opt = q.queryItemValue("qtp"); if (m_query.opt.isEmpty()) { m_query.opt = "l"; } QString p = q.queryItemValue("p"); if (p.isEmpty()) { m_query.page = 0; } else { sscanf(p.toUtf8(), "%d", &m_query.page); } p = q.queryItemValue("det"); m_query.isDetReq = !p.isEmpty(); p = q.queryItemValue("cmd"); if (!p.isEmpty() && !p.compare("pv")) { p = q.queryItemValue("dn"); if (!p.isEmpty()) { // Preview and no docnum ?? 
m_resnum = atoi((const char *)p.toUtf8()); // Result in page is 1+ m_resnum--; m_type = UIMT_PREVIEW; } } } if (m_query.query.startsWith("/")) { m_query.query.remove(0, 1); } if (m_query.query.endsWith("/")) { qDebug() << "UrlIngester::UrlIngester: query Ends with /"; m_slashend = true; m_query.query.chop(1); } else { m_slashend = false; } return; } bool RecollProtocol::syncSearch(const QueryDesc& qd) { qDebug() << "RecollProtocol::syncSearch"; if (!m_initok || !maybeOpenDb(m_reason)) { string reason = "RecollProtocol::listDir: Init error:" + m_reason; error(KIO::ERR_SLAVE_DEFINED, u8s2qs(reason)); return false; } if (qd.sameQuery(m_query)) { return true; } // doSearch() calls error() if appropriate. return doSearch(qd); } // This is used by the html interface, but also by the directory one // when doing file copies for exemple. This is the central dispatcher // for requests, it has to know a little about both models. void RecollProtocol::get(const QUrl& url) { qDebug() << "RecollProtocol::get: " << url; if (!m_initok || !maybeOpenDb(m_reason)) { string reason = "Recoll: init error: " + m_reason; error(KIO::ERR_SLAVE_DEFINED, u8s2qs(reason)); return; } UrlIngester ingest(this, url); UrlIngester::RootEntryType rettp; QueryDesc qd; int resnum; if (ingest.isRootEntry(&rettp)) { switch (rettp) { case UrlIngester::UIRET_HELP: { QString location = QStandardPaths::locate(QStandardPaths::GenericDataLocation, "kio_recoll/help.html"); redirection(QUrl::fromLocalFile(location)); } goto out; default: searchPage(); goto out; } } else if (ingest.isResult(&qd, &resnum)) { // Url matched one generated by konqueror/Dolphin out of a // search directory listing: ie: // recoll:/some search string/recollResultxx // // This happens when the user drags/drop the result to another // app, or with the "open-with" right-click. Does not happen // if the entry itself is clicked (the UDS_URL is apparently // used in this case // // Redirect to the result document URL if (!syncSearch(qd)) { return; } Rcl::Doc doc; if (resnum >= 0 && m_source && m_source->getDoc(resnum, doc)) { mimeType(doc.mimetype.c_str()); redirection(QUrl::fromLocalFile((const char *)(doc.url.c_str() + 7))); goto out; } } else if (ingest.isPreview(&qd, &resnum)) { if (!syncSearch(qd)) { return; } Rcl::Doc doc; if (resnum >= 0 && m_source && m_source->getDoc(resnum, doc)) { showPreview(doc); goto out; } } else if (ingest.isQuery(&qd)) { #if 0 // Do we need this ? if (host.isEmpty()) { char cpage[20]; sprintf(cpage, "%d", page); QString nurl = QString::fromAscii("recoll://search/query?q=") + query + "&qtp=" + opt + "&p=" + cpage; redirection(QUrl(nurl)); goto out; } #endif // htmlDoSearch does the search syncing (needs to know about changes). htmlDoSearch(qd); goto out; } error(KIO::ERR_SLAVE_DEFINED, u8s2qs("Unrecognized URL or internal error")); out: finished(); } // Execute Recoll search, and set the docsource bool RecollProtocol::doSearch(const QueryDesc& qd) { qDebug() << "RecollProtocol::doSearch:query" << qd.query << "opt" << qd.opt; m_query = qd; char opt = qd.opt.isEmpty() ? 'l' : qd.opt.toUtf8().at(0); string qs = (const char *)qd.query.toUtf8(); Rcl::SearchData *sd = 0; if (opt != 'l') { Rcl::SearchDataClause *clp = 0; if (opt == 'f') { clp = new Rcl::SearchDataClauseFilename(qs); } else { clp = new Rcl::SearchDataClauseSimple(opt == 'o' ? 
Rcl::SCLT_OR : Rcl::SCLT_AND, qs); } sd = new Rcl::SearchData(Rcl::SCLT_OR, m_stemlang); if (sd && clp) { sd->addClause(clp); } } else { sd = wasaStringToRcl(o_rclconfig, m_stemlang, qs, m_reason); } if (!sd) { m_reason = "Internal Error: cant build search"; error(KIO::ERR_SLAVE_DEFINED, u8s2qs(m_reason)); return false; } std::shared_ptr sdata(sd); std::shared_ptrquery(new Rcl::Query(m_rcldb.get())); query->setCollapseDuplicates(prefs.collapseDuplicates); if (!query->setQuery(sdata)) { m_reason = "Query execute failed. Invalid query or syntax error?"; error(KIO::ERR_SLAVE_DEFINED, u8s2qs(m_reason)); return false; } DocSequenceDb *src = new DocSequenceDb(m_rcldb, std::shared_ptr(query), "Query results", sdata); if (src == 0) { error(KIO::ERR_SLAVE_DEFINED, u8s2qs("Can't build result sequence")); return false; } m_source = std::shared_ptr(src); // Reset pager in all cases. Costs nothing, stays at page -1 initially // htmldosearch will fetch the first page if needed. m_pager.setDocSource(m_source); return true; } int kdemain(int argc, char **argv) { QCoreApplication::setApplicationName("kio_recoll"); qDebug() << "*** starting kio_recoll "; if (argc != 4) { qDebug() << "Usage: kio_recoll proto dom-socket1 dom-socket2\n"; exit(-1); } RecollProtocol slave(argv[2], argv[3]); slave.dispatchLoop(); qDebug() << "kio_recoll Done"; return 0; } recoll-1.26.3/kde/kioslave/kio_recoll/recollf.protocol0000644000175000017500000000025713303776057017736 00000000000000[Protocol] exec=kio_recoll protocol=recollf input=none output=filesystem listing=Name,Type, URL reading=true defaultMimeType=text/html Icon=recoll Class=:local URIMode=rawuri recoll-1.26.3/kde/kioslave/kio_recoll/CMakeLists.txt0000644000175000017500000000535113533651561017262 00000000000000project(kio_recoll) cmake_minimum_required(VERSION 2.8.12) include(FeatureSummary) set(QT_MIN_VERSION 5.2.0) set(KF5_MIN_VERSION 5.0.0) find_package(Qt5 ${QT_MIN_VERSION} CONFIG REQUIRED COMPONENTS Network Widgets) find_package(ECM REQUIRED NO_MODULE) set(CMAKE_MODULE_PATH ${ECM_MODULE_PATH}) include(KDEInstallDirs) include(KDECMakeSettings) include(KDECompilerSettings NO_POLICY_SCOPE) # CoreAddons? find_package(KF5 ${KF5_MIN_VERSION} REQUIRED COMPONENTS KIO) add_definitions(-DQT_NO_URL_CAST_FROM_STRING) include_directories( ${CMAKE_SOURCE_DIR} ${CMAKE_BINARY_DIR} ) ## Recoll stuff add_definitions( -DRECOLL_DATADIR="${CMAKE_INSTALL_PREFIX}/share/recoll" -DLIBDIR="${CMAKE_INSTALL_PREFIX}/lib" -DHAVE_CONFIG_H ) set(rcltop ${CMAKE_CURRENT_SOURCE_DIR}/../../../) # Execute recoll configuration to create autoconfig.h and version.h and # generate a PIC lib execute_process(COMMAND ${rcltop}/configure --disable-static --disable-qtgui --disable-x11mon --disable-python-chm --disable-python-module --prefix=${CMAKE_INSTALL_PREFIX} --mandir=${CMAKE_INSTALL_PREFIX}/share/man WORKING_DIRECTORY ${rcltop} ) link_directories(${rcltop}/.libs ${CMAKE_INSTALL_PREFIX}/lib) include_directories (${CMAKE_SOURCE_DIR} ${rcltop}/aspell ${rcltop}/bincimapmime ${rcltop}/common ${rcltop}/index ${rcltop}/internfile ${rcltop}/query ${rcltop}/rcldb ${rcltop}/unac ${rcltop}/utils ${rcltop}/qtgui ) set(kio_recoll_SRCS kio_recoll.cpp htmlif.cpp dirif.cpp ${rcltop}/qtgui/guiutils.cpp) # Had the idea to add e.g. /usr/lib/recoll to the rpath so that the dyn lib # will be found at run time. But this does not seem to work with debian # which strips RPATH by default (I think there is a way for libs in app-specific # paths but I did not find it). Link with the .a instead. 
#SET(CMAKE_INSTALL_RPATH "${CMAKE_INSTALL_PREFIX}/lib/recoll") add_library(kio_recoll MODULE ${kio_recoll_SRCS}) add_custom_target(rcllib COMMAND make -j 3 PicStatic WORKING_DIRECTORY ${rcltop} ) add_dependencies(kio_recoll rcllib) target_link_libraries(kio_recoll recoll xapian xslt xml2 KF5::KIOCore dl z pthread ) install(FILES recoll.protocol recollf.protocol DESTINATION ${SERVICES_INSTALL_DIR}) install(FILES data/welcome.html data/help.html DESTINATION ${DATA_INSTALL_DIR}/kio_recoll) # Tried but could not use PLUGIN_INSTALL_DIR (/usr/lib64/plugins), or # /usr/lib64/qt5/plugins/kf5/kio/recoll.so: the module is not found by # dolphin). Actually that's because of the protocol file. recoll has # exec=kio_recoll, file has exec=kf5/kio/file set_target_properties(kio_recoll PROPERTIES OUTPUT_NAME "kio_recoll") install(TARGETS kio_recoll DESTINATION ${LIB_INSTALL_DIR}/qt5/plugins) recoll-1.26.3/kde/kioslave/kio_recoll/00README.txt0000644000175000017500000000542213303776057016362 00000000000000Recoll KIO slave ================ An experiment with a recoll KIO slave. Caveat: I am only currently testing this with a production, but very recent, version of KDE 4.1, and I don't intend to really support older versions. The most usable aspects work under KDE 4.0 though. As a reference, my test system is an up to date (2009-01) Kubuntu 8.10. Usage ===== Depending on the protocol name used, the search results will be returned either as HTML pages (looking quite like a normal Recoll result list), or as directory entries. The HTML mode only works with Konqueror, not Dolphin. The directory mode is available with both browsers, and also application open dialog (ie Kate). The HTML mode is much more usable than the directory mode at this point More detailed help/explanations can be found a document accessible from the slave: To try things out, after building and installing, enter "recoll:/" in a Konqueror URL entry. Depending on the KDE version, this will bring you either to an HTML search form, or to a directory listing, where you should READ THE HELP FILE. Building and installing: ======================= Only tested with KDE 4.1 and later. The main Recoll installation shares its prefix with the KIO slave, which needs to use the KDE one. This means that, if KDE lives in /usr, Recoll must be configured with --prefix=/usr, not /usr/local. Else you'll have run-time problems, the slave will not be able to find the Recoll configuration. !!*Notice: You cannot share a build directory between recoll and kio_recoll because they use different configure options for the main lib, but build it in the same place. The main lib "configure" is run at "cmake" time for kio_recoll, the build is done at "make" time. Recipe: - Make sure the KDE4 core devel packages and cmake are installed. - Extract the Recoll source. - IF Recoll is not installed yet: configure recoll with --prefix=/usr (or wherever KDE lives), build and install Recoll. - In the Recoll source, go to kde/kioslave/kio_recoll, then build and install the kio slave: mkdir builddir cd builddir cmake .. -DCMAKE_INSTALL_PREFIX=/usr make sudo make install - You should have a look at where "make install" copies things, because misconfigured distribution, generating wrong targets, are frequent. Especially, you should check that kio_recoll.so is copied to the right place, meaning among the output of "kde4-config --path module". As an additional check, there should be many other kio_[xxx].so in there. 
Same for the protocol file, check that it's not alone in its directory (really, this sounds strange, but, to this point, I've seen more systems with broken cmake/KDE configs than correct ones). You need to build/update the index with recollindex, the KIO slave doesn't deal with indexing for now. recoll-1.26.3/kde/kioslave/kio_recoll/htmlif.cpp0000644000175000017500000002122413533651561016506 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include #include #include "rclconfig.h" #include "rcldb.h" #include "rclinit.h" #include "pathut.h" #include "searchdata.h" #include "rclquery.h" #include "wasatorcl.h" #include "kio_recoll.h" #include "docseqdb.h" #include "readfile.h" #include "smallut.h" #include "plaintorich.h" #include "internfile.h" #include "wipedir.h" #include "hldata.h" using namespace std; using namespace KIO; bool RecollKioPager::append(const string& data) { if (!m_parent) { return false; } m_parent->data(QByteArray(data.c_str())); return true; } #include string RecollProtocol::makeQueryUrl(int page, bool isdet) { ostringstream str; str << "recoll://search/query?q=" << url_encode((const char*)m_query.query.toUtf8()) << "&qtp=" << (const char*)m_query.opt.toUtf8(); if (page >= 0) { str << "&p=" << page; } if (isdet) { str << "&det=1"; } return str.str(); } string RecollKioPager::detailsLink() { string chunk = string("makeQueryUrl(m_parent->m_pager.pageNumber(), true) + "\">" + "(show query)" + ""; return chunk; } static string parformat; const string& RecollKioPager::parFormat() { // Need to escape the % inside the query url string qurl = m_parent->makeQueryUrl(-1, false), escurl; for (string::size_type pos = 0; pos < qurl.length(); pos++) { switch (qurl.at(pos)) { case '%': escurl += "%%"; break; default: escurl += qurl.at(pos); } } ostringstream str; str << "" "%R %S " "Preview  " << "Open " << "%T
" "%M %D   %U  %i
" "%A %K"; return parformat = str.str(); } string RecollKioPager::pageTop() { string pt = "

m_query.query.toUtf8())); pt += "\">New Search"; return pt; // Would be nice to have but doesnt work because the query may be executed // by another kio instance which has no idea of the current page o #if 0 "    m_query.query.toUtf8())) + "/\">Directory view (you may need to reload the page)" #endif } string RecollKioPager::nextUrl() { int pagenum = pageNumber(); if (pagenum < 0) { pagenum = 0; } else { pagenum++; } return m_parent->makeQueryUrl(pagenum); } string RecollKioPager::prevUrl() { int pagenum = pageNumber(); if (pagenum <= 0) { pagenum = 0; } else { pagenum--; } return m_parent->makeQueryUrl(pagenum); } static string welcomedata; void RecollProtocol::searchPage() { mimeType("text/html"); if (welcomedata.empty()) { QString location = QStandardPaths::locate(QStandardPaths::GenericDataLocation, "kio_recoll/welcome.html"); string reason; if (location.isEmpty() || !file_to_string((const char *)location.toUtf8(), welcomedata, &reason)) { welcomedata = "Recoll Error" "

Could not locate Recoll welcome.html file: "; welcomedata += reason; welcomedata += "

"; } } string catgq; #if 0 // Catg filtering. A bit complicated to do because of the // stateless thing (one more thing to compare to check if same // query) right now. Would be easy by adding to the query // language, but not too useful in this case, so scrap it for now. list cats; if (o_rclconfig->getMimeCategories(cats) && !cats.empty()) { catgq = "

Filter on types: " "All"; for (list::iterator it = cats.begin(); it != cats.end(); it++) { catgq += "\n" + *it ; } } #endif string tmp; map subs; subs['Q'] = (const char *)m_query.query.toUtf8(); subs['C'] = catgq; subs['S'] = ""; pcSubst(welcomedata, tmp, subs); data(tmp.c_str()); } void RecollProtocol::queryDetails() { mimeType("text/html"); QByteArray array; QTextStream os(&array, QIODevice::WriteOnly); os << "" << endl; os << "" << endl; os << "" << "Recoll query details" << "\n" << endl; os << "" << endl; os << "

Query details:

" << endl; os << "

" << m_pager.queryDescription().c_str() << "

" << endl; os << "

Return to results" << endl; os << "" << endl; data(array); } class PlainToRichKio : public PlainToRich { public: PlainToRichKio(const string& nm) : m_name(nm) { } virtual string header() { if (m_inputhtml) { return cstr_null; } else { return string("" ""). append(m_name). append("

");
        }
    }

    virtual string startMatch(unsigned int) {
        return string("");
    }

    virtual string endMatch() {
        return string("");
    }

    const string& m_name;
};
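
// Build an HTML preview of a result document: extract its text (or the
// handler's HTML output) with FileInterner, run it through PlainToRichKio to
// highlight the current query terms, and stream the result as slave data.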

void RecollProtocol::showPreview(const Rcl::Doc& idoc)
{
    FileInterner interner(idoc, o_rclconfig, FileInterner::FIF_forPreview);
    Rcl::Doc fdoc;
    string ipath = idoc.ipath;
    if (!interner.internfile(fdoc, ipath)) {
        error(KIO::ERR_SLAVE_DEFINED,
              QString::fromUtf8("Cannot convert file to internal format"));
        return;
    }
    if (!interner.get_html().empty()) {
        fdoc.text = interner.get_html();
        fdoc.mimetype = "text/html";
    }

    mimeType("text/html");

    string fname =  path_getsimple(fdoc.url).c_str();
    PlainToRichKio ptr(fname);
    ptr.set_inputhtml(!fdoc.mimetype.compare("text/html"));
    list<string> otextlist;
    HighlightData hdata;
    if (m_source) {
        m_source->getTerms(hdata);
    }
    ptr.plaintorich(fdoc.text, otextlist, hdata);

    QByteArray array;
    QTextStream os(&array, QIODevice::WriteOnly);
    for (list<string>::iterator it = otextlist.begin();
            it != otextlist.end(); it++) {
        os << (*it).c_str();
    }
    os << "" << endl;
    data(array);
}

void RecollProtocol::htmlDoSearch(const QueryDesc& qd)
{
    qDebug() << "q" << qd.query << "option" << qd.opt << "page" << qd.page <<
             "isdet" << qd.isDetReq << endl;

    mimeType("text/html");

    if (!syncSearch(qd)) {
        return;
    }
    // syncSearch/doSearch do the setDocSource when needed
    if (m_pager.pageNumber() < 0) {
        m_pager.resultPageNext();
    }
    if (qd.isDetReq) {
        queryDetails();
        return;
    }

    // Check / adjust page number
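    // (the pager can only step one result page at a time, so it is walked
    // forward or back until it reaches the page requested in the URL)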
    if (qd.page > m_pager.pageNumber()) {
        int npages = qd.page - m_pager.pageNumber();
        for (int i = 0; i < npages; i++) {
            m_pager.resultPageNext();
        }
    } else if (qd.page < m_pager.pageNumber()) {
        int npages = m_pager.pageNumber() - qd.page;
        for (int i = 0; i < npages; i++) {
            m_pager.resultPageBack();
        }
    }
    // Display
    m_pager.displayPage(o_rclconfig);
}
recoll-1.26.3/unac/0000755000175000017500000000000013570165410011014 500000000000000recoll-1.26.3/unac/README.recoll0000644000175000017500000000061113303776057013102 00000000000000This is a stripped down and modified version of unac-1.7.0
You can find the full original distribution at http://www.senga.org/unac/
You can find the full version used by Recoll at the following url:

http://bitbucket.org/medoc/recoll/src/tip/unac/

See AUTHORS and COPYING for tributes, etc.

Don't bother Loic Dachary about bugs in this version; you can find the culprit
here: jfd@recoll.org
recoll-1.26.3/unac/COPYING0000644000175000017500000004310613533651561012001 00000000000000		    GNU GENERAL PUBLIC LICENSE
		       Version 2, June 1991

 Copyright (C) 1989, 1991 Free Software Foundation, Inc.
  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 Everyone is permitted to copy and distribute verbatim copies
 of this license document, but changing it is not allowed.

			    Preamble

  The licenses for most software are designed to take away your
freedom to share and change it.  By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users.  This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it.  (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.)  You can apply it to
your programs, too.

  When we speak of free software, we are referring to freedom, not
price.  Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.

  To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.

  For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have.  You must make sure that they, too, receive or can get the
source code.  And you must show them these terms so they know their
rights.

  We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.

  Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software.  If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.

  Finally, any free program is threatened constantly by software
patents.  We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary.  To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.

  The precise terms and conditions for copying, distribution and
modification follow.

		    GNU GENERAL PUBLIC LICENSE
   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

  0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License.  The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language.  (Hereinafter, translation is included without limitation in
the term "modification".)  Each licensee is addressed as "you".

Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope.  The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.

  1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.

You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.

  2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) You must cause the modified files to carry prominent notices
    stating that you changed the files and the date of any change.

    b) You must cause any work that you distribute or publish, that in
    whole or in part contains or is derived from the Program or any
    part thereof, to be licensed as a whole at no charge to all third
    parties under the terms of this License.

    c) If the modified program normally reads commands interactively
    when run, you must cause it, when started running for such
    interactive use in the most ordinary way, to print or display an
    announcement including an appropriate copyright notice and a
    notice that there is no warranty (or else, saying that you provide
    a warranty) and that users may redistribute the program under
    these conditions, and telling the user how to view a copy of this
    License.  (Exception: if the Program itself is interactive but
    does not normally print such an announcement, your work based on
    the Program is not required to print an announcement.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.

In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:

    a) Accompany it with the complete corresponding machine-readable
    source code, which must be distributed under the terms of Sections
    1 and 2 above on a medium customarily used for software interchange; or,

    b) Accompany it with a written offer, valid for at least three
    years, to give any third party, for a charge no more than your
    cost of physically performing source distribution, a complete
    machine-readable copy of the corresponding source code, to be
    distributed under the terms of Sections 1 and 2 above on a medium
    customarily used for software interchange; or,

    c) Accompany it with the information you received as to the offer
    to distribute corresponding source code.  (This alternative is
    allowed only for noncommercial distribution and only if you
    received the program in object code or executable form with such
    an offer, in accord with Subsection b above.)

The source code for a work means the preferred form of the work for
making modifications to it.  For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable.  However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.

If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.

  4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License.  Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.

  5. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Program or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.

  6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.

  7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all.  For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.

If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded.  In such case, this License incorporates
the limitation as if written in the body of this License.

  9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time.  Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.

Each version is given a distinguishing version number.  If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation.  If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.

  10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission.  For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this.  Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.

			    NO WARRANTY

  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.

  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.

		     END OF TERMS AND CONDITIONS

	    How to Apply These Terms to Your New Programs

  If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.

  To do so, attach the following notices to the program.  It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.

    <one line to give the program's name and a brief idea of what it does.>
    Copyright (C) 19yy  <name of author>

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA


Also add information on how to contact you by electronic and paper mail.

If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:

    Gnomovision version 69, Copyright (C) 19yy name of author
    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
    This is free software, and you are welcome to redistribute it
    under certain conditions; type `show c' for details.

The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License.  Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
  `Gnomovision' (which makes passes at compilers) written by James Hacker.

  <signature of Ty Coon>, 1 April 1989
  Ty Coon, President of Vice

This General Public License does not permit incorporating your program into
proprietary programs.  If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library.  If this is what you want to do, use the GNU Library General
Public License instead of this License.
recoll-1.26.3/unac/unac_version.h0000644000175000017500000000003513303776057013607 00000000000000#define UNAC_VERSION "1.7.0"
recoll-1.26.3/unac/AUTHORS0000644000175000017500000000003413303776057012012 00000000000000Loic Dachary loic@senga.org
recoll-1.26.3/unac/unac.cpp0000644000175000017500000000002213410362770012361 00000000000000#include "unac.c"
recoll-1.26.3/unac/README0000644000175000017500000000626413303776057011635 00000000000000$Header: /cvsroot/unac/unac/README,v 1.5 2002/09/02 10:40:09 loic Exp $

What is it ?
------------

unac is a C library that removes accents from characters, regardless
of the character set (ISO-8859-15, ISO-CELTIC, KOI8-RU...)  as long as
iconv(3) is able to convert it into UTF-16 (Unicode).  For instance
the string été will become ete.  It provides a command line interface
(unaccent) that removes accents from an input flow or a string given
in argument. When using the library function or the command, the
charset of the input must be specified. The input is converted to
UTF-16 using iconv(3), accents are removed and the result is converted
back to the original charset. The iconv -l command on GNU/Linux will
list all the supported charsets.

Where is the documentation ?
----------------------------

The manual page of the unaccent command : man unaccent.
The manual page of the unac library : man unac.

How to install it ?
-------------------

For OSes that are not GNU/Linux, we recommend using the iconv library
provided by Bruno Haible, available at
ftp://ftp.gnu.org/pub/gnu/libiconv/libiconv-1.8.tar.gz.

./configure [--with-iconv=/my/local]

make all

make check

make install

How to link with unac ?
-------------------------

Assuming you've installed unac in the /usr/local directory, use something
similar to the following:

In the sources:
...
#include <unac.h>
...

On the command line:

cc -I/usr/local/include -o prog prog.cc -L/usr/local/lib -lunac 
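
For instance, a minimal prog.cc could look like the sketch below. It
assumes the unac_string() entry point declared in unac.h (charset name,
input buffer and length, malloc()ed output buffer returned through the
last two arguments); check the unac manual page for the exact prototype.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unac.h>

int main()
{
    /* "été" encoded as UTF-8 */
    const char *in = "\xc3\xa9t\xc3\xa9";
    char *out = NULL;
    size_t out_length = 0;

    /* The input charset must be given explicitly: unac converts to
       UTF-16 with iconv(3), strips the accents and converts back. */
    if (unac_string("UTF-8", in, strlen(in), &out, &out_length) != 0) {
        perror("unac_string");
        return 1;
    }
    printf("%.*s\n", (int)out_length, out);   /* prints "ete" */
    free(out);
    return 0;
}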

Where can I download it ?
-------------------------
The main distribution site is http://www.senga.org/unac/.

What is the license ?
---------------------
unac is distributed under the GNU GPL, as found at 
http://www.gnu.org/licenses/gpl.txt. Unicode data files are
under the following license, which is compatible with the 
GNU GPL:

http://www.unicode.org/Public/3.2-Update/UnicodeData-3.2.0.html#UCD_Terms
UCD Terms of Use

Disclaimer

The Unicode Character  Database is provided as is  by Unicode, Inc. No
claims  are  made  as  to  fitness  for  any  particular  purpose.  No
warranties of any kind are  expressed or implied. The recipient agrees
to determine  applicability of information provided. If  this file has
been purchased  on magnetic or  optical media from Unicode,  Inc., the
sole remedy for  any claim will be exchange  of defective media within
90 days of receipt.

This disclaimer  is applicable for  all other data  files accompanying
the Unicode  Character Database, some  of which have been  compiled by
the Unicode Consortium, and some  of which have been supplied by other
sources.  Limitations on Rights to Redistribute This Data

Recipient is granted the right to make copies in any form for internal
distribution  and  to  freely  use  the information  supplied  in  the
creation of  products supporting the Unicode(TM)  Standard.  The files
in  the  Unicode Character  Database  can  be  redistributed to  third
parties or other organizations (whether  for profit or not) as long as
this notice  and the disclaimer notice are  retained.  Information can
be extracted from  these files and used in  documentation or programs,
as long as there is an accompanying notice indicating the source.

Loic Dachary
loic@senga.org
http://www.senga.org/
recoll-1.26.3/unac/unac.c0000644000175000017500000246500513533651561012050 00000000000000/*
 * Copyright (C) 2000, 2001, 2002 Loic Dachary 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif /* RECOLL */

#ifdef BUILDING_RECOLL
/* Yes, recoll unac is actually C++, let's face modernity, I will not be
   caught writing another binary search  */
#include 
#include 
#include 
#include 
#include 
#include 
#include 

using std::string;
using std::vector;

#include "smallut.h"

/* 
   Storage for the exception translations. These are chars which
   should not be translated according to what UnicodeData says, but
   instead according to some local rule. There will usually be very
   few of them, but they must be looked up for every translated char.
 */
std::unordered_map<unsigned short, std::string> except_trans;
static inline bool is_except_char(unsigned short c, string& trans)
{
    auto it = except_trans.find(c);
    if (it == except_trans.end())
	return false;
    trans = it->second;
    return true;
}
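
/* Illustrative example (hypothetical rule): a caller can register a local
   translation before any conversion, and is_except_char() will then bypass
   the UnicodeData-derived table for that code point:

       except_trans[0x00d8] = "o";  // fold 'Ø', for which UnicodeData
                                    // defines no decomposition
*/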
#endif /* BUILDING_RECOLL*/

/*
 * If configure.in has not defined this symbol, assume const. It
 * does not harm much: a warning will be issued during compilation.
 */
#ifndef ICONV_CONST
#ifdef RCL_ICONV_INBUF_CONST
#define ICONV_CONST const
#else
#define ICONV_CONST
#endif
#endif /* ICONV_CONST */

#include 
#include 
#include 
#include 
#ifdef HAVE_VSNPRINTF
#include 
#include 
#endif /* HAVE_VSNPRINTF */

#include "unac.h"
#include "unac_version.h"

/* Generated by builder. Do not modify. Start tables */
/*
 * 00A0 NO-BREAK SPACE
 * 	0020 SPACE
 * 00A8 DIAERESIS
 * 	0020 SPACE
 * 00AA FEMININE ORDINAL INDICATOR
 * 	0061 LATIN SMALL LETTER A
 * 00AF MACRON
 * 	0020 SPACE
 * 00B2 SUPERSCRIPT TWO
 * 	0032 DIGIT TWO
 * 00B3 SUPERSCRIPT THREE
 * 	0033 DIGIT THREE
 * 00B4 ACUTE ACCENT
 * 	0020 SPACE
 * 00B5 MICRO SIGN
 * 	03BC GREEK SMALL LETTER MU
 * 00B8 CEDILLA
 * 	0020 SPACE
 * 00B9 SUPERSCRIPT ONE
 * 	0031 DIGIT ONE
 * 00BA MASCULINE ORDINAL INDICATOR
 * 	006F LATIN SMALL LETTER O
 * 00BC VULGAR FRACTION ONE QUARTER
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0034 DIGIT FOUR
 * 00BD VULGAR FRACTION ONE HALF
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0032 DIGIT TWO
 * 00BE VULGAR FRACTION THREE QUARTERS
 * 	0033 DIGIT THREE
 * 	2044 FRACTION SLASH
 * 	0034 DIGIT FOUR
 * 00C0 LATIN CAPITAL LETTER A WITH GRAVE
 * 	0041 LATIN CAPITAL LETTER A
 * 00C1 LATIN CAPITAL LETTER A WITH ACUTE
 * 	0041 LATIN CAPITAL LETTER A
 * 00C2 LATIN CAPITAL LETTER A WITH CIRCUMFLEX
 * 	0041 LATIN CAPITAL LETTER A
 * 00C3 LATIN CAPITAL LETTER A WITH TILDE
 * 	0041 LATIN CAPITAL LETTER A
 * 00C4 LATIN CAPITAL LETTER A WITH DIAERESIS
 * 	0041 LATIN CAPITAL LETTER A
 * 00C5 LATIN CAPITAL LETTER A WITH RING ABOVE
 * 	0041 LATIN CAPITAL LETTER A
 * 00C7 LATIN CAPITAL LETTER C WITH CEDILLA
 * 	0043 LATIN CAPITAL LETTER C
 * 00C8 LATIN CAPITAL LETTER E WITH GRAVE
 * 	0045 LATIN CAPITAL LETTER E
 * 00C9 LATIN CAPITAL LETTER E WITH ACUTE
 * 	0045 LATIN CAPITAL LETTER E
 * 00CA LATIN CAPITAL LETTER E WITH CIRCUMFLEX
 * 	0045 LATIN CAPITAL LETTER E
 * 00CB LATIN CAPITAL LETTER E WITH DIAERESIS
 * 	0045 LATIN CAPITAL LETTER E
 * 00CC LATIN CAPITAL LETTER I WITH GRAVE
 * 	0049 LATIN CAPITAL LETTER I
 * 00CD LATIN CAPITAL LETTER I WITH ACUTE
 * 	0049 LATIN CAPITAL LETTER I
 * 00CE LATIN CAPITAL LETTER I WITH CIRCUMFLEX
 * 	0049 LATIN CAPITAL LETTER I
 * 00CF LATIN CAPITAL LETTER I WITH DIAERESIS
 * 	0049 LATIN CAPITAL LETTER I
 * 00D1 LATIN CAPITAL LETTER N WITH TILDE
 * 	004E LATIN CAPITAL LETTER N
 * 00D2 LATIN CAPITAL LETTER O WITH GRAVE
 * 	004F LATIN CAPITAL LETTER O
 * 00D3 LATIN CAPITAL LETTER O WITH ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 00D4 LATIN CAPITAL LETTER O WITH CIRCUMFLEX
 * 	004F LATIN CAPITAL LETTER O
 * 00D5 LATIN CAPITAL LETTER O WITH TILDE
 * 	004F LATIN CAPITAL LETTER O
 * 00D6 LATIN CAPITAL LETTER O WITH DIAERESIS
 * 	004F LATIN CAPITAL LETTER O
 * 00D9 LATIN CAPITAL LETTER U WITH GRAVE
 * 	0055 LATIN CAPITAL LETTER U
 * 00DA LATIN CAPITAL LETTER U WITH ACUTE
 * 	0055 LATIN CAPITAL LETTER U
 * 00DB LATIN CAPITAL LETTER U WITH CIRCUMFLEX
 * 	0055 LATIN CAPITAL LETTER U
 * 00DC LATIN CAPITAL LETTER U WITH DIAERESIS
 * 	0055 LATIN CAPITAL LETTER U
 * 00DD LATIN CAPITAL LETTER Y WITH ACUTE
 * 	0059 LATIN CAPITAL LETTER Y
 * 00E0 LATIN SMALL LETTER A WITH GRAVE
 * 	0061 LATIN SMALL LETTER A
 * 00E1 LATIN SMALL LETTER A WITH ACUTE
 * 	0061 LATIN SMALL LETTER A
 * 00E2 LATIN SMALL LETTER A WITH CIRCUMFLEX
 * 	0061 LATIN SMALL LETTER A
 * 00E3 LATIN SMALL LETTER A WITH TILDE
 * 	0061 LATIN SMALL LETTER A
 * 00E4 LATIN SMALL LETTER A WITH DIAERESIS
 * 	0061 LATIN SMALL LETTER A
 * 00E5 LATIN SMALL LETTER A WITH RING ABOVE
 * 	0061 LATIN SMALL LETTER A
 * 00E7 LATIN SMALL LETTER C WITH CEDILLA
 * 	0063 LATIN SMALL LETTER C
 * 00E8 LATIN SMALL LETTER E WITH GRAVE
 * 	0065 LATIN SMALL LETTER E
 * 00E9 LATIN SMALL LETTER E WITH ACUTE
 * 	0065 LATIN SMALL LETTER E
 * 00EA LATIN SMALL LETTER E WITH CIRCUMFLEX
 * 	0065 LATIN SMALL LETTER E
 * 00EB LATIN SMALL LETTER E WITH DIAERESIS
 * 	0065 LATIN SMALL LETTER E
 * 00EC LATIN SMALL LETTER I WITH GRAVE
 * 	0069 LATIN SMALL LETTER I
 * 00ED LATIN SMALL LETTER I WITH ACUTE
 * 	0069 LATIN SMALL LETTER I
 * 00EE LATIN SMALL LETTER I WITH CIRCUMFLEX
 * 	0069 LATIN SMALL LETTER I
 * 00EF LATIN SMALL LETTER I WITH DIAERESIS
 * 	0069 LATIN SMALL LETTER I
 * 00F1 LATIN SMALL LETTER N WITH TILDE
 * 	006E LATIN SMALL LETTER N
 * 00F2 LATIN SMALL LETTER O WITH GRAVE
 * 	006F LATIN SMALL LETTER O
 * 00F3 LATIN SMALL LETTER O WITH ACUTE
 * 	006F LATIN SMALL LETTER O
 * 00F4 LATIN SMALL LETTER O WITH CIRCUMFLEX
 * 	006F LATIN SMALL LETTER O
 * 00F5 LATIN SMALL LETTER O WITH TILDE
 * 	006F LATIN SMALL LETTER O
 * 00F6 LATIN SMALL LETTER O WITH DIAERESIS
 * 	006F LATIN SMALL LETTER O
 * 00F9 LATIN SMALL LETTER U WITH GRAVE
 * 	0075 LATIN SMALL LETTER U
 * 00FA LATIN SMALL LETTER U WITH ACUTE
 * 	0075 LATIN SMALL LETTER U
 * 00FB LATIN SMALL LETTER U WITH CIRCUMFLEX
 * 	0075 LATIN SMALL LETTER U
 * 00FC LATIN SMALL LETTER U WITH DIAERESIS
 * 	0075 LATIN SMALL LETTER U
 * 00FD LATIN SMALL LETTER Y WITH ACUTE
 * 	0079 LATIN SMALL LETTER Y
 * 00FF LATIN SMALL LETTER Y WITH DIAERESIS
 * 	0079 LATIN SMALL LETTER Y
 * 0100 LATIN CAPITAL LETTER A WITH MACRON
 * 	0041 LATIN CAPITAL LETTER A
 * 0101 LATIN SMALL LETTER A WITH MACRON
 * 	0061 LATIN SMALL LETTER A
 * 0102 LATIN CAPITAL LETTER A WITH BREVE
 * 	0041 LATIN CAPITAL LETTER A
 * 0103 LATIN SMALL LETTER A WITH BREVE
 * 	0061 LATIN SMALL LETTER A
 * 0104 LATIN CAPITAL LETTER A WITH OGONEK
 * 	0041 LATIN CAPITAL LETTER A
 * 0105 LATIN SMALL LETTER A WITH OGONEK
 * 	0061 LATIN SMALL LETTER A
 * 0106 LATIN CAPITAL LETTER C WITH ACUTE
 * 	0043 LATIN CAPITAL LETTER C
 * 0107 LATIN SMALL LETTER C WITH ACUTE
 * 	0063 LATIN SMALL LETTER C
 * 0108 LATIN CAPITAL LETTER C WITH CIRCUMFLEX
 * 	0043 LATIN CAPITAL LETTER C
 * 0109 LATIN SMALL LETTER C WITH CIRCUMFLEX
 * 	0063 LATIN SMALL LETTER C
 * 010A LATIN CAPITAL LETTER C WITH DOT ABOVE
 * 	0043 LATIN CAPITAL LETTER C
 * 010B LATIN SMALL LETTER C WITH DOT ABOVE
 * 	0063 LATIN SMALL LETTER C
 * 010C LATIN CAPITAL LETTER C WITH CARON
 * 	0043 LATIN CAPITAL LETTER C
 * 010D LATIN SMALL LETTER C WITH CARON
 * 	0063 LATIN SMALL LETTER C
 * 010E LATIN CAPITAL LETTER D WITH CARON
 * 	0044 LATIN CAPITAL LETTER D
 * 010F LATIN SMALL LETTER D WITH CARON
 * 	0064 LATIN SMALL LETTER D
 * 0112 LATIN CAPITAL LETTER E WITH MACRON
 * 	0045 LATIN CAPITAL LETTER E
 * 0113 LATIN SMALL LETTER E WITH MACRON
 * 	0065 LATIN SMALL LETTER E
 * 0114 LATIN CAPITAL LETTER E WITH BREVE
 * 	0045 LATIN CAPITAL LETTER E
 * 0115 LATIN SMALL LETTER E WITH BREVE
 * 	0065 LATIN SMALL LETTER E
 * 0116 LATIN CAPITAL LETTER E WITH DOT ABOVE
 * 	0045 LATIN CAPITAL LETTER E
 * 0117 LATIN SMALL LETTER E WITH DOT ABOVE
 * 	0065 LATIN SMALL LETTER E
 * 0118 LATIN CAPITAL LETTER E WITH OGONEK
 * 	0045 LATIN CAPITAL LETTER E
 * 0119 LATIN SMALL LETTER E WITH OGONEK
 * 	0065 LATIN SMALL LETTER E
 * 011A LATIN CAPITAL LETTER E WITH CARON
 * 	0045 LATIN CAPITAL LETTER E
 * 011B LATIN SMALL LETTER E WITH CARON
 * 	0065 LATIN SMALL LETTER E
 * 011C LATIN CAPITAL LETTER G WITH CIRCUMFLEX
 * 	0047 LATIN CAPITAL LETTER G
 * 011D LATIN SMALL LETTER G WITH CIRCUMFLEX
 * 	0067 LATIN SMALL LETTER G
 * 011E LATIN CAPITAL LETTER G WITH BREVE
 * 	0047 LATIN CAPITAL LETTER G
 * 011F LATIN SMALL LETTER G WITH BREVE
 * 	0067 LATIN SMALL LETTER G
 * 0120 LATIN CAPITAL LETTER G WITH DOT ABOVE
 * 	0047 LATIN CAPITAL LETTER G
 * 0121 LATIN SMALL LETTER G WITH DOT ABOVE
 * 	0067 LATIN SMALL LETTER G
 * 0122 LATIN CAPITAL LETTER G WITH CEDILLA
 * 	0047 LATIN CAPITAL LETTER G
 * 0123 LATIN SMALL LETTER G WITH CEDILLA
 * 	0067 LATIN SMALL LETTER G
 * 0124 LATIN CAPITAL LETTER H WITH CIRCUMFLEX
 * 	0048 LATIN CAPITAL LETTER H
 * 0125 LATIN SMALL LETTER H WITH CIRCUMFLEX
 * 	0068 LATIN SMALL LETTER H
 * 0128 LATIN CAPITAL LETTER I WITH TILDE
 * 	0049 LATIN CAPITAL LETTER I
 * 0129 LATIN SMALL LETTER I WITH TILDE
 * 	0069 LATIN SMALL LETTER I
 * 012A LATIN CAPITAL LETTER I WITH MACRON
 * 	0049 LATIN CAPITAL LETTER I
 * 012B LATIN SMALL LETTER I WITH MACRON
 * 	0069 LATIN SMALL LETTER I
 * 012C LATIN CAPITAL LETTER I WITH BREVE
 * 	0049 LATIN CAPITAL LETTER I
 * 012D LATIN SMALL LETTER I WITH BREVE
 * 	0069 LATIN SMALL LETTER I
 * 012E LATIN CAPITAL LETTER I WITH OGONEK
 * 	0049 LATIN CAPITAL LETTER I
 * 012F LATIN SMALL LETTER I WITH OGONEK
 * 	0069 LATIN SMALL LETTER I
 * 0130 LATIN CAPITAL LETTER I WITH DOT ABOVE
 * 	0049 LATIN CAPITAL LETTER I
 * 0132 LATIN CAPITAL LIGATURE IJ
 * 	0049 LATIN CAPITAL LETTER I
 * 	004A LATIN CAPITAL LETTER J
 * 0133 LATIN SMALL LIGATURE IJ
 * 	0069 LATIN SMALL LETTER I
 * 	006A LATIN SMALL LETTER J
 * 0134 LATIN CAPITAL LETTER J WITH CIRCUMFLEX
 * 	004A LATIN CAPITAL LETTER J
 * 0135 LATIN SMALL LETTER J WITH CIRCUMFLEX
 * 	006A LATIN SMALL LETTER J
 * 0136 LATIN CAPITAL LETTER K WITH CEDILLA
 * 	004B LATIN CAPITAL LETTER K
 * 0137 LATIN SMALL LETTER K WITH CEDILLA
 * 	006B LATIN SMALL LETTER K
 * 0139 LATIN CAPITAL LETTER L WITH ACUTE
 * 	004C LATIN CAPITAL LETTER L
 * 013A LATIN SMALL LETTER L WITH ACUTE
 * 	006C LATIN SMALL LETTER L
 * 013B LATIN CAPITAL LETTER L WITH CEDILLA
 * 	004C LATIN CAPITAL LETTER L
 * 013C LATIN SMALL LETTER L WITH CEDILLA
 * 	006C LATIN SMALL LETTER L
 * 013D LATIN CAPITAL LETTER L WITH CARON
 * 	004C LATIN CAPITAL LETTER L
 * 013E LATIN SMALL LETTER L WITH CARON
 * 	006C LATIN SMALL LETTER L
 * 013F LATIN CAPITAL LETTER L WITH MIDDLE DOT
 * 	004C LATIN CAPITAL LETTER L
 * 	00B7 MIDDLE DOT
 * 0140 LATIN SMALL LETTER L WITH MIDDLE DOT
 * 	006C LATIN SMALL LETTER L
 * 	00B7 MIDDLE DOT
 * 0143 LATIN CAPITAL LETTER N WITH ACUTE
 * 	004E LATIN CAPITAL LETTER N
 * 0144 LATIN SMALL LETTER N WITH ACUTE
 * 	006E LATIN SMALL LETTER N
 * 0145 LATIN CAPITAL LETTER N WITH CEDILLA
 * 	004E LATIN CAPITAL LETTER N
 * 0146 LATIN SMALL LETTER N WITH CEDILLA
 * 	006E LATIN SMALL LETTER N
 * 0147 LATIN CAPITAL LETTER N WITH CARON
 * 	004E LATIN CAPITAL LETTER N
 * 0148 LATIN SMALL LETTER N WITH CARON
 * 	006E LATIN SMALL LETTER N
 * 0149 LATIN SMALL LETTER N PRECEDED BY APOSTROPHE
 * 	02BC MODIFIER LETTER APOSTROPHE
 * 	006E LATIN SMALL LETTER N
 * 014C LATIN CAPITAL LETTER O WITH MACRON
 * 	004F LATIN CAPITAL LETTER O
 * 014D LATIN SMALL LETTER O WITH MACRON
 * 	006F LATIN SMALL LETTER O
 * 014E LATIN CAPITAL LETTER O WITH BREVE
 * 	004F LATIN CAPITAL LETTER O
 * 014F LATIN SMALL LETTER O WITH BREVE
 * 	006F LATIN SMALL LETTER O
 * 0150 LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 0151 LATIN SMALL LETTER O WITH DOUBLE ACUTE
 * 	006F LATIN SMALL LETTER O
 * 0154 LATIN CAPITAL LETTER R WITH ACUTE
 * 	0052 LATIN CAPITAL LETTER R
 * 0155 LATIN SMALL LETTER R WITH ACUTE
 * 	0072 LATIN SMALL LETTER R
 * 0156 LATIN CAPITAL LETTER R WITH CEDILLA
 * 	0052 LATIN CAPITAL LETTER R
 * 0157 LATIN SMALL LETTER R WITH CEDILLA
 * 	0072 LATIN SMALL LETTER R
 * 0158 LATIN CAPITAL LETTER R WITH CARON
 * 	0052 LATIN CAPITAL LETTER R
 * 0159 LATIN SMALL LETTER R WITH CARON
 * 	0072 LATIN SMALL LETTER R
 * 015A LATIN CAPITAL LETTER S WITH ACUTE
 * 	0053 LATIN CAPITAL LETTER S
 * 015B LATIN SMALL LETTER S WITH ACUTE
 * 	0073 LATIN SMALL LETTER S
 * 015C LATIN CAPITAL LETTER S WITH CIRCUMFLEX
 * 	0053 LATIN CAPITAL LETTER S
 * 015D LATIN SMALL LETTER S WITH CIRCUMFLEX
 * 	0073 LATIN SMALL LETTER S
 * 015E LATIN CAPITAL LETTER S WITH CEDILLA
 * 	0053 LATIN CAPITAL LETTER S
 * 015F LATIN SMALL LETTER S WITH CEDILLA
 * 	0073 LATIN SMALL LETTER S
 * 0160 LATIN CAPITAL LETTER S WITH CARON
 * 	0053 LATIN CAPITAL LETTER S
 * 0161 LATIN SMALL LETTER S WITH CARON
 * 	0073 LATIN SMALL LETTER S
 * 0162 LATIN CAPITAL LETTER T WITH CEDILLA
 * 	0054 LATIN CAPITAL LETTER T
 * 0163 LATIN SMALL LETTER T WITH CEDILLA
 * 	0074 LATIN SMALL LETTER T
 * 0164 LATIN CAPITAL LETTER T WITH CARON
 * 	0054 LATIN CAPITAL LETTER T
 * 0165 LATIN SMALL LETTER T WITH CARON
 * 	0074 LATIN SMALL LETTER T
 * 0168 LATIN CAPITAL LETTER U WITH TILDE
 * 	0055 LATIN CAPITAL LETTER U
 * 0169 LATIN SMALL LETTER U WITH TILDE
 * 	0075 LATIN SMALL LETTER U
 * 016A LATIN CAPITAL LETTER U WITH MACRON
 * 	0055 LATIN CAPITAL LETTER U
 * 016B LATIN SMALL LETTER U WITH MACRON
 * 	0075 LATIN SMALL LETTER U
 * 016C LATIN CAPITAL LETTER U WITH BREVE
 * 	0055 LATIN CAPITAL LETTER U
 * 016D LATIN SMALL LETTER U WITH BREVE
 * 	0075 LATIN SMALL LETTER U
 * 016E LATIN CAPITAL LETTER U WITH RING ABOVE
 * 	0055 LATIN CAPITAL LETTER U
 * 016F LATIN SMALL LETTER U WITH RING ABOVE
 * 	0075 LATIN SMALL LETTER U
 * 0170 LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
 * 	0055 LATIN CAPITAL LETTER U
 * 0171 LATIN SMALL LETTER U WITH DOUBLE ACUTE
 * 	0075 LATIN SMALL LETTER U
 * 0172 LATIN CAPITAL LETTER U WITH OGONEK
 * 	0055 LATIN CAPITAL LETTER U
 * 0173 LATIN SMALL LETTER U WITH OGONEK
 * 	0075 LATIN SMALL LETTER U
 * 0174 LATIN CAPITAL LETTER W WITH CIRCUMFLEX
 * 	0057 LATIN CAPITAL LETTER W
 * 0175 LATIN SMALL LETTER W WITH CIRCUMFLEX
 * 	0077 LATIN SMALL LETTER W
 * 0176 LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
 * 	0059 LATIN CAPITAL LETTER Y
 * 0177 LATIN SMALL LETTER Y WITH CIRCUMFLEX
 * 	0079 LATIN SMALL LETTER Y
 * 0178 LATIN CAPITAL LETTER Y WITH DIAERESIS
 * 	0059 LATIN CAPITAL LETTER Y
 * 0179 LATIN CAPITAL LETTER Z WITH ACUTE
 * 	005A LATIN CAPITAL LETTER Z
 * 017A LATIN SMALL LETTER Z WITH ACUTE
 * 	007A LATIN SMALL LETTER Z
 * 017B LATIN CAPITAL LETTER Z WITH DOT ABOVE
 * 	005A LATIN CAPITAL LETTER Z
 * 017C LATIN SMALL LETTER Z WITH DOT ABOVE
 * 	007A LATIN SMALL LETTER Z
 * 017D LATIN CAPITAL LETTER Z WITH CARON
 * 	005A LATIN CAPITAL LETTER Z
 * 017E LATIN SMALL LETTER Z WITH CARON
 * 	007A LATIN SMALL LETTER Z
 * 017F LATIN SMALL LETTER LONG S
 * 	0073 LATIN SMALL LETTER S
 * 01A0 LATIN CAPITAL LETTER O WITH HORN
 * 	004F LATIN CAPITAL LETTER O
 * 01A1 LATIN SMALL LETTER O WITH HORN
 * 	006F LATIN SMALL LETTER O
 * 01AF LATIN CAPITAL LETTER U WITH HORN
 * 	0055 LATIN CAPITAL LETTER U
 * 01B0 LATIN SMALL LETTER U WITH HORN
 * 	0075 LATIN SMALL LETTER U
 * 01C4 LATIN CAPITAL LETTER DZ WITH CARON
 * 	0044 LATIN CAPITAL LETTER D
 * 	005A LATIN CAPITAL LETTER Z
 * 01C5 LATIN CAPITAL LETTER D WITH SMALL LETTER Z WITH CARON
 * 	0044 LATIN CAPITAL LETTER D
 * 	007A LATIN SMALL LETTER Z
 * 01C6 LATIN SMALL LETTER DZ WITH CARON
 * 	0064 LATIN SMALL LETTER D
 * 	007A LATIN SMALL LETTER Z
 * 01C7 LATIN CAPITAL LETTER LJ
 * 	004C LATIN CAPITAL LETTER L
 * 	004A LATIN CAPITAL LETTER J
 * 01C8 LATIN CAPITAL LETTER L WITH SMALL LETTER J
 * 	004C LATIN CAPITAL LETTER L
 * 	006A LATIN SMALL LETTER J
 * 01C9 LATIN SMALL LETTER LJ
 * 	006C LATIN SMALL LETTER L
 * 	006A LATIN SMALL LETTER J
 * 01CA LATIN CAPITAL LETTER NJ
 * 	004E LATIN CAPITAL LETTER N
 * 	004A LATIN CAPITAL LETTER J
 * 01CB LATIN CAPITAL LETTER N WITH SMALL LETTER J
 * 	004E LATIN CAPITAL LETTER N
 * 	006A LATIN SMALL LETTER J
 * 01CC LATIN SMALL LETTER NJ
 * 	006E LATIN SMALL LETTER N
 * 	006A LATIN SMALL LETTER J
 * 01CD LATIN CAPITAL LETTER A WITH CARON
 * 	0041 LATIN CAPITAL LETTER A
 * 01CE LATIN SMALL LETTER A WITH CARON
 * 	0061 LATIN SMALL LETTER A
 * 01CF LATIN CAPITAL LETTER I WITH CARON
 * 	0049 LATIN CAPITAL LETTER I
 * 01D0 LATIN SMALL LETTER I WITH CARON
 * 	0069 LATIN SMALL LETTER I
 * 01D1 LATIN CAPITAL LETTER O WITH CARON
 * 	004F LATIN CAPITAL LETTER O
 * 01D2 LATIN SMALL LETTER O WITH CARON
 * 	006F LATIN SMALL LETTER O
 * 01D3 LATIN CAPITAL LETTER U WITH CARON
 * 	0055 LATIN CAPITAL LETTER U
 * 01D4 LATIN SMALL LETTER U WITH CARON
 * 	0075 LATIN SMALL LETTER U
 * 01D5 LATIN CAPITAL LETTER U WITH DIAERESIS AND MACRON
 * 	0055 LATIN CAPITAL LETTER U
 * 01D6 LATIN SMALL LETTER U WITH DIAERESIS AND MACRON
 * 	0075 LATIN SMALL LETTER U
 * 01D7 LATIN CAPITAL LETTER U WITH DIAERESIS AND ACUTE
 * 	0055 LATIN CAPITAL LETTER U
 * 01D8 LATIN SMALL LETTER U WITH DIAERESIS AND ACUTE
 * 	0075 LATIN SMALL LETTER U
 * 01D9 LATIN CAPITAL LETTER U WITH DIAERESIS AND CARON
 * 	0055 LATIN CAPITAL LETTER U
 * 01DA LATIN SMALL LETTER U WITH DIAERESIS AND CARON
 * 	0075 LATIN SMALL LETTER U
 * 01DB LATIN CAPITAL LETTER U WITH DIAERESIS AND GRAVE
 * 	0055 LATIN CAPITAL LETTER U
 * 01DC LATIN SMALL LETTER U WITH DIAERESIS AND GRAVE
 * 	0075 LATIN SMALL LETTER U
 * 01DE LATIN CAPITAL LETTER A WITH DIAERESIS AND MACRON
 * 	0041 LATIN CAPITAL LETTER A
 * 01DF LATIN SMALL LETTER A WITH DIAERESIS AND MACRON
 * 	0061 LATIN SMALL LETTER A
 * 01E0 LATIN CAPITAL LETTER A WITH DOT ABOVE AND MACRON
 * 	0041 LATIN CAPITAL LETTER A
 * 01E1 LATIN SMALL LETTER A WITH DOT ABOVE AND MACRON
 * 	0061 LATIN SMALL LETTER A
 * 01E2 LATIN CAPITAL LETTER AE WITH MACRON
 * 	00C6 LATIN CAPITAL LETTER AE
 * 01E3 LATIN SMALL LETTER AE WITH MACRON
 * 	00E6 LATIN SMALL LETTER AE
 * 01E6 LATIN CAPITAL LETTER G WITH CARON
 * 	0047 LATIN CAPITAL LETTER G
 * 01E7 LATIN SMALL LETTER G WITH CARON
 * 	0067 LATIN SMALL LETTER G
 * 01E8 LATIN CAPITAL LETTER K WITH CARON
 * 	004B LATIN CAPITAL LETTER K
 * 01E9 LATIN SMALL LETTER K WITH CARON
 * 	006B LATIN SMALL LETTER K
 * 01EA LATIN CAPITAL LETTER O WITH OGONEK
 * 	004F LATIN CAPITAL LETTER O
 * 01EB LATIN SMALL LETTER O WITH OGONEK
 * 	006F LATIN SMALL LETTER O
 * 01EC LATIN CAPITAL LETTER O WITH OGONEK AND MACRON
 * 	004F LATIN CAPITAL LETTER O
 * 01ED LATIN SMALL LETTER O WITH OGONEK AND MACRON
 * 	006F LATIN SMALL LETTER O
 * 01EE LATIN CAPITAL LETTER EZH WITH CARON
 * 	01B7 LATIN CAPITAL LETTER EZH
 * 01EF LATIN SMALL LETTER EZH WITH CARON
 * 	0292 LATIN SMALL LETTER EZH
 * 01F0 LATIN SMALL LETTER J WITH CARON
 * 	006A LATIN SMALL LETTER J
 * 01F1 LATIN CAPITAL LETTER DZ
 * 	0044 LATIN CAPITAL LETTER D
 * 	005A LATIN CAPITAL LETTER Z
 * 01F2 LATIN CAPITAL LETTER D WITH SMALL LETTER Z
 * 	0044 LATIN CAPITAL LETTER D
 * 	007A LATIN SMALL LETTER Z
 * 01F3 LATIN SMALL LETTER DZ
 * 	0064 LATIN SMALL LETTER D
 * 	007A LATIN SMALL LETTER Z
 * 01F4 LATIN CAPITAL LETTER G WITH ACUTE
 * 	0047 LATIN CAPITAL LETTER G
 * 01F5 LATIN SMALL LETTER G WITH ACUTE
 * 	0067 LATIN SMALL LETTER G
 * 01F8 LATIN CAPITAL LETTER N WITH GRAVE
 * 	004E LATIN CAPITAL LETTER N
 * 01F9 LATIN SMALL LETTER N WITH GRAVE
 * 	006E LATIN SMALL LETTER N
 * 01FA LATIN CAPITAL LETTER A WITH RING ABOVE AND ACUTE
 * 	0041 LATIN CAPITAL LETTER A
 * 01FB LATIN SMALL LETTER A WITH RING ABOVE AND ACUTE
 * 	0061 LATIN SMALL LETTER A
 * 01FC LATIN CAPITAL LETTER AE WITH ACUTE
 * 	00C6 LATIN CAPITAL LETTER AE
 * 01FD LATIN SMALL LETTER AE WITH ACUTE
 * 	00E6 LATIN SMALL LETTER AE
 * 01FE LATIN CAPITAL LETTER O WITH STROKE AND ACUTE
 * 	00D8 LATIN CAPITAL LETTER O WITH STROKE
 * 01FF LATIN SMALL LETTER O WITH STROKE AND ACUTE
 * 	00F8 LATIN SMALL LETTER O WITH STROKE
 * 0200 LATIN CAPITAL LETTER A WITH DOUBLE GRAVE
 * 	0041 LATIN CAPITAL LETTER A
 * 0201 LATIN SMALL LETTER A WITH DOUBLE GRAVE
 * 	0061 LATIN SMALL LETTER A
 * 0202 LATIN CAPITAL LETTER A WITH INVERTED BREVE
 * 	0041 LATIN CAPITAL LETTER A
 * 0203 LATIN SMALL LETTER A WITH INVERTED BREVE
 * 	0061 LATIN SMALL LETTER A
 * 0204 LATIN CAPITAL LETTER E WITH DOUBLE GRAVE
 * 	0045 LATIN CAPITAL LETTER E
 * 0205 LATIN SMALL LETTER E WITH DOUBLE GRAVE
 * 	0065 LATIN SMALL LETTER E
 * 0206 LATIN CAPITAL LETTER E WITH INVERTED BREVE
 * 	0045 LATIN CAPITAL LETTER E
 * 0207 LATIN SMALL LETTER E WITH INVERTED BREVE
 * 	0065 LATIN SMALL LETTER E
 * 0208 LATIN CAPITAL LETTER I WITH DOUBLE GRAVE
 * 	0049 LATIN CAPITAL LETTER I
 * 0209 LATIN SMALL LETTER I WITH DOUBLE GRAVE
 * 	0069 LATIN SMALL LETTER I
 * 020A LATIN CAPITAL LETTER I WITH INVERTED BREVE
 * 	0049 LATIN CAPITAL LETTER I
 * 020B LATIN SMALL LETTER I WITH INVERTED BREVE
 * 	0069 LATIN SMALL LETTER I
 * 020C LATIN CAPITAL LETTER O WITH DOUBLE GRAVE
 * 	004F LATIN CAPITAL LETTER O
 * 020D LATIN SMALL LETTER O WITH DOUBLE GRAVE
 * 	006F LATIN SMALL LETTER O
 * 020E LATIN CAPITAL LETTER O WITH INVERTED BREVE
 * 	004F LATIN CAPITAL LETTER O
 * 020F LATIN SMALL LETTER O WITH INVERTED BREVE
 * 	006F LATIN SMALL LETTER O
 * 0210 LATIN CAPITAL LETTER R WITH DOUBLE GRAVE
 * 	0052 LATIN CAPITAL LETTER R
 * 0211 LATIN SMALL LETTER R WITH DOUBLE GRAVE
 * 	0072 LATIN SMALL LETTER R
 * 0212 LATIN CAPITAL LETTER R WITH INVERTED BREVE
 * 	0052 LATIN CAPITAL LETTER R
 * 0213 LATIN SMALL LETTER R WITH INVERTED BREVE
 * 	0072 LATIN SMALL LETTER R
 * 0214 LATIN CAPITAL LETTER U WITH DOUBLE GRAVE
 * 	0055 LATIN CAPITAL LETTER U
 * 0215 LATIN SMALL LETTER U WITH DOUBLE GRAVE
 * 	0075 LATIN SMALL LETTER U
 * 0216 LATIN CAPITAL LETTER U WITH INVERTED BREVE
 * 	0055 LATIN CAPITAL LETTER U
 * 0217 LATIN SMALL LETTER U WITH INVERTED BREVE
 * 	0075 LATIN SMALL LETTER U
 * 0218 LATIN CAPITAL LETTER S WITH COMMA BELOW
 * 	0053 LATIN CAPITAL LETTER S
 * 0219 LATIN SMALL LETTER S WITH COMMA BELOW
 * 	0073 LATIN SMALL LETTER S
 * 021A LATIN CAPITAL LETTER T WITH COMMA BELOW
 * 	0054 LATIN CAPITAL LETTER T
 * 021B LATIN SMALL LETTER T WITH COMMA BELOW
 * 	0074 LATIN SMALL LETTER T
 * 021E LATIN CAPITAL LETTER H WITH CARON
 * 	0048 LATIN CAPITAL LETTER H
 * 021F LATIN SMALL LETTER H WITH CARON
 * 	0068 LATIN SMALL LETTER H
 * 0226 LATIN CAPITAL LETTER A WITH DOT ABOVE
 * 	0041 LATIN CAPITAL LETTER A
 * 0227 LATIN SMALL LETTER A WITH DOT ABOVE
 * 	0061 LATIN SMALL LETTER A
 * 0228 LATIN CAPITAL LETTER E WITH CEDILLA
 * 	0045 LATIN CAPITAL LETTER E
 * 0229 LATIN SMALL LETTER E WITH CEDILLA
 * 	0065 LATIN SMALL LETTER E
 * 022A LATIN CAPITAL LETTER O WITH DIAERESIS AND MACRON
 * 	004F LATIN CAPITAL LETTER O
 * 022B LATIN SMALL LETTER O WITH DIAERESIS AND MACRON
 * 	006F LATIN SMALL LETTER O
 * 022C LATIN CAPITAL LETTER O WITH TILDE AND MACRON
 * 	004F LATIN CAPITAL LETTER O
 * 022D LATIN SMALL LETTER O WITH TILDE AND MACRON
 * 	006F LATIN SMALL LETTER O
 * 022E LATIN CAPITAL LETTER O WITH DOT ABOVE
 * 	004F LATIN CAPITAL LETTER O
 * 022F LATIN SMALL LETTER O WITH DOT ABOVE
 * 	006F LATIN SMALL LETTER O
 * 0230 LATIN CAPITAL LETTER O WITH DOT ABOVE AND MACRON
 * 	004F LATIN CAPITAL LETTER O
 * 0231 LATIN SMALL LETTER O WITH DOT ABOVE AND MACRON
 * 	006F LATIN SMALL LETTER O
 * 0232 LATIN CAPITAL LETTER Y WITH MACRON
 * 	0059 LATIN CAPITAL LETTER Y
 * 0233 LATIN SMALL LETTER Y WITH MACRON
 * 	0079 LATIN SMALL LETTER Y
 * 02B0 MODIFIER LETTER SMALL H
 * 	0068 LATIN SMALL LETTER H
 * 02B1 MODIFIER LETTER SMALL H WITH HOOK
 * 	0266 LATIN SMALL LETTER H WITH HOOK
 * 02B2 MODIFIER LETTER SMALL J
 * 	006A LATIN SMALL LETTER J
 * 02B3 MODIFIER LETTER SMALL R
 * 	0072 LATIN SMALL LETTER R
 * 02B4 MODIFIER LETTER SMALL TURNED R
 * 	0279 LATIN SMALL LETTER TURNED R
 * 02B5 MODIFIER LETTER SMALL TURNED R WITH HOOK
 * 	027B LATIN SMALL LETTER TURNED R WITH HOOK
 * 02B6 MODIFIER LETTER SMALL CAPITAL INVERTED R
 * 	0281 LATIN LETTER SMALL CAPITAL INVERTED R
 * 02B7 MODIFIER LETTER SMALL W
 * 	0077 LATIN SMALL LETTER W
 * 02B8 MODIFIER LETTER SMALL Y
 * 	0079 LATIN SMALL LETTER Y
 * 02D8 BREVE
 * 	0020 SPACE
 * 02D9 DOT ABOVE
 * 	0020 SPACE
 * 02DA RING ABOVE
 * 	0020 SPACE
 * 02DB OGONEK
 * 	0020 SPACE
 * 02DC SMALL TILDE
 * 	0020 SPACE
 * 02DD DOUBLE ACUTE ACCENT
 * 	0020 SPACE
 * 02E0 MODIFIER LETTER SMALL GAMMA
 * 	0263 LATIN SMALL LETTER GAMMA
 * 02E1 MODIFIER LETTER SMALL L
 * 	006C LATIN SMALL LETTER L
 * 02E2 MODIFIER LETTER SMALL S
 * 	0073 LATIN SMALL LETTER S
 * 02E3 MODIFIER LETTER SMALL X
 * 	0078 LATIN SMALL LETTER X
 * 02E4 MODIFIER LETTER SMALL REVERSED GLOTTAL STOP
 * 	0295 LATIN LETTER PHARYNGEAL VOICED FRICATIVE
 * 0300 COMBINING GRAVE ACCENT
 * 	0000 
 * 0301 COMBINING ACUTE ACCENT
 * 	0000 
 * 0302 COMBINING CIRCUMFLEX ACCENT
 * 	0000 
 * 0303 COMBINING TILDE
 * 	0000 
 * 0304 COMBINING MACRON
 * 	0000 
 * 0305 COMBINING OVERLINE
 * 	0000 
 * 0306 COMBINING BREVE
 * 	0000 
 * 0307 COMBINING DOT ABOVE
 * 	0000 
 * 0308 COMBINING DIAERESIS
 * 	0000 
 * 0309 COMBINING HOOK ABOVE
 * 	0000 
 * 030A COMBINING RING ABOVE
 * 	0000 
 * 030B COMBINING DOUBLE ACUTE ACCENT
 * 	0000 
 * 030C COMBINING CARON
 * 	0000 
 * 030D COMBINING VERTICAL LINE ABOVE
 * 	0000 
 * 030E COMBINING DOUBLE VERTICAL LINE ABOVE
 * 	0000 
 * 030F COMBINING DOUBLE GRAVE ACCENT
 * 	0000 
 * 0310 COMBINING CANDRABINDU
 * 	0000 
 * 0311 COMBINING INVERTED BREVE
 * 	0000 
 * 0312 COMBINING TURNED COMMA ABOVE
 * 	0000 
 * 0313 COMBINING COMMA ABOVE
 * 	0000 
 * 0314 COMBINING REVERSED COMMA ABOVE
 * 	0000 
 * 0315 COMBINING COMMA ABOVE RIGHT
 * 	0000 
 * 0316 COMBINING GRAVE ACCENT BELOW
 * 	0000 
 * 0317 COMBINING ACUTE ACCENT BELOW
 * 	0000 
 * 0318 COMBINING LEFT TACK BELOW
 * 	0000 
 * 0319 COMBINING RIGHT TACK BELOW
 * 	0000 
 * 031A COMBINING LEFT ANGLE ABOVE
 * 	0000 
 * 031B COMBINING HORN
 * 	0000 
 * 031C COMBINING LEFT HALF RING BELOW
 * 	0000 
 * 031D COMBINING UP TACK BELOW
 * 	0000 
 * 031E COMBINING DOWN TACK BELOW
 * 	0000 
 * 031F COMBINING PLUS SIGN BELOW
 * 	0000 
 * 0320 COMBINING MINUS SIGN BELOW
 * 	0000 
 * 0321 COMBINING PALATALIZED HOOK BELOW
 * 	0000 
 * 0322 COMBINING RETROFLEX HOOK BELOW
 * 	0000 
 * 0323 COMBINING DOT BELOW
 * 	0000 
 * 0324 COMBINING DIAERESIS BELOW
 * 	0000 
 * 0325 COMBINING RING BELOW
 * 	0000 
 * 0326 COMBINING COMMA BELOW
 * 	0000 
 * 0327 COMBINING CEDILLA
 * 	0000 
 * 0328 COMBINING OGONEK
 * 	0000 
 * 0329 COMBINING VERTICAL LINE BELOW
 * 	0000 
 * 032A COMBINING BRIDGE BELOW
 * 	0000 
 * 032B COMBINING INVERTED DOUBLE ARCH BELOW
 * 	0000 
 * 032C COMBINING CARON BELOW
 * 	0000 
 * 032D COMBINING CIRCUMFLEX ACCENT BELOW
 * 	0000 
 * 032E COMBINING BREVE BELOW
 * 	0000 
 * 032F COMBINING INVERTED BREVE BELOW
 * 	0000 
 * 0330 COMBINING TILDE BELOW
 * 	0000 
 * 0331 COMBINING MACRON BELOW
 * 	0000 
 * 0332 COMBINING LOW LINE
 * 	0000 
 * 0333 COMBINING DOUBLE LOW LINE
 * 	0000 
 * 0334 COMBINING TILDE OVERLAY
 * 	0000 
 * 0335 COMBINING SHORT STROKE OVERLAY
 * 	0000 
 * 0336 COMBINING LONG STROKE OVERLAY
 * 	0000 
 * 0337 COMBINING SHORT SOLIDUS OVERLAY
 * 	0000 
 * 0338 COMBINING LONG SOLIDUS OVERLAY
 * 	0000 
 * 0339 COMBINING RIGHT HALF RING BELOW
 * 	0000 
 * 033A COMBINING INVERTED BRIDGE BELOW
 * 	0000 
 * 033B COMBINING SQUARE BELOW
 * 	0000 
 * 033C COMBINING SEAGULL BELOW
 * 	0000 
 * 033D COMBINING X ABOVE
 * 	0000 
 * 033E COMBINING VERTICAL TILDE
 * 	0000 
 * 033F COMBINING DOUBLE OVERLINE
 * 	0000 
 * 0340 COMBINING GRAVE TONE MARK
 * 	0000 
 * 0341 COMBINING ACUTE TONE MARK
 * 	0000 
 * 0342 COMBINING GREEK PERISPOMENI
 * 	0000 
 * 0343 COMBINING GREEK KORONIS
 * 	0000 
 * 0344 COMBINING GREEK DIALYTIKA TONOS
 * 	0000 
 * 0345 COMBINING GREEK YPOGEGRAMMENI
 * 	0000 
 * 0346 COMBINING BRIDGE ABOVE
 * 	0000 
 * 0347 COMBINING EQUALS SIGN BELOW
 * 	0000 
 * 0348 COMBINING DOUBLE VERTICAL LINE BELOW
 * 	0000 
 * 0349 COMBINING LEFT ANGLE BELOW
 * 	0000 
 * 034A COMBINING NOT TILDE ABOVE
 * 	0000 
 * 034B COMBINING HOMOTHETIC ABOVE
 * 	0000 
 * 034C COMBINING ALMOST EQUAL TO ABOVE
 * 	0000 
 * 034D COMBINING LEFT RIGHT ARROW BELOW
 * 	0000 
 * 034E COMBINING UPWARDS ARROW BELOW
 * 	0000 
 * 034F COMBINING GRAPHEME JOINER
 * 	0000 
 * 0350 COMBINING RIGHT ARROWHEAD ABOVE
 * 	0000 
 * 0351 COMBINING LEFT HALF RING ABOVE
 * 	0000 
 * 0352 COMBINING FERMATA
 * 	0000 
 * 0353 COMBINING X BELOW
 * 	0000 
 * 0354 COMBINING LEFT ARROWHEAD BELOW
 * 	0000 
 * 0355 COMBINING RIGHT ARROWHEAD BELOW
 * 	0000 
 * 0356 COMBINING RIGHT ARROWHEAD AND UP ARROWHEAD BELOW
 * 	0000 
 * 0357 COMBINING RIGHT HALF RING ABOVE
 * 	0000 
 * 0358 COMBINING DOT ABOVE RIGHT
 * 	0000 
 * 0359 COMBINING ASTERISK BELOW
 * 	0000 
 * 035A COMBINING DOUBLE RING BELOW
 * 	0000 
 * 035B COMBINING ZIGZAG ABOVE
 * 	0000 
 * 035C COMBINING DOUBLE BREVE BELOW
 * 	0000 
 * 035D COMBINING DOUBLE BREVE
 * 	0000 
 * 035E COMBINING DOUBLE MACRON
 * 	0000 
 * 035F COMBINING DOUBLE MACRON BELOW
 * 	0000 
 * 0360 COMBINING DOUBLE TILDE
 * 	0000 
 * 0361 COMBINING DOUBLE INVERTED BREVE
 * 	0000 
 * 0362 COMBINING DOUBLE RIGHTWARDS ARROW BELOW
 * 	0000 
 * 0363 COMBINING LATIN SMALL LETTER A
 * 	0000 
 * 0364 COMBINING LATIN SMALL LETTER E
 * 	0000 
 * 0365 COMBINING LATIN SMALL LETTER I
 * 	0000 
 * 0366 COMBINING LATIN SMALL LETTER O
 * 	0000 
 * 0367 COMBINING LATIN SMALL LETTER U
 * 	0000 
 * 0368 COMBINING LATIN SMALL LETTER C
 * 	0000 
 * 0369 COMBINING LATIN SMALL LETTER D
 * 	0000 
 * 036A COMBINING LATIN SMALL LETTER H
 * 	0000 
 * 036B COMBINING LATIN SMALL LETTER M
 * 	0000 
 * 036C COMBINING LATIN SMALL LETTER R
 * 	0000 
 * 036D COMBINING LATIN SMALL LETTER T
 * 	0000 
 * 036E COMBINING LATIN SMALL LETTER V
 * 	0000 
 * 036F COMBINING LATIN SMALL LETTER X
 * 	0000 
 * 0374 GREEK NUMERAL SIGN
 * 	02B9 MODIFIER LETTER PRIME
 * 037A GREEK YPOGEGRAMMENI
 * 	0020 SPACE
 * 037E GREEK QUESTION MARK
 * 	003B SEMICOLON
 * 0384 GREEK TONOS
 * 	0020 SPACE
 * 0385 GREEK DIALYTIKA TONOS
 * 	0020 SPACE
 * 0386 GREEK CAPITAL LETTER ALPHA WITH TONOS
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 0387 GREEK ANO TELEIA
 * 	00B7 MIDDLE DOT
 * 0388 GREEK CAPITAL LETTER EPSILON WITH TONOS
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 0389 GREEK CAPITAL LETTER ETA WITH TONOS
 * 	0397 GREEK CAPITAL LETTER ETA
 * 038A GREEK CAPITAL LETTER IOTA WITH TONOS
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 038C GREEK CAPITAL LETTER OMICRON WITH TONOS
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 038E GREEK CAPITAL LETTER UPSILON WITH TONOS
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 038F GREEK CAPITAL LETTER OMEGA WITH TONOS
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 0390 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS
 * 	03B9 GREEK SMALL LETTER IOTA
 * 03AA GREEK CAPITAL LETTER IOTA WITH DIALYTIKA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 03AB GREEK CAPITAL LETTER UPSILON WITH DIALYTIKA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 03AC GREEK SMALL LETTER ALPHA WITH TONOS
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 03AD GREEK SMALL LETTER EPSILON WITH TONOS
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 03AE GREEK SMALL LETTER ETA WITH TONOS
 * 	03B7 GREEK SMALL LETTER ETA
 * 03AF GREEK SMALL LETTER IOTA WITH TONOS
 * 	03B9 GREEK SMALL LETTER IOTA
 * 03B0 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 03CA GREEK SMALL LETTER IOTA WITH DIALYTIKA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 03CB GREEK SMALL LETTER UPSILON WITH DIALYTIKA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 03CC GREEK SMALL LETTER OMICRON WITH TONOS
 * 	03BF GREEK SMALL LETTER OMICRON
 * 03CD GREEK SMALL LETTER UPSILON WITH TONOS
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 03CE GREEK SMALL LETTER OMEGA WITH TONOS
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 03D0 GREEK BETA SYMBOL
 * 	03B2 GREEK SMALL LETTER BETA
 * 03D1 GREEK THETA SYMBOL
 * 	03B8 GREEK SMALL LETTER THETA
 * 03D2 GREEK UPSILON WITH HOOK SYMBOL
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 03D3 GREEK UPSILON WITH ACUTE AND HOOK SYMBOL
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 03D4 GREEK UPSILON WITH DIAERESIS AND HOOK SYMBOL
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 03D5 GREEK PHI SYMBOL
 * 	03C6 GREEK SMALL LETTER PHI
 * 03D6 GREEK PI SYMBOL
 * 	03C0 GREEK SMALL LETTER PI
 * 03F0 GREEK KAPPA SYMBOL
 * 	03BA GREEK SMALL LETTER KAPPA
 * 03F1 GREEK RHO SYMBOL
 * 	03C1 GREEK SMALL LETTER RHO
 * 03F2 GREEK LUNATE SIGMA SYMBOL
 * 	03C2 GREEK SMALL LETTER FINAL SIGMA
 * 03F4 GREEK CAPITAL THETA SYMBOL
 * 	0398 GREEK CAPITAL LETTER THETA
 * 03F5 GREEK LUNATE EPSILON SYMBOL
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 03F9 GREEK CAPITAL LUNATE SIGMA SYMBOL
 * 	03A3 GREEK CAPITAL LETTER SIGMA
 * 0400 CYRILLIC CAPITAL LETTER IE WITH GRAVE
 * 	0415 CYRILLIC CAPITAL LETTER IE
 * 0401 CYRILLIC CAPITAL LETTER IO
 * 	0415 CYRILLIC CAPITAL LETTER IE
 * 0403 CYRILLIC CAPITAL LETTER GJE
 * 	0413 CYRILLIC CAPITAL LETTER GHE
 * 0407 CYRILLIC CAPITAL LETTER YI
 * 	0406 CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
 * 040C CYRILLIC CAPITAL LETTER KJE
 * 	041A CYRILLIC CAPITAL LETTER KA
 * 040D CYRILLIC CAPITAL LETTER I WITH GRAVE
 * 	0418 CYRILLIC CAPITAL LETTER I
 * 040E CYRILLIC CAPITAL LETTER SHORT U
 * 	0423 CYRILLIC CAPITAL LETTER U
 * 0419 CYRILLIC CAPITAL LETTER SHORT I
 * 	0418 CYRILLIC CAPITAL LETTER I
 * 0439 CYRILLIC SMALL LETTER SHORT I
 * 	0438 CYRILLIC SMALL LETTER I
 * 0450 CYRILLIC SMALL LETTER IE WITH GRAVE
 * 	0435 CYRILLIC SMALL LETTER IE
 * 0451 CYRILLIC SMALL LETTER IO
 * 	0435 CYRILLIC SMALL LETTER IE
 * 0453 CYRILLIC SMALL LETTER GJE
 * 	0433 CYRILLIC SMALL LETTER GHE
 * 0457 CYRILLIC SMALL LETTER YI
 * 	0456 CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
 * 045C CYRILLIC SMALL LETTER KJE
 * 	043A CYRILLIC SMALL LETTER KA
 * 045D CYRILLIC SMALL LETTER I WITH GRAVE
 * 	0438 CYRILLIC SMALL LETTER I
 * 045E CYRILLIC SMALL LETTER SHORT U
 * 	0443 CYRILLIC SMALL LETTER U
 * 0476 CYRILLIC CAPITAL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
 * 	0474 CYRILLIC CAPITAL LETTER IZHITSA
 * 0477 CYRILLIC SMALL LETTER IZHITSA WITH DOUBLE GRAVE ACCENT
 * 	0475 CYRILLIC SMALL LETTER IZHITSA
 * 0483 COMBINING CYRILLIC TITLO
 * 	0000 
 * 0484 COMBINING CYRILLIC PALATALIZATION
 * 	0000 
 * 0485 COMBINING CYRILLIC DASIA PNEUMATA
 * 	0000 
 * 0486 COMBINING CYRILLIC PSILI PNEUMATA
 * 	0000 
 * 0487 COMBINING CYRILLIC POKRYTIE
 * 	0000 
 * 0488 COMBINING CYRILLIC HUNDRED THOUSANDS SIGN
 * 	0000 
 * 0489 COMBINING CYRILLIC MILLIONS SIGN
 * 	0000 
 * 04C1 CYRILLIC CAPITAL LETTER ZHE WITH BREVE
 * 	0416 CYRILLIC CAPITAL LETTER ZHE
 * 04C2 CYRILLIC SMALL LETTER ZHE WITH BREVE
 * 	0436 CYRILLIC SMALL LETTER ZHE
 * 04D0 CYRILLIC CAPITAL LETTER A WITH BREVE
 * 	0410 CYRILLIC CAPITAL LETTER A
 * 04D1 CYRILLIC SMALL LETTER A WITH BREVE
 * 	0430 CYRILLIC SMALL LETTER A
 * 04D2 CYRILLIC CAPITAL LETTER A WITH DIAERESIS
 * 	0410 CYRILLIC CAPITAL LETTER A
 * 04D3 CYRILLIC SMALL LETTER A WITH DIAERESIS
 * 	0430 CYRILLIC SMALL LETTER A
 * 04D6 CYRILLIC CAPITAL LETTER IE WITH BREVE
 * 	0415 CYRILLIC CAPITAL LETTER IE
 * 04D7 CYRILLIC SMALL LETTER IE WITH BREVE
 * 	0435 CYRILLIC SMALL LETTER IE
 * 04DA CYRILLIC CAPITAL LETTER SCHWA WITH DIAERESIS
 * 	04D8 CYRILLIC CAPITAL LETTER SCHWA
 * 04DB CYRILLIC SMALL LETTER SCHWA WITH DIAERESIS
 * 	04D9 CYRILLIC SMALL LETTER SCHWA
 * 04DC CYRILLIC CAPITAL LETTER ZHE WITH DIAERESIS
 * 	0416 CYRILLIC CAPITAL LETTER ZHE
 * 04DD CYRILLIC SMALL LETTER ZHE WITH DIAERESIS
 * 	0436 CYRILLIC SMALL LETTER ZHE
 * 04DE CYRILLIC CAPITAL LETTER ZE WITH DIAERESIS
 * 	0417 CYRILLIC CAPITAL LETTER ZE
 * 04DF CYRILLIC SMALL LETTER ZE WITH DIAERESIS
 * 	0437 CYRILLIC SMALL LETTER ZE
 * 04E2 CYRILLIC CAPITAL LETTER I WITH MACRON
 * 	0418 CYRILLIC CAPITAL LETTER I
 * 04E3 CYRILLIC SMALL LETTER I WITH MACRON
 * 	0438 CYRILLIC SMALL LETTER I
 * 04E4 CYRILLIC CAPITAL LETTER I WITH DIAERESIS
 * 	0418 CYRILLIC CAPITAL LETTER I
 * 04E5 CYRILLIC SMALL LETTER I WITH DIAERESIS
 * 	0438 CYRILLIC SMALL LETTER I
 * 04E6 CYRILLIC CAPITAL LETTER O WITH DIAERESIS
 * 	041E CYRILLIC CAPITAL LETTER O
 * 04E7 CYRILLIC SMALL LETTER O WITH DIAERESIS
 * 	043E CYRILLIC SMALL LETTER O
 * 04EA CYRILLIC CAPITAL LETTER BARRED O WITH DIAERESIS
 * 	04E8 CYRILLIC CAPITAL LETTER BARRED O
 * 04EB CYRILLIC SMALL LETTER BARRED O WITH DIAERESIS
 * 	04E9 CYRILLIC SMALL LETTER BARRED O
 * 04EC CYRILLIC CAPITAL LETTER E WITH DIAERESIS
 * 	042D CYRILLIC CAPITAL LETTER E
 * 04ED CYRILLIC SMALL LETTER E WITH DIAERESIS
 * 	044D CYRILLIC SMALL LETTER E
 * 04EE CYRILLIC CAPITAL LETTER U WITH MACRON
 * 	0423 CYRILLIC CAPITAL LETTER U
 * 04EF CYRILLIC SMALL LETTER U WITH MACRON
 * 	0443 CYRILLIC SMALL LETTER U
 * 04F0 CYRILLIC CAPITAL LETTER U WITH DIAERESIS
 * 	0423 CYRILLIC CAPITAL LETTER U
 * 04F1 CYRILLIC SMALL LETTER U WITH DIAERESIS
 * 	0443 CYRILLIC SMALL LETTER U
 * 04F2 CYRILLIC CAPITAL LETTER U WITH DOUBLE ACUTE
 * 	0423 CYRILLIC CAPITAL LETTER U
 * 04F3 CYRILLIC SMALL LETTER U WITH DOUBLE ACUTE
 * 	0443 CYRILLIC SMALL LETTER U
 * 04F4 CYRILLIC CAPITAL LETTER CHE WITH DIAERESIS
 * 	0427 CYRILLIC CAPITAL LETTER CHE
 * 04F5 CYRILLIC SMALL LETTER CHE WITH DIAERESIS
 * 	0447 CYRILLIC SMALL LETTER CHE
 * 04F8 CYRILLIC CAPITAL LETTER YERU WITH DIAERESIS
 * 	042B CYRILLIC CAPITAL LETTER YERU
 * 04F9 CYRILLIC SMALL LETTER YERU WITH DIAERESIS
 * 	044B CYRILLIC SMALL LETTER YERU
 * 0587 ARMENIAN SMALL LIGATURE ECH YIWN
 * 	0565 ARMENIAN SMALL LETTER ECH
 * 	0582 ARMENIAN SMALL LETTER YIWN
 * 0591 HEBREW ACCENT ETNAHTA
 * 	0000 
 * 0592 HEBREW ACCENT SEGOL
 * 	0000 
 * 0593 HEBREW ACCENT SHALSHELET
 * 	0000 
 * 0594 HEBREW ACCENT ZAQEF QATAN
 * 	0000 
 * 0595 HEBREW ACCENT ZAQEF GADOL
 * 	0000 
 * 0596 HEBREW ACCENT TIPEHA
 * 	0000 
 * 0597 HEBREW ACCENT REVIA
 * 	0000 
 * 0598 HEBREW ACCENT ZARQA
 * 	0000 
 * 0599 HEBREW ACCENT PASHTA
 * 	0000 
 * 059A HEBREW ACCENT YETIV
 * 	0000 
 * 059B HEBREW ACCENT TEVIR
 * 	0000 
 * 059C HEBREW ACCENT GERESH
 * 	0000 
 * 059D HEBREW ACCENT GERESH MUQDAM
 * 	0000 
 * 059E HEBREW ACCENT GERSHAYIM
 * 	0000 
 * 059F HEBREW ACCENT QARNEY PARA
 * 	0000 
 * 05A0 HEBREW ACCENT TELISHA GEDOLA
 * 	0000 
 * 05A1 HEBREW ACCENT PAZER
 * 	0000 
 * 05A2 HEBREW ACCENT ATNAH HAFUKH
 * 	0000 
 * 05A3 HEBREW ACCENT MUNAH
 * 	0000 
 * 05A4 HEBREW ACCENT MAHAPAKH
 * 	0000 
 * 05A5 HEBREW ACCENT MERKHA
 * 	0000 
 * 05A6 HEBREW ACCENT MERKHA KEFULA
 * 	0000 
 * 05A7 HEBREW ACCENT DARGA
 * 	0000 
 * 05A8 HEBREW ACCENT QADMA
 * 	0000 
 * 05A9 HEBREW ACCENT TELISHA QETANA
 * 	0000 
 * 05AA HEBREW ACCENT YERAH BEN YOMO
 * 	0000 
 * 05AB HEBREW ACCENT OLE
 * 	0000 
 * 05AC HEBREW ACCENT ILUY
 * 	0000 
 * 05AD HEBREW ACCENT DEHI
 * 	0000 
 * 05AE HEBREW ACCENT ZINOR
 * 	0000 
 * 05AF HEBREW MARK MASORA CIRCLE
 * 	0000 
 * 05B0 HEBREW POINT SHEVA
 * 	0000 
 * 05B1 HEBREW POINT HATAF SEGOL
 * 	0000 
 * 05B2 HEBREW POINT HATAF PATAH
 * 	0000 
 * 05B3 HEBREW POINT HATAF QAMATS
 * 	0000 
 * 05B4 HEBREW POINT HIRIQ
 * 	0000 
 * 05B5 HEBREW POINT TSERE
 * 	0000 
 * 05B6 HEBREW POINT SEGOL
 * 	0000 
 * 05B7 HEBREW POINT PATAH
 * 	0000 
 * 05B8 HEBREW POINT QAMATS
 * 	0000 
 * 05B9 HEBREW POINT HOLAM
 * 	0000 
 * 05BA HEBREW POINT HOLAM HASER FOR VAV
 * 	0000 
 * 05BB HEBREW POINT QUBUTS
 * 	0000 
 * 05BC HEBREW POINT DAGESH OR MAPIQ
 * 	0000 
 * 05BD HEBREW POINT METEG
 * 	0000 
 * 05BF HEBREW POINT RAFE
 * 	0000 
 * 05C1 HEBREW POINT SHIN DOT
 * 	0000 
 * 05C2 HEBREW POINT SIN DOT
 * 	0000 
 * 05C4 HEBREW MARK UPPER DOT
 * 	0000 
 * 05C5 HEBREW MARK LOWER DOT
 * 	0000 
 * 05C7 HEBREW POINT QAMATS QATAN
 * 	0000 
 * 0610 ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM
 * 	0000 
 * 0611 ARABIC SIGN ALAYHE ASSALLAM
 * 	0000 
 * 0612 ARABIC SIGN RAHMATULLAH ALAYHE
 * 	0000 
 * 0613 ARABIC SIGN RADI ALLAHOU ANHU
 * 	0000 
 * 0614 ARABIC SIGN TAKHALLUS
 * 	0000 
 * 0615 ARABIC SMALL HIGH TAH
 * 	0000 
 * 0616 ARABIC SMALL HIGH LIGATURE ALEF WITH LAM WITH YEH
 * 	0000 
 * 0617 ARABIC SMALL HIGH ZAIN
 * 	0000 
 * 0618 ARABIC SMALL FATHA
 * 	0000 
 * 0619 ARABIC SMALL DAMMA
 * 	0000 
 * 061A ARABIC SMALL KASRA
 * 	0000 
 * 0622 ARABIC LETTER ALEF WITH MADDA ABOVE
 * 	0627 ARABIC LETTER ALEF
 * 0623 ARABIC LETTER ALEF WITH HAMZA ABOVE
 * 	0627 ARABIC LETTER ALEF
 * 0624 ARABIC LETTER WAW WITH HAMZA ABOVE
 * 	0648 ARABIC LETTER WAW
 * 0625 ARABIC LETTER ALEF WITH HAMZA BELOW
 * 	0627 ARABIC LETTER ALEF
 * 0626 ARABIC LETTER YEH WITH HAMZA ABOVE
 * 	064A ARABIC LETTER YEH
 * 064B ARABIC FATHATAN
 * 	0000 
 * 064C ARABIC DAMMATAN
 * 	0000 
 * 064D ARABIC KASRATAN
 * 	0000 
 * 064E ARABIC FATHA
 * 	0000 
 * 064F ARABIC DAMMA
 * 	0000 
 * 0650 ARABIC KASRA
 * 	0000 
 * 0651 ARABIC SHADDA
 * 	0000 
 * 0652 ARABIC SUKUN
 * 	0000 
 * 0653 ARABIC MADDAH ABOVE
 * 	0000 
 * 0654 ARABIC HAMZA ABOVE
 * 	0000 
 * 0655 ARABIC HAMZA BELOW
 * 	0000 
 * 0656 ARABIC SUBSCRIPT ALEF
 * 	0000 
 * 0657 ARABIC INVERTED DAMMA
 * 	0000 
 * 0658 ARABIC MARK NOON GHUNNA
 * 	0000 
 * 0659 ARABIC ZWARAKAY
 * 	0000 
 * 065A ARABIC VOWEL SIGN SMALL V ABOVE
 * 	0000 
 * 065B ARABIC VOWEL SIGN INVERTED SMALL V ABOVE
 * 	0000 
 * 065C ARABIC VOWEL SIGN DOT BELOW
 * 	0000 
 * 065D ARABIC REVERSED DAMMA
 * 	0000 
 * 065E ARABIC FATHA WITH TWO DOTS
 * 	0000 
 * 065F ARABIC WAVY HAMZA BELOW
 * 	0000 
 * 0670 ARABIC LETTER SUPERSCRIPT ALEF
 * 	0000 
 * 0675 ARABIC LETTER HIGH HAMZA ALEF
 * 	0627 ARABIC LETTER ALEF
 * 	0674 ARABIC LETTER HIGH HAMZA
 * 0676 ARABIC LETTER HIGH HAMZA WAW
 * 	0648 ARABIC LETTER WAW
 * 	0674 ARABIC LETTER HIGH HAMZA
 * 0677 ARABIC LETTER U WITH HAMZA ABOVE
 * 	06C7 ARABIC LETTER U
 * 	0674 ARABIC LETTER HIGH HAMZA
 * 0678 ARABIC LETTER HIGH HAMZA YEH
 * 	064A ARABIC LETTER YEH
 * 	0674 ARABIC LETTER HIGH HAMZA
 * 06C0 ARABIC LETTER HEH WITH YEH ABOVE
 * 	06D5 ARABIC LETTER AE
 * 06C2 ARABIC LETTER HEH GOAL WITH HAMZA ABOVE
 * 	06C1 ARABIC LETTER HEH GOAL
 * 06D3 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE
 * 	06D2 ARABIC LETTER YEH BARREE
 * 06D6 ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA
 * 	0000 
 * 06D7 ARABIC SMALL HIGH LIGATURE QAF WITH LAM WITH ALEF MAKSURA
 * 	0000 
 * 06D8 ARABIC SMALL HIGH MEEM INITIAL FORM
 * 	0000 
 * 06D9 ARABIC SMALL HIGH LAM ALEF
 * 	0000 
 * 06DA ARABIC SMALL HIGH JEEM
 * 	0000 
 * 06DB ARABIC SMALL HIGH THREE DOTS
 * 	0000 
 * 06DC ARABIC SMALL HIGH SEEN
 * 	0000 
 * 06DF ARABIC SMALL HIGH ROUNDED ZERO
 * 	0000 
 * 06E0 ARABIC SMALL HIGH UPRIGHT RECTANGULAR ZERO
 * 	0000 
 * 06E1 ARABIC SMALL HIGH DOTLESS HEAD OF KHAH
 * 	0000 
 * 06E2 ARABIC SMALL HIGH MEEM ISOLATED FORM
 * 	0000 
 * 06E3 ARABIC SMALL LOW SEEN
 * 	0000 
 * 06E4 ARABIC SMALL HIGH MADDA
 * 	0000 
 * 06E7 ARABIC SMALL HIGH YEH
 * 	0000 
 * 06E8 ARABIC SMALL HIGH NOON
 * 	0000 
 * 06EA ARABIC EMPTY CENTRE LOW STOP
 * 	0000 
 * 06EB ARABIC EMPTY CENTRE HIGH STOP
 * 	0000 
 * 06EC ARABIC ROUNDED HIGH STOP WITH FILLED CENTRE
 * 	0000 
 * 06ED ARABIC SMALL LOW MEEM
 * 	0000 
 * 0711 SYRIAC LETTER SUPERSCRIPT ALAPH
 * 	0000 
 * 0730 SYRIAC PTHAHA ABOVE
 * 	0000 
 * 0731 SYRIAC PTHAHA BELOW
 * 	0000 
 * 0732 SYRIAC PTHAHA DOTTED
 * 	0000 
 * 0733 SYRIAC ZQAPHA ABOVE
 * 	0000 
 * 0734 SYRIAC ZQAPHA BELOW
 * 	0000 
 * 0735 SYRIAC ZQAPHA DOTTED
 * 	0000 
 * 0736 SYRIAC RBASA ABOVE
 * 	0000 
 * 0737 SYRIAC RBASA BELOW
 * 	0000 
 * 0738 SYRIAC DOTTED ZLAMA HORIZONTAL
 * 	0000 
 * 0739 SYRIAC DOTTED ZLAMA ANGULAR
 * 	0000 
 * 073A SYRIAC HBASA ABOVE
 * 	0000 
 * 073B SYRIAC HBASA BELOW
 * 	0000 
 * 073C SYRIAC HBASA-ESASA DOTTED
 * 	0000 
 * 073D SYRIAC ESASA ABOVE
 * 	0000 
 * 073E SYRIAC ESASA BELOW
 * 	0000 
 * 073F SYRIAC RWAHA
 * 	0000 
 * 0740 SYRIAC FEMININE DOT
 * 	0000 
 * 0741 SYRIAC QUSHSHAYA
 * 	0000 
 * 0742 SYRIAC RUKKAKHA
 * 	0000 
 * 0743 SYRIAC TWO VERTICAL DOTS ABOVE
 * 	0000 
 * 0744 SYRIAC TWO VERTICAL DOTS BELOW
 * 	0000 
 * 0745 SYRIAC THREE DOTS ABOVE
 * 	0000 
 * 0746 SYRIAC THREE DOTS BELOW
 * 	0000 
 * 0747 SYRIAC OBLIQUE LINE ABOVE
 * 	0000 
 * 0748 SYRIAC OBLIQUE LINE BELOW
 * 	0000 
 * 0749 SYRIAC MUSIC
 * 	0000 
 * 074A SYRIAC BARREKH
 * 	0000 
 * 07A6 THAANA ABAFILI
 * 	0000 
 * 07A7 THAANA AABAAFILI
 * 	0000 
 * 07A8 THAANA IBIFILI
 * 	0000 
 * 07A9 THAANA EEBEEFILI
 * 	0000 
 * 07AA THAANA UBUFILI
 * 	0000 
 * 07AB THAANA OOBOOFILI
 * 	0000 
 * 07AC THAANA EBEFILI
 * 	0000 
 * 07AD THAANA EYBEYFILI
 * 	0000 
 * 07AE THAANA OBOFILI
 * 	0000 
 * 07AF THAANA OABOAFILI
 * 	0000 
 * 07B0 THAANA SUKUN
 * 	0000 
 * 07EB NKO COMBINING SHORT HIGH TONE
 * 	0000 
 * 07EC NKO COMBINING SHORT LOW TONE
 * 	0000 
 * 07ED NKO COMBINING SHORT RISING TONE
 * 	0000 
 * 07EE NKO COMBINING LONG DESCENDING TONE
 * 	0000 
 * 07EF NKO COMBINING LONG HIGH TONE
 * 	0000 
 * 07F0 NKO COMBINING LONG LOW TONE
 * 	0000 
 * 07F1 NKO COMBINING LONG RISING TONE
 * 	0000 
 * 07F2 NKO COMBINING NASALIZATION MARK
 * 	0000 
 * 07F3 NKO COMBINING DOUBLE DOT ABOVE
 * 	0000 
 * 0816 SAMARITAN MARK IN
 * 	0000 
 * 0817 SAMARITAN MARK IN-ALAF
 * 	0000 
 * 0818 SAMARITAN MARK OCCLUSION
 * 	0000 
 * 0819 SAMARITAN MARK DAGESH
 * 	0000 
 * 081B SAMARITAN MARK EPENTHETIC YUT
 * 	0000 
 * 081C SAMARITAN VOWEL SIGN LONG E
 * 	0000 
 * 081D SAMARITAN VOWEL SIGN E
 * 	0000 
 * 081E SAMARITAN VOWEL SIGN OVERLONG AA
 * 	0000 
 * 081F SAMARITAN VOWEL SIGN LONG AA
 * 	0000 
 * 0820 SAMARITAN VOWEL SIGN AA
 * 	0000 
 * 0821 SAMARITAN VOWEL SIGN OVERLONG A
 * 	0000 
 * 0822 SAMARITAN VOWEL SIGN LONG A
 * 	0000 
 * 0823 SAMARITAN VOWEL SIGN A
 * 	0000 
 * 0825 SAMARITAN VOWEL SIGN SHORT A
 * 	0000 
 * 0826 SAMARITAN VOWEL SIGN LONG U
 * 	0000 
 * 0827 SAMARITAN VOWEL SIGN U
 * 	0000 
 * 0829 SAMARITAN VOWEL SIGN LONG I
 * 	0000 
 * 082A SAMARITAN VOWEL SIGN I
 * 	0000 
 * 082B SAMARITAN VOWEL SIGN O
 * 	0000 
 * 082C SAMARITAN VOWEL SIGN SUKUN
 * 	0000 
 * 082D SAMARITAN MARK NEQUDAA
 * 	0000 
 * 0859 MANDAIC AFFRICATION MARK
 * 	0000 
 * 085A MANDAIC VOCALIZATION MARK
 * 	0000 
 * 085B MANDAIC GEMINATION MARK
 * 	0000 
 * 08E4 ARABIC CURLY FATHA
 * 	0000 
 * 08E5 ARABIC CURLY DAMMA
 * 	0000 
 * 08E6 ARABIC CURLY KASRA
 * 	0000 
 * 08E7 ARABIC CURLY FATHATAN
 * 	0000 
 * 08E8 ARABIC CURLY DAMMATAN
 * 	0000 
 * 08E9 ARABIC CURLY KASRATAN
 * 	0000 
 * 08EA ARABIC TONE ONE DOT ABOVE
 * 	0000 
 * 08EB ARABIC TONE TWO DOTS ABOVE
 * 	0000 
 * 08EC ARABIC TONE LOOP ABOVE
 * 	0000 
 * 08ED ARABIC TONE ONE DOT BELOW
 * 	0000 
 * 08EE ARABIC TONE TWO DOTS BELOW
 * 	0000 
 * 08EF ARABIC TONE LOOP BELOW
 * 	0000 
 * 08F0 ARABIC OPEN FATHATAN
 * 	0000 
 * 08F1 ARABIC OPEN DAMMATAN
 * 	0000 
 * 08F2 ARABIC OPEN KASRATAN
 * 	0000 
 * 08F3 ARABIC SMALL HIGH WAW
 * 	0000 
 * 08F4 ARABIC FATHA WITH RING
 * 	0000 
 * 08F5 ARABIC FATHA WITH DOT ABOVE
 * 	0000 
 * 08F6 ARABIC KASRA WITH DOT BELOW
 * 	0000 
 * 08F7 ARABIC LEFT ARROWHEAD ABOVE
 * 	0000 
 * 08F8 ARABIC RIGHT ARROWHEAD ABOVE
 * 	0000 
 * 08F9 ARABIC LEFT ARROWHEAD BELOW
 * 	0000 
 * 08FA ARABIC RIGHT ARROWHEAD BELOW
 * 	0000 
 * 08FB ARABIC DOUBLE RIGHT ARROWHEAD ABOVE
 * 	0000 
 * 08FC ARABIC DOUBLE RIGHT ARROWHEAD ABOVE WITH DOT
 * 	0000 
 * 08FD ARABIC RIGHT ARROWHEAD ABOVE WITH DOT
 * 	0000 
 * 08FE ARABIC DAMMA WITH DOT
 * 	0000 
 * 0A01 GURMUKHI SIGN ADAK BINDI
 * 	0000 
 * 0A02 GURMUKHI SIGN BINDI
 * 	0000 
 * 0A03 GURMUKHI SIGN VISARGA
 * 	0000 
 * 0A33 GURMUKHI LETTER LLA
 * 	0A32 GURMUKHI LETTER LA
 * 0A36 GURMUKHI LETTER SHA
 * 	0A38 GURMUKHI LETTER SA
 * 0A3C GURMUKHI SIGN NUKTA
 * 	0000 
 * 0A3E GURMUKHI VOWEL SIGN AA
 * 	0000 
 * 0A3F GURMUKHI VOWEL SIGN I
 * 	0000 
 * 0A40 GURMUKHI VOWEL SIGN II
 * 	0000 
 * 0A41 GURMUKHI VOWEL SIGN U
 * 	0000 
 * 0A42 GURMUKHI VOWEL SIGN UU
 * 	0000 
 * 0A47 GURMUKHI VOWEL SIGN EE
 * 	0000 
 * 0A48 GURMUKHI VOWEL SIGN AI
 * 	0000 
 * 0A4B GURMUKHI VOWEL SIGN OO
 * 	0000 
 * 0A4C GURMUKHI VOWEL SIGN AU
 * 	0000 
 * 0A4D GURMUKHI SIGN VIRAMA
 * 	0000 
 * 0A51 GURMUKHI SIGN UDAAT
 * 	0000 
 * 0A59 GURMUKHI LETTER KHHA
 * 	0A16 GURMUKHI LETTER KHA
 * 0A5A GURMUKHI LETTER GHHA
 * 	0A17 GURMUKHI LETTER GA
 * 0A5B GURMUKHI LETTER ZA
 * 	0A1C GURMUKHI LETTER JA
 * 0A5E GURMUKHI LETTER FA
 * 	0A2B GURMUKHI LETTER PHA
 * 0A70 GURMUKHI TIPPI
 * 	0000 
 * 0A71 GURMUKHI ADDAK
 * 	0000 
 * 0A75 GURMUKHI SIGN YAKASH
 * 	0000 
 * 0A81 GUJARATI SIGN CANDRABINDU
 * 	0000 
 * 0A82 GUJARATI SIGN ANUSVARA
 * 	0000 
 * 0A83 GUJARATI SIGN VISARGA
 * 	0000 
 * 0ABC GUJARATI SIGN NUKTA
 * 	0000 
 * 0ABE GUJARATI VOWEL SIGN AA
 * 	0000 
 * 0ABF GUJARATI VOWEL SIGN I
 * 	0000 
 * 0AC0 GUJARATI VOWEL SIGN II
 * 	0000 
 * 0AC1 GUJARATI VOWEL SIGN U
 * 	0000 
 * 0AC2 GUJARATI VOWEL SIGN UU
 * 	0000 
 * 0AC3 GUJARATI VOWEL SIGN VOCALIC R
 * 	0000 
 * 0AC4 GUJARATI VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0AC5 GUJARATI VOWEL SIGN CANDRA E
 * 	0000 
 * 0AC7 GUJARATI VOWEL SIGN E
 * 	0000 
 * 0AC8 GUJARATI VOWEL SIGN AI
 * 	0000 
 * 0AC9 GUJARATI VOWEL SIGN CANDRA O
 * 	0000 
 * 0ACB GUJARATI VOWEL SIGN O
 * 	0000 
 * 0ACC GUJARATI VOWEL SIGN AU
 * 	0000 
 * 0ACD GUJARATI SIGN VIRAMA
 * 	0000 
 * 0AE2 GUJARATI VOWEL SIGN VOCALIC L
 * 	0000 
 * 0AE3 GUJARATI VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0B01 ORIYA SIGN CANDRABINDU
 * 	0000 
 * 0B02 ORIYA SIGN ANUSVARA
 * 	0000 
 * 0B03 ORIYA SIGN VISARGA
 * 	0000 
 * 0B3C ORIYA SIGN NUKTA
 * 	0000 
 * 0B3E ORIYA VOWEL SIGN AA
 * 	0000 
 * 0B3F ORIYA VOWEL SIGN I
 * 	0000 
 * 0B40 ORIYA VOWEL SIGN II
 * 	0000 
 * 0B41 ORIYA VOWEL SIGN U
 * 	0000 
 * 0B42 ORIYA VOWEL SIGN UU
 * 	0000 
 * 0B43 ORIYA VOWEL SIGN VOCALIC R
 * 	0000 
 * 0B44 ORIYA VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0B47 ORIYA VOWEL SIGN E
 * 	0000 
 * 0B48 ORIYA VOWEL SIGN AI
 * 	0000 
 * 0B4B ORIYA VOWEL SIGN O
 * 	0000 
 * 0B4C ORIYA VOWEL SIGN AU
 * 	0000 
 * 0B4D ORIYA SIGN VIRAMA
 * 	0000 
 * 0B56 ORIYA AI LENGTH MARK
 * 	0000 
 * 0B57 ORIYA AU LENGTH MARK
 * 	0000 
 * 0B5C ORIYA LETTER RRA
 * 	0B21 ORIYA LETTER DDA
 * 0B5D ORIYA LETTER RHA
 * 	0B22 ORIYA LETTER DDHA
 * 0B62 ORIYA VOWEL SIGN VOCALIC L
 * 	0000 
 * 0B63 ORIYA VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0B82 TAMIL SIGN ANUSVARA
 * 	0000 
 * 0B94 TAMIL LETTER AU
 * 	0B92 TAMIL LETTER O
 * 0BBE TAMIL VOWEL SIGN AA
 * 	0000 
 * 0BBF TAMIL VOWEL SIGN I
 * 	0000 
 * 0BC0 TAMIL VOWEL SIGN II
 * 	0000 
 * 0BC1 TAMIL VOWEL SIGN U
 * 	0000 
 * 0BC2 TAMIL VOWEL SIGN UU
 * 	0000 
 * 0BC6 TAMIL VOWEL SIGN E
 * 	0000 
 * 0BC7 TAMIL VOWEL SIGN EE
 * 	0000 
 * 0BC8 TAMIL VOWEL SIGN AI
 * 	0000 
 * 0BCA TAMIL VOWEL SIGN O
 * 	0000 
 * 0BCB TAMIL VOWEL SIGN OO
 * 	0000 
 * 0BCC TAMIL VOWEL SIGN AU
 * 	0000 
 * 0BCD TAMIL SIGN VIRAMA
 * 	0000 
 * 0BD7 TAMIL AU LENGTH MARK
 * 	0000 
 * 0C01 TELUGU SIGN CANDRABINDU
 * 	0000 
 * 0C02 TELUGU SIGN ANUSVARA
 * 	0000 
 * 0C03 TELUGU SIGN VISARGA
 * 	0000 
 * 0C3E TELUGU VOWEL SIGN AA
 * 	0000 
 * 0C3F TELUGU VOWEL SIGN I
 * 	0000 
 * 0C40 TELUGU VOWEL SIGN II
 * 	0000 
 * 0C41 TELUGU VOWEL SIGN U
 * 	0000 
 * 0C42 TELUGU VOWEL SIGN UU
 * 	0000 
 * 0C43 TELUGU VOWEL SIGN VOCALIC R
 * 	0000 
 * 0C44 TELUGU VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0C46 TELUGU VOWEL SIGN E
 * 	0000 
 * 0C47 TELUGU VOWEL SIGN EE
 * 	0000 
 * 0C48 TELUGU VOWEL SIGN AI
 * 	0000 
 * 0C4A TELUGU VOWEL SIGN O
 * 	0000 
 * 0C4B TELUGU VOWEL SIGN OO
 * 	0000 
 * 0C4C TELUGU VOWEL SIGN AU
 * 	0000 
 * 0C4D TELUGU SIGN VIRAMA
 * 	0000 
 * 0C55 TELUGU LENGTH MARK
 * 	0000 
 * 0C56 TELUGU AI LENGTH MARK
 * 	0000 
 * 0C62 TELUGU VOWEL SIGN VOCALIC L
 * 	0000 
 * 0C63 TELUGU VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0C82 KANNADA SIGN ANUSVARA
 * 	0000 
 * 0C83 KANNADA SIGN VISARGA
 * 	0000 
 * 0CBC KANNADA SIGN NUKTA
 * 	0000 
 * 0CBE KANNADA VOWEL SIGN AA
 * 	0000 
 * 0CBF KANNADA VOWEL SIGN I
 * 	0000 
 * 0CC0 KANNADA VOWEL SIGN II
 * 	0000 
 * 0CC1 KANNADA VOWEL SIGN U
 * 	0000 
 * 0CC2 KANNADA VOWEL SIGN UU
 * 	0000 
 * 0CC3 KANNADA VOWEL SIGN VOCALIC R
 * 	0000 
 * 0CC4 KANNADA VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0CC6 KANNADA VOWEL SIGN E
 * 	0000 
 * 0CC7 KANNADA VOWEL SIGN EE
 * 	0000 
 * 0CC8 KANNADA VOWEL SIGN AI
 * 	0000 
 * 0CCA KANNADA VOWEL SIGN O
 * 	0000 
 * 0CCB KANNADA VOWEL SIGN OO
 * 	0000 
 * 0CCC KANNADA VOWEL SIGN AU
 * 	0000 
 * 0CCD KANNADA SIGN VIRAMA
 * 	0000 
 * 0CD5 KANNADA LENGTH MARK
 * 	0000 
 * 0CD6 KANNADA AI LENGTH MARK
 * 	0000 
 * 0CE2 KANNADA VOWEL SIGN VOCALIC L
 * 	0000 
 * 0CE3 KANNADA VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0D02 MALAYALAM SIGN ANUSVARA
 * 	0000 
 * 0D03 MALAYALAM SIGN VISARGA
 * 	0000 
 * 0D3E MALAYALAM VOWEL SIGN AA
 * 	0000 
 * 0D3F MALAYALAM VOWEL SIGN I
 * 	0000 
 * 0D40 MALAYALAM VOWEL SIGN II
 * 	0000 
 * 0D41 MALAYALAM VOWEL SIGN U
 * 	0000 
 * 0D42 MALAYALAM VOWEL SIGN UU
 * 	0000 
 * 0D43 MALAYALAM VOWEL SIGN VOCALIC R
 * 	0000 
 * 0D44 MALAYALAM VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0D46 MALAYALAM VOWEL SIGN E
 * 	0000 
 * 0D47 MALAYALAM VOWEL SIGN EE
 * 	0000 
 * 0D48 MALAYALAM VOWEL SIGN AI
 * 	0000 
 * 0D4A MALAYALAM VOWEL SIGN O
 * 	0000 
 * 0D4B MALAYALAM VOWEL SIGN OO
 * 	0000 
 * 0D4C MALAYALAM VOWEL SIGN AU
 * 	0000 
 * 0D4D MALAYALAM SIGN VIRAMA
 * 	0000 
 * 0D57 MALAYALAM AU LENGTH MARK
 * 	0000 
 * 0D62 MALAYALAM VOWEL SIGN VOCALIC L
 * 	0000 
 * 0D63 MALAYALAM VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0D82 SINHALA SIGN ANUSVARAYA
 * 	0000 
 * 0D83 SINHALA SIGN VISARGAYA
 * 	0000 
 * 0DCA SINHALA SIGN AL-LAKUNA
 * 	0000 
 * 0DCF SINHALA VOWEL SIGN AELA-PILLA
 * 	0000 
 * 0DD0 SINHALA VOWEL SIGN KETTI AEDA-PILLA
 * 	0000 
 * 0DD1 SINHALA VOWEL SIGN DIGA AEDA-PILLA
 * 	0000 
 * 0DD2 SINHALA VOWEL SIGN KETTI IS-PILLA
 * 	0000 
 * 0DD3 SINHALA VOWEL SIGN DIGA IS-PILLA
 * 	0000 
 * 0DD4 SINHALA VOWEL SIGN KETTI PAA-PILLA
 * 	0000 
 * 0DD6 SINHALA VOWEL SIGN DIGA PAA-PILLA
 * 	0000 
 * 0DD8 SINHALA VOWEL SIGN GAETTA-PILLA
 * 	0000 
 * 0DD9 SINHALA VOWEL SIGN KOMBUVA
 * 	0000 
 * 0DDA SINHALA VOWEL SIGN DIGA KOMBUVA
 * 	0000 
 * 0DDB SINHALA VOWEL SIGN KOMBU DEKA
 * 	0000 
 * 0DDC SINHALA VOWEL SIGN KOMBUVA HAA AELA-PILLA
 * 	0000 
 * 0DDD SINHALA VOWEL SIGN KOMBUVA HAA DIGA AELA-PILLA
 * 	0000 
 * 0DDE SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA
 * 	0000 
 * 0DDF SINHALA VOWEL SIGN GAYANUKITTA
 * 	0000 
 * 0DF2 SINHALA VOWEL SIGN DIGA GAETTA-PILLA
 * 	0000 
 * 0DF3 SINHALA VOWEL SIGN DIGA GAYANUKITTA
 * 	0000 
 * 0E31 THAI CHARACTER MAI HAN-AKAT
 * 	0000 
 * 0E33 THAI CHARACTER SARA AM
 * 	0E32 THAI CHARACTER SARA AA
 * 0E34 THAI CHARACTER SARA I
 * 	0000 
 * 0E35 THAI CHARACTER SARA II
 * 	0000 
 * 0E36 THAI CHARACTER SARA UE
 * 	0000 
 * 0E37 THAI CHARACTER SARA UEE
 * 	0000 
 * 0E38 THAI CHARACTER SARA U
 * 	0000 
 * 0E39 THAI CHARACTER SARA UU
 * 	0000 
 * 0E3A THAI CHARACTER PHINTHU
 * 	0000 
 * 0E47 THAI CHARACTER MAITAIKHU
 * 	0000 
 * 0E48 THAI CHARACTER MAI EK
 * 	0000 
 * 0E49 THAI CHARACTER MAI THO
 * 	0000 
 * 0E4A THAI CHARACTER MAI TRI
 * 	0000 
 * 0E4B THAI CHARACTER MAI CHATTAWA
 * 	0000 
 * 0E4C THAI CHARACTER THANTHAKHAT
 * 	0000 
 * 0E4D THAI CHARACTER NIKHAHIT
 * 	0000 
 * 0E4E THAI CHARACTER YAMAKKAN
 * 	0000 
 * 0EB1 LAO VOWEL SIGN MAI KAN
 * 	0000 
 * 0EB3 LAO VOWEL SIGN AM
 * 	0EB2 LAO VOWEL SIGN AA
 * 0EB4 LAO VOWEL SIGN I
 * 	0000 
 * 0EB5 LAO VOWEL SIGN II
 * 	0000 
 * 0EB6 LAO VOWEL SIGN Y
 * 	0000 
 * 0EB7 LAO VOWEL SIGN YY
 * 	0000 
 * 0EB8 LAO VOWEL SIGN U
 * 	0000 
 * 0EB9 LAO VOWEL SIGN UU
 * 	0000 
 * 0EBB LAO VOWEL SIGN MAI KON
 * 	0000 
 * 0EBC LAO SEMIVOWEL SIGN LO
 * 	0000 
 * 0EC8 LAO TONE MAI EK
 * 	0000 
 * 0EC9 LAO TONE MAI THO
 * 	0000 
 * 0ECA LAO TONE MAI TI
 * 	0000 
 * 0ECB LAO TONE MAI CATAWA
 * 	0000 
 * 0ECC LAO CANCELLATION MARK
 * 	0000 
 * 0ECD LAO NIGGAHITA
 * 	0000 
 * 0EDC LAO HO NO
 * 	0EAB LAO LETTER HO SUNG
 * 	0E99 LAO LETTER NO
 * 0EDD LAO HO MO
 * 	0EAB LAO LETTER HO SUNG
 * 	0EA1 LAO LETTER MO
 * 0F0C TIBETAN MARK DELIMITER TSHEG BSTAR
 * 	0F0B TIBETAN MARK INTERSYLLABIC TSHEG
 * 0F18 TIBETAN ASTROLOGICAL SIGN -KHYUD PA
 * 	0000 
 * 0F19 TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS
 * 	0000 
 * 0F35 TIBETAN MARK NGAS BZUNG NYI ZLA
 * 	0000 
 * 0F37 TIBETAN MARK NGAS BZUNG SGOR RTAGS
 * 	0000 
 * 0F39 TIBETAN MARK TSA -PHRU
 * 	0000 
 * 0F3E TIBETAN SIGN YAR TSHES
 * 	0000 
 * 0F3F TIBETAN SIGN MAR TSHES
 * 	0000 
 * 0F43 TIBETAN LETTER GHA
 * 	0F42 TIBETAN LETTER GA
 * 0F4D TIBETAN LETTER DDHA
 * 	0F4C TIBETAN LETTER DDA
 * 0F52 TIBETAN LETTER DHA
 * 	0F51 TIBETAN LETTER DA
 * 0F57 TIBETAN LETTER BHA
 * 	0F56 TIBETAN LETTER BA
 * 0F5C TIBETAN LETTER DZHA
 * 	0F5B TIBETAN LETTER DZA
 * 0F69 TIBETAN LETTER KSSA
 * 	0F40 TIBETAN LETTER KA
 * 0F71 TIBETAN VOWEL SIGN AA
 * 	0000 
 * 0F72 TIBETAN VOWEL SIGN I
 * 	0000 
 * 0F73 TIBETAN VOWEL SIGN II
 * 	0000 
 * 0F74 TIBETAN VOWEL SIGN U
 * 	0000 
 * 0F75 TIBETAN VOWEL SIGN UU
 * 	0000 
 * 0F76 TIBETAN VOWEL SIGN VOCALIC R
 * 	0000 
 * 0F77 TIBETAN VOWEL SIGN VOCALIC RR
 * 	0000 
 * 0F78 TIBETAN VOWEL SIGN VOCALIC L
 * 	0000 
 * 0F79 TIBETAN VOWEL SIGN VOCALIC LL
 * 	0000 
 * 0F7A TIBETAN VOWEL SIGN E
 * 	0000 
 * 0F7B TIBETAN VOWEL SIGN EE
 * 	0000 
 * 0F7C TIBETAN VOWEL SIGN O
 * 	0000 
 * 0F7D TIBETAN VOWEL SIGN OO
 * 	0000 
 * 0F7E TIBETAN SIGN RJES SU NGA RO
 * 	0000 
 * 0F7F TIBETAN SIGN RNAM BCAD
 * 	0000 
 * 0F80 TIBETAN VOWEL SIGN REVERSED I
 * 	0000 
 * 0F81 TIBETAN VOWEL SIGN REVERSED II
 * 	0000 
 * 0F82 TIBETAN SIGN NYI ZLA NAA DA
 * 	0000 
 * 0F83 TIBETAN SIGN SNA LDAN
 * 	0000 
 * 0F84 TIBETAN MARK HALANTA
 * 	0000 
 * 0F86 TIBETAN SIGN LCI RTAGS
 * 	0000 
 * 0F87 TIBETAN SIGN YANG RTAGS
 * 	0000 
 * 0F8D TIBETAN SUBJOINED SIGN LCE TSA CAN
 * 	0000 
 * 0F8E TIBETAN SUBJOINED SIGN MCHU CAN
 * 	0000 
 * 0F8F TIBETAN SUBJOINED SIGN INVERTED MCHU CAN
 * 	0000 
 * 0F90 TIBETAN SUBJOINED LETTER KA
 * 	0000 
 * 0F91 TIBETAN SUBJOINED LETTER KHA
 * 	0000 
 * 0F92 TIBETAN SUBJOINED LETTER GA
 * 	0000 
 * 0F93 TIBETAN SUBJOINED LETTER GHA
 * 	0000 
 * 0F94 TIBETAN SUBJOINED LETTER NGA
 * 	0000 
 * 0F95 TIBETAN SUBJOINED LETTER CA
 * 	0000 
 * 0F96 TIBETAN SUBJOINED LETTER CHA
 * 	0000 
 * 0F97 TIBETAN SUBJOINED LETTER JA
 * 	0000 
 * 0F99 TIBETAN SUBJOINED LETTER NYA
 * 	0000 
 * 0F9A TIBETAN SUBJOINED LETTER TTA
 * 	0000 
 * 0F9B TIBETAN SUBJOINED LETTER TTHA
 * 	0000 
 * 0F9C TIBETAN SUBJOINED LETTER DDA
 * 	0000 
 * 0F9D TIBETAN SUBJOINED LETTER DDHA
 * 	0000 
 * 0F9E TIBETAN SUBJOINED LETTER NNA
 * 	0000 
 * 0F9F TIBETAN SUBJOINED LETTER TA
 * 	0000 
 * 0FA0 TIBETAN SUBJOINED LETTER THA
 * 	0000 
 * 0FA1 TIBETAN SUBJOINED LETTER DA
 * 	0000 
 * 0FA2 TIBETAN SUBJOINED LETTER DHA
 * 	0000 
 * 0FA3 TIBETAN SUBJOINED LETTER NA
 * 	0000 
 * 0FA4 TIBETAN SUBJOINED LETTER PA
 * 	0000 
 * 0FA5 TIBETAN SUBJOINED LETTER PHA
 * 	0000 
 * 0FA6 TIBETAN SUBJOINED LETTER BA
 * 	0000 
 * 0FA7 TIBETAN SUBJOINED LETTER BHA
 * 	0000 
 * 0FA8 TIBETAN SUBJOINED LETTER MA
 * 	0000 
 * 0FA9 TIBETAN SUBJOINED LETTER TSA
 * 	0000 
 * 0FAA TIBETAN SUBJOINED LETTER TSHA
 * 	0000 
 * 0FAB TIBETAN SUBJOINED LETTER DZA
 * 	0000 
 * 0FAC TIBETAN SUBJOINED LETTER DZHA
 * 	0000 
 * 0FAD TIBETAN SUBJOINED LETTER WA
 * 	0000 
 * 0FAE TIBETAN SUBJOINED LETTER ZHA
 * 	0000 
 * 0FAF TIBETAN SUBJOINED LETTER ZA
 * 	0000 
 * 0FB0 TIBETAN SUBJOINED LETTER -A
 * 	0000 
 * 0FB1 TIBETAN SUBJOINED LETTER YA
 * 	0000 
 * 0FB2 TIBETAN SUBJOINED LETTER RA
 * 	0000 
 * 0FB3 TIBETAN SUBJOINED LETTER LA
 * 	0000 
 * 0FB4 TIBETAN SUBJOINED LETTER SHA
 * 	0000 
 * 0FB5 TIBETAN SUBJOINED LETTER SSA
 * 	0000 
 * 0FB6 TIBETAN SUBJOINED LETTER SA
 * 	0000 
 * 0FB7 TIBETAN SUBJOINED LETTER HA
 * 	0000 
 * 0FB8 TIBETAN SUBJOINED LETTER A
 * 	0000 
 * 0FB9 TIBETAN SUBJOINED LETTER KSSA
 * 	0000 
 * 0FBA TIBETAN SUBJOINED LETTER FIXED-FORM WA
 * 	0000 
 * 0FBB TIBETAN SUBJOINED LETTER FIXED-FORM YA
 * 	0000 
 * 0FBC TIBETAN SUBJOINED LETTER FIXED-FORM RA
 * 	0000 
 * 0FC6 TIBETAN SYMBOL PADMA GDAN
 * 	0000 
 * 1026 MYANMAR LETTER UU
 * 	1025 MYANMAR LETTER U
 * 102B MYANMAR VOWEL SIGN TALL AA
 * 	0000 
 * 102C MYANMAR VOWEL SIGN AA
 * 	0000 
 * 102D MYANMAR VOWEL SIGN I
 * 	0000 
 * 102E MYANMAR VOWEL SIGN II
 * 	0000 
 * 102F MYANMAR VOWEL SIGN U
 * 	0000 
 * 1030 MYANMAR VOWEL SIGN UU
 * 	0000 
 * 1031 MYANMAR VOWEL SIGN E
 * 	0000 
 * 1032 MYANMAR VOWEL SIGN AI
 * 	0000 
 * 1033 MYANMAR VOWEL SIGN MON II
 * 	0000 
 * 1034 MYANMAR VOWEL SIGN MON O
 * 	0000 
 * 1035 MYANMAR VOWEL SIGN E ABOVE
 * 	0000 
 * 1036 MYANMAR SIGN ANUSVARA
 * 	0000 
 * 1037 MYANMAR SIGN DOT BELOW
 * 	0000 
 * 1038 MYANMAR SIGN VISARGA
 * 	0000 
 * 1039 MYANMAR SIGN VIRAMA
 * 	0000 
 * 103A MYANMAR SIGN ASAT
 * 	0000 
 * 103B MYANMAR CONSONANT SIGN MEDIAL YA
 * 	0000 
 * 103C MYANMAR CONSONANT SIGN MEDIAL RA
 * 	0000 
 * 103D MYANMAR CONSONANT SIGN MEDIAL WA
 * 	0000 
 * 103E MYANMAR CONSONANT SIGN MEDIAL HA
 * 	0000 
 * 1056 MYANMAR VOWEL SIGN VOCALIC R
 * 	0000 
 * 1057 MYANMAR VOWEL SIGN VOCALIC RR
 * 	0000 
 * 1058 MYANMAR VOWEL SIGN VOCALIC L
 * 	0000 
 * 1059 MYANMAR VOWEL SIGN VOCALIC LL
 * 	0000 
 * 105E MYANMAR CONSONANT SIGN MON MEDIAL NA
 * 	0000 
 * 105F MYANMAR CONSONANT SIGN MON MEDIAL MA
 * 	0000 
 * 1060 MYANMAR CONSONANT SIGN MON MEDIAL LA
 * 	0000 
 * 1062 MYANMAR VOWEL SIGN SGAW KAREN EU
 * 	0000 
 * 1063 MYANMAR TONE MARK SGAW KAREN HATHI
 * 	0000 
 * 1064 MYANMAR TONE MARK SGAW KAREN KE PHO
 * 	0000 
 * 1067 MYANMAR VOWEL SIGN WESTERN PWO KAREN EU
 * 	0000 
 * 1068 MYANMAR VOWEL SIGN WESTERN PWO KAREN UE
 * 	0000 
 * 1069 MYANMAR SIGN WESTERN PWO KAREN TONE-1
 * 	0000 
 * 106A MYANMAR SIGN WESTERN PWO KAREN TONE-2
 * 	0000 
 * 106B MYANMAR SIGN WESTERN PWO KAREN TONE-3
 * 	0000 
 * 106C MYANMAR SIGN WESTERN PWO KAREN TONE-4
 * 	0000 
 * 106D MYANMAR SIGN WESTERN PWO KAREN TONE-5
 * 	0000 
 * 1071 MYANMAR VOWEL SIGN GEBA KAREN I
 * 	0000 
 * 1072 MYANMAR VOWEL SIGN KAYAH OE
 * 	0000 
 * 1073 MYANMAR VOWEL SIGN KAYAH U
 * 	0000 
 * 1074 MYANMAR VOWEL SIGN KAYAH EE
 * 	0000 
 * 1082 MYANMAR CONSONANT SIGN SHAN MEDIAL WA
 * 	0000 
 * 1083 MYANMAR VOWEL SIGN SHAN AA
 * 	0000 
 * 1084 MYANMAR VOWEL SIGN SHAN E
 * 	0000 
 * 1085 MYANMAR VOWEL SIGN SHAN E ABOVE
 * 	0000 
 * 1086 MYANMAR VOWEL SIGN SHAN FINAL Y
 * 	0000 
 * 1087 MYANMAR SIGN SHAN TONE-2
 * 	0000 
 * 1088 MYANMAR SIGN SHAN TONE-3
 * 	0000 
 * 1089 MYANMAR SIGN SHAN TONE-5
 * 	0000 
 * 108A MYANMAR SIGN SHAN TONE-6
 * 	0000 
 * 108B MYANMAR SIGN SHAN COUNCIL TONE-2
 * 	0000 
 * 108C MYANMAR SIGN SHAN COUNCIL TONE-3
 * 	0000 
 * 108D MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE
 * 	0000 
 * 108F MYANMAR SIGN RUMAI PALAUNG TONE-5
 * 	0000 
 * 109A MYANMAR SIGN KHAMTI TONE-1
 * 	0000 
 * 109B MYANMAR SIGN KHAMTI TONE-3
 * 	0000 
 * 109C MYANMAR VOWEL SIGN AITON A
 * 	0000 
 * 109D MYANMAR VOWEL SIGN AITON AI
 * 	0000 
 * 10FC MODIFIER LETTER GEORGIAN NAR
 * 	10DC GEORGIAN LETTER NAR
 * 135D ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK
 * 	0000 
 * 135E ETHIOPIC COMBINING VOWEL LENGTH MARK
 * 	0000 
 * 135F ETHIOPIC COMBINING GEMINATION MARK
 * 	0000 
 * 1712 TAGALOG VOWEL SIGN I
 * 	0000 
 * 1713 TAGALOG VOWEL SIGN U
 * 	0000 
 * 1714 TAGALOG SIGN VIRAMA
 * 	0000 
 * 1732 HANUNOO VOWEL SIGN I
 * 	0000 
 * 1733 HANUNOO VOWEL SIGN U
 * 	0000 
 * 1734 HANUNOO SIGN PAMUDPOD
 * 	0000 
 * 1752 BUHID VOWEL SIGN I
 * 	0000 
 * 1753 BUHID VOWEL SIGN U
 * 	0000 
 * 1772 TAGBANWA VOWEL SIGN I
 * 	0000 
 * 1773 TAGBANWA VOWEL SIGN U
 * 	0000 
 * 17B4 KHMER VOWEL INHERENT AQ
 * 	0000 
 * 17B5 KHMER VOWEL INHERENT AA
 * 	0000 
 * 17B6 KHMER VOWEL SIGN AA
 * 	0000 
 * 17B7 KHMER VOWEL SIGN I
 * 	0000 
 * 17B8 KHMER VOWEL SIGN II
 * 	0000 
 * 17B9 KHMER VOWEL SIGN Y
 * 	0000 
 * 17BA KHMER VOWEL SIGN YY
 * 	0000 
 * 17BB KHMER VOWEL SIGN U
 * 	0000 
 * 17BC KHMER VOWEL SIGN UU
 * 	0000 
 * 17BD KHMER VOWEL SIGN UA
 * 	0000 
 * 17BE KHMER VOWEL SIGN OE
 * 	0000 
 * 17BF KHMER VOWEL SIGN YA
 * 	0000 
 * 17C0 KHMER VOWEL SIGN IE
 * 	0000 
 * 17C1 KHMER VOWEL SIGN E
 * 	0000 
 * 17C2 KHMER VOWEL SIGN AE
 * 	0000 
 * 17C3 KHMER VOWEL SIGN AI
 * 	0000 
 * 17C4 KHMER VOWEL SIGN OO
 * 	0000 
 * 17C5 KHMER VOWEL SIGN AU
 * 	0000 
 * 17C6 KHMER SIGN NIKAHIT
 * 	0000 
 * 17C7 KHMER SIGN REAHMUK
 * 	0000 
 * 17C8 KHMER SIGN YUUKALEAPINTU
 * 	0000 
 * 17C9 KHMER SIGN MUUSIKATOAN
 * 	0000 
 * 17CA KHMER SIGN TRIISAP
 * 	0000 
 * 17CB KHMER SIGN BANTOC
 * 	0000 
 * 17CC KHMER SIGN ROBAT
 * 	0000 
 * 17CD KHMER SIGN TOANDAKHIAT
 * 	0000 
 * 17CE KHMER SIGN KAKABAT
 * 	0000 
 * 17CF KHMER SIGN AHSDA
 * 	0000 
 * 17D0 KHMER SIGN SAMYOK SANNYA
 * 	0000 
 * 17D1 KHMER SIGN VIRIAM
 * 	0000 
 * 17D2 KHMER SIGN COENG
 * 	0000 
 * 17D3 KHMER SIGN BATHAMASAT
 * 	0000 
 * 17DD KHMER SIGN ATTHACAN
 * 	0000 
 * 180B MONGOLIAN FREE VARIATION SELECTOR ONE
 * 	0000 
 * 180C MONGOLIAN FREE VARIATION SELECTOR TWO
 * 	0000 
 * 180D MONGOLIAN FREE VARIATION SELECTOR THREE
 * 	0000 
 * 18A9 MONGOLIAN LETTER ALI GALI DAGALGA
 * 	0000 
 * 1920 LIMBU VOWEL SIGN A
 * 	0000 
 * 1921 LIMBU VOWEL SIGN I
 * 	0000 
 * 1922 LIMBU VOWEL SIGN U
 * 	0000 
 * 1923 LIMBU VOWEL SIGN EE
 * 	0000 
 * 1924 LIMBU VOWEL SIGN AI
 * 	0000 
 * 1925 LIMBU VOWEL SIGN OO
 * 	0000 
 * 1926 LIMBU VOWEL SIGN AU
 * 	0000 
 * 1927 LIMBU VOWEL SIGN E
 * 	0000 
 * 1928 LIMBU VOWEL SIGN O
 * 	0000 
 * 1929 LIMBU SUBJOINED LETTER YA
 * 	0000 
 * 192A LIMBU SUBJOINED LETTER RA
 * 	0000 
 * 192B LIMBU SUBJOINED LETTER WA
 * 	0000 
 * 1930 LIMBU SMALL LETTER KA
 * 	0000 
 * 1931 LIMBU SMALL LETTER NGA
 * 	0000 
 * 1932 LIMBU SMALL LETTER ANUSVARA
 * 	0000 
 * 1933 LIMBU SMALL LETTER TA
 * 	0000 
 * 1934 LIMBU SMALL LETTER NA
 * 	0000 
 * 1935 LIMBU SMALL LETTER PA
 * 	0000 
 * 1936 LIMBU SMALL LETTER MA
 * 	0000 
 * 1937 LIMBU SMALL LETTER RA
 * 	0000 
 * 1938 LIMBU SMALL LETTER LA
 * 	0000 
 * 1939 LIMBU SIGN MUKPHRENG
 * 	0000 
 * 193A LIMBU SIGN KEMPHRENG
 * 	0000 
 * 193B LIMBU SIGN SA-I
 * 	0000 
 * 19B0 NEW TAI LUE VOWEL SIGN VOWEL SHORTENER
 * 	0000 
 * 19B1 NEW TAI LUE VOWEL SIGN AA
 * 	0000 
 * 19B2 NEW TAI LUE VOWEL SIGN II
 * 	0000 
 * 19B3 NEW TAI LUE VOWEL SIGN U
 * 	0000 
 * 19B4 NEW TAI LUE VOWEL SIGN UU
 * 	0000 
 * 19B5 NEW TAI LUE VOWEL SIGN E
 * 	0000 
 * 19B6 NEW TAI LUE VOWEL SIGN AE
 * 	0000 
 * 19B7 NEW TAI LUE VOWEL SIGN O
 * 	0000 
 * 19B8 NEW TAI LUE VOWEL SIGN OA
 * 	0000 
 * 19B9 NEW TAI LUE VOWEL SIGN UE
 * 	0000 
 * 19BA NEW TAI LUE VOWEL SIGN AY
 * 	0000 
 * 19BB NEW TAI LUE VOWEL SIGN AAY
 * 	0000 
 * 19BC NEW TAI LUE VOWEL SIGN UY
 * 	0000 
 * 19BD NEW TAI LUE VOWEL SIGN OY
 * 	0000 
 * 19BE NEW TAI LUE VOWEL SIGN OAY
 * 	0000 
 * 19BF NEW TAI LUE VOWEL SIGN UEY
 * 	0000 
 * 19C0 NEW TAI LUE VOWEL SIGN IY
 * 	0000 
 * 19C8 NEW TAI LUE TONE MARK-1
 * 	0000 
 * 19C9 NEW TAI LUE TONE MARK-2
 * 	0000 
 * 1A17 BUGINESE VOWEL SIGN I
 * 	0000 
 * 1A18 BUGINESE VOWEL SIGN U
 * 	0000 
 * 1A19 BUGINESE VOWEL SIGN E
 * 	0000 
 * 1A1A BUGINESE VOWEL SIGN O
 * 	0000 
 * 1A1B BUGINESE VOWEL SIGN AE
 * 	0000 
 * 1A55 TAI THAM CONSONANT SIGN MEDIAL RA
 * 	0000 
 * 1A56 TAI THAM CONSONANT SIGN MEDIAL LA
 * 	0000 
 * 1A57 TAI THAM CONSONANT SIGN LA TANG LAI
 * 	0000 
 * 1A58 TAI THAM SIGN MAI KANG LAI
 * 	0000 
 * 1A59 TAI THAM CONSONANT SIGN FINAL NGA
 * 	0000 
 * 1A5A TAI THAM CONSONANT SIGN LOW PA
 * 	0000 
 * 1A5B TAI THAM CONSONANT SIGN HIGH RATHA OR LOW PA
 * 	0000 
 * 1A5C TAI THAM CONSONANT SIGN MA
 * 	0000 
 * 1A5D TAI THAM CONSONANT SIGN BA
 * 	0000 
 * 1A5E TAI THAM CONSONANT SIGN SA
 * 	0000 
 * 1A60 TAI THAM SIGN SAKOT
 * 	0000 
 * 1A61 TAI THAM VOWEL SIGN A
 * 	0000 
 * 1A62 TAI THAM VOWEL SIGN MAI SAT
 * 	0000 
 * 1A63 TAI THAM VOWEL SIGN AA
 * 	0000 
 * 1A64 TAI THAM VOWEL SIGN TALL AA
 * 	0000 
 * 1A65 TAI THAM VOWEL SIGN I
 * 	0000 
 * 1A66 TAI THAM VOWEL SIGN II
 * 	0000 
 * 1A67 TAI THAM VOWEL SIGN UE
 * 	0000 
 * 1A68 TAI THAM VOWEL SIGN UUE
 * 	0000 
 * 1A69 TAI THAM VOWEL SIGN U
 * 	0000 
 * 1A6A TAI THAM VOWEL SIGN UU
 * 	0000 
 * 1A6B TAI THAM VOWEL SIGN O
 * 	0000 
 * 1A6C TAI THAM VOWEL SIGN OA BELOW
 * 	0000 
 * 1A6D TAI THAM VOWEL SIGN OY
 * 	0000 
 * 1A6E TAI THAM VOWEL SIGN E
 * 	0000 
 * 1A6F TAI THAM VOWEL SIGN AE
 * 	0000 
 * 1A70 TAI THAM VOWEL SIGN OO
 * 	0000 
 * 1A71 TAI THAM VOWEL SIGN AI
 * 	0000 
 * 1A72 TAI THAM VOWEL SIGN THAM AI
 * 	0000 
 * 1A73 TAI THAM VOWEL SIGN OA ABOVE
 * 	0000 
 * 1A74 TAI THAM SIGN MAI KANG
 * 	0000 
 * 1A75 TAI THAM SIGN TONE-1
 * 	0000 
 * 1A76 TAI THAM SIGN TONE-2
 * 	0000 
 * 1A77 TAI THAM SIGN KHUEN TONE-3
 * 	0000 
 * 1A78 TAI THAM SIGN KHUEN TONE-4
 * 	0000 
 * 1A79 TAI THAM SIGN KHUEN TONE-5
 * 	0000 
 * 1A7A TAI THAM SIGN RA HAAM
 * 	0000 
 * 1A7B TAI THAM SIGN MAI SAM
 * 	0000 
 * 1A7C TAI THAM SIGN KHUEN-LUE KARAN
 * 	0000 
 * 1A7F TAI THAM COMBINING CRYPTOGRAMMIC DOT
 * 	0000 
 * 1B00 BALINESE SIGN ULU RICEM
 * 	0000 
 * 1B01 BALINESE SIGN ULU CANDRA
 * 	0000 
 * 1B02 BALINESE SIGN CECEK
 * 	0000 
 * 1B03 BALINESE SIGN SURANG
 * 	0000 
 * 1B04 BALINESE SIGN BISAH
 * 	0000 
 * 1B06 BALINESE LETTER AKARA TEDUNG
 * 	1B05 BALINESE LETTER AKARA
 * 1B08 BALINESE LETTER IKARA TEDUNG
 * 	1B07 BALINESE LETTER IKARA
 * 1B0A BALINESE LETTER UKARA TEDUNG
 * 	1B09 BALINESE LETTER UKARA
 * 1B0C BALINESE LETTER RA REPA TEDUNG
 * 	1B0B BALINESE LETTER RA REPA
 * 1B0E BALINESE LETTER LA LENGA TEDUNG
 * 	1B0D BALINESE LETTER LA LENGA
 * 1B12 BALINESE LETTER OKARA TEDUNG
 * 	1B11 BALINESE LETTER OKARA
 * 1B34 BALINESE SIGN REREKAN
 * 	0000 
 * 1B35 BALINESE VOWEL SIGN TEDUNG
 * 	0000 
 * 1B36 BALINESE VOWEL SIGN ULU
 * 	0000 
 * 1B37 BALINESE VOWEL SIGN ULU SARI
 * 	0000 
 * 1B38 BALINESE VOWEL SIGN SUKU
 * 	0000 
 * 1B39 BALINESE VOWEL SIGN SUKU ILUT
 * 	0000 
 * 1B3A BALINESE VOWEL SIGN RA REPA
 * 	0000 
 * 1B3B BALINESE VOWEL SIGN RA REPA TEDUNG
 * 	0000 
 * 1B3C BALINESE VOWEL SIGN LA LENGA
 * 	0000 
 * 1B3D BALINESE VOWEL SIGN LA LENGA TEDUNG
 * 	0000 
 * 1B3E BALINESE VOWEL SIGN TALING
 * 	0000 
 * 1B3F BALINESE VOWEL SIGN TALING REPA
 * 	0000 
 * 1B40 BALINESE VOWEL SIGN TALING TEDUNG
 * 	0000 
 * 1B41 BALINESE VOWEL SIGN TALING REPA TEDUNG
 * 	0000 
 * 1B42 BALINESE VOWEL SIGN PEPET
 * 	0000 
 * 1B43 BALINESE VOWEL SIGN PEPET TEDUNG
 * 	0000 
 * 1B44 BALINESE ADEG ADEG
 * 	0000 
 * 1B6B BALINESE MUSICAL SYMBOL COMBINING TEGEH
 * 	0000 
 * 1B6C BALINESE MUSICAL SYMBOL COMBINING ENDEP
 * 	0000 
 * 1B6D BALINESE MUSICAL SYMBOL COMBINING KEMPUL
 * 	0000 
 * 1B6E BALINESE MUSICAL SYMBOL COMBINING KEMPLI
 * 	0000 
 * 1B6F BALINESE MUSICAL SYMBOL COMBINING JEGOGAN
 * 	0000 
 * 1B70 BALINESE MUSICAL SYMBOL COMBINING KEMPUL WITH JEGOGAN
 * 	0000 
 * 1B71 BALINESE MUSICAL SYMBOL COMBINING KEMPLI WITH JEGOGAN
 * 	0000 
 * 1B72 BALINESE MUSICAL SYMBOL COMBINING BENDE
 * 	0000 
 * 1B73 BALINESE MUSICAL SYMBOL COMBINING GONG
 * 	0000 
 * 1B80 SUNDANESE SIGN PANYECEK
 * 	0000 
 * 1B81 SUNDANESE SIGN PANGLAYAR
 * 	0000 
 * 1B82 SUNDANESE SIGN PANGWISAD
 * 	0000 
 * 1BA1 SUNDANESE CONSONANT SIGN PAMINGKAL
 * 	0000 
 * 1BA2 SUNDANESE CONSONANT SIGN PANYAKRA
 * 	0000 
 * 1BA3 SUNDANESE CONSONANT SIGN PANYIKU
 * 	0000 
 * 1BA4 SUNDANESE VOWEL SIGN PANGHULU
 * 	0000 
 * 1BA5 SUNDANESE VOWEL SIGN PANYUKU
 * 	0000 
 * 1BA6 SUNDANESE VOWEL SIGN PANAELAENG
 * 	0000 
 * 1BA7 SUNDANESE VOWEL SIGN PANOLONG
 * 	0000 
 * 1BA8 SUNDANESE VOWEL SIGN PAMEPET
 * 	0000 
 * 1BA9 SUNDANESE VOWEL SIGN PANEULEUNG
 * 	0000 
 * 1BAA SUNDANESE SIGN PAMAAEH
 * 	0000 
 * 1BAB SUNDANESE SIGN VIRAMA
 * 	0000 
 * 1BAC SUNDANESE CONSONANT SIGN PASANGAN MA
 * 	0000 
 * 1BAD SUNDANESE CONSONANT SIGN PASANGAN WA
 * 	0000 
 * 1BE6 BATAK SIGN TOMPI
 * 	0000 
 * 1BE7 BATAK VOWEL SIGN E
 * 	0000 
 * 1BE8 BATAK VOWEL SIGN PAKPAK E
 * 	0000 
 * 1BE9 BATAK VOWEL SIGN EE
 * 	0000 
 * 1BEA BATAK VOWEL SIGN I
 * 	0000 
 * 1BEB BATAK VOWEL SIGN KARO I
 * 	0000 
 * 1BEC BATAK VOWEL SIGN O
 * 	0000 
 * 1BED BATAK VOWEL SIGN KARO O
 * 	0000 
 * 1BEE BATAK VOWEL SIGN U
 * 	0000 
 * 1BEF BATAK VOWEL SIGN U FOR SIMALUNGUN SA
 * 	0000 
 * 1BF0 BATAK CONSONANT SIGN NG
 * 	0000 
 * 1BF1 BATAK CONSONANT SIGN H
 * 	0000 
 * 1BF2 BATAK PANGOLAT
 * 	0000 
 * 1BF3 BATAK PANONGONAN
 * 	0000 
 * 1C24 LEPCHA SUBJOINED LETTER YA
 * 	0000 
 * 1C25 LEPCHA SUBJOINED LETTER RA
 * 	0000 
 * 1C26 LEPCHA VOWEL SIGN AA
 * 	0000 
 * 1C27 LEPCHA VOWEL SIGN I
 * 	0000 
 * 1C28 LEPCHA VOWEL SIGN O
 * 	0000 
 * 1C29 LEPCHA VOWEL SIGN OO
 * 	0000 
 * 1C2A LEPCHA VOWEL SIGN U
 * 	0000 
 * 1C2B LEPCHA VOWEL SIGN UU
 * 	0000 
 * 1C2C LEPCHA VOWEL SIGN E
 * 	0000 
 * 1C2D LEPCHA CONSONANT SIGN K
 * 	0000 
 * 1C2E LEPCHA CONSONANT SIGN M
 * 	0000 
 * 1C2F LEPCHA CONSONANT SIGN L
 * 	0000 
 * 1C30 LEPCHA CONSONANT SIGN N
 * 	0000 
 * 1C31 LEPCHA CONSONANT SIGN P
 * 	0000 
 * 1C32 LEPCHA CONSONANT SIGN R
 * 	0000 
 * 1C33 LEPCHA CONSONANT SIGN T
 * 	0000 
 * 1C34 LEPCHA CONSONANT SIGN NYIN-DO
 * 	0000 
 * 1C35 LEPCHA CONSONANT SIGN KANG
 * 	0000 
 * 1C36 LEPCHA SIGN RAN
 * 	0000 
 * 1C37 LEPCHA SIGN NUKTA
 * 	0000 
 * 1CD0 VEDIC TONE KARSHANA
 * 	0000 
 * 1CD1 VEDIC TONE SHARA
 * 	0000 
 * 1CD2 VEDIC TONE PRENKHA
 * 	0000 
 * 1CD4 VEDIC SIGN YAJURVEDIC MIDLINE SVARITA
 * 	0000 
 * 1CD5 VEDIC TONE YAJURVEDIC AGGRAVATED INDEPENDENT SVARITA
 * 	0000 
 * 1CD6 VEDIC TONE YAJURVEDIC INDEPENDENT SVARITA
 * 	0000 
 * 1CD7 VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA
 * 	0000 
 * 1CD8 VEDIC TONE CANDRA BELOW
 * 	0000 
 * 1CD9 VEDIC TONE YAJURVEDIC KATHAKA INDEPENDENT SVARITA SCHROEDER
 * 	0000 
 * 1CDA VEDIC TONE DOUBLE SVARITA
 * 	0000 
 * 1CDB VEDIC TONE TRIPLE SVARITA
 * 	0000 
 * 1CDC VEDIC TONE KATHAKA ANUDATTA
 * 	0000 
 * 1CDD VEDIC TONE DOT BELOW
 * 	0000 
 * 1CDE VEDIC TONE TWO DOTS BELOW
 * 	0000 
 * 1CDF VEDIC TONE THREE DOTS BELOW
 * 	0000 
 * 1CE0 VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA
 * 	0000 
 * 1CE1 VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA
 * 	0000 
 * 1CE2 VEDIC SIGN VISARGA SVARITA
 * 	0000 
 * 1CE3 VEDIC SIGN VISARGA UDATTA
 * 	0000 
 * 1CE4 VEDIC SIGN REVERSED VISARGA UDATTA
 * 	0000 
 * 1CE5 VEDIC SIGN VISARGA ANUDATTA
 * 	0000 
 * 1CE6 VEDIC SIGN REVERSED VISARGA ANUDATTA
 * 	0000 
 * 1CE7 VEDIC SIGN VISARGA UDATTA WITH TAIL
 * 	0000 
 * 1CE8 VEDIC SIGN VISARGA ANUDATTA WITH TAIL
 * 	0000 
 * 1CED VEDIC SIGN TIRYAK
 * 	0000 
 * 1CF2 VEDIC SIGN ARDHAVISARGA
 * 	0000 
 * 1CF3 VEDIC SIGN ROTATED ARDHAVISARGA
 * 	0000 
 * 1CF4 VEDIC TONE CANDRA ABOVE
 * 	0000 
 * 1D2C MODIFIER LETTER CAPITAL A
 * 	0041 LATIN CAPITAL LETTER A
 * 1D2D MODIFIER LETTER CAPITAL AE
 * 	00C6 LATIN CAPITAL LETTER AE
 * 1D2E MODIFIER LETTER CAPITAL B
 * 	0042 LATIN CAPITAL LETTER B
 * 1D30 MODIFIER LETTER CAPITAL D
 * 	0044 LATIN CAPITAL LETTER D
 * 1D31 MODIFIER LETTER CAPITAL E
 * 	0045 LATIN CAPITAL LETTER E
 * 1D32 MODIFIER LETTER CAPITAL REVERSED E
 * 	018E LATIN CAPITAL LETTER REVERSED E
 * 1D33 MODIFIER LETTER CAPITAL G
 * 	0047 LATIN CAPITAL LETTER G
 * 1D34 MODIFIER LETTER CAPITAL H
 * 	0048 LATIN CAPITAL LETTER H
 * 1D35 MODIFIER LETTER CAPITAL I
 * 	0049 LATIN CAPITAL LETTER I
 * 1D36 MODIFIER LETTER CAPITAL J
 * 	004A LATIN CAPITAL LETTER J
 * 1D37 MODIFIER LETTER CAPITAL K
 * 	004B LATIN CAPITAL LETTER K
 * 1D38 MODIFIER LETTER CAPITAL L
 * 	004C LATIN CAPITAL LETTER L
 * 1D39 MODIFIER LETTER CAPITAL M
 * 	004D LATIN CAPITAL LETTER M
 * 1D3A MODIFIER LETTER CAPITAL N
 * 	004E LATIN CAPITAL LETTER N
 * 1D3C MODIFIER LETTER CAPITAL O
 * 	004F LATIN CAPITAL LETTER O
 * 1D3D MODIFIER LETTER CAPITAL OU
 * 	0222 LATIN CAPITAL LETTER OU
 * 1D3E MODIFIER LETTER CAPITAL P
 * 	0050 LATIN CAPITAL LETTER P
 * 1D3F MODIFIER LETTER CAPITAL R
 * 	0052 LATIN CAPITAL LETTER R
 * 1D40 MODIFIER LETTER CAPITAL T
 * 	0054 LATIN CAPITAL LETTER T
 * 1D41 MODIFIER LETTER CAPITAL U
 * 	0055 LATIN CAPITAL LETTER U
 * 1D42 MODIFIER LETTER CAPITAL W
 * 	0057 LATIN CAPITAL LETTER W
 * 1D43 MODIFIER LETTER SMALL A
 * 	0061 LATIN SMALL LETTER A
 * 1D44 MODIFIER LETTER SMALL TURNED A
 * 	0250 LATIN SMALL LETTER TURNED A
 * 1D45 MODIFIER LETTER SMALL ALPHA
 * 	0251 LATIN SMALL LETTER ALPHA
 * 1D46 MODIFIER LETTER SMALL TURNED AE
 * 	1D02 LATIN SMALL LETTER TURNED AE
 * 1D47 MODIFIER LETTER SMALL B
 * 	0062 LATIN SMALL LETTER B
 * 1D48 MODIFIER LETTER SMALL D
 * 	0064 LATIN SMALL LETTER D
 * 1D49 MODIFIER LETTER SMALL E
 * 	0065 LATIN SMALL LETTER E
 * 1D4A MODIFIER LETTER SMALL SCHWA
 * 	0259 LATIN SMALL LETTER SCHWA
 * 1D4B MODIFIER LETTER SMALL OPEN E
 * 	025B LATIN SMALL LETTER OPEN E
 * 1D4C MODIFIER LETTER SMALL TURNED OPEN E
 * 	025C LATIN SMALL LETTER REVERSED OPEN E
 * 1D4D MODIFIER LETTER SMALL G
 * 	0067 LATIN SMALL LETTER G
 * 1D4F MODIFIER LETTER SMALL K
 * 	006B LATIN SMALL LETTER K
 * 1D50 MODIFIER LETTER SMALL M
 * 	006D LATIN SMALL LETTER M
 * 1D51 MODIFIER LETTER SMALL ENG
 * 	014B LATIN SMALL LETTER ENG
 * 1D52 MODIFIER LETTER SMALL O
 * 	006F LATIN SMALL LETTER O
 * 1D53 MODIFIER LETTER SMALL OPEN O
 * 	0254 LATIN SMALL LETTER OPEN O
 * 1D54 MODIFIER LETTER SMALL TOP HALF O
 * 	1D16 LATIN SMALL LETTER TOP HALF O
 * 1D55 MODIFIER LETTER SMALL BOTTOM HALF O
 * 	1D17 LATIN SMALL LETTER BOTTOM HALF O
 * 1D56 MODIFIER LETTER SMALL P
 * 	0070 LATIN SMALL LETTER P
 * 1D57 MODIFIER LETTER SMALL T
 * 	0074 LATIN SMALL LETTER T
 * 1D58 MODIFIER LETTER SMALL U
 * 	0075 LATIN SMALL LETTER U
 * 1D59 MODIFIER LETTER SMALL SIDEWAYS U
 * 	1D1D LATIN SMALL LETTER SIDEWAYS U
 * 1D5A MODIFIER LETTER SMALL TURNED M
 * 	026F LATIN SMALL LETTER TURNED M
 * 1D5B MODIFIER LETTER SMALL V
 * 	0076 LATIN SMALL LETTER V
 * 1D5C MODIFIER LETTER SMALL AIN
 * 	1D25 LATIN LETTER AIN
 * 1D5D MODIFIER LETTER SMALL BETA
 * 	03B2 GREEK SMALL LETTER BETA
 * 1D5E MODIFIER LETTER SMALL GREEK GAMMA
 * 	03B3 GREEK SMALL LETTER GAMMA
 * 1D5F MODIFIER LETTER SMALL DELTA
 * 	03B4 GREEK SMALL LETTER DELTA
 * 1D60 MODIFIER LETTER SMALL GREEK PHI
 * 	03C6 GREEK SMALL LETTER PHI
 * 1D61 MODIFIER LETTER SMALL CHI
 * 	03C7 GREEK SMALL LETTER CHI
 * 1D62 LATIN SUBSCRIPT SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 1D63 LATIN SUBSCRIPT SMALL LETTER R
 * 	0072 LATIN SMALL LETTER R
 * 1D64 LATIN SUBSCRIPT SMALL LETTER U
 * 	0075 LATIN SMALL LETTER U
 * 1D65 LATIN SUBSCRIPT SMALL LETTER V
 * 	0076 LATIN SMALL LETTER V
 * 1D66 GREEK SUBSCRIPT SMALL LETTER BETA
 * 	03B2 GREEK SMALL LETTER BETA
 * 1D67 GREEK SUBSCRIPT SMALL LETTER GAMMA
 * 	03B3 GREEK SMALL LETTER GAMMA
 * 1D68 GREEK SUBSCRIPT SMALL LETTER RHO
 * 	03C1 GREEK SMALL LETTER RHO
 * 1D69 GREEK SUBSCRIPT SMALL LETTER PHI
 * 	03C6 GREEK SMALL LETTER PHI
 * 1D6A GREEK SUBSCRIPT SMALL LETTER CHI
 * 	03C7 GREEK SMALL LETTER CHI
 * 1D78 MODIFIER LETTER CYRILLIC EN
 * 	043D CYRILLIC SMALL LETTER EN
 * 1D9B MODIFIER LETTER SMALL TURNED ALPHA
 * 	0252 LATIN SMALL LETTER TURNED ALPHA
 * 1D9C MODIFIER LETTER SMALL C
 * 	0063 LATIN SMALL LETTER C
 * 1D9D MODIFIER LETTER SMALL C WITH CURL
 * 	0255 LATIN SMALL LETTER C WITH CURL
 * 1D9E MODIFIER LETTER SMALL ETH
 * 	00F0 LATIN SMALL LETTER ETH
 * 1D9F MODIFIER LETTER SMALL REVERSED OPEN E
 * 	025C LATIN SMALL LETTER REVERSED OPEN E
 * 1DA0 MODIFIER LETTER SMALL F
 * 	0066 LATIN SMALL LETTER F
 * 1DA1 MODIFIER LETTER SMALL DOTLESS J WITH STROKE
 * 	025F LATIN SMALL LETTER DOTLESS J WITH STROKE
 * 1DA2 MODIFIER LETTER SMALL SCRIPT G
 * 	0261 LATIN SMALL LETTER SCRIPT G
 * 1DA3 MODIFIER LETTER SMALL TURNED H
 * 	0265 LATIN SMALL LETTER TURNED H
 * 1DA4 MODIFIER LETTER SMALL I WITH STROKE
 * 	0268 LATIN SMALL LETTER I WITH STROKE
 * 1DA5 MODIFIER LETTER SMALL IOTA
 * 	0269 LATIN SMALL LETTER IOTA
 * 1DA6 MODIFIER LETTER SMALL CAPITAL I
 * 	026A LATIN LETTER SMALL CAPITAL I
 * 1DA7 MODIFIER LETTER SMALL CAPITAL I WITH STROKE
 * 	1D7B LATIN SMALL CAPITAL LETTER I WITH STROKE
 * 1DA8 MODIFIER LETTER SMALL J WITH CROSSED-TAIL
 * 	029D LATIN SMALL LETTER J WITH CROSSED-TAIL
 * 1DA9 MODIFIER LETTER SMALL L WITH RETROFLEX HOOK
 * 	026D LATIN SMALL LETTER L WITH RETROFLEX HOOK
 * 1DAA MODIFIER LETTER SMALL L WITH PALATAL HOOK
 * 	1D85 LATIN SMALL LETTER L WITH PALATAL HOOK
 * 1DAB MODIFIER LETTER SMALL CAPITAL L
 * 	029F LATIN LETTER SMALL CAPITAL L
 * 1DAC MODIFIER LETTER SMALL M WITH HOOK
 * 	0271 LATIN SMALL LETTER M WITH HOOK
 * 1DAD MODIFIER LETTER SMALL TURNED M WITH LONG LEG
 * 	0270 LATIN SMALL LETTER TURNED M WITH LONG LEG
 * 1DAE MODIFIER LETTER SMALL N WITH LEFT HOOK
 * 	0272 LATIN SMALL LETTER N WITH LEFT HOOK
 * 1DAF MODIFIER LETTER SMALL N WITH RETROFLEX HOOK
 * 	0273 LATIN SMALL LETTER N WITH RETROFLEX HOOK
 * 1DB0 MODIFIER LETTER SMALL CAPITAL N
 * 	0274 LATIN LETTER SMALL CAPITAL N
 * 1DB1 MODIFIER LETTER SMALL BARRED O
 * 	0275 LATIN SMALL LETTER BARRED O
 * 1DB2 MODIFIER LETTER SMALL PHI
 * 	0278 LATIN SMALL LETTER PHI
 * 1DB3 MODIFIER LETTER SMALL S WITH HOOK
 * 	0282 LATIN SMALL LETTER S WITH HOOK
 * 1DB4 MODIFIER LETTER SMALL ESH
 * 	0283 LATIN SMALL LETTER ESH
 * 1DB5 MODIFIER LETTER SMALL T WITH PALATAL HOOK
 * 	01AB LATIN SMALL LETTER T WITH PALATAL HOOK
 * 1DB6 MODIFIER LETTER SMALL U BAR
 * 	0289 LATIN SMALL LETTER U BAR
 * 1DB7 MODIFIER LETTER SMALL UPSILON
 * 	028A LATIN SMALL LETTER UPSILON
 * 1DB8 MODIFIER LETTER SMALL CAPITAL U
 * 	1D1C LATIN LETTER SMALL CAPITAL U
 * 1DB9 MODIFIER LETTER SMALL V WITH HOOK
 * 	028B LATIN SMALL LETTER V WITH HOOK
 * 1DBA MODIFIER LETTER SMALL TURNED V
 * 	028C LATIN SMALL LETTER TURNED V
 * 1DBB MODIFIER LETTER SMALL Z
 * 	007A LATIN SMALL LETTER Z
 * 1DBC MODIFIER LETTER SMALL Z WITH RETROFLEX HOOK
 * 	0290 LATIN SMALL LETTER Z WITH RETROFLEX HOOK
 * 1DBD MODIFIER LETTER SMALL Z WITH CURL
 * 	0291 LATIN SMALL LETTER Z WITH CURL
 * 1DBE MODIFIER LETTER SMALL EZH
 * 	0292 LATIN SMALL LETTER EZH
 * 1DBF MODIFIER LETTER SMALL THETA
 * 	03B8 GREEK SMALL LETTER THETA
 * 1DC0 COMBINING DOTTED GRAVE ACCENT
 * 	0000 
 * 1DC1 COMBINING DOTTED ACUTE ACCENT
 * 	0000 
 * 1DC2 COMBINING SNAKE BELOW
 * 	0000 
 * 1DC3 COMBINING SUSPENSION MARK
 * 	0000 
 * 1DC4 COMBINING MACRON-ACUTE
 * 	0000 
 * 1DC5 COMBINING GRAVE-MACRON
 * 	0000 
 * 1DC6 COMBINING MACRON-GRAVE
 * 	0000 
 * 1DC7 COMBINING ACUTE-MACRON
 * 	0000 
 * 1DC8 COMBINING GRAVE-ACUTE-GRAVE
 * 	0000 
 * 1DC9 COMBINING ACUTE-GRAVE-ACUTE
 * 	0000 
 * 1DCA COMBINING LATIN SMALL LETTER R BELOW
 * 	0000 
 * 1DCB COMBINING BREVE-MACRON
 * 	0000 
 * 1DCC COMBINING MACRON-BREVE
 * 	0000 
 * 1DCD COMBINING DOUBLE CIRCUMFLEX ABOVE
 * 	0000 
 * 1DCE COMBINING OGONEK ABOVE
 * 	0000 
 * 1DCF COMBINING ZIGZAG BELOW
 * 	0000 
 * 1DD0 COMBINING IS BELOW
 * 	0000 
 * 1DD1 COMBINING UR ABOVE
 * 	0000 
 * 1DD2 COMBINING US ABOVE
 * 	0000 
 * 1DD3 COMBINING LATIN SMALL LETTER FLATTENED OPEN A ABOVE
 * 	0000 
 * 1DD4 COMBINING LATIN SMALL LETTER AE
 * 	0000 
 * 1DD5 COMBINING LATIN SMALL LETTER AO
 * 	0000 
 * 1DD6 COMBINING LATIN SMALL LETTER AV
 * 	0000 
 * 1DD7 COMBINING LATIN SMALL LETTER C CEDILLA
 * 	0000 
 * 1DD8 COMBINING LATIN SMALL LETTER INSULAR D
 * 	0000 
 * 1DD9 COMBINING LATIN SMALL LETTER ETH
 * 	0000 
 * 1DDA COMBINING LATIN SMALL LETTER G
 * 	0000 
 * 1DDB COMBINING LATIN LETTER SMALL CAPITAL G
 * 	0000 
 * 1DDC COMBINING LATIN SMALL LETTER K
 * 	0000 
 * 1DDD COMBINING LATIN SMALL LETTER L
 * 	0000 
 * 1DDE COMBINING LATIN LETTER SMALL CAPITAL L
 * 	0000 
 * 1DDF COMBINING LATIN LETTER SMALL CAPITAL M
 * 	0000 
 * 1DE0 COMBINING LATIN SMALL LETTER N
 * 	0000 
 * 1DE1 COMBINING LATIN LETTER SMALL CAPITAL N
 * 	0000 
 * 1DE2 COMBINING LATIN LETTER SMALL CAPITAL R
 * 	0000 
 * 1DE3 COMBINING LATIN SMALL LETTER R ROTUNDA
 * 	0000 
 * 1DE4 COMBINING LATIN SMALL LETTER S
 * 	0000 
 * 1DE5 COMBINING LATIN SMALL LETTER LONG S
 * 	0000 
 * 1DE6 COMBINING LATIN SMALL LETTER Z
 * 	0000 
 * 1DFC COMBINING DOUBLE INVERTED BREVE BELOW
 * 	0000 
 * 1DFD COMBINING ALMOST EQUAL TO BELOW
 * 	0000 
 * 1DFE COMBINING LEFT ARROWHEAD ABOVE
 * 	0000 
 * 1DFF COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW
 * 	0000 
 * 1E00 LATIN CAPITAL LETTER A WITH RING BELOW
 * 	0041 LATIN CAPITAL LETTER A
 * 1E01 LATIN SMALL LETTER A WITH RING BELOW
 * 	0061 LATIN SMALL LETTER A
 * 1E02 LATIN CAPITAL LETTER B WITH DOT ABOVE
 * 	0042 LATIN CAPITAL LETTER B
 * 1E03 LATIN SMALL LETTER B WITH DOT ABOVE
 * 	0062 LATIN SMALL LETTER B
 * 1E04 LATIN CAPITAL LETTER B WITH DOT BELOW
 * 	0042 LATIN CAPITAL LETTER B
 * 1E05 LATIN SMALL LETTER B WITH DOT BELOW
 * 	0062 LATIN SMALL LETTER B
 * 1E06 LATIN CAPITAL LETTER B WITH LINE BELOW
 * 	0042 LATIN CAPITAL LETTER B
 * 1E07 LATIN SMALL LETTER B WITH LINE BELOW
 * 	0062 LATIN SMALL LETTER B
 * 1E08 LATIN CAPITAL LETTER C WITH CEDILLA AND ACUTE
 * 	0043 LATIN CAPITAL LETTER C
 * 1E09 LATIN SMALL LETTER C WITH CEDILLA AND ACUTE
 * 	0063 LATIN SMALL LETTER C
 * 1E0A LATIN CAPITAL LETTER D WITH DOT ABOVE
 * 	0044 LATIN CAPITAL LETTER D
 * 1E0B LATIN SMALL LETTER D WITH DOT ABOVE
 * 	0064 LATIN SMALL LETTER D
 * 1E0C LATIN CAPITAL LETTER D WITH DOT BELOW
 * 	0044 LATIN CAPITAL LETTER D
 * 1E0D LATIN SMALL LETTER D WITH DOT BELOW
 * 	0064 LATIN SMALL LETTER D
 * 1E0E LATIN CAPITAL LETTER D WITH LINE BELOW
 * 	0044 LATIN CAPITAL LETTER D
 * 1E0F LATIN SMALL LETTER D WITH LINE BELOW
 * 	0064 LATIN SMALL LETTER D
 * 1E10 LATIN CAPITAL LETTER D WITH CEDILLA
 * 	0044 LATIN CAPITAL LETTER D
 * 1E11 LATIN SMALL LETTER D WITH CEDILLA
 * 	0064 LATIN SMALL LETTER D
 * 1E12 LATIN CAPITAL LETTER D WITH CIRCUMFLEX BELOW
 * 	0044 LATIN CAPITAL LETTER D
 * 1E13 LATIN SMALL LETTER D WITH CIRCUMFLEX BELOW
 * 	0064 LATIN SMALL LETTER D
 * 1E14 LATIN CAPITAL LETTER E WITH MACRON AND GRAVE
 * 	0045 LATIN CAPITAL LETTER E
 * 1E15 LATIN SMALL LETTER E WITH MACRON AND GRAVE
 * 	0065 LATIN SMALL LETTER E
 * 1E16 LATIN CAPITAL LETTER E WITH MACRON AND ACUTE
 * 	0045 LATIN CAPITAL LETTER E
 * 1E17 LATIN SMALL LETTER E WITH MACRON AND ACUTE
 * 	0065 LATIN SMALL LETTER E
 * 1E18 LATIN CAPITAL LETTER E WITH CIRCUMFLEX BELOW
 * 	0045 LATIN CAPITAL LETTER E
 * 1E19 LATIN SMALL LETTER E WITH CIRCUMFLEX BELOW
 * 	0065 LATIN SMALL LETTER E
 * 1E1A LATIN CAPITAL LETTER E WITH TILDE BELOW
 * 	0045 LATIN CAPITAL LETTER E
 * 1E1B LATIN SMALL LETTER E WITH TILDE BELOW
 * 	0065 LATIN SMALL LETTER E
 * 1E1C LATIN CAPITAL LETTER E WITH CEDILLA AND BREVE
 * 	0045 LATIN CAPITAL LETTER E
 * 1E1D LATIN SMALL LETTER E WITH CEDILLA AND BREVE
 * 	0065 LATIN SMALL LETTER E
 * 1E1E LATIN CAPITAL LETTER F WITH DOT ABOVE
 * 	0046 LATIN CAPITAL LETTER F
 * 1E1F LATIN SMALL LETTER F WITH DOT ABOVE
 * 	0066 LATIN SMALL LETTER F
 * 1E20 LATIN CAPITAL LETTER G WITH MACRON
 * 	0047 LATIN CAPITAL LETTER G
 * 1E21 LATIN SMALL LETTER G WITH MACRON
 * 	0067 LATIN SMALL LETTER G
 * 1E22 LATIN CAPITAL LETTER H WITH DOT ABOVE
 * 	0048 LATIN CAPITAL LETTER H
 * 1E23 LATIN SMALL LETTER H WITH DOT ABOVE
 * 	0068 LATIN SMALL LETTER H
 * 1E24 LATIN CAPITAL LETTER H WITH DOT BELOW
 * 	0048 LATIN CAPITAL LETTER H
 * 1E25 LATIN SMALL LETTER H WITH DOT BELOW
 * 	0068 LATIN SMALL LETTER H
 * 1E26 LATIN CAPITAL LETTER H WITH DIAERESIS
 * 	0048 LATIN CAPITAL LETTER H
 * 1E27 LATIN SMALL LETTER H WITH DIAERESIS
 * 	0068 LATIN SMALL LETTER H
 * 1E28 LATIN CAPITAL LETTER H WITH CEDILLA
 * 	0048 LATIN CAPITAL LETTER H
 * 1E29 LATIN SMALL LETTER H WITH CEDILLA
 * 	0068 LATIN SMALL LETTER H
 * 1E2A LATIN CAPITAL LETTER H WITH BREVE BELOW
 * 	0048 LATIN CAPITAL LETTER H
 * 1E2B LATIN SMALL LETTER H WITH BREVE BELOW
 * 	0068 LATIN SMALL LETTER H
 * 1E2C LATIN CAPITAL LETTER I WITH TILDE BELOW
 * 	0049 LATIN CAPITAL LETTER I
 * 1E2D LATIN SMALL LETTER I WITH TILDE BELOW
 * 	0069 LATIN SMALL LETTER I
 * 1E2E LATIN CAPITAL LETTER I WITH DIAERESIS AND ACUTE
 * 	0049 LATIN CAPITAL LETTER I
 * 1E2F LATIN SMALL LETTER I WITH DIAERESIS AND ACUTE
 * 	0069 LATIN SMALL LETTER I
 * 1E30 LATIN CAPITAL LETTER K WITH ACUTE
 * 	004B LATIN CAPITAL LETTER K
 * 1E31 LATIN SMALL LETTER K WITH ACUTE
 * 	006B LATIN SMALL LETTER K
 * 1E32 LATIN CAPITAL LETTER K WITH DOT BELOW
 * 	004B LATIN CAPITAL LETTER K
 * 1E33 LATIN SMALL LETTER K WITH DOT BELOW
 * 	006B LATIN SMALL LETTER K
 * 1E34 LATIN CAPITAL LETTER K WITH LINE BELOW
 * 	004B LATIN CAPITAL LETTER K
 * 1E35 LATIN SMALL LETTER K WITH LINE BELOW
 * 	006B LATIN SMALL LETTER K
 * 1E36 LATIN CAPITAL LETTER L WITH DOT BELOW
 * 	004C LATIN CAPITAL LETTER L
 * 1E37 LATIN SMALL LETTER L WITH DOT BELOW
 * 	006C LATIN SMALL LETTER L
 * 1E38 LATIN CAPITAL LETTER L WITH DOT BELOW AND MACRON
 * 	004C LATIN CAPITAL LETTER L
 * 1E39 LATIN SMALL LETTER L WITH DOT BELOW AND MACRON
 * 	006C LATIN SMALL LETTER L
 * 1E3A LATIN CAPITAL LETTER L WITH LINE BELOW
 * 	004C LATIN CAPITAL LETTER L
 * 1E3B LATIN SMALL LETTER L WITH LINE BELOW
 * 	006C LATIN SMALL LETTER L
 * 1E3C LATIN CAPITAL LETTER L WITH CIRCUMFLEX BELOW
 * 	004C LATIN CAPITAL LETTER L
 * 1E3D LATIN SMALL LETTER L WITH CIRCUMFLEX BELOW
 * 	006C LATIN SMALL LETTER L
 * 1E3E LATIN CAPITAL LETTER M WITH ACUTE
 * 	004D LATIN CAPITAL LETTER M
 * 1E3F LATIN SMALL LETTER M WITH ACUTE
 * 	006D LATIN SMALL LETTER M
 * 1E40 LATIN CAPITAL LETTER M WITH DOT ABOVE
 * 	004D LATIN CAPITAL LETTER M
 * 1E41 LATIN SMALL LETTER M WITH DOT ABOVE
 * 	006D LATIN SMALL LETTER M
 * 1E42 LATIN CAPITAL LETTER M WITH DOT BELOW
 * 	004D LATIN CAPITAL LETTER M
 * 1E43 LATIN SMALL LETTER M WITH DOT BELOW
 * 	006D LATIN SMALL LETTER M
 * 1E44 LATIN CAPITAL LETTER N WITH DOT ABOVE
 * 	004E LATIN CAPITAL LETTER N
 * 1E45 LATIN SMALL LETTER N WITH DOT ABOVE
 * 	006E LATIN SMALL LETTER N
 * 1E46 LATIN CAPITAL LETTER N WITH DOT BELOW
 * 	004E LATIN CAPITAL LETTER N
 * 1E47 LATIN SMALL LETTER N WITH DOT BELOW
 * 	006E LATIN SMALL LETTER N
 * 1E48 LATIN CAPITAL LETTER N WITH LINE BELOW
 * 	004E LATIN CAPITAL LETTER N
 * 1E49 LATIN SMALL LETTER N WITH LINE BELOW
 * 	006E LATIN SMALL LETTER N
 * 1E4A LATIN CAPITAL LETTER N WITH CIRCUMFLEX BELOW
 * 	004E LATIN CAPITAL LETTER N
 * 1E4B LATIN SMALL LETTER N WITH CIRCUMFLEX BELOW
 * 	006E LATIN SMALL LETTER N
 * 1E4C LATIN CAPITAL LETTER O WITH TILDE AND ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 1E4D LATIN SMALL LETTER O WITH TILDE AND ACUTE
 * 	006F LATIN SMALL LETTER O
 * 1E4E LATIN CAPITAL LETTER O WITH TILDE AND DIAERESIS
 * 	004F LATIN CAPITAL LETTER O
 * 1E4F LATIN SMALL LETTER O WITH TILDE AND DIAERESIS
 * 	006F LATIN SMALL LETTER O
 * 1E50 LATIN CAPITAL LETTER O WITH MACRON AND GRAVE
 * 	004F LATIN CAPITAL LETTER O
 * 1E51 LATIN SMALL LETTER O WITH MACRON AND GRAVE
 * 	006F LATIN SMALL LETTER O
 * 1E52 LATIN CAPITAL LETTER O WITH MACRON AND ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 1E53 LATIN SMALL LETTER O WITH MACRON AND ACUTE
 * 	006F LATIN SMALL LETTER O
 * 1E54 LATIN CAPITAL LETTER P WITH ACUTE
 * 	0050 LATIN CAPITAL LETTER P
 * 1E55 LATIN SMALL LETTER P WITH ACUTE
 * 	0070 LATIN SMALL LETTER P
 * 1E56 LATIN CAPITAL LETTER P WITH DOT ABOVE
 * 	0050 LATIN CAPITAL LETTER P
 * 1E57 LATIN SMALL LETTER P WITH DOT ABOVE
 * 	0070 LATIN SMALL LETTER P
 * 1E58 LATIN CAPITAL LETTER R WITH DOT ABOVE
 * 	0052 LATIN CAPITAL LETTER R
 * 1E59 LATIN SMALL LETTER R WITH DOT ABOVE
 * 	0072 LATIN SMALL LETTER R
 * 1E5A LATIN CAPITAL LETTER R WITH DOT BELOW
 * 	0052 LATIN CAPITAL LETTER R
 * 1E5B LATIN SMALL LETTER R WITH DOT BELOW
 * 	0072 LATIN SMALL LETTER R
 * 1E5C LATIN CAPITAL LETTER R WITH DOT BELOW AND MACRON
 * 	0052 LATIN CAPITAL LETTER R
 * 1E5D LATIN SMALL LETTER R WITH DOT BELOW AND MACRON
 * 	0072 LATIN SMALL LETTER R
 * 1E5E LATIN CAPITAL LETTER R WITH LINE BELOW
 * 	0052 LATIN CAPITAL LETTER R
 * 1E5F LATIN SMALL LETTER R WITH LINE BELOW
 * 	0072 LATIN SMALL LETTER R
 * 1E60 LATIN CAPITAL LETTER S WITH DOT ABOVE
 * 	0053 LATIN CAPITAL LETTER S
 * 1E61 LATIN SMALL LETTER S WITH DOT ABOVE
 * 	0073 LATIN SMALL LETTER S
 * 1E62 LATIN CAPITAL LETTER S WITH DOT BELOW
 * 	0053 LATIN CAPITAL LETTER S
 * 1E63 LATIN SMALL LETTER S WITH DOT BELOW
 * 	0073 LATIN SMALL LETTER S
 * 1E64 LATIN CAPITAL LETTER S WITH ACUTE AND DOT ABOVE
 * 	0053 LATIN CAPITAL LETTER S
 * 1E65 LATIN SMALL LETTER S WITH ACUTE AND DOT ABOVE
 * 	0073 LATIN SMALL LETTER S
 * 1E66 LATIN CAPITAL LETTER S WITH CARON AND DOT ABOVE
 * 	0053 LATIN CAPITAL LETTER S
 * 1E67 LATIN SMALL LETTER S WITH CARON AND DOT ABOVE
 * 	0073 LATIN SMALL LETTER S
 * 1E68 LATIN CAPITAL LETTER S WITH DOT BELOW AND DOT ABOVE
 * 	0053 LATIN CAPITAL LETTER S
 * 1E69 LATIN SMALL LETTER S WITH DOT BELOW AND DOT ABOVE
 * 	0073 LATIN SMALL LETTER S
 * 1E6A LATIN CAPITAL LETTER T WITH DOT ABOVE
 * 	0054 LATIN CAPITAL LETTER T
 * 1E6B LATIN SMALL LETTER T WITH DOT ABOVE
 * 	0074 LATIN SMALL LETTER T
 * 1E6C LATIN CAPITAL LETTER T WITH DOT BELOW
 * 	0054 LATIN CAPITAL LETTER T
 * 1E6D LATIN SMALL LETTER T WITH DOT BELOW
 * 	0074 LATIN SMALL LETTER T
 * 1E6E LATIN CAPITAL LETTER T WITH LINE BELOW
 * 	0054 LATIN CAPITAL LETTER T
 * 1E6F LATIN SMALL LETTER T WITH LINE BELOW
 * 	0074 LATIN SMALL LETTER T
 * 1E70 LATIN CAPITAL LETTER T WITH CIRCUMFLEX BELOW
 * 	0054 LATIN CAPITAL LETTER T
 * 1E71 LATIN SMALL LETTER T WITH CIRCUMFLEX BELOW
 * 	0074 LATIN SMALL LETTER T
 * 1E72 LATIN CAPITAL LETTER U WITH DIAERESIS BELOW
 * 	0055 LATIN CAPITAL LETTER U
 * 1E73 LATIN SMALL LETTER U WITH DIAERESIS BELOW
 * 	0075 LATIN SMALL LETTER U
 * 1E74 LATIN CAPITAL LETTER U WITH TILDE BELOW
 * 	0055 LATIN CAPITAL LETTER U
 * 1E75 LATIN SMALL LETTER U WITH TILDE BELOW
 * 	0075 LATIN SMALL LETTER U
 * 1E76 LATIN CAPITAL LETTER U WITH CIRCUMFLEX BELOW
 * 	0055 LATIN CAPITAL LETTER U
 * 1E77 LATIN SMALL LETTER U WITH CIRCUMFLEX BELOW
 * 	0075 LATIN SMALL LETTER U
 * 1E78 LATIN CAPITAL LETTER U WITH TILDE AND ACUTE
 * 	0055 LATIN CAPITAL LETTER U
 * 1E79 LATIN SMALL LETTER U WITH TILDE AND ACUTE
 * 	0075 LATIN SMALL LETTER U
 * 1E7A LATIN CAPITAL LETTER U WITH MACRON AND DIAERESIS
 * 	0055 LATIN CAPITAL LETTER U
 * 1E7B LATIN SMALL LETTER U WITH MACRON AND DIAERESIS
 * 	0075 LATIN SMALL LETTER U
 * 1E7C LATIN CAPITAL LETTER V WITH TILDE
 * 	0056 LATIN CAPITAL LETTER V
 * 1E7D LATIN SMALL LETTER V WITH TILDE
 * 	0076 LATIN SMALL LETTER V
 * 1E7E LATIN CAPITAL LETTER V WITH DOT BELOW
 * 	0056 LATIN CAPITAL LETTER V
 * 1E7F LATIN SMALL LETTER V WITH DOT BELOW
 * 	0076 LATIN SMALL LETTER V
 * 1E80 LATIN CAPITAL LETTER W WITH GRAVE
 * 	0057 LATIN CAPITAL LETTER W
 * 1E81 LATIN SMALL LETTER W WITH GRAVE
 * 	0077 LATIN SMALL LETTER W
 * 1E82 LATIN CAPITAL LETTER W WITH ACUTE
 * 	0057 LATIN CAPITAL LETTER W
 * 1E83 LATIN SMALL LETTER W WITH ACUTE
 * 	0077 LATIN SMALL LETTER W
 * 1E84 LATIN CAPITAL LETTER W WITH DIAERESIS
 * 	0057 LATIN CAPITAL LETTER W
 * 1E85 LATIN SMALL LETTER W WITH DIAERESIS
 * 	0077 LATIN SMALL LETTER W
 * 1E86 LATIN CAPITAL LETTER W WITH DOT ABOVE
 * 	0057 LATIN CAPITAL LETTER W
 * 1E87 LATIN SMALL LETTER W WITH DOT ABOVE
 * 	0077 LATIN SMALL LETTER W
 * 1E88 LATIN CAPITAL LETTER W WITH DOT BELOW
 * 	0057 LATIN CAPITAL LETTER W
 * 1E89 LATIN SMALL LETTER W WITH DOT BELOW
 * 	0077 LATIN SMALL LETTER W
 * 1E8A LATIN CAPITAL LETTER X WITH DOT ABOVE
 * 	0058 LATIN CAPITAL LETTER X
 * 1E8B LATIN SMALL LETTER X WITH DOT ABOVE
 * 	0078 LATIN SMALL LETTER X
 * 1E8C LATIN CAPITAL LETTER X WITH DIAERESIS
 * 	0058 LATIN CAPITAL LETTER X
 * 1E8D LATIN SMALL LETTER X WITH DIAERESIS
 * 	0078 LATIN SMALL LETTER X
 * 1E8E LATIN CAPITAL LETTER Y WITH DOT ABOVE
 * 	0059 LATIN CAPITAL LETTER Y
 * 1E8F LATIN SMALL LETTER Y WITH DOT ABOVE
 * 	0079 LATIN SMALL LETTER Y
 * 1E90 LATIN CAPITAL LETTER Z WITH CIRCUMFLEX
 * 	005A LATIN CAPITAL LETTER Z
 * 1E91 LATIN SMALL LETTER Z WITH CIRCUMFLEX
 * 	007A LATIN SMALL LETTER Z
 * 1E92 LATIN CAPITAL LETTER Z WITH DOT BELOW
 * 	005A LATIN CAPITAL LETTER Z
 * 1E93 LATIN SMALL LETTER Z WITH DOT BELOW
 * 	007A LATIN SMALL LETTER Z
 * 1E94 LATIN CAPITAL LETTER Z WITH LINE BELOW
 * 	005A LATIN CAPITAL LETTER Z
 * 1E95 LATIN SMALL LETTER Z WITH LINE BELOW
 * 	007A LATIN SMALL LETTER Z
 * 1E96 LATIN SMALL LETTER H WITH LINE BELOW
 * 	0068 LATIN SMALL LETTER H
 * 1E97 LATIN SMALL LETTER T WITH DIAERESIS
 * 	0074 LATIN SMALL LETTER T
 * 1E98 LATIN SMALL LETTER W WITH RING ABOVE
 * 	0077 LATIN SMALL LETTER W
 * 1E99 LATIN SMALL LETTER Y WITH RING ABOVE
 * 	0079 LATIN SMALL LETTER Y
 * 1E9A LATIN SMALL LETTER A WITH RIGHT HALF RING
 * 	0061 LATIN SMALL LETTER A
 * 	02BE MODIFIER LETTER RIGHT HALF RING
 * 1E9B LATIN SMALL LETTER LONG S WITH DOT ABOVE
 * 	0073 LATIN SMALL LETTER S
 * 1EA0 LATIN CAPITAL LETTER A WITH DOT BELOW
 * 	0041 LATIN CAPITAL LETTER A
 * 1EA1 LATIN SMALL LETTER A WITH DOT BELOW
 * 	0061 LATIN SMALL LETTER A
 * 1EA2 LATIN CAPITAL LETTER A WITH HOOK ABOVE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EA3 LATIN SMALL LETTER A WITH HOOK ABOVE
 * 	0061 LATIN SMALL LETTER A
 * 1EA4 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND ACUTE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EA5 LATIN SMALL LETTER A WITH CIRCUMFLEX AND ACUTE
 * 	0061 LATIN SMALL LETTER A
 * 1EA6 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND GRAVE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EA7 LATIN SMALL LETTER A WITH CIRCUMFLEX AND GRAVE
 * 	0061 LATIN SMALL LETTER A
 * 1EA8 LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EA9 LATIN SMALL LETTER A WITH CIRCUMFLEX AND HOOK ABOVE
 * 	0061 LATIN SMALL LETTER A
 * 1EAA LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND TILDE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EAB LATIN SMALL LETTER A WITH CIRCUMFLEX AND TILDE
 * 	0061 LATIN SMALL LETTER A
 * 1EAC LATIN CAPITAL LETTER A WITH CIRCUMFLEX AND DOT BELOW
 * 	0041 LATIN CAPITAL LETTER A
 * 1EAD LATIN SMALL LETTER A WITH CIRCUMFLEX AND DOT BELOW
 * 	0061 LATIN SMALL LETTER A
 * 1EAE LATIN CAPITAL LETTER A WITH BREVE AND ACUTE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EAF LATIN SMALL LETTER A WITH BREVE AND ACUTE
 * 	0061 LATIN SMALL LETTER A
 * 1EB0 LATIN CAPITAL LETTER A WITH BREVE AND GRAVE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EB1 LATIN SMALL LETTER A WITH BREVE AND GRAVE
 * 	0061 LATIN SMALL LETTER A
 * 1EB2 LATIN CAPITAL LETTER A WITH BREVE AND HOOK ABOVE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EB3 LATIN SMALL LETTER A WITH BREVE AND HOOK ABOVE
 * 	0061 LATIN SMALL LETTER A
 * 1EB4 LATIN CAPITAL LETTER A WITH BREVE AND TILDE
 * 	0041 LATIN CAPITAL LETTER A
 * 1EB5 LATIN SMALL LETTER A WITH BREVE AND TILDE
 * 	0061 LATIN SMALL LETTER A
 * 1EB6 LATIN CAPITAL LETTER A WITH BREVE AND DOT BELOW
 * 	0041 LATIN CAPITAL LETTER A
 * 1EB7 LATIN SMALL LETTER A WITH BREVE AND DOT BELOW
 * 	0061 LATIN SMALL LETTER A
 * 1EB8 LATIN CAPITAL LETTER E WITH DOT BELOW
 * 	0045 LATIN CAPITAL LETTER E
 * 1EB9 LATIN SMALL LETTER E WITH DOT BELOW
 * 	0065 LATIN SMALL LETTER E
 * 1EBA LATIN CAPITAL LETTER E WITH HOOK ABOVE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EBB LATIN SMALL LETTER E WITH HOOK ABOVE
 * 	0065 LATIN SMALL LETTER E
 * 1EBC LATIN CAPITAL LETTER E WITH TILDE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EBD LATIN SMALL LETTER E WITH TILDE
 * 	0065 LATIN SMALL LETTER E
 * 1EBE LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND ACUTE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EBF LATIN SMALL LETTER E WITH CIRCUMFLEX AND ACUTE
 * 	0065 LATIN SMALL LETTER E
 * 1EC0 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND GRAVE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EC1 LATIN SMALL LETTER E WITH CIRCUMFLEX AND GRAVE
 * 	0065 LATIN SMALL LETTER E
 * 1EC2 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EC3 LATIN SMALL LETTER E WITH CIRCUMFLEX AND HOOK ABOVE
 * 	0065 LATIN SMALL LETTER E
 * 1EC4 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND TILDE
 * 	0045 LATIN CAPITAL LETTER E
 * 1EC5 LATIN SMALL LETTER E WITH CIRCUMFLEX AND TILDE
 * 	0065 LATIN SMALL LETTER E
 * 1EC6 LATIN CAPITAL LETTER E WITH CIRCUMFLEX AND DOT BELOW
 * 	0045 LATIN CAPITAL LETTER E
 * 1EC7 LATIN SMALL LETTER E WITH CIRCUMFLEX AND DOT BELOW
 * 	0065 LATIN SMALL LETTER E
 * 1EC8 LATIN CAPITAL LETTER I WITH HOOK ABOVE
 * 	0049 LATIN CAPITAL LETTER I
 * 1EC9 LATIN SMALL LETTER I WITH HOOK ABOVE
 * 	0069 LATIN SMALL LETTER I
 * 1ECA LATIN CAPITAL LETTER I WITH DOT BELOW
 * 	0049 LATIN CAPITAL LETTER I
 * 1ECB LATIN SMALL LETTER I WITH DOT BELOW
 * 	0069 LATIN SMALL LETTER I
 * 1ECC LATIN CAPITAL LETTER O WITH DOT BELOW
 * 	004F LATIN CAPITAL LETTER O
 * 1ECD LATIN SMALL LETTER O WITH DOT BELOW
 * 	006F LATIN SMALL LETTER O
 * 1ECE LATIN CAPITAL LETTER O WITH HOOK ABOVE
 * 	004F LATIN CAPITAL LETTER O
 * 1ECF LATIN SMALL LETTER O WITH HOOK ABOVE
 * 	006F LATIN SMALL LETTER O
 * 1ED0 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 1ED1 LATIN SMALL LETTER O WITH CIRCUMFLEX AND ACUTE
 * 	006F LATIN SMALL LETTER O
 * 1ED2 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND GRAVE
 * 	004F LATIN CAPITAL LETTER O
 * 1ED3 LATIN SMALL LETTER O WITH CIRCUMFLEX AND GRAVE
 * 	006F LATIN SMALL LETTER O
 * 1ED4 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
 * 	004F LATIN CAPITAL LETTER O
 * 1ED5 LATIN SMALL LETTER O WITH CIRCUMFLEX AND HOOK ABOVE
 * 	006F LATIN SMALL LETTER O
 * 1ED6 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND TILDE
 * 	004F LATIN CAPITAL LETTER O
 * 1ED7 LATIN SMALL LETTER O WITH CIRCUMFLEX AND TILDE
 * 	006F LATIN SMALL LETTER O
 * 1ED8 LATIN CAPITAL LETTER O WITH CIRCUMFLEX AND DOT BELOW
 * 	004F LATIN CAPITAL LETTER O
 * 1ED9 LATIN SMALL LETTER O WITH CIRCUMFLEX AND DOT BELOW
 * 	006F LATIN SMALL LETTER O
 * 1EDA LATIN CAPITAL LETTER O WITH HORN AND ACUTE
 * 	004F LATIN CAPITAL LETTER O
 * 1EDB LATIN SMALL LETTER O WITH HORN AND ACUTE
 * 	006F LATIN SMALL LETTER O
 * 1EDC LATIN CAPITAL LETTER O WITH HORN AND GRAVE
 * 	004F LATIN CAPITAL LETTER O
 * 1EDD LATIN SMALL LETTER O WITH HORN AND GRAVE
 * 	006F LATIN SMALL LETTER O
 * 1EDE LATIN CAPITAL LETTER O WITH HORN AND HOOK ABOVE
 * 	004F LATIN CAPITAL LETTER O
 * 1EDF LATIN SMALL LETTER O WITH HORN AND HOOK ABOVE
 * 	006F LATIN SMALL LETTER O
 * 1EE0 LATIN CAPITAL LETTER O WITH HORN AND TILDE
 * 	004F LATIN CAPITAL LETTER O
 * 1EE1 LATIN SMALL LETTER O WITH HORN AND TILDE
 * 	006F LATIN SMALL LETTER O
 * 1EE2 LATIN CAPITAL LETTER O WITH HORN AND DOT BELOW
 * 	004F LATIN CAPITAL LETTER O
 * 1EE3 LATIN SMALL LETTER O WITH HORN AND DOT BELOW
 * 	006F LATIN SMALL LETTER O
 * 1EE4 LATIN CAPITAL LETTER U WITH DOT BELOW
 * 	0055 LATIN CAPITAL LETTER U
 * 1EE5 LATIN SMALL LETTER U WITH DOT BELOW
 * 	0075 LATIN SMALL LETTER U
 * 1EE6 LATIN CAPITAL LETTER U WITH HOOK ABOVE
 * 	0055 LATIN CAPITAL LETTER U
 * 1EE7 LATIN SMALL LETTER U WITH HOOK ABOVE
 * 	0075 LATIN SMALL LETTER U
 * 1EE8 LATIN CAPITAL LETTER U WITH HORN AND ACUTE
 * 	0055 LATIN CAPITAL LETTER U
 * 1EE9 LATIN SMALL LETTER U WITH HORN AND ACUTE
 * 	0075 LATIN SMALL LETTER U
 * 1EEA LATIN CAPITAL LETTER U WITH HORN AND GRAVE
 * 	0055 LATIN CAPITAL LETTER U
 * 1EEB LATIN SMALL LETTER U WITH HORN AND GRAVE
 * 	0075 LATIN SMALL LETTER U
 * 1EEC LATIN CAPITAL LETTER U WITH HORN AND HOOK ABOVE
 * 	0055 LATIN CAPITAL LETTER U
 * 1EED LATIN SMALL LETTER U WITH HORN AND HOOK ABOVE
 * 	0075 LATIN SMALL LETTER U
 * 1EEE LATIN CAPITAL LETTER U WITH HORN AND TILDE
 * 	0055 LATIN CAPITAL LETTER U
 * 1EEF LATIN SMALL LETTER U WITH HORN AND TILDE
 * 	0075 LATIN SMALL LETTER U
 * 1EF0 LATIN CAPITAL LETTER U WITH HORN AND DOT BELOW
 * 	0055 LATIN CAPITAL LETTER U
 * 1EF1 LATIN SMALL LETTER U WITH HORN AND DOT BELOW
 * 	0075 LATIN SMALL LETTER U
 * 1EF2 LATIN CAPITAL LETTER Y WITH GRAVE
 * 	0059 LATIN CAPITAL LETTER Y
 * 1EF3 LATIN SMALL LETTER Y WITH GRAVE
 * 	0079 LATIN SMALL LETTER Y
 * 1EF4 LATIN CAPITAL LETTER Y WITH DOT BELOW
 * 	0059 LATIN CAPITAL LETTER Y
 * 1EF5 LATIN SMALL LETTER Y WITH DOT BELOW
 * 	0079 LATIN SMALL LETTER Y
 * 1EF6 LATIN CAPITAL LETTER Y WITH HOOK ABOVE
 * 	0059 LATIN CAPITAL LETTER Y
 * 1EF7 LATIN SMALL LETTER Y WITH HOOK ABOVE
 * 	0079 LATIN SMALL LETTER Y
 * 1EF8 LATIN CAPITAL LETTER Y WITH TILDE
 * 	0059 LATIN CAPITAL LETTER Y
 * 1EF9 LATIN SMALL LETTER Y WITH TILDE
 * 	0079 LATIN SMALL LETTER Y
 * 1F00 GREEK SMALL LETTER ALPHA WITH PSILI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F01 GREEK SMALL LETTER ALPHA WITH DASIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F02 GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F03 GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F04 GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F05 GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F06 GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F07 GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F08 GREEK CAPITAL LETTER ALPHA WITH PSILI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F09 GREEK CAPITAL LETTER ALPHA WITH DASIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0A GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0B GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0C GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0D GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0E GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F0F GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F10 GREEK SMALL LETTER EPSILON WITH PSILI
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F11 GREEK SMALL LETTER EPSILON WITH DASIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F12 GREEK SMALL LETTER EPSILON WITH PSILI AND VARIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F13 GREEK SMALL LETTER EPSILON WITH DASIA AND VARIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F14 GREEK SMALL LETTER EPSILON WITH PSILI AND OXIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F15 GREEK SMALL LETTER EPSILON WITH DASIA AND OXIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F18 GREEK CAPITAL LETTER EPSILON WITH PSILI
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F19 GREEK CAPITAL LETTER EPSILON WITH DASIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F1A GREEK CAPITAL LETTER EPSILON WITH PSILI AND VARIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F1B GREEK CAPITAL LETTER EPSILON WITH DASIA AND VARIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F1C GREEK CAPITAL LETTER EPSILON WITH PSILI AND OXIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F1D GREEK CAPITAL LETTER EPSILON WITH DASIA AND OXIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1F20 GREEK SMALL LETTER ETA WITH PSILI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F21 GREEK SMALL LETTER ETA WITH DASIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F22 GREEK SMALL LETTER ETA WITH PSILI AND VARIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F23 GREEK SMALL LETTER ETA WITH DASIA AND VARIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F24 GREEK SMALL LETTER ETA WITH PSILI AND OXIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F25 GREEK SMALL LETTER ETA WITH DASIA AND OXIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F26 GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F27 GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F28 GREEK CAPITAL LETTER ETA WITH PSILI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F29 GREEK CAPITAL LETTER ETA WITH DASIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2A GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2B GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2C GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2D GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2E GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F2F GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F30 GREEK SMALL LETTER IOTA WITH PSILI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F31 GREEK SMALL LETTER IOTA WITH DASIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F32 GREEK SMALL LETTER IOTA WITH PSILI AND VARIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F33 GREEK SMALL LETTER IOTA WITH DASIA AND VARIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F34 GREEK SMALL LETTER IOTA WITH PSILI AND OXIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F35 GREEK SMALL LETTER IOTA WITH DASIA AND OXIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F36 GREEK SMALL LETTER IOTA WITH PSILI AND PERISPOMENI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F37 GREEK SMALL LETTER IOTA WITH DASIA AND PERISPOMENI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F38 GREEK CAPITAL LETTER IOTA WITH PSILI
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F39 GREEK CAPITAL LETTER IOTA WITH DASIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3A GREEK CAPITAL LETTER IOTA WITH PSILI AND VARIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3B GREEK CAPITAL LETTER IOTA WITH DASIA AND VARIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3C GREEK CAPITAL LETTER IOTA WITH PSILI AND OXIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3D GREEK CAPITAL LETTER IOTA WITH DASIA AND OXIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3E GREEK CAPITAL LETTER IOTA WITH PSILI AND PERISPOMENI
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F3F GREEK CAPITAL LETTER IOTA WITH DASIA AND PERISPOMENI
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1F40 GREEK SMALL LETTER OMICRON WITH PSILI
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F41 GREEK SMALL LETTER OMICRON WITH DASIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F42 GREEK SMALL LETTER OMICRON WITH PSILI AND VARIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F43 GREEK SMALL LETTER OMICRON WITH DASIA AND VARIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F44 GREEK SMALL LETTER OMICRON WITH PSILI AND OXIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F45 GREEK SMALL LETTER OMICRON WITH DASIA AND OXIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F48 GREEK CAPITAL LETTER OMICRON WITH PSILI
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F49 GREEK CAPITAL LETTER OMICRON WITH DASIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F4A GREEK CAPITAL LETTER OMICRON WITH PSILI AND VARIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F4B GREEK CAPITAL LETTER OMICRON WITH DASIA AND VARIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F4C GREEK CAPITAL LETTER OMICRON WITH PSILI AND OXIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F4D GREEK CAPITAL LETTER OMICRON WITH DASIA AND OXIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1F50 GREEK SMALL LETTER UPSILON WITH PSILI
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F51 GREEK SMALL LETTER UPSILON WITH DASIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F52 GREEK SMALL LETTER UPSILON WITH PSILI AND VARIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F53 GREEK SMALL LETTER UPSILON WITH DASIA AND VARIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F54 GREEK SMALL LETTER UPSILON WITH PSILI AND OXIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F55 GREEK SMALL LETTER UPSILON WITH DASIA AND OXIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F56 GREEK SMALL LETTER UPSILON WITH PSILI AND PERISPOMENI
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F57 GREEK SMALL LETTER UPSILON WITH DASIA AND PERISPOMENI
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F59 GREEK CAPITAL LETTER UPSILON WITH DASIA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1F5B GREEK CAPITAL LETTER UPSILON WITH DASIA AND VARIA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1F5D GREEK CAPITAL LETTER UPSILON WITH DASIA AND OXIA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1F5F GREEK CAPITAL LETTER UPSILON WITH DASIA AND PERISPOMENI
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1F60 GREEK SMALL LETTER OMEGA WITH PSILI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F61 GREEK SMALL LETTER OMEGA WITH DASIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F62 GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F63 GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F64 GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F65 GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F66 GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F67 GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F68 GREEK CAPITAL LETTER OMEGA WITH PSILI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F69 GREEK CAPITAL LETTER OMEGA WITH DASIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6A GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6B GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6C GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6D GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6E GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F6F GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1F70 GREEK SMALL LETTER ALPHA WITH VARIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F71 GREEK SMALL LETTER ALPHA WITH OXIA
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F72 GREEK SMALL LETTER EPSILON WITH VARIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F73 GREEK SMALL LETTER EPSILON WITH OXIA
 * 	03B5 GREEK SMALL LETTER EPSILON
 * 1F74 GREEK SMALL LETTER ETA WITH VARIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F75 GREEK SMALL LETTER ETA WITH OXIA
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F76 GREEK SMALL LETTER IOTA WITH VARIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F77 GREEK SMALL LETTER IOTA WITH OXIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1F78 GREEK SMALL LETTER OMICRON WITH VARIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F79 GREEK SMALL LETTER OMICRON WITH OXIA
 * 	03BF GREEK SMALL LETTER OMICRON
 * 1F7A GREEK SMALL LETTER UPSILON WITH VARIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F7B GREEK SMALL LETTER UPSILON WITH OXIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1F7C GREEK SMALL LETTER OMEGA WITH VARIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F7D GREEK SMALL LETTER OMEGA WITH OXIA
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1F80 GREEK SMALL LETTER ALPHA WITH PSILI AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F81 GREEK SMALL LETTER ALPHA WITH DASIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F82 GREEK SMALL LETTER ALPHA WITH PSILI AND VARIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F83 GREEK SMALL LETTER ALPHA WITH DASIA AND VARIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F84 GREEK SMALL LETTER ALPHA WITH PSILI AND OXIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F85 GREEK SMALL LETTER ALPHA WITH DASIA AND OXIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F86 GREEK SMALL LETTER ALPHA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F87 GREEK SMALL LETTER ALPHA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1F88 GREEK CAPITAL LETTER ALPHA WITH PSILI AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F89 GREEK CAPITAL LETTER ALPHA WITH DASIA AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8A GREEK CAPITAL LETTER ALPHA WITH PSILI AND VARIA AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8B GREEK CAPITAL LETTER ALPHA WITH DASIA AND VARIA AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8C GREEK CAPITAL LETTER ALPHA WITH PSILI AND OXIA AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8D GREEK CAPITAL LETTER ALPHA WITH DASIA AND OXIA AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8E GREEK CAPITAL LETTER ALPHA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F8F GREEK CAPITAL LETTER ALPHA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1F90 GREEK SMALL LETTER ETA WITH PSILI AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F91 GREEK SMALL LETTER ETA WITH DASIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F92 GREEK SMALL LETTER ETA WITH PSILI AND VARIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F93 GREEK SMALL LETTER ETA WITH DASIA AND VARIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F94 GREEK SMALL LETTER ETA WITH PSILI AND OXIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F95 GREEK SMALL LETTER ETA WITH DASIA AND OXIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F96 GREEK SMALL LETTER ETA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F97 GREEK SMALL LETTER ETA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1F98 GREEK CAPITAL LETTER ETA WITH PSILI AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F99 GREEK CAPITAL LETTER ETA WITH DASIA AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9A GREEK CAPITAL LETTER ETA WITH PSILI AND VARIA AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9B GREEK CAPITAL LETTER ETA WITH DASIA AND VARIA AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9C GREEK CAPITAL LETTER ETA WITH PSILI AND OXIA AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9D GREEK CAPITAL LETTER ETA WITH DASIA AND OXIA AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9E GREEK CAPITAL LETTER ETA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1F9F GREEK CAPITAL LETTER ETA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1FA0 GREEK SMALL LETTER OMEGA WITH PSILI AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA1 GREEK SMALL LETTER OMEGA WITH DASIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA2 GREEK SMALL LETTER OMEGA WITH PSILI AND VARIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA3 GREEK SMALL LETTER OMEGA WITH DASIA AND VARIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA4 GREEK SMALL LETTER OMEGA WITH PSILI AND OXIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA5 GREEK SMALL LETTER OMEGA WITH DASIA AND OXIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA6 GREEK SMALL LETTER OMEGA WITH PSILI AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA7 GREEK SMALL LETTER OMEGA WITH DASIA AND PERISPOMENI AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FA8 GREEK CAPITAL LETTER OMEGA WITH PSILI AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FA9 GREEK CAPITAL LETTER OMEGA WITH DASIA AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAA GREEK CAPITAL LETTER OMEGA WITH PSILI AND VARIA AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAB GREEK CAPITAL LETTER OMEGA WITH DASIA AND VARIA AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAC GREEK CAPITAL LETTER OMEGA WITH PSILI AND OXIA AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAD GREEK CAPITAL LETTER OMEGA WITH DASIA AND OXIA AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAE GREEK CAPITAL LETTER OMEGA WITH PSILI AND PERISPOMENI AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FAF GREEK CAPITAL LETTER OMEGA WITH DASIA AND PERISPOMENI AND PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FB0 GREEK SMALL LETTER ALPHA WITH VRACHY
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB1 GREEK SMALL LETTER ALPHA WITH MACRON
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB2 GREEK SMALL LETTER ALPHA WITH VARIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB3 GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB4 GREEK SMALL LETTER ALPHA WITH OXIA AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB6 GREEK SMALL LETTER ALPHA WITH PERISPOMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB7 GREEK SMALL LETTER ALPHA WITH PERISPOMENI AND YPOGEGRAMMENI
 * 	03B1 GREEK SMALL LETTER ALPHA
 * 1FB8 GREEK CAPITAL LETTER ALPHA WITH VRACHY
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1FB9 GREEK CAPITAL LETTER ALPHA WITH MACRON
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1FBA GREEK CAPITAL LETTER ALPHA WITH VARIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1FBB GREEK CAPITAL LETTER ALPHA WITH OXIA
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1FBC GREEK CAPITAL LETTER ALPHA WITH PROSGEGRAMMENI
 * 	0391 GREEK CAPITAL LETTER ALPHA
 * 1FBD GREEK KORONIS
 * 	0020 SPACE
 * 1FBE GREEK PROSGEGRAMMENI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FBF GREEK PSILI
 * 	0020 SPACE
 * 1FC0 GREEK PERISPOMENI
 * 	0020 SPACE
 * 1FC1 GREEK DIALYTIKA AND PERISPOMENI
 * 	0020 SPACE
 * 1FC2 GREEK SMALL LETTER ETA WITH VARIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1FC3 GREEK SMALL LETTER ETA WITH YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1FC4 GREEK SMALL LETTER ETA WITH OXIA AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1FC6 GREEK SMALL LETTER ETA WITH PERISPOMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1FC7 GREEK SMALL LETTER ETA WITH PERISPOMENI AND YPOGEGRAMMENI
 * 	03B7 GREEK SMALL LETTER ETA
 * 1FC8 GREEK CAPITAL LETTER EPSILON WITH VARIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1FC9 GREEK CAPITAL LETTER EPSILON WITH OXIA
 * 	0395 GREEK CAPITAL LETTER EPSILON
 * 1FCA GREEK CAPITAL LETTER ETA WITH VARIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1FCB GREEK CAPITAL LETTER ETA WITH OXIA
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1FCC GREEK CAPITAL LETTER ETA WITH PROSGEGRAMMENI
 * 	0397 GREEK CAPITAL LETTER ETA
 * 1FCD GREEK PSILI AND VARIA
 * 	0020 SPACE
 * 1FCE GREEK PSILI AND OXIA
 * 	0020 SPACE
 * 1FCF GREEK PSILI AND PERISPOMENI
 * 	0020 SPACE
 * 1FD0 GREEK SMALL LETTER IOTA WITH VRACHY
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD1 GREEK SMALL LETTER IOTA WITH MACRON
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD2 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND VARIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD3 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD6 GREEK SMALL LETTER IOTA WITH PERISPOMENI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD7 GREEK SMALL LETTER IOTA WITH DIALYTIKA AND PERISPOMENI
 * 	03B9 GREEK SMALL LETTER IOTA
 * 1FD8 GREEK CAPITAL LETTER IOTA WITH VRACHY
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1FD9 GREEK CAPITAL LETTER IOTA WITH MACRON
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1FDA GREEK CAPITAL LETTER IOTA WITH VARIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1FDB GREEK CAPITAL LETTER IOTA WITH OXIA
 * 	0399 GREEK CAPITAL LETTER IOTA
 * 1FDD GREEK DASIA AND VARIA
 * 	0020 SPACE
 * 1FDE GREEK DASIA AND OXIA
 * 	0020 SPACE
 * 1FDF GREEK DASIA AND PERISPOMENI
 * 	0020 SPACE
 * 1FE0 GREEK SMALL LETTER UPSILON WITH VRACHY
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE1 GREEK SMALL LETTER UPSILON WITH MACRON
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE2 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND VARIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE3 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE4 GREEK SMALL LETTER RHO WITH PSILI
 * 	03C1 GREEK SMALL LETTER RHO
 * 1FE5 GREEK SMALL LETTER RHO WITH DASIA
 * 	03C1 GREEK SMALL LETTER RHO
 * 1FE6 GREEK SMALL LETTER UPSILON WITH PERISPOMENI
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE7 GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND PERISPOMENI
 * 	03C5 GREEK SMALL LETTER UPSILON
 * 1FE8 GREEK CAPITAL LETTER UPSILON WITH VRACHY
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1FE9 GREEK CAPITAL LETTER UPSILON WITH MACRON
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1FEA GREEK CAPITAL LETTER UPSILON WITH VARIA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1FEB GREEK CAPITAL LETTER UPSILON WITH OXIA
 * 	03A5 GREEK CAPITAL LETTER UPSILON
 * 1FEC GREEK CAPITAL LETTER RHO WITH DASIA
 * 	03A1 GREEK CAPITAL LETTER RHO
 * 1FED GREEK DIALYTIKA AND VARIA
 * 	0020 SPACE
 * 1FEE GREEK DIALYTIKA AND OXIA
 * 	0020 SPACE
 * 1FEF GREEK VARIA
 * 	0060 GRAVE ACCENT
 * 1FF2 GREEK SMALL LETTER OMEGA WITH VARIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FF3 GREEK SMALL LETTER OMEGA WITH YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FF4 GREEK SMALL LETTER OMEGA WITH OXIA AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FF6 GREEK SMALL LETTER OMEGA WITH PERISPOMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FF7 GREEK SMALL LETTER OMEGA WITH PERISPOMENI AND YPOGEGRAMMENI
 * 	03C9 GREEK SMALL LETTER OMEGA
 * 1FF8 GREEK CAPITAL LETTER OMICRON WITH VARIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1FF9 GREEK CAPITAL LETTER OMICRON WITH OXIA
 * 	039F GREEK CAPITAL LETTER OMICRON
 * 1FFA GREEK CAPITAL LETTER OMEGA WITH VARIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FFB GREEK CAPITAL LETTER OMEGA WITH OXIA
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FFC GREEK CAPITAL LETTER OMEGA WITH PROSGEGRAMMENI
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 1FFD GREEK OXIA
 * 	0020 SPACE
 * 1FFE GREEK DASIA
 * 	0020 SPACE
 * 2000 EN QUAD
 * 	0020 SPACE
 * 2001 EM QUAD
 * 	0020 SPACE
 * 2002 EN SPACE
 * 	0020 SPACE
 * 2003 EM SPACE
 * 	0020 SPACE
 * 2004 THREE-PER-EM SPACE
 * 	0020 SPACE
 * 2005 FOUR-PER-EM SPACE
 * 	0020 SPACE
 * 2006 SIX-PER-EM SPACE
 * 	0020 SPACE
 * 2007 FIGURE SPACE
 * 	0020 SPACE
 * 2008 PUNCTUATION SPACE
 * 	0020 SPACE
 * 2009 THIN SPACE
 * 	0020 SPACE
 * 200A HAIR SPACE
 * 	0020 SPACE
 * 2011 NON-BREAKING HYPHEN
 * 	2010 HYPHEN
 * 2017 DOUBLE LOW LINE
 * 	0020 SPACE
 * 2024 ONE DOT LEADER
 * 	002E FULL STOP
 * 2025 TWO DOT LEADER
 * 	002E FULL STOP
 * 	002E FULL STOP
 * 2026 HORIZONTAL ELLIPSIS
 * 	002E FULL STOP
 * 	002E FULL STOP
 * 	002E FULL STOP
 * 202F NARROW NO-BREAK SPACE
 * 	0020 SPACE
 * 2033 DOUBLE PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 2034 TRIPLE PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 2036 REVERSED DOUBLE PRIME
 * 	2035 REVERSED PRIME
 * 	2035 REVERSED PRIME
 * 2037 REVERSED TRIPLE PRIME
 * 	2035 REVERSED PRIME
 * 	2035 REVERSED PRIME
 * 	2035 REVERSED PRIME
 * 203C DOUBLE EXCLAMATION MARK
 * 	0021 EXCLAMATION MARK
 * 	0021 EXCLAMATION MARK
 * 203E OVERLINE
 * 	0020 SPACE
 * 2047 DOUBLE QUESTION MARK
 * 	003F QUESTION MARK
 * 	003F QUESTION MARK
 * 2048 QUESTION EXCLAMATION MARK
 * 	003F QUESTION MARK
 * 	0021 EXCLAMATION MARK
 * 2049 EXCLAMATION QUESTION MARK
 * 	0021 EXCLAMATION MARK
 * 	003F QUESTION MARK
 * 2057 QUADRUPLE PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 	2032 PRIME
 * 205F MEDIUM MATHEMATICAL SPACE
 * 	0020 SPACE
 * 2070 SUPERSCRIPT ZERO
 * 	0030 DIGIT ZERO
 * 2071 SUPERSCRIPT LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 2074 SUPERSCRIPT FOUR
 * 	0034 DIGIT FOUR
 * 2075 SUPERSCRIPT FIVE
 * 	0035 DIGIT FIVE
 * 2076 SUPERSCRIPT SIX
 * 	0036 DIGIT SIX
 * 2077 SUPERSCRIPT SEVEN
 * 	0037 DIGIT SEVEN
 * 2078 SUPERSCRIPT EIGHT
 * 	0038 DIGIT EIGHT
 * 2079 SUPERSCRIPT NINE
 * 	0039 DIGIT NINE
 * 207A SUPERSCRIPT PLUS SIGN
 * 	002B PLUS SIGN
 * 207B SUPERSCRIPT MINUS
 * 	2212 MINUS SIGN
 * 207C SUPERSCRIPT EQUALS SIGN
 * 	003D EQUALS SIGN
 * 207D SUPERSCRIPT LEFT PARENTHESIS
 * 	0028 LEFT PARENTHESIS
 * 207E SUPERSCRIPT RIGHT PARENTHESIS
 * 	0029 RIGHT PARENTHESIS
 * 207F SUPERSCRIPT LATIN SMALL LETTER N
 * 	006E LATIN SMALL LETTER N
 * 2080 SUBSCRIPT ZERO
 * 	0030 DIGIT ZERO
 * 2081 SUBSCRIPT ONE
 * 	0031 DIGIT ONE
 * 2082 SUBSCRIPT TWO
 * 	0032 DIGIT TWO
 * 2083 SUBSCRIPT THREE
 * 	0033 DIGIT THREE
 * 2084 SUBSCRIPT FOUR
 * 	0034 DIGIT FOUR
 * 2085 SUBSCRIPT FIVE
 * 	0035 DIGIT FIVE
 * 2086 SUBSCRIPT SIX
 * 	0036 DIGIT SIX
 * 2087 SUBSCRIPT SEVEN
 * 	0037 DIGIT SEVEN
 * 2088 SUBSCRIPT EIGHT
 * 	0038 DIGIT EIGHT
 * 2089 SUBSCRIPT NINE
 * 	0039 DIGIT NINE
 * 208A SUBSCRIPT PLUS SIGN
 * 	002B PLUS SIGN
 * 208B SUBSCRIPT MINUS
 * 	2212 MINUS SIGN
 * 208C SUBSCRIPT EQUALS SIGN
 * 	003D EQUALS SIGN
 * 208D SUBSCRIPT LEFT PARENTHESIS
 * 	0028 LEFT PARENTHESIS
 * 208E SUBSCRIPT RIGHT PARENTHESIS
 * 	0029 RIGHT PARENTHESIS
 * 2090 LATIN SUBSCRIPT SMALL LETTER A
 * 	0061 LATIN SMALL LETTER A
 * 2091 LATIN SUBSCRIPT SMALL LETTER E
 * 	0065 LATIN SMALL LETTER E
 * 2092 LATIN SUBSCRIPT SMALL LETTER O
 * 	006F LATIN SMALL LETTER O
 * 2093 LATIN SUBSCRIPT SMALL LETTER X
 * 	0078 LATIN SMALL LETTER X
 * 2094 LATIN SUBSCRIPT SMALL LETTER SCHWA
 * 	0259 LATIN SMALL LETTER SCHWA
 * 2095 LATIN SUBSCRIPT SMALL LETTER H
 * 	0068 LATIN SMALL LETTER H
 * 2096 LATIN SUBSCRIPT SMALL LETTER K
 * 	006B LATIN SMALL LETTER K
 * 2097 LATIN SUBSCRIPT SMALL LETTER L
 * 	006C LATIN SMALL LETTER L
 * 2098 LATIN SUBSCRIPT SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * 2099 LATIN SUBSCRIPT SMALL LETTER N
 * 	006E LATIN SMALL LETTER N
 * 209A LATIN SUBSCRIPT SMALL LETTER P
 * 	0070 LATIN SMALL LETTER P
 * 209B LATIN SUBSCRIPT SMALL LETTER S
 * 	0073 LATIN SMALL LETTER S
 * 209C LATIN SUBSCRIPT SMALL LETTER T
 * 	0074 LATIN SMALL LETTER T
 * 20A8 RUPEE SIGN
 * 	0052 LATIN CAPITAL LETTER R
 * 	0073 LATIN SMALL LETTER S
 * 20D0 COMBINING LEFT HARPOON ABOVE
 * 	0000 
 * 20D1 COMBINING RIGHT HARPOON ABOVE
 * 	0000 
 * 20D2 COMBINING LONG VERTICAL LINE OVERLAY
 * 	0000 
 * 20D3 COMBINING SHORT VERTICAL LINE OVERLAY
 * 	0000 
 * 20D4 COMBINING ANTICLOCKWISE ARROW ABOVE
 * 	0000 
 * 20D5 COMBINING CLOCKWISE ARROW ABOVE
 * 	0000 
 * 20D6 COMBINING LEFT ARROW ABOVE
 * 	0000 
 * 20D7 COMBINING RIGHT ARROW ABOVE
 * 	0000 
 * 20D8 COMBINING RING OVERLAY
 * 	0000 
 * 20D9 COMBINING CLOCKWISE RING OVERLAY
 * 	0000 
 * 20DA COMBINING ANTICLOCKWISE RING OVERLAY
 * 	0000 
 * 20DB COMBINING THREE DOTS ABOVE
 * 	0000 
 * 20DC COMBINING FOUR DOTS ABOVE
 * 	0000 
 * 20DD COMBINING ENCLOSING CIRCLE
 * 	0000 
 * 20DE COMBINING ENCLOSING SQUARE
 * 	0000 
 * 20DF COMBINING ENCLOSING DIAMOND
 * 	0000 
 * 20E0 COMBINING ENCLOSING CIRCLE BACKSLASH
 * 	0000 
 * 20E1 COMBINING LEFT RIGHT ARROW ABOVE
 * 	0000 
 * 20E2 COMBINING ENCLOSING SCREEN
 * 	0000 
 * 20E3 COMBINING ENCLOSING KEYCAP
 * 	0000 
 * 20E4 COMBINING ENCLOSING UPWARD POINTING TRIANGLE
 * 	0000 
 * 20E5 COMBINING REVERSE SOLIDUS OVERLAY
 * 	0000 
 * 20E6 COMBINING DOUBLE VERTICAL STROKE OVERLAY
 * 	0000 
 * 20E7 COMBINING ANNUITY SYMBOL
 * 	0000 
 * 20E8 COMBINING TRIPLE UNDERDOT
 * 	0000 
 * 20E9 COMBINING WIDE BRIDGE ABOVE
 * 	0000 
 * 20EA COMBINING LEFTWARDS ARROW OVERLAY
 * 	0000 
 * 20EB COMBINING LONG DOUBLE SOLIDUS OVERLAY
 * 	0000 
 * 20EC COMBINING RIGHTWARDS HARPOON WITH BARB DOWNWARDS
 * 	0000 
 * 20ED COMBINING LEFTWARDS HARPOON WITH BARB DOWNWARDS
 * 	0000 
 * 20EE COMBINING LEFT ARROW BELOW
 * 	0000 
 * 20EF COMBINING RIGHT ARROW BELOW
 * 	0000 
 * 20F0 COMBINING ASTERISK ABOVE
 * 	0000 
 * 2100 ACCOUNT OF
 * 	0061 LATIN SMALL LETTER A
 * 	002F SOLIDUS
 * 	0063 LATIN SMALL LETTER C
 * 2101 ADDRESSED TO THE SUBJECT
 * 	0061 LATIN SMALL LETTER A
 * 	002F SOLIDUS
 * 	0073 LATIN SMALL LETTER S
 * 2102 DOUBLE-STRUCK CAPITAL C
 * 	0043 LATIN CAPITAL LETTER C
 * 2103 DEGREE CELSIUS
 * 	00B0 DEGREE SIGN
 * 	0043 LATIN CAPITAL LETTER C
 * 2105 CARE OF
 * 	0063 LATIN SMALL LETTER C
 * 	002F SOLIDUS
 * 	006F LATIN SMALL LETTER O
 * 2106 CADA UNA
 * 	0063 LATIN SMALL LETTER C
 * 	002F SOLIDUS
 * 	0075 LATIN SMALL LETTER U
 * 2107 EULER CONSTANT
 * 	0190 LATIN CAPITAL LETTER OPEN E
 * 2109 DEGREE FAHRENHEIT
 * 	00B0 DEGREE SIGN
 * 	0046 LATIN CAPITAL LETTER F
 * 210A SCRIPT SMALL G
 * 	0067 LATIN SMALL LETTER G
 * 210B SCRIPT CAPITAL H
 * 	0048 LATIN CAPITAL LETTER H
 * 210C BLACK-LETTER CAPITAL H
 * 	0048 LATIN CAPITAL LETTER H
 * 210D DOUBLE-STRUCK CAPITAL H
 * 	0048 LATIN CAPITAL LETTER H
 * 210E PLANCK CONSTANT
 * 	0068 LATIN SMALL LETTER H
 * 210F PLANCK CONSTANT OVER TWO PI
 * 	0127 LATIN SMALL LETTER H WITH STROKE
 * 2110 SCRIPT CAPITAL I
 * 	0049 LATIN CAPITAL LETTER I
 * 2111 BLACK-LETTER CAPITAL I
 * 	0049 LATIN CAPITAL LETTER I
 * 2112 SCRIPT CAPITAL L
 * 	004C LATIN CAPITAL LETTER L
 * 2113 SCRIPT SMALL L
 * 	006C LATIN SMALL LETTER L
 * 2115 DOUBLE-STRUCK CAPITAL N
 * 	004E LATIN CAPITAL LETTER N
 * 2116 NUMERO SIGN
 * 	004E LATIN CAPITAL LETTER N
 * 	006F LATIN SMALL LETTER O
 * 2119 DOUBLE-STRUCK CAPITAL P
 * 	0050 LATIN CAPITAL LETTER P
 * 211A DOUBLE-STRUCK CAPITAL Q
 * 	0051 LATIN CAPITAL LETTER Q
 * 211B SCRIPT CAPITAL R
 * 	0052 LATIN CAPITAL LETTER R
 * 211C BLACK-LETTER CAPITAL R
 * 	0052 LATIN CAPITAL LETTER R
 * 211D DOUBLE-STRUCK CAPITAL R
 * 	0052 LATIN CAPITAL LETTER R
 * 2120 SERVICE MARK
 * 	0053 LATIN CAPITAL LETTER S
 * 	004D LATIN CAPITAL LETTER M
 * 2121 TELEPHONE SIGN
 * 	0054 LATIN CAPITAL LETTER T
 * 	0045 LATIN CAPITAL LETTER E
 * 	004C LATIN CAPITAL LETTER L
 * 2122 TRADE MARK SIGN
 * 	0054 LATIN CAPITAL LETTER T
 * 	004D LATIN CAPITAL LETTER M
 * 2124 DOUBLE-STRUCK CAPITAL Z
 * 	005A LATIN CAPITAL LETTER Z
 * 2126 OHM SIGN
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 2128 BLACK-LETTER CAPITAL Z
 * 	005A LATIN CAPITAL LETTER Z
 * 212A KELVIN SIGN
 * 	004B LATIN CAPITAL LETTER K
 * 212B ANGSTROM SIGN
 * 	0041 LATIN CAPITAL LETTER A
 * 212C SCRIPT CAPITAL B
 * 	0042 LATIN CAPITAL LETTER B
 * 212D BLACK-LETTER CAPITAL C
 * 	0043 LATIN CAPITAL LETTER C
 * 212F SCRIPT SMALL E
 * 	0065 LATIN SMALL LETTER E
 * 2130 SCRIPT CAPITAL E
 * 	0045 LATIN CAPITAL LETTER E
 * 2131 SCRIPT CAPITAL F
 * 	0046 LATIN CAPITAL LETTER F
 * 2133 SCRIPT CAPITAL M
 * 	004D LATIN CAPITAL LETTER M
 * 2134 SCRIPT SMALL O
 * 	006F LATIN SMALL LETTER O
 * 2135 ALEF SYMBOL
 * 	05D0 HEBREW LETTER ALEF
 * 2136 BET SYMBOL
 * 	05D1 HEBREW LETTER BET
 * 2137 GIMEL SYMBOL
 * 	05D2 HEBREW LETTER GIMEL
 * 2138 DALET SYMBOL
 * 	05D3 HEBREW LETTER DALET
 * 2139 INFORMATION SOURCE
 * 	0069 LATIN SMALL LETTER I
 * 213B FACSIMILE SIGN
 * 	0046 LATIN CAPITAL LETTER F
 * 	0041 LATIN CAPITAL LETTER A
 * 	0058 LATIN CAPITAL LETTER X
 * 213C DOUBLE-STRUCK SMALL PI
 * 	03C0 GREEK SMALL LETTER PI
 * 213D DOUBLE-STRUCK SMALL GAMMA
 * 	03B3 GREEK SMALL LETTER GAMMA
 * 213E DOUBLE-STRUCK CAPITAL GAMMA
 * 	0393 GREEK CAPITAL LETTER GAMMA
 * 213F DOUBLE-STRUCK CAPITAL PI
 * 	03A0 GREEK CAPITAL LETTER PI
 * 2140 DOUBLE-STRUCK N-ARY SUMMATION
 * 	2211 N-ARY SUMMATION
 * 2145 DOUBLE-STRUCK ITALIC CAPITAL D
 * 	0044 LATIN CAPITAL LETTER D
 * 2146 DOUBLE-STRUCK ITALIC SMALL D
 * 	0064 LATIN SMALL LETTER D
 * 2147 DOUBLE-STRUCK ITALIC SMALL E
 * 	0065 LATIN SMALL LETTER E
 * 2148 DOUBLE-STRUCK ITALIC SMALL I
 * 	0069 LATIN SMALL LETTER I
 * 2149 DOUBLE-STRUCK ITALIC SMALL J
 * 	006A LATIN SMALL LETTER J
 * 2150 VULGAR FRACTION ONE SEVENTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0037 DIGIT SEVEN
 * 2151 VULGAR FRACTION ONE NINTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0039 DIGIT NINE
 * 2152 VULGAR FRACTION ONE TENTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 2153 VULGAR FRACTION ONE THIRD
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0033 DIGIT THREE
 * 2154 VULGAR FRACTION TWO THIRDS
 * 	0032 DIGIT TWO
 * 	2044 FRACTION SLASH
 * 	0033 DIGIT THREE
 * 2155 VULGAR FRACTION ONE FIFTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0035 DIGIT FIVE
 * 2156 VULGAR FRACTION TWO FIFTHS
 * 	0032 DIGIT TWO
 * 	2044 FRACTION SLASH
 * 	0035 DIGIT FIVE
 * 2157 VULGAR FRACTION THREE FIFTHS
 * 	0033 DIGIT THREE
 * 	2044 FRACTION SLASH
 * 	0035 DIGIT FIVE
 * 2158 VULGAR FRACTION FOUR FIFTHS
 * 	0034 DIGIT FOUR
 * 	2044 FRACTION SLASH
 * 	0035 DIGIT FIVE
 * 2159 VULGAR FRACTION ONE SIXTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0036 DIGIT SIX
 * 215A VULGAR FRACTION FIVE SIXTHS
 * 	0035 DIGIT FIVE
 * 	2044 FRACTION SLASH
 * 	0036 DIGIT SIX
 * 215B VULGAR FRACTION ONE EIGHTH
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 	0038 DIGIT EIGHT
 * 215C VULGAR FRACTION THREE EIGHTHS
 * 	0033 DIGIT THREE
 * 	2044 FRACTION SLASH
 * 	0038 DIGIT EIGHT
 * 215D VULGAR FRACTION FIVE EIGHTHS
 * 	0035 DIGIT FIVE
 * 	2044 FRACTION SLASH
 * 	0038 DIGIT EIGHT
 * 215E VULGAR FRACTION SEVEN EIGHTHS
 * 	0037 DIGIT SEVEN
 * 	2044 FRACTION SLASH
 * 	0038 DIGIT EIGHT
 * 215F FRACTION NUMERATOR ONE
 * 	0031 DIGIT ONE
 * 	2044 FRACTION SLASH
 * 2160 ROMAN NUMERAL ONE
 * 	0049 LATIN CAPITAL LETTER I
 * 2161 ROMAN NUMERAL TWO
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 2162 ROMAN NUMERAL THREE
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 2163 ROMAN NUMERAL FOUR
 * 	0049 LATIN CAPITAL LETTER I
 * 	0056 LATIN CAPITAL LETTER V
 * 2164 ROMAN NUMERAL FIVE
 * 	0056 LATIN CAPITAL LETTER V
 * 2165 ROMAN NUMERAL SIX
 * 	0056 LATIN CAPITAL LETTER V
 * 	0049 LATIN CAPITAL LETTER I
 * 2166 ROMAN NUMERAL SEVEN
 * 	0056 LATIN CAPITAL LETTER V
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 2167 ROMAN NUMERAL EIGHT
 * 	0056 LATIN CAPITAL LETTER V
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 2168 ROMAN NUMERAL NINE
 * 	0049 LATIN CAPITAL LETTER I
 * 	0058 LATIN CAPITAL LETTER X
 * 2169 ROMAN NUMERAL TEN
 * 	0058 LATIN CAPITAL LETTER X
 * 216A ROMAN NUMERAL ELEVEN
 * 	0058 LATIN CAPITAL LETTER X
 * 	0049 LATIN CAPITAL LETTER I
 * 216B ROMAN NUMERAL TWELVE
 * 	0058 LATIN CAPITAL LETTER X
 * 	0049 LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 216C ROMAN NUMERAL FIFTY
 * 	004C LATIN CAPITAL LETTER L
 * 216D ROMAN NUMERAL ONE HUNDRED
 * 	0043 LATIN CAPITAL LETTER C
 * 216E ROMAN NUMERAL FIVE HUNDRED
 * 	0044 LATIN CAPITAL LETTER D
 * 216F ROMAN NUMERAL ONE THOUSAND
 * 	004D LATIN CAPITAL LETTER M
 * 2170 SMALL ROMAN NUMERAL ONE
 * 	0069 LATIN SMALL LETTER I
 * 2171 SMALL ROMAN NUMERAL TWO
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 2172 SMALL ROMAN NUMERAL THREE
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 2173 SMALL ROMAN NUMERAL FOUR
 * 	0069 LATIN SMALL LETTER I
 * 	0076 LATIN SMALL LETTER V
 * 2174 SMALL ROMAN NUMERAL FIVE
 * 	0076 LATIN SMALL LETTER V
 * 2175 SMALL ROMAN NUMERAL SIX
 * 	0076 LATIN SMALL LETTER V
 * 	0069 LATIN SMALL LETTER I
 * 2176 SMALL ROMAN NUMERAL SEVEN
 * 	0076 LATIN SMALL LETTER V
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 2177 SMALL ROMAN NUMERAL EIGHT
 * 	0076 LATIN SMALL LETTER V
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 2178 SMALL ROMAN NUMERAL NINE
 * 	0069 LATIN SMALL LETTER I
 * 	0078 LATIN SMALL LETTER X
 * 2179 SMALL ROMAN NUMERAL TEN
 * 	0078 LATIN SMALL LETTER X
 * 217A SMALL ROMAN NUMERAL ELEVEN
 * 	0078 LATIN SMALL LETTER X
 * 	0069 LATIN SMALL LETTER I
 * 217B SMALL ROMAN NUMERAL TWELVE
 * 	0078 LATIN SMALL LETTER X
 * 	0069 LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 217C SMALL ROMAN NUMERAL FIFTY
 * 	006C LATIN SMALL LETTER L
 * 217D SMALL ROMAN NUMERAL ONE HUNDRED
 * 	0063 LATIN SMALL LETTER C
 * 217E SMALL ROMAN NUMERAL FIVE HUNDRED
 * 	0064 LATIN SMALL LETTER D
 * 217F SMALL ROMAN NUMERAL ONE THOUSAND
 * 	006D LATIN SMALL LETTER M
 * 2189 VULGAR FRACTION ZERO THIRDS
 * 	0030 DIGIT ZERO
 * 	2044 FRACTION SLASH
 * 	0033 DIGIT THREE
 * 219A LEFTWARDS ARROW WITH STROKE
 * 	2190 LEFTWARDS ARROW
 * 219B RIGHTWARDS ARROW WITH STROKE
 * 	2192 RIGHTWARDS ARROW
 * 21AE LEFT RIGHT ARROW WITH STROKE
 * 	2194 LEFT RIGHT ARROW
 * 21CD LEFTWARDS DOUBLE ARROW WITH STROKE
 * 	21D0 LEFTWARDS DOUBLE ARROW
 * 21CE LEFT RIGHT DOUBLE ARROW WITH STROKE
 * 	21D4 LEFT RIGHT DOUBLE ARROW
 * 21CF RIGHTWARDS DOUBLE ARROW WITH STROKE
 * 	21D2 RIGHTWARDS DOUBLE ARROW
 * 2204 THERE DOES NOT EXIST
 * 	2203 THERE EXISTS
 * 2209 NOT AN ELEMENT OF
 * 	2208 ELEMENT OF
 * 220C DOES NOT CONTAIN AS MEMBER
 * 	220B CONTAINS AS MEMBER
 * 2224 DOES NOT DIVIDE
 * 	2223 DIVIDES
 * 2226 NOT PARALLEL TO
 * 	2225 PARALLEL TO
 * 222C DOUBLE INTEGRAL
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 222D TRIPLE INTEGRAL
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 222F SURFACE INTEGRAL
 * 	222E CONTOUR INTEGRAL
 * 	222E CONTOUR INTEGRAL
 * 2230 VOLUME INTEGRAL
 * 	222E CONTOUR INTEGRAL
 * 	222E CONTOUR INTEGRAL
 * 	222E CONTOUR INTEGRAL
 * 2241 NOT TILDE
 * 	223C TILDE OPERATOR
 * 2244 NOT ASYMPTOTICALLY EQUAL TO
 * 	2243 ASYMPTOTICALLY EQUAL TO
 * 2247 NEITHER APPROXIMATELY NOR ACTUALLY EQUAL TO
 * 	2245 APPROXIMATELY EQUAL TO
 * 2249 NOT ALMOST EQUAL TO
 * 	2248 ALMOST EQUAL TO
 * 2260 NOT EQUAL TO
 * 	003D EQUALS SIGN
 * 2262 NOT IDENTICAL TO
 * 	2261 IDENTICAL TO
 * 226D NOT EQUIVALENT TO
 * 	224D EQUIVALENT TO
 * 226E NOT LESS-THAN
 * 	003C LESS-THAN SIGN
 * 226F NOT GREATER-THAN
 * 	003E GREATER-THAN SIGN
 * 2270 NEITHER LESS-THAN NOR EQUAL TO
 * 	2264 LESS-THAN OR EQUAL TO
 * 2271 NEITHER GREATER-THAN NOR EQUAL TO
 * 	2265 GREATER-THAN OR EQUAL TO
 * 2274 NEITHER LESS-THAN NOR EQUIVALENT TO
 * 	2272 LESS-THAN OR EQUIVALENT TO
 * 2275 NEITHER GREATER-THAN NOR EQUIVALENT TO
 * 	2273 GREATER-THAN OR EQUIVALENT TO
 * 2278 NEITHER LESS-THAN NOR GREATER-THAN
 * 	2276 LESS-THAN OR GREATER-THAN
 * 2279 NEITHER GREATER-THAN NOR LESS-THAN
 * 	2277 GREATER-THAN OR LESS-THAN
 * 2280 DOES NOT PRECEDE
 * 	227A PRECEDES
 * 2281 DOES NOT SUCCEED
 * 	227B SUCCEEDS
 * 2284 NOT A SUBSET OF
 * 	2282 SUBSET OF
 * 2285 NOT A SUPERSET OF
 * 	2283 SUPERSET OF
 * 2288 NEITHER A SUBSET OF NOR EQUAL TO
 * 	2286 SUBSET OF OR EQUAL TO
 * 2289 NEITHER A SUPERSET OF NOR EQUAL TO
 * 	2287 SUPERSET OF OR EQUAL TO
 * 22AC DOES NOT PROVE
 * 	22A2 RIGHT TACK
 * 22AD NOT TRUE
 * 	22A8 TRUE
 * 22AE DOES NOT FORCE
 * 	22A9 FORCES
 * 22AF NEGATED DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
 * 	22AB DOUBLE VERTICAL BAR DOUBLE RIGHT TURNSTILE
 * 22E0 DOES NOT PRECEDE OR EQUAL
 * 	227C PRECEDES OR EQUAL TO
 * 22E1 DOES NOT SUCCEED OR EQUAL
 * 	227D SUCCEEDS OR EQUAL TO
 * 22E2 NOT SQUARE IMAGE OF OR EQUAL TO
 * 	2291 SQUARE IMAGE OF OR EQUAL TO
 * 22E3 NOT SQUARE ORIGINAL OF OR EQUAL TO
 * 	2292 SQUARE ORIGINAL OF OR EQUAL TO
 * 22EA NOT NORMAL SUBGROUP OF
 * 	22B2 NORMAL SUBGROUP OF
 * 22EB DOES NOT CONTAIN AS NORMAL SUBGROUP
 * 	22B3 CONTAINS AS NORMAL SUBGROUP
 * 22EC NOT NORMAL SUBGROUP OF OR EQUAL TO
 * 	22B4 NORMAL SUBGROUP OF OR EQUAL TO
 * 22ED DOES NOT CONTAIN AS NORMAL SUBGROUP OR EQUAL
 * 	22B5 CONTAINS AS NORMAL SUBGROUP OR EQUAL TO
 * 2329 LEFT-POINTING ANGLE BRACKET
 * 	3008 LEFT ANGLE BRACKET
 * 232A RIGHT-POINTING ANGLE BRACKET
 * 	3009 RIGHT ANGLE BRACKET
 * 2460 CIRCLED DIGIT ONE
 * 	0031 DIGIT ONE
 * 2461 CIRCLED DIGIT TWO
 * 	0032 DIGIT TWO
 * 2462 CIRCLED DIGIT THREE
 * 	0033 DIGIT THREE
 * 2463 CIRCLED DIGIT FOUR
 * 	0034 DIGIT FOUR
 * 2464 CIRCLED DIGIT FIVE
 * 	0035 DIGIT FIVE
 * 2465 CIRCLED DIGIT SIX
 * 	0036 DIGIT SIX
 * 2466 CIRCLED DIGIT SEVEN
 * 	0037 DIGIT SEVEN
 * 2467 CIRCLED DIGIT EIGHT
 * 	0038 DIGIT EIGHT
 * 2468 CIRCLED DIGIT NINE
 * 	0039 DIGIT NINE
 * 2469 CIRCLED NUMBER TEN
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 246A CIRCLED NUMBER ELEVEN
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 246B CIRCLED NUMBER TWELVE
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 246C CIRCLED NUMBER THIRTEEN
 * 	0031 DIGIT ONE
 * 	0033 DIGIT THREE
 * 246D CIRCLED NUMBER FOURTEEN
 * 	0031 DIGIT ONE
 * 	0034 DIGIT FOUR
 * 246E CIRCLED NUMBER FIFTEEN
 * 	0031 DIGIT ONE
 * 	0035 DIGIT FIVE
 * 246F CIRCLED NUMBER SIXTEEN
 * 	0031 DIGIT ONE
 * 	0036 DIGIT SIX
 * 2470 CIRCLED NUMBER SEVENTEEN
 * 	0031 DIGIT ONE
 * 	0037 DIGIT SEVEN
 * 2471 CIRCLED NUMBER EIGHTEEN
 * 	0031 DIGIT ONE
 * 	0038 DIGIT EIGHT
 * 2472 CIRCLED NUMBER NINETEEN
 * 	0031 DIGIT ONE
 * 	0039 DIGIT NINE
 * 2473 CIRCLED NUMBER TWENTY
 * 	0032 DIGIT TWO
 * 	0030 DIGIT ZERO
 * 2474 PARENTHESIZED DIGIT ONE
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0029 RIGHT PARENTHESIS
 * 2475 PARENTHESIZED DIGIT TWO
 * 	0028 LEFT PARENTHESIS
 * 	0032 DIGIT TWO
 * 	0029 RIGHT PARENTHESIS
 * 2476 PARENTHESIZED DIGIT THREE
 * 	0028 LEFT PARENTHESIS
 * 	0033 DIGIT THREE
 * 	0029 RIGHT PARENTHESIS
 * 2477 PARENTHESIZED DIGIT FOUR
 * 	0028 LEFT PARENTHESIS
 * 	0034 DIGIT FOUR
 * 	0029 RIGHT PARENTHESIS
 * 2478 PARENTHESIZED DIGIT FIVE
 * 	0028 LEFT PARENTHESIS
 * 	0035 DIGIT FIVE
 * 	0029 RIGHT PARENTHESIS
 * 2479 PARENTHESIZED DIGIT SIX
 * 	0028 LEFT PARENTHESIS
 * 	0036 DIGIT SIX
 * 	0029 RIGHT PARENTHESIS
 * 247A PARENTHESIZED DIGIT SEVEN
 * 	0028 LEFT PARENTHESIS
 * 	0037 DIGIT SEVEN
 * 	0029 RIGHT PARENTHESIS
 * 247B PARENTHESIZED DIGIT EIGHT
 * 	0028 LEFT PARENTHESIS
 * 	0038 DIGIT EIGHT
 * 	0029 RIGHT PARENTHESIS
 * 247C PARENTHESIZED DIGIT NINE
 * 	0028 LEFT PARENTHESIS
 * 	0039 DIGIT NINE
 * 	0029 RIGHT PARENTHESIS
 * 247D PARENTHESIZED NUMBER TEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 	0029 RIGHT PARENTHESIS
 * 247E PARENTHESIZED NUMBER ELEVEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 	0029 RIGHT PARENTHESIS
 * 247F PARENTHESIZED NUMBER TWELVE
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 	0029 RIGHT PARENTHESIS
 * 2480 PARENTHESIZED NUMBER THIRTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0033 DIGIT THREE
 * 	0029 RIGHT PARENTHESIS
 * 2481 PARENTHESIZED NUMBER FOURTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0034 DIGIT FOUR
 * 	0029 RIGHT PARENTHESIS
 * 2482 PARENTHESIZED NUMBER FIFTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0035 DIGIT FIVE
 * 	0029 RIGHT PARENTHESIS
 * 2483 PARENTHESIZED NUMBER SIXTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0036 DIGIT SIX
 * 	0029 RIGHT PARENTHESIS
 * 2484 PARENTHESIZED NUMBER SEVENTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0037 DIGIT SEVEN
 * 	0029 RIGHT PARENTHESIS
 * 2485 PARENTHESIZED NUMBER EIGHTEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0038 DIGIT EIGHT
 * 	0029 RIGHT PARENTHESIS
 * 2486 PARENTHESIZED NUMBER NINETEEN
 * 	0028 LEFT PARENTHESIS
 * 	0031 DIGIT ONE
 * 	0039 DIGIT NINE
 * 	0029 RIGHT PARENTHESIS
 * 2487 PARENTHESIZED NUMBER TWENTY
 * 	0028 LEFT PARENTHESIS
 * 	0032 DIGIT TWO
 * 	0030 DIGIT ZERO
 * 	0029 RIGHT PARENTHESIS
 * 2488 DIGIT ONE FULL STOP
 * 	0031 DIGIT ONE
 * 	002E FULL STOP
 * 2489 DIGIT TWO FULL STOP
 * 	0032 DIGIT TWO
 * 	002E FULL STOP
 * 248A DIGIT THREE FULL STOP
 * 	0033 DIGIT THREE
 * 	002E FULL STOP
 * 248B DIGIT FOUR FULL STOP
 * 	0034 DIGIT FOUR
 * 	002E FULL STOP
 * 248C DIGIT FIVE FULL STOP
 * 	0035 DIGIT FIVE
 * 	002E FULL STOP
 * 248D DIGIT SIX FULL STOP
 * 	0036 DIGIT SIX
 * 	002E FULL STOP
 * 248E DIGIT SEVEN FULL STOP
 * 	0037 DIGIT SEVEN
 * 	002E FULL STOP
 * 248F DIGIT EIGHT FULL STOP
 * 	0038 DIGIT EIGHT
 * 	002E FULL STOP
 * 2490 DIGIT NINE FULL STOP
 * 	0039 DIGIT NINE
 * 	002E FULL STOP
 * 2491 NUMBER TEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 	002E FULL STOP
 * 2492 NUMBER ELEVEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 	002E FULL STOP
 * 2493 NUMBER TWELVE FULL STOP
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 	002E FULL STOP
 * 2494 NUMBER THIRTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0033 DIGIT THREE
 * 	002E FULL STOP
 * 2495 NUMBER FOURTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0034 DIGIT FOUR
 * 	002E FULL STOP
 * 2496 NUMBER FIFTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0035 DIGIT FIVE
 * 	002E FULL STOP
 * 2497 NUMBER SIXTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0036 DIGIT SIX
 * 	002E FULL STOP
 * 2498 NUMBER SEVENTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0037 DIGIT SEVEN
 * 	002E FULL STOP
 * 2499 NUMBER EIGHTEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0038 DIGIT EIGHT
 * 	002E FULL STOP
 * 249A NUMBER NINETEEN FULL STOP
 * 	0031 DIGIT ONE
 * 	0039 DIGIT NINE
 * 	002E FULL STOP
 * 249B NUMBER TWENTY FULL STOP
 * 	0032 DIGIT TWO
 * 	0030 DIGIT ZERO
 * 	002E FULL STOP
 * 249C PARENTHESIZED LATIN SMALL LETTER A
 * 	0028 LEFT PARENTHESIS
 * 	0061 LATIN SMALL LETTER A
 * 	0029 RIGHT PARENTHESIS
 * 249D PARENTHESIZED LATIN SMALL LETTER B
 * 	0028 LEFT PARENTHESIS
 * 	0062 LATIN SMALL LETTER B
 * 	0029 RIGHT PARENTHESIS
 * 249E PARENTHESIZED LATIN SMALL LETTER C
 * 	0028 LEFT PARENTHESIS
 * 	0063 LATIN SMALL LETTER C
 * 	0029 RIGHT PARENTHESIS
 * 249F PARENTHESIZED LATIN SMALL LETTER D
 * 	0028 LEFT PARENTHESIS
 * 	0064 LATIN SMALL LETTER D
 * 	0029 RIGHT PARENTHESIS
 * 24A0 PARENTHESIZED LATIN SMALL LETTER E
 * 	0028 LEFT PARENTHESIS
 * 	0065 LATIN SMALL LETTER E
 * 	0029 RIGHT PARENTHESIS
 * 24A1 PARENTHESIZED LATIN SMALL LETTER F
 * 	0028 LEFT PARENTHESIS
 * 	0066 LATIN SMALL LETTER F
 * 	0029 RIGHT PARENTHESIS
 * 24A2 PARENTHESIZED LATIN SMALL LETTER G
 * 	0028 LEFT PARENTHESIS
 * 	0067 LATIN SMALL LETTER G
 * 	0029 RIGHT PARENTHESIS
 * 24A3 PARENTHESIZED LATIN SMALL LETTER H
 * 	0028 LEFT PARENTHESIS
 * 	0068 LATIN SMALL LETTER H
 * 	0029 RIGHT PARENTHESIS
 * 24A4 PARENTHESIZED LATIN SMALL LETTER I
 * 	0028 LEFT PARENTHESIS
 * 	0069 LATIN SMALL LETTER I
 * 	0029 RIGHT PARENTHESIS
 * 24A5 PARENTHESIZED LATIN SMALL LETTER J
 * 	0028 LEFT PARENTHESIS
 * 	006A LATIN SMALL LETTER J
 * 	0029 RIGHT PARENTHESIS
 * 24A6 PARENTHESIZED LATIN SMALL LETTER K
 * 	0028 LEFT PARENTHESIS
 * 	006B LATIN SMALL LETTER K
 * 	0029 RIGHT PARENTHESIS
 * 24A7 PARENTHESIZED LATIN SMALL LETTER L
 * 	0028 LEFT PARENTHESIS
 * 	006C LATIN SMALL LETTER L
 * 	0029 RIGHT PARENTHESIS
 * 24A8 PARENTHESIZED LATIN SMALL LETTER M
 * 	0028 LEFT PARENTHESIS
 * 	006D LATIN SMALL LETTER M
 * 	0029 RIGHT PARENTHESIS
 * 24A9 PARENTHESIZED LATIN SMALL LETTER N
 * 	0028 LEFT PARENTHESIS
 * 	006E LATIN SMALL LETTER N
 * 	0029 RIGHT PARENTHESIS
 * 24AA PARENTHESIZED LATIN SMALL LETTER O
 * 	0028 LEFT PARENTHESIS
 * 	006F LATIN SMALL LETTER O
 * 	0029 RIGHT PARENTHESIS
 * 24AB PARENTHESIZED LATIN SMALL LETTER P
 * 	0028 LEFT PARENTHESIS
 * 	0070 LATIN SMALL LETTER P
 * 	0029 RIGHT PARENTHESIS
 * 24AC PARENTHESIZED LATIN SMALL LETTER Q
 * 	0028 LEFT PARENTHESIS
 * 	0071 LATIN SMALL LETTER Q
 * 	0029 RIGHT PARENTHESIS
 * 24AD PARENTHESIZED LATIN SMALL LETTER R
 * 	0028 LEFT PARENTHESIS
 * 	0072 LATIN SMALL LETTER R
 * 	0029 RIGHT PARENTHESIS
 * 24AE PARENTHESIZED LATIN SMALL LETTER S
 * 	0028 LEFT PARENTHESIS
 * 	0073 LATIN SMALL LETTER S
 * 	0029 RIGHT PARENTHESIS
 * 24AF PARENTHESIZED LATIN SMALL LETTER T
 * 	0028 LEFT PARENTHESIS
 * 	0074 LATIN SMALL LETTER T
 * 	0029 RIGHT PARENTHESIS
 * 24B0 PARENTHESIZED LATIN SMALL LETTER U
 * 	0028 LEFT PARENTHESIS
 * 	0075 LATIN SMALL LETTER U
 * 	0029 RIGHT PARENTHESIS
 * 24B1 PARENTHESIZED LATIN SMALL LETTER V
 * 	0028 LEFT PARENTHESIS
 * 	0076 LATIN SMALL LETTER V
 * 	0029 RIGHT PARENTHESIS
 * 24B2 PARENTHESIZED LATIN SMALL LETTER W
 * 	0028 LEFT PARENTHESIS
 * 	0077 LATIN SMALL LETTER W
 * 	0029 RIGHT PARENTHESIS
 * 24B3 PARENTHESIZED LATIN SMALL LETTER X
 * 	0028 LEFT PARENTHESIS
 * 	0078 LATIN SMALL LETTER X
 * 	0029 RIGHT PARENTHESIS
 * 24B4 PARENTHESIZED LATIN SMALL LETTER Y
 * 	0028 LEFT PARENTHESIS
 * 	0079 LATIN SMALL LETTER Y
 * 	0029 RIGHT PARENTHESIS
 * 24B5 PARENTHESIZED LATIN SMALL LETTER Z
 * 	0028 LEFT PARENTHESIS
 * 	007A LATIN SMALL LETTER Z
 * 	0029 RIGHT PARENTHESIS
 * 24B6 CIRCLED LATIN CAPITAL LETTER A
 * 	0041 LATIN CAPITAL LETTER A
 * 24B7 CIRCLED LATIN CAPITAL LETTER B
 * 	0042 LATIN CAPITAL LETTER B
 * 24B8 CIRCLED LATIN CAPITAL LETTER C
 * 	0043 LATIN CAPITAL LETTER C
 * 24B9 CIRCLED LATIN CAPITAL LETTER D
 * 	0044 LATIN CAPITAL LETTER D
 * 24BA CIRCLED LATIN CAPITAL LETTER E
 * 	0045 LATIN CAPITAL LETTER E
 * 24BB CIRCLED LATIN CAPITAL LETTER F
 * 	0046 LATIN CAPITAL LETTER F
 * 24BC CIRCLED LATIN CAPITAL LETTER G
 * 	0047 LATIN CAPITAL LETTER G
 * 24BD CIRCLED LATIN CAPITAL LETTER H
 * 	0048 LATIN CAPITAL LETTER H
 * 24BE CIRCLED LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * 24BF CIRCLED LATIN CAPITAL LETTER J
 * 	004A LATIN CAPITAL LETTER J
 * 24C0 CIRCLED LATIN CAPITAL LETTER K
 * 	004B LATIN CAPITAL LETTER K
 * 24C1 CIRCLED LATIN CAPITAL LETTER L
 * 	004C LATIN CAPITAL LETTER L
 * 24C2 CIRCLED LATIN CAPITAL LETTER M
 * 	004D LATIN CAPITAL LETTER M
 * 24C3 CIRCLED LATIN CAPITAL LETTER N
 * 	004E LATIN CAPITAL LETTER N
 * 24C4 CIRCLED LATIN CAPITAL LETTER O
 * 	004F LATIN CAPITAL LETTER O
 * 24C5 CIRCLED LATIN CAPITAL LETTER P
 * 	0050 LATIN CAPITAL LETTER P
 * 24C6 CIRCLED LATIN CAPITAL LETTER Q
 * 	0051 LATIN CAPITAL LETTER Q
 * 24C7 CIRCLED LATIN CAPITAL LETTER R
 * 	0052 LATIN CAPITAL LETTER R
 * 24C8 CIRCLED LATIN CAPITAL LETTER S
 * 	0053 LATIN CAPITAL LETTER S
 * 24C9 CIRCLED LATIN CAPITAL LETTER T
 * 	0054 LATIN CAPITAL LETTER T
 * 24CA CIRCLED LATIN CAPITAL LETTER U
 * 	0055 LATIN CAPITAL LETTER U
 * 24CB CIRCLED LATIN CAPITAL LETTER V
 * 	0056 LATIN CAPITAL LETTER V
 * 24CC CIRCLED LATIN CAPITAL LETTER W
 * 	0057 LATIN CAPITAL LETTER W
 * 24CD CIRCLED LATIN CAPITAL LETTER X
 * 	0058 LATIN CAPITAL LETTER X
 * 24CE CIRCLED LATIN CAPITAL LETTER Y
 * 	0059 LATIN CAPITAL LETTER Y
 * 24CF CIRCLED LATIN CAPITAL LETTER Z
 * 	005A LATIN CAPITAL LETTER Z
 * 24D0 CIRCLED LATIN SMALL LETTER A
 * 	0061 LATIN SMALL LETTER A
 * 24D1 CIRCLED LATIN SMALL LETTER B
 * 	0062 LATIN SMALL LETTER B
 * 24D2 CIRCLED LATIN SMALL LETTER C
 * 	0063 LATIN SMALL LETTER C
 * 24D3 CIRCLED LATIN SMALL LETTER D
 * 	0064 LATIN SMALL LETTER D
 * 24D4 CIRCLED LATIN SMALL LETTER E
 * 	0065 LATIN SMALL LETTER E
 * 24D5 CIRCLED LATIN SMALL LETTER F
 * 	0066 LATIN SMALL LETTER F
 * 24D6 CIRCLED LATIN SMALL LETTER G
 * 	0067 LATIN SMALL LETTER G
 * 24D7 CIRCLED LATIN SMALL LETTER H
 * 	0068 LATIN SMALL LETTER H
 * 24D8 CIRCLED LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * 24D9 CIRCLED LATIN SMALL LETTER J
 * 	006A LATIN SMALL LETTER J
 * 24DA CIRCLED LATIN SMALL LETTER K
 * 	006B LATIN SMALL LETTER K
 * 24DB CIRCLED LATIN SMALL LETTER L
 * 	006C LATIN SMALL LETTER L
 * 24DC CIRCLED LATIN SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * 24DD CIRCLED LATIN SMALL LETTER N
 * 	006E LATIN SMALL LETTER N
 * 24DE CIRCLED LATIN SMALL LETTER O
 * 	006F LATIN SMALL LETTER O
 * 24DF CIRCLED LATIN SMALL LETTER P
 * 	0070 LATIN SMALL LETTER P
 * 24E0 CIRCLED LATIN SMALL LETTER Q
 * 	0071 LATIN SMALL LETTER Q
 * 24E1 CIRCLED LATIN SMALL LETTER R
 * 	0072 LATIN SMALL LETTER R
 * 24E2 CIRCLED LATIN SMALL LETTER S
 * 	0073 LATIN SMALL LETTER S
 * 24E3 CIRCLED LATIN SMALL LETTER T
 * 	0074 LATIN SMALL LETTER T
 * 24E4 CIRCLED LATIN SMALL LETTER U
 * 	0075 LATIN SMALL LETTER U
 * 24E5 CIRCLED LATIN SMALL LETTER V
 * 	0076 LATIN SMALL LETTER V
 * 24E6 CIRCLED LATIN SMALL LETTER W
 * 	0077 LATIN SMALL LETTER W
 * 24E7 CIRCLED LATIN SMALL LETTER X
 * 	0078 LATIN SMALL LETTER X
 * 24E8 CIRCLED LATIN SMALL LETTER Y
 * 	0079 LATIN SMALL LETTER Y
 * 24E9 CIRCLED LATIN SMALL LETTER Z
 * 	007A LATIN SMALL LETTER Z
 * 24EA CIRCLED DIGIT ZERO
 * 	0030 DIGIT ZERO
 * 2A0C QUADRUPLE INTEGRAL OPERATOR
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 	222B INTEGRAL
 * 2A74 DOUBLE COLON EQUAL
 * 	003A COLON
 * 	003A COLON
 * 	003D EQUALS SIGN
 * 2A75 TWO CONSECUTIVE EQUALS SIGNS
 * 	003D EQUALS SIGN
 * 	003D EQUALS SIGN
 * 2A76 THREE CONSECUTIVE EQUALS SIGNS
 * 	003D EQUALS SIGN
 * 	003D EQUALS SIGN
 * 	003D EQUALS SIGN
 * 2ADC FORKING
 * 	2ADD NONFORKING
 * 2C7C LATIN SUBSCRIPT SMALL LETTER J
 * 	006A LATIN SMALL LETTER J
 * 2C7D MODIFIER LETTER CAPITAL V
 * 	0056 LATIN CAPITAL LETTER V
 * 2CEF COPTIC COMBINING NI ABOVE
 * 	0000 
 * 2CF0 COPTIC COMBINING SPIRITUS ASPER
 * 	0000 
 * 2CF1 COPTIC COMBINING SPIRITUS LENIS
 * 	0000 
 * 2D6F TIFINAGH MODIFIER LETTER LABIALIZATION MARK
 * 	2D61 TIFINAGH LETTER YAW
 * 2D7F TIFINAGH CONSONANT JOINER
 * 	0000 
 * 2DE0 COMBINING CYRILLIC LETTER BE
 * 	0000 
 * 2DE1 COMBINING CYRILLIC LETTER VE
 * 	0000 
 * 2DE2 COMBINING CYRILLIC LETTER GHE
 * 	0000 
 * 2DE3 COMBINING CYRILLIC LETTER DE
 * 	0000 
 * 2DE4 COMBINING CYRILLIC LETTER ZHE
 * 	0000 
 * 2DE5 COMBINING CYRILLIC LETTER ZE
 * 	0000 
 * 2DE6 COMBINING CYRILLIC LETTER KA
 * 	0000 
 * 2DE7 COMBINING CYRILLIC LETTER EL
 * 	0000 
 * 2DE8 COMBINING CYRILLIC LETTER EM
 * 	0000 
 * 2DE9 COMBINING CYRILLIC LETTER EN
 * 	0000 
 * 2DEA COMBINING CYRILLIC LETTER O
 * 	0000 
 * 2DEB COMBINING CYRILLIC LETTER PE
 * 	0000 
 * 2DEC COMBINING CYRILLIC LETTER ER
 * 	0000 
 * 2DED COMBINING CYRILLIC LETTER ES
 * 	0000 
 * 2DEE COMBINING CYRILLIC LETTER TE
 * 	0000 
 * 2DEF COMBINING CYRILLIC LETTER HA
 * 	0000 
 * 2DF0 COMBINING CYRILLIC LETTER TSE
 * 	0000 
 * 2DF1 COMBINING CYRILLIC LETTER CHE
 * 	0000 
 * 2DF2 COMBINING CYRILLIC LETTER SHA
 * 	0000 
 * 2DF3 COMBINING CYRILLIC LETTER SHCHA
 * 	0000 
 * 2DF4 COMBINING CYRILLIC LETTER FITA
 * 	0000 
 * 2DF5 COMBINING CYRILLIC LETTER ES-TE
 * 	0000 
 * 2DF6 COMBINING CYRILLIC LETTER A
 * 	0000 
 * 2DF7 COMBINING CYRILLIC LETTER IE
 * 	0000 
 * 2DF8 COMBINING CYRILLIC LETTER DJERV
 * 	0000 
 * 2DF9 COMBINING CYRILLIC LETTER MONOGRAPH UK
 * 	0000 
 * 2DFA COMBINING CYRILLIC LETTER YAT
 * 	0000 
 * 2DFB COMBINING CYRILLIC LETTER YU
 * 	0000 
 * 2DFC COMBINING CYRILLIC LETTER IOTIFIED A
 * 	0000 
 * 2DFD COMBINING CYRILLIC LETTER LITTLE YUS
 * 	0000 
 * 2DFE COMBINING CYRILLIC LETTER BIG YUS
 * 	0000 
 * 2DFF COMBINING CYRILLIC LETTER IOTIFIED BIG YUS
 * 	0000 
 * 2E9F CJK RADICAL MOTHER
 * 	6BCD ??
 * 2EF3 CJK RADICAL C-SIMPLIFIED TURTLE
 * 	9F9F ??
 * 2F00 KANGXI RADICAL ONE
 * 	4E00 
 * 2F01 KANGXI RADICAL LINE
 * 	4E28 ??
 * 2F02 KANGXI RADICAL DOT
 * 	4E36 ??
 * 2F03 KANGXI RADICAL SLASH
 * 	4E3F ??
 * 2F04 KANGXI RADICAL SECOND
 * 	4E59 ??
 * 2F05 KANGXI RADICAL HOOK
 * 	4E85 ??
 * 2F06 KANGXI RADICAL TWO
 * 	4E8C ??
 * 2F07 KANGXI RADICAL LID
 * 	4EA0 ??
 * 2F08 KANGXI RADICAL MAN
 * 	4EBA ??
 * 2F09 KANGXI RADICAL LEGS
 * 	513F ??
 * 2F0A KANGXI RADICAL ENTER
 * 	5165 ??
 * 2F0B KANGXI RADICAL EIGHT
 * 	516B ??
 * 2F0C KANGXI RADICAL DOWN BOX
 * 	5182 ??
 * 2F0D KANGXI RADICAL COVER
 * 	5196 ??
 * 2F0E KANGXI RADICAL ICE
 * 	51AB ??
 * 2F0F KANGXI RADICAL TABLE
 * 	51E0 ??
 * 2F10 KANGXI RADICAL OPEN BOX
 * 	51F5 ??
 * 2F11 KANGXI RADICAL KNIFE
 * 	5200 ??
 * 2F12 KANGXI RADICAL POWER
 * 	529B ??
 * 2F13 KANGXI RADICAL WRAP
 * 	52F9 ??
 * 2F14 KANGXI RADICAL SPOON
 * 	5315 ??
 * 2F15 KANGXI RADICAL RIGHT OPEN BOX
 * 	531A ??
 * 2F16 KANGXI RADICAL HIDING ENCLOSURE
 * 	5338 ??
 * 2F17 KANGXI RADICAL TEN
 * 	5341 ??
 * 2F18 KANGXI RADICAL DIVINATION
 * 	535C ??
 * 2F19 KANGXI RADICAL SEAL
 * 	5369 ??
 * 2F1A KANGXI RADICAL CLIFF
 * 	5382 ??
 * 2F1B KANGXI RADICAL PRIVATE
 * 	53B6 ??
 * 2F1C KANGXI RADICAL AGAIN
 * 	53C8 ??
 * 2F1D KANGXI RADICAL MOUTH
 * 	53E3 ??
 * 2F1E KANGXI RADICAL ENCLOSURE
 * 	56D7 ??
 * 2F1F KANGXI RADICAL EARTH
 * 	571F ??
 * 2F20 KANGXI RADICAL SCHOLAR
 * 	58EB ??
 * 2F21 KANGXI RADICAL GO
 * 	5902 ??
 * 2F22 KANGXI RADICAL GO SLOWLY
 * 	590A ??
 * 2F23 KANGXI RADICAL EVENING
 * 	5915 ??
 * 2F24 KANGXI RADICAL BIG
 * 	5927 ??
 * 2F25 KANGXI RADICAL WOMAN
 * 	5973 ??
 * 2F26 KANGXI RADICAL CHILD
 * 	5B50 ??
 * 2F27 KANGXI RADICAL ROOF
 * 	5B80 ??
 * 2F28 KANGXI RADICAL INCH
 * 	5BF8 ??
 * 2F29 KANGXI RADICAL SMALL
 * 	5C0F ??
 * 2F2A KANGXI RADICAL LAME
 * 	5C22 ??
 * 2F2B KANGXI RADICAL CORPSE
 * 	5C38 ??
 * 2F2C KANGXI RADICAL SPROUT
 * 	5C6E ??
 * 2F2D KANGXI RADICAL MOUNTAIN
 * 	5C71 ??
 * 2F2E KANGXI RADICAL RIVER
 * 	5DDB ??
 * 2F2F KANGXI RADICAL WORK
 * 	5DE5 ??
 * 2F30 KANGXI RADICAL ONESELF
 * 	5DF1 ??
 * 2F31 KANGXI RADICAL TURBAN
 * 	5DFE ??
 * 2F32 KANGXI RADICAL DRY
 * 	5E72 ??
 * 2F33 KANGXI RADICAL SHORT THREAD
 * 	5E7A ??
 * 2F34 KANGXI RADICAL DOTTED CLIFF
 * 	5E7F ??
 * 2F35 KANGXI RADICAL LONG STRIDE
 * 	5EF4 ??
 * 2F36 KANGXI RADICAL TWO HANDS
 * 	5EFE ??
 * 2F37 KANGXI RADICAL SHOOT
 * 	5F0B ??
 * 2F38 KANGXI RADICAL BOW
 * 	5F13 ??
 * 2F39 KANGXI RADICAL SNOUT
 * 	5F50 ??
 * 2F3A KANGXI RADICAL BRISTLE
 * 	5F61 ??
 * 2F3B KANGXI RADICAL STEP
 * 	5F73 ??
 * 2F3C KANGXI RADICAL HEART
 * 	5FC3 ??
 * 2F3D KANGXI RADICAL HALBERD
 * 	6208 ??
 * 2F3E KANGXI RADICAL DOOR
 * 	6236 ??
 * 2F3F KANGXI RADICAL HAND
 * 	624B ??
 * 2F40 KANGXI RADICAL BRANCH
 * 	652F ??
 * 2F41 KANGXI RADICAL RAP
 * 	6534 ??
 * 2F42 KANGXI RADICAL SCRIPT
 * 	6587 ??
 * 2F43 KANGXI RADICAL DIPPER
 * 	6597 ??
 * 2F44 KANGXI RADICAL AXE
 * 	65A4 ??
 * 2F45 KANGXI RADICAL SQUARE
 * 	65B9 ??
 * 2F46 KANGXI RADICAL NOT
 * 	65E0 ??
 * 2F47 KANGXI RADICAL SUN
 * 	65E5 ??
 * 2F48 KANGXI RADICAL SAY
 * 	66F0 ??
 * 2F49 KANGXI RADICAL MOON
 * 	6708 ??
 * 2F4A KANGXI RADICAL TREE
 * 	6728 ??
 * 2F4B KANGXI RADICAL LACK
 * 	6B20 ??
 * 2F4C KANGXI RADICAL STOP
 * 	6B62 ??
 * 2F4D KANGXI RADICAL DEATH
 * 	6B79 ??
 * 2F4E KANGXI RADICAL WEAPON
 * 	6BB3 ??
 * 2F4F KANGXI RADICAL DO NOT
 * 	6BCB ??
 * 2F50 KANGXI RADICAL COMPARE
 * 	6BD4 ??
 * 2F51 KANGXI RADICAL FUR
 * 	6BDB ??
 * 2F52 KANGXI RADICAL CLAN
 * 	6C0F ??
 * 2F53 KANGXI RADICAL STEAM
 * 	6C14 ??
 * 2F54 KANGXI RADICAL WATER
 * 	6C34 ??
 * 2F55 KANGXI RADICAL FIRE
 * 	706B ??
 * 2F56 KANGXI RADICAL CLAW
 * 	722A ??
 * 2F57 KANGXI RADICAL FATHER
 * 	7236 ??
 * 2F58 KANGXI RADICAL DOUBLE X
 * 	723B ??
 * 2F59 KANGXI RADICAL HALF TREE TRUNK
 * 	723F ??
 * 2F5A KANGXI RADICAL SLICE
 * 	7247 ??
 * 2F5B KANGXI RADICAL FANG
 * 	7259 ??
 * 2F5C KANGXI RADICAL COW
 * 	725B ??
 * 2F5D KANGXI RADICAL DOG
 * 	72AC ??
 * 2F5E KANGXI RADICAL PROFOUND
 * 	7384 ??
 * 2F5F KANGXI RADICAL JADE
 * 	7389 ??
 * 2F60 KANGXI RADICAL MELON
 * 	74DC ??
 * 2F61 KANGXI RADICAL TILE
 * 	74E6 ??
 * 2F62 KANGXI RADICAL SWEET
 * 	7518 ??
 * 2F63 KANGXI RADICAL LIFE
 * 	751F ??
 * 2F64 KANGXI RADICAL USE
 * 	7528 ??
 * 2F65 KANGXI RADICAL FIELD
 * 	7530 ??
 * 2F66 KANGXI RADICAL BOLT OF CLOTH
 * 	758B ??
 * 2F67 KANGXI RADICAL SICKNESS
 * 	7592 ??
 * 2F68 KANGXI RADICAL DOTTED TENT
 * 	7676 ??
 * 2F69 KANGXI RADICAL WHITE
 * 	767D ??
 * 2F6A KANGXI RADICAL SKIN
 * 	76AE ??
 * 2F6B KANGXI RADICAL DISH
 * 	76BF ??
 * 2F6C KANGXI RADICAL EYE
 * 	76EE ??
 * 2F6D KANGXI RADICAL SPEAR
 * 	77DB ??
 * 2F6E KANGXI RADICAL ARROW
 * 	77E2 ??
 * 2F6F KANGXI RADICAL STONE
 * 	77F3 ??
 * 2F70 KANGXI RADICAL SPIRIT
 * 	793A ??
 * 2F71 KANGXI RADICAL TRACK
 * 	79B8 ??
 * 2F72 KANGXI RADICAL GRAIN
 * 	79BE ??
 * 2F73 KANGXI RADICAL CAVE
 * 	7A74 ??
 * 2F74 KANGXI RADICAL STAND
 * 	7ACB ??
 * 2F75 KANGXI RADICAL BAMBOO
 * 	7AF9 ??
 * 2F76 KANGXI RADICAL RICE
 * 	7C73 ??
 * 2F77 KANGXI RADICAL SILK
 * 	7CF8 ??
 * 2F78 KANGXI RADICAL JAR
 * 	7F36 ??
 * 2F79 KANGXI RADICAL NET
 * 	7F51 ??
 * 2F7A KANGXI RADICAL SHEEP
 * 	7F8A ??
 * 2F7B KANGXI RADICAL FEATHER
 * 	7FBD ??
 * 2F7C KANGXI RADICAL OLD
 * 	8001 ??
 * 2F7D KANGXI RADICAL AND
 * 	800C ??
 * 2F7E KANGXI RADICAL PLOW
 * 	8012 ??
 * 2F7F KANGXI RADICAL EAR
 * 	8033 ??
 * 2F80 KANGXI RADICAL BRUSH
 * 	807F ??
 * 2F81 KANGXI RADICAL MEAT
 * 	8089 ??
 * 2F82 KANGXI RADICAL MINISTER
 * 	81E3 ??
 * 2F83 KANGXI RADICAL SELF
 * 	81EA ??
 * 2F84 KANGXI RADICAL ARRIVE
 * 	81F3 ??
 * 2F85 KANGXI RADICAL MORTAR
 * 	81FC ??
 * 2F86 KANGXI RADICAL TONGUE
 * 	820C ??
 * 2F87 KANGXI RADICAL OPPOSE
 * 	821B ??
 * 2F88 KANGXI RADICAL BOAT
 * 	821F ??
 * 2F89 KANGXI RADICAL STOPPING
 * 	826E ??
 * 2F8A KANGXI RADICAL COLOR
 * 	8272 ??
 * 2F8B KANGXI RADICAL GRASS
 * 	8278 ??
 * 2F8C KANGXI RADICAL TIGER
 * 	864D ??
 * 2F8D KANGXI RADICAL INSECT
 * 	866B ??
 * 2F8E KANGXI RADICAL BLOOD
 * 	8840 ??
 * 2F8F KANGXI RADICAL WALK ENCLOSURE
 * 	884C ??
 * 2F90 KANGXI RADICAL CLOTHES
 * 	8863 ??
 * 2F91 KANGXI RADICAL WEST
 * 	897E ??
 * 2F92 KANGXI RADICAL SEE
 * 	898B ??
 * 2F93 KANGXI RADICAL HORN
 * 	89D2 ??
 * 2F94 KANGXI RADICAL SPEECH
 * 	8A00 ??
 * 2F95 KANGXI RADICAL VALLEY
 * 	8C37 ??
 * 2F96 KANGXI RADICAL BEAN
 * 	8C46 ??
 * 2F97 KANGXI RADICAL PIG
 * 	8C55 ??
 * 2F98 KANGXI RADICAL BADGER
 * 	8C78 ??
 * 2F99 KANGXI RADICAL SHELL
 * 	8C9D ??
 * 2F9A KANGXI RADICAL RED
 * 	8D64 ??
 * 2F9B KANGXI RADICAL RUN
 * 	8D70 ??
 * 2F9C KANGXI RADICAL FOOT
 * 	8DB3 ??
 * 2F9D KANGXI RADICAL BODY
 * 	8EAB ??
 * 2F9E KANGXI RADICAL CART
 * 	8ECA ??
 * 2F9F KANGXI RADICAL BITTER
 * 	8F9B ??
 * 2FA0 KANGXI RADICAL MORNING
 * 	8FB0 ??
 * 2FA1 KANGXI RADICAL WALK
 * 	8FB5 ??
 * 2FA2 KANGXI RADICAL CITY
 * 	9091 ??
 * 2FA3 KANGXI RADICAL WINE
 * 	9149 ??
 * 2FA4 KANGXI RADICAL DISTINGUISH
 * 	91C6 ??
 * 2FA5 KANGXI RADICAL VILLAGE
 * 	91CC ??
 * 2FA6 KANGXI RADICAL GOLD
 * 	91D1 ??
 * 2FA7 KANGXI RADICAL LONG
 * 	9577 ??
 * 2FA8 KANGXI RADICAL GATE
 * 	9580 ??
 * 2FA9 KANGXI RADICAL MOUND
 * 	961C ??
 * 2FAA KANGXI RADICAL SLAVE
 * 	96B6 ??
 * 2FAB KANGXI RADICAL SHORT TAILED BIRD
 * 	96B9 ??
 * 2FAC KANGXI RADICAL RAIN
 * 	96E8 ??
 * 2FAD KANGXI RADICAL BLUE
 * 	9751 ??
 * 2FAE KANGXI RADICAL WRONG
 * 	975E ??
 * 2FAF KANGXI RADICAL FACE
 * 	9762 ??
 * 2FB0 KANGXI RADICAL LEATHER
 * 	9769 ??
 * 2FB1 KANGXI RADICAL TANNED LEATHER
 * 	97CB ??
 * 2FB2 KANGXI RADICAL LEEK
 * 	97ED ??
 * 2FB3 KANGXI RADICAL SOUND
 * 	97F3 ??
 * 2FB4 KANGXI RADICAL LEAF
 * 	9801 ??
 * 2FB5 KANGXI RADICAL WIND
 * 	98A8 ??
 * 2FB6 KANGXI RADICAL FLY
 * 	98DB ??
 * 2FB7 KANGXI RADICAL EAT
 * 	98DF ??
 * 2FB8 KANGXI RADICAL HEAD
 * 	9996 ??
 * 2FB9 KANGXI RADICAL FRAGRANT
 * 	9999 ??
 * 2FBA KANGXI RADICAL HORSE
 * 	99AC ??
 * 2FBB KANGXI RADICAL BONE
 * 	9AA8 ??
 * 2FBC KANGXI RADICAL TALL
 * 	9AD8 ??
 * 2FBD KANGXI RADICAL HAIR
 * 	9ADF ??
 * 2FBE KANGXI RADICAL FIGHT
 * 	9B25 ??
 * 2FBF KANGXI RADICAL SACRIFICIAL WINE
 * 	9B2F ??
 * 2FC0 KANGXI RADICAL CAULDRON
 * 	9B32 ??
 * 2FC1 KANGXI RADICAL GHOST
 * 	9B3C ??
 * 2FC2 KANGXI RADICAL FISH
 * 	9B5A ??
 * 2FC3 KANGXI RADICAL BIRD
 * 	9CE5 ??
 * 2FC4 KANGXI RADICAL SALT
 * 	9E75 ??
 * 2FC5 KANGXI RADICAL DEER
 * 	9E7F ??
 * 2FC6 KANGXI RADICAL WHEAT
 * 	9EA5 ??
 * 2FC7 KANGXI RADICAL HEMP
 * 	9EBB ??
 * 2FC8 KANGXI RADICAL YELLOW
 * 	9EC3 ??
 * 2FC9 KANGXI RADICAL MILLET
 * 	9ECD ??
 * 2FCA KANGXI RADICAL BLACK
 * 	9ED1 ??
 * 2FCB KANGXI RADICAL EMBROIDERY
 * 	9EF9 ??
 * 2FCC KANGXI RADICAL FROG
 * 	9EFD ??
 * 2FCD KANGXI RADICAL TRIPOD
 * 	9F0E ??
 * 2FCE KANGXI RADICAL DRUM
 * 	9F13 ??
 * 2FCF KANGXI RADICAL RAT
 * 	9F20 ??
 * 2FD0 KANGXI RADICAL NOSE
 * 	9F3B ??
 * 2FD1 KANGXI RADICAL EVEN
 * 	9F4A ??
 * 2FD2 KANGXI RADICAL TOOTH
 * 	9F52 ??
 * 2FD3 KANGXI RADICAL DRAGON
 * 	9F8D ??
 * 2FD4 KANGXI RADICAL TURTLE
 * 	9F9C ??
 * 2FD5 KANGXI RADICAL FLUTE
 * 	9FA0 ??
 * 3000 IDEOGRAPHIC SPACE
 * 	0020 SPACE
 * 302A IDEOGRAPHIC LEVEL TONE MARK
 * 	0000 
 * 302B IDEOGRAPHIC RISING TONE MARK
 * 	0000 
 * 302C IDEOGRAPHIC DEPARTING TONE MARK
 * 	0000 
 * 302D IDEOGRAPHIC ENTERING TONE MARK
 * 	0000 
 * 302E HANGUL SINGLE DOT TONE MARK
 * 	0000 
 * 302F HANGUL DOUBLE DOT TONE MARK
 * 	0000 
 * 3036 CIRCLED POSTAL MARK
 * 	3012 POSTAL MARK
 * 3038 HANGZHOU NUMERAL TEN
 * 	5341 ??
 * 3039 HANGZHOU NUMERAL TWENTY
 * 	5344 ??
 * 303A HANGZHOU NUMERAL THIRTY
 * 	5345 ??
 * 3131 HANGUL LETTER KIYEOK
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 3132 HANGUL LETTER SSANGKIYEOK
 * 	1101 HANGUL CHOSEONG SSANGKIYEOK
 * 3133 HANGUL LETTER KIYEOK-SIOS
 * 	11AA HANGUL JONGSEONG KIYEOK-SIOS
 * 3134 HANGUL LETTER NIEUN
 * 	1102 HANGUL CHOSEONG NIEUN
 * 3135 HANGUL LETTER NIEUN-CIEUC
 * 	11AC HANGUL JONGSEONG NIEUN-CIEUC
 * 3136 HANGUL LETTER NIEUN-HIEUH
 * 	11AD HANGUL JONGSEONG NIEUN-HIEUH
 * 3137 HANGUL LETTER TIKEUT
 * 	1103 HANGUL CHOSEONG TIKEUT
 * 3138 HANGUL LETTER SSANGTIKEUT
 * 	1104 HANGUL CHOSEONG SSANGTIKEUT
 * 3139 HANGUL LETTER RIEUL
 * 	1105 HANGUL CHOSEONG RIEUL
 * 313A HANGUL LETTER RIEUL-KIYEOK
 * 	11B0 HANGUL JONGSEONG RIEUL-KIYEOK
 * 313B HANGUL LETTER RIEUL-MIEUM
 * 	11B1 HANGUL JONGSEONG RIEUL-MIEUM
 * 313C HANGUL LETTER RIEUL-PIEUP
 * 	11B2 HANGUL JONGSEONG RIEUL-PIEUP
 * 313D HANGUL LETTER RIEUL-SIOS
 * 	11B3 HANGUL JONGSEONG RIEUL-SIOS
 * 313E HANGUL LETTER RIEUL-THIEUTH
 * 	11B4 HANGUL JONGSEONG RIEUL-THIEUTH
 * 313F HANGUL LETTER RIEUL-PHIEUPH
 * 	11B5 HANGUL JONGSEONG RIEUL-PHIEUPH
 * 3140 HANGUL LETTER RIEUL-HIEUH
 * 	111A HANGUL CHOSEONG RIEUL-HIEUH
 * 3141 HANGUL LETTER MIEUM
 * 	1106 HANGUL CHOSEONG MIEUM
 * 3142 HANGUL LETTER PIEUP
 * 	1107 HANGUL CHOSEONG PIEUP
 * 3143 HANGUL LETTER SSANGPIEUP
 * 	1108 HANGUL CHOSEONG SSANGPIEUP
 * 3144 HANGUL LETTER PIEUP-SIOS
 * 	1121 HANGUL CHOSEONG PIEUP-SIOS
 * 3145 HANGUL LETTER SIOS
 * 	1109 HANGUL CHOSEONG SIOS
 * 3146 HANGUL LETTER SSANGSIOS
 * 	110A HANGUL CHOSEONG SSANGSIOS
 * 3147 HANGUL LETTER IEUNG
 * 	110B HANGUL CHOSEONG IEUNG
 * 3148 HANGUL LETTER CIEUC
 * 	110C HANGUL CHOSEONG CIEUC
 * 3149 HANGUL LETTER SSANGCIEUC
 * 	110D HANGUL CHOSEONG SSANGCIEUC
 * 314A HANGUL LETTER CHIEUCH
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 314B HANGUL LETTER KHIEUKH
 * 	110F HANGUL CHOSEONG KHIEUKH
 * 314C HANGUL LETTER THIEUTH
 * 	1110 HANGUL CHOSEONG THIEUTH
 * 314D HANGUL LETTER PHIEUPH
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * 314E HANGUL LETTER HIEUH
 * 	1112 HANGUL CHOSEONG HIEUH
 * 314F HANGUL LETTER A
 * 	1161 HANGUL JUNGSEONG A
 * 3150 HANGUL LETTER AE
 * 	1162 HANGUL JUNGSEONG AE
 * 3151 HANGUL LETTER YA
 * 	1163 HANGUL JUNGSEONG YA
 * 3152 HANGUL LETTER YAE
 * 	1164 HANGUL JUNGSEONG YAE
 * 3153 HANGUL LETTER EO
 * 	1165 HANGUL JUNGSEONG EO
 * 3154 HANGUL LETTER E
 * 	1166 HANGUL JUNGSEONG E
 * 3155 HANGUL LETTER YEO
 * 	1167 HANGUL JUNGSEONG YEO
 * 3156 HANGUL LETTER YE
 * 	1168 HANGUL JUNGSEONG YE
 * 3157 HANGUL LETTER O
 * 	1169 HANGUL JUNGSEONG O
 * 3158 HANGUL LETTER WA
 * 	116A HANGUL JUNGSEONG WA
 * 3159 HANGUL LETTER WAE
 * 	116B HANGUL JUNGSEONG WAE
 * 315A HANGUL LETTER OE
 * 	116C HANGUL JUNGSEONG OE
 * 315B HANGUL LETTER YO
 * 	116D HANGUL JUNGSEONG YO
 * 315C HANGUL LETTER U
 * 	116E HANGUL JUNGSEONG U
 * 315D HANGUL LETTER WEO
 * 	116F HANGUL JUNGSEONG WEO
 * 315E HANGUL LETTER WE
 * 	1170 HANGUL JUNGSEONG WE
 * 315F HANGUL LETTER WI
 * 	1171 HANGUL JUNGSEONG WI
 * 3160 HANGUL LETTER YU
 * 	1172 HANGUL JUNGSEONG YU
 * 3161 HANGUL LETTER EU
 * 	1173 HANGUL JUNGSEONG EU
 * 3162 HANGUL LETTER YI
 * 	1174 HANGUL JUNGSEONG YI
 * 3163 HANGUL LETTER I
 * 	1175 HANGUL JUNGSEONG I
 * 3164 HANGUL FILLER
 * 	1160 HANGUL JUNGSEONG FILLER
 * 3165 HANGUL LETTER SSANGNIEUN
 * 	1114 HANGUL CHOSEONG SSANGNIEUN
 * 3166 HANGUL LETTER NIEUN-TIKEUT
 * 	1115 HANGUL CHOSEONG NIEUN-TIKEUT
 * 3167 HANGUL LETTER NIEUN-SIOS
 * 	11C7 HANGUL JONGSEONG NIEUN-SIOS
 * 3168 HANGUL LETTER NIEUN-PANSIOS
 * 	11C8 HANGUL JONGSEONG NIEUN-PANSIOS
 * 3169 HANGUL LETTER RIEUL-KIYEOK-SIOS
 * 	11CC HANGUL JONGSEONG RIEUL-KIYEOK-SIOS
 * 316A HANGUL LETTER RIEUL-TIKEUT
 * 	11CE HANGUL JONGSEONG RIEUL-TIKEUT
 * 316B HANGUL LETTER RIEUL-PIEUP-SIOS
 * 	11D3 HANGUL JONGSEONG RIEUL-PIEUP-SIOS
 * 316C HANGUL LETTER RIEUL-PANSIOS
 * 	11D7 HANGUL JONGSEONG RIEUL-PANSIOS
 * 316D HANGUL LETTER RIEUL-YEORINHIEUH
 * 	11D9 HANGUL JONGSEONG RIEUL-YEORINHIEUH
 * 316E HANGUL LETTER MIEUM-PIEUP
 * 	111C HANGUL CHOSEONG MIEUM-PIEUP
 * 316F HANGUL LETTER MIEUM-SIOS
 * 	11DD HANGUL JONGSEONG MIEUM-SIOS
 * 3170 HANGUL LETTER MIEUM-PANSIOS
 * 	11DF HANGUL JONGSEONG MIEUM-PANSIOS
 * 3171 HANGUL LETTER KAPYEOUNMIEUM
 * 	111D HANGUL CHOSEONG KAPYEOUNMIEUM
 * 3172 HANGUL LETTER PIEUP-KIYEOK
 * 	111E HANGUL CHOSEONG PIEUP-KIYEOK
 * 3173 HANGUL LETTER PIEUP-TIKEUT
 * 	1120 HANGUL CHOSEONG PIEUP-TIKEUT
 * 3174 HANGUL LETTER PIEUP-SIOS-KIYEOK
 * 	1122 HANGUL CHOSEONG PIEUP-SIOS-KIYEOK
 * 3175 HANGUL LETTER PIEUP-SIOS-TIKEUT
 * 	1123 HANGUL CHOSEONG PIEUP-SIOS-TIKEUT
 * 3176 HANGUL LETTER PIEUP-CIEUC
 * 	1127 HANGUL CHOSEONG PIEUP-CIEUC
 * 3177 HANGUL LETTER PIEUP-THIEUTH
 * 	1129 HANGUL CHOSEONG PIEUP-THIEUTH
 * 3178 HANGUL LETTER KAPYEOUNPIEUP
 * 	112B HANGUL CHOSEONG KAPYEOUNPIEUP
 * 3179 HANGUL LETTER KAPYEOUNSSANGPIEUP
 * 	112C HANGUL CHOSEONG KAPYEOUNSSANGPIEUP
 * 317A HANGUL LETTER SIOS-KIYEOK
 * 	112D HANGUL CHOSEONG SIOS-KIYEOK
 * 317B HANGUL LETTER SIOS-NIEUN
 * 	112E HANGUL CHOSEONG SIOS-NIEUN
 * 317C HANGUL LETTER SIOS-TIKEUT
 * 	112F HANGUL CHOSEONG SIOS-TIKEUT
 * 317D HANGUL LETTER SIOS-PIEUP
 * 	1132 HANGUL CHOSEONG SIOS-PIEUP
 * 317E HANGUL LETTER SIOS-CIEUC
 * 	1136 HANGUL CHOSEONG SIOS-CIEUC
 * 317F HANGUL LETTER PANSIOS
 * 	1140 HANGUL CHOSEONG PANSIOS
 * 3180 HANGUL LETTER SSANGIEUNG
 * 	1147 HANGUL CHOSEONG SSANGIEUNG
 * 3181 HANGUL LETTER YESIEUNG
 * 	114C HANGUL CHOSEONG YESIEUNG
 * 3182 HANGUL LETTER YESIEUNG-SIOS
 * 	11F1 HANGUL JONGSEONG YESIEUNG-SIOS
 * 3183 HANGUL LETTER YESIEUNG-PANSIOS
 * 	11F2 HANGUL JONGSEONG YESIEUNG-PANSIOS
 * 3184 HANGUL LETTER KAPYEOUNPHIEUPH
 * 	1157 HANGUL CHOSEONG KAPYEOUNPHIEUPH
 * 3185 HANGUL LETTER SSANGHIEUH
 * 	1158 HANGUL CHOSEONG SSANGHIEUH
 * 3186 HANGUL LETTER YEORINHIEUH
 * 	1159 HANGUL CHOSEONG YEORINHIEUH
 * 3187 HANGUL LETTER YO-YA
 * 	1184 HANGUL JUNGSEONG YO-YA
 * 3188 HANGUL LETTER YO-YAE
 * 	1185 HANGUL JUNGSEONG YO-YAE
 * 3189 HANGUL LETTER YO-I
 * 	1188 HANGUL JUNGSEONG YO-I
 * 318A HANGUL LETTER YU-YEO
 * 	1191 HANGUL JUNGSEONG YU-YEO
 * 318B HANGUL LETTER YU-YE
 * 	1192 HANGUL JUNGSEONG YU-YE
 * 318C HANGUL LETTER YU-I
 * 	1194 HANGUL JUNGSEONG YU-I
 * 318D HANGUL LETTER ARAEA
 * 	119E HANGUL JUNGSEONG ARAEA
 * 318E HANGUL LETTER ARAEAE
 * 	11A1 HANGUL JUNGSEONG ARAEA-I
 * 3192 IDEOGRAPHIC ANNOTATION ONE MARK
 * 	4E00 
 * 3193 IDEOGRAPHIC ANNOTATION TWO MARK
 * 	4E8C ??
 * 3194 IDEOGRAPHIC ANNOTATION THREE MARK
 * 	4E09 ??
 * 3195 IDEOGRAPHIC ANNOTATION FOUR MARK
 * 	56DB ??
 * 3196 IDEOGRAPHIC ANNOTATION TOP MARK
 * 	4E0A ??
 * 3197 IDEOGRAPHIC ANNOTATION MIDDLE MARK
 * 	4E2D ??
 * 3198 IDEOGRAPHIC ANNOTATION BOTTOM MARK
 * 	4E0B ??
 * 3199 IDEOGRAPHIC ANNOTATION FIRST MARK
 * 	7532 ??
 * 319A IDEOGRAPHIC ANNOTATION SECOND MARK
 * 	4E59 ??
 * 319B IDEOGRAPHIC ANNOTATION THIRD MARK
 * 	4E19 ??
 * 319C IDEOGRAPHIC ANNOTATION FOURTH MARK
 * 	4E01 ??
 * 319D IDEOGRAPHIC ANNOTATION HEAVEN MARK
 * 	5929 ??
 * 319E IDEOGRAPHIC ANNOTATION EARTH MARK
 * 	5730 ??
 * 319F IDEOGRAPHIC ANNOTATION MAN MARK
 * 	4EBA ??
 * 3200 PARENTHESIZED HANGUL KIYEOK
 * 	0028 LEFT PARENTHESIS
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 	0029 RIGHT PARENTHESIS
 * 3201 PARENTHESIZED HANGUL NIEUN
 * 	0028 LEFT PARENTHESIS
 * 	1102 HANGUL CHOSEONG NIEUN
 * 	0029 RIGHT PARENTHESIS
 * 3202 PARENTHESIZED HANGUL TIKEUT
 * 	0028 LEFT PARENTHESIS
 * 	1103 HANGUL CHOSEONG TIKEUT
 * 	0029 RIGHT PARENTHESIS
 * 3203 PARENTHESIZED HANGUL RIEUL
 * 	0028 LEFT PARENTHESIS
 * 	1105 HANGUL CHOSEONG RIEUL
 * 	0029 RIGHT PARENTHESIS
 * 3204 PARENTHESIZED HANGUL MIEUM
 * 	0028 LEFT PARENTHESIS
 * 	1106 HANGUL CHOSEONG MIEUM
 * 	0029 RIGHT PARENTHESIS
 * 3205 PARENTHESIZED HANGUL PIEUP
 * 	0028 LEFT PARENTHESIS
 * 	1107 HANGUL CHOSEONG PIEUP
 * 	0029 RIGHT PARENTHESIS
 * 3206 PARENTHESIZED HANGUL SIOS
 * 	0028 LEFT PARENTHESIS
 * 	1109 HANGUL CHOSEONG SIOS
 * 	0029 RIGHT PARENTHESIS
 * 3207 PARENTHESIZED HANGUL IEUNG
 * 	0028 LEFT PARENTHESIS
 * 	110B HANGUL CHOSEONG IEUNG
 * 	0029 RIGHT PARENTHESIS
 * 3208 PARENTHESIZED HANGUL CIEUC
 * 	0028 LEFT PARENTHESIS
 * 	110C HANGUL CHOSEONG CIEUC
 * 	0029 RIGHT PARENTHESIS
 * 3209 PARENTHESIZED HANGUL CHIEUCH
 * 	0028 LEFT PARENTHESIS
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 	0029 RIGHT PARENTHESIS
 * 320A PARENTHESIZED HANGUL KHIEUKH
 * 	0028 LEFT PARENTHESIS
 * 	110F HANGUL CHOSEONG KHIEUKH
 * 	0029 RIGHT PARENTHESIS
 * 320B PARENTHESIZED HANGUL THIEUTH
 * 	0028 LEFT PARENTHESIS
 * 	1110 HANGUL CHOSEONG THIEUTH
 * 	0029 RIGHT PARENTHESIS
 * 320C PARENTHESIZED HANGUL PHIEUPH
 * 	0028 LEFT PARENTHESIS
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * 	0029 RIGHT PARENTHESIS
 * 320D PARENTHESIZED HANGUL HIEUH
 * 	0028 LEFT PARENTHESIS
 * 	1112 HANGUL CHOSEONG HIEUH
 * 	0029 RIGHT PARENTHESIS
 * 320E PARENTHESIZED HANGUL KIYEOK A
 * 	0028 LEFT PARENTHESIS
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 320F PARENTHESIZED HANGUL NIEUN A
 * 	0028 LEFT PARENTHESIS
 * 	1102 HANGUL CHOSEONG NIEUN
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3210 PARENTHESIZED HANGUL TIKEUT A
 * 	0028 LEFT PARENTHESIS
 * 	1103 HANGUL CHOSEONG TIKEUT
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3211 PARENTHESIZED HANGUL RIEUL A
 * 	0028 LEFT PARENTHESIS
 * 	1105 HANGUL CHOSEONG RIEUL
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3212 PARENTHESIZED HANGUL MIEUM A
 * 	0028 LEFT PARENTHESIS
 * 	1106 HANGUL CHOSEONG MIEUM
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3213 PARENTHESIZED HANGUL PIEUP A
 * 	0028 LEFT PARENTHESIS
 * 	1107 HANGUL CHOSEONG PIEUP
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3214 PARENTHESIZED HANGUL SIOS A
 * 	0028 LEFT PARENTHESIS
 * 	1109 HANGUL CHOSEONG SIOS
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3215 PARENTHESIZED HANGUL IEUNG A
 * 	0028 LEFT PARENTHESIS
 * 	110B HANGUL CHOSEONG IEUNG
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3216 PARENTHESIZED HANGUL CIEUC A
 * 	0028 LEFT PARENTHESIS
 * 	110C HANGUL CHOSEONG CIEUC
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3217 PARENTHESIZED HANGUL CHIEUCH A
 * 	0028 LEFT PARENTHESIS
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3218 PARENTHESIZED HANGUL KHIEUKH A
 * 	0028 LEFT PARENTHESIS
 * 	110F HANGUL CHOSEONG KHIEUKH
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 3219 PARENTHESIZED HANGUL THIEUTH A
 * 	0028 LEFT PARENTHESIS
 * 	1110 HANGUL CHOSEONG THIEUTH
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 321A PARENTHESIZED HANGUL PHIEUPH A
 * 	0028 LEFT PARENTHESIS
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 321B PARENTHESIZED HANGUL HIEUH A
 * 	0028 LEFT PARENTHESIS
 * 	1112 HANGUL CHOSEONG HIEUH
 * 	1161 HANGUL JUNGSEONG A
 * 	0029 RIGHT PARENTHESIS
 * 321C PARENTHESIZED HANGUL CIEUC U
 * 	0028 LEFT PARENTHESIS
 * 	110C HANGUL CHOSEONG CIEUC
 * 	116E HANGUL JUNGSEONG U
 * 	0029 RIGHT PARENTHESIS
 * 321D PARENTHESIZED KOREAN CHARACTER OJEON
 * 	0028 LEFT PARENTHESIS
 * 	110B HANGUL CHOSEONG IEUNG
 * 	1169 HANGUL JUNGSEONG O
 * 	110C HANGUL CHOSEONG CIEUC
 * 	1165 HANGUL JUNGSEONG EO
 * 	11AB HANGUL JONGSEONG NIEUN
 * 	0029 RIGHT PARENTHESIS
 * 321E PARENTHESIZED KOREAN CHARACTER O HU
 * 	0028 LEFT PARENTHESIS
 * 	110B HANGUL CHOSEONG IEUNG
 * 	1169 HANGUL JUNGSEONG O
 * 	1112 HANGUL CHOSEONG HIEUH
 * 	116E HANGUL JUNGSEONG U
 * 	0029 RIGHT PARENTHESIS
 * 3220 PARENTHESIZED IDEOGRAPH ONE
 * 	0028 LEFT PARENTHESIS
 * 	4E00 
 * 	0029 RIGHT PARENTHESIS
 * 3221 PARENTHESIZED IDEOGRAPH TWO
 * 	0028 LEFT PARENTHESIS
 * 	4E8C ??
 * 	0029 RIGHT PARENTHESIS
 * 3222 PARENTHESIZED IDEOGRAPH THREE
 * 	0028 LEFT PARENTHESIS
 * 	4E09 ??
 * 	0029 RIGHT PARENTHESIS
 * 3223 PARENTHESIZED IDEOGRAPH FOUR
 * 	0028 LEFT PARENTHESIS
 * 	56DB ??
 * 	0029 RIGHT PARENTHESIS
 * 3224 PARENTHESIZED IDEOGRAPH FIVE
 * 	0028 LEFT PARENTHESIS
 * 	4E94 ??
 * 	0029 RIGHT PARENTHESIS
 * 3225 PARENTHESIZED IDEOGRAPH SIX
 * 	0028 LEFT PARENTHESIS
 * 	516D ??
 * 	0029 RIGHT PARENTHESIS
 * 3226 PARENTHESIZED IDEOGRAPH SEVEN
 * 	0028 LEFT PARENTHESIS
 * 	4E03 ??
 * 	0029 RIGHT PARENTHESIS
 * 3227 PARENTHESIZED IDEOGRAPH EIGHT
 * 	0028 LEFT PARENTHESIS
 * 	516B ??
 * 	0029 RIGHT PARENTHESIS
 * 3228 PARENTHESIZED IDEOGRAPH NINE
 * 	0028 LEFT PARENTHESIS
 * 	4E5D ??
 * 	0029 RIGHT PARENTHESIS
 * 3229 PARENTHESIZED IDEOGRAPH TEN
 * 	0028 LEFT PARENTHESIS
 * 	5341 ??
 * 	0029 RIGHT PARENTHESIS
 * 322A PARENTHESIZED IDEOGRAPH MOON
 * 	0028 LEFT PARENTHESIS
 * 	6708 ??
 * 	0029 RIGHT PARENTHESIS
 * 322B PARENTHESIZED IDEOGRAPH FIRE
 * 	0028 LEFT PARENTHESIS
 * 	706B ??
 * 	0029 RIGHT PARENTHESIS
 * 322C PARENTHESIZED IDEOGRAPH WATER
 * 	0028 LEFT PARENTHESIS
 * 	6C34 ??
 * 	0029 RIGHT PARENTHESIS
 * 322D PARENTHESIZED IDEOGRAPH WOOD
 * 	0028 LEFT PARENTHESIS
 * 	6728 ??
 * 	0029 RIGHT PARENTHESIS
 * 322E PARENTHESIZED IDEOGRAPH METAL
 * 	0028 LEFT PARENTHESIS
 * 	91D1 ??
 * 	0029 RIGHT PARENTHESIS
 * 322F PARENTHESIZED IDEOGRAPH EARTH
 * 	0028 LEFT PARENTHESIS
 * 	571F ??
 * 	0029 RIGHT PARENTHESIS
 * 3230 PARENTHESIZED IDEOGRAPH SUN
 * 	0028 LEFT PARENTHESIS
 * 	65E5 ??
 * 	0029 RIGHT PARENTHESIS
 * 3231 PARENTHESIZED IDEOGRAPH STOCK
 * 	0028 LEFT PARENTHESIS
 * 	682A ??
 * 	0029 RIGHT PARENTHESIS
 * 3232 PARENTHESIZED IDEOGRAPH HAVE
 * 	0028 LEFT PARENTHESIS
 * 	6709 ??
 * 	0029 RIGHT PARENTHESIS
 * 3233 PARENTHESIZED IDEOGRAPH SOCIETY
 * 	0028 LEFT PARENTHESIS
 * 	793E ??
 * 	0029 RIGHT PARENTHESIS
 * 3234 PARENTHESIZED IDEOGRAPH NAME
 * 	0028 LEFT PARENTHESIS
 * 	540D ??
 * 	0029 RIGHT PARENTHESIS
 * 3235 PARENTHESIZED IDEOGRAPH SPECIAL
 * 	0028 LEFT PARENTHESIS
 * 	7279 ??
 * 	0029 RIGHT PARENTHESIS
 * 3236 PARENTHESIZED IDEOGRAPH FINANCIAL
 * 	0028 LEFT PARENTHESIS
 * 	8CA1 ??
 * 	0029 RIGHT PARENTHESIS
 * 3237 PARENTHESIZED IDEOGRAPH CONGRATULATION
 * 	0028 LEFT PARENTHESIS
 * 	795D ??
 * 	0029 RIGHT PARENTHESIS
 * 3238 PARENTHESIZED IDEOGRAPH LABOR
 * 	0028 LEFT PARENTHESIS
 * 	52B4 ??
 * 	0029 RIGHT PARENTHESIS
 * 3239 PARENTHESIZED IDEOGRAPH REPRESENT
 * 	0028 LEFT PARENTHESIS
 * 	4EE3 ??
 * 	0029 RIGHT PARENTHESIS
 * 323A PARENTHESIZED IDEOGRAPH CALL
 * 	0028 LEFT PARENTHESIS
 * 	547C ??
 * 	0029 RIGHT PARENTHESIS
 * 323B PARENTHESIZED IDEOGRAPH STUDY
 * 	0028 LEFT PARENTHESIS
 * 	5B66 ??
 * 	0029 RIGHT PARENTHESIS
 * 323C PARENTHESIZED IDEOGRAPH SUPERVISE
 * 	0028 LEFT PARENTHESIS
 * 	76E3 ??
 * 	0029 RIGHT PARENTHESIS
 * 323D PARENTHESIZED IDEOGRAPH ENTERPRISE
 * 	0028 LEFT PARENTHESIS
 * 	4F01 ??
 * 	0029 RIGHT PARENTHESIS
 * 323E PARENTHESIZED IDEOGRAPH RESOURCE
 * 	0028 LEFT PARENTHESIS
 * 	8CC7 ??
 * 	0029 RIGHT PARENTHESIS
 * 323F PARENTHESIZED IDEOGRAPH ALLIANCE
 * 	0028 LEFT PARENTHESIS
 * 	5354 ??
 * 	0029 RIGHT PARENTHESIS
 * 3240 PARENTHESIZED IDEOGRAPH FESTIVAL
 * 	0028 LEFT PARENTHESIS
 * 	796D ??
 * 	0029 RIGHT PARENTHESIS
 * 3241 PARENTHESIZED IDEOGRAPH REST
 * 	0028 LEFT PARENTHESIS
 * 	4F11 ??
 * 	0029 RIGHT PARENTHESIS
 * 3242 PARENTHESIZED IDEOGRAPH SELF
 * 	0028 LEFT PARENTHESIS
 * 	81EA ??
 * 	0029 RIGHT PARENTHESIS
 * 3243 PARENTHESIZED IDEOGRAPH REACH
 * 	0028 LEFT PARENTHESIS
 * 	81F3 ??
 * 	0029 RIGHT PARENTHESIS
 * 3244 CIRCLED IDEOGRAPH QUESTION
 * 	554F ??
 * 3245 CIRCLED IDEOGRAPH KINDERGARTEN
 * 	5E7C ??
 * 3246 CIRCLED IDEOGRAPH SCHOOL
 * 	6587 ??
 * 3247 CIRCLED IDEOGRAPH KOTO
 * 	7B8F ??
 * 3250 PARTNERSHIP SIGN
 * 	0050 LATIN CAPITAL LETTER P
 * 	0054 LATIN CAPITAL LETTER T
 * 	0045 LATIN CAPITAL LETTER E
 * 3251 CIRCLED NUMBER TWENTY ONE
 * 	0032 DIGIT TWO
 * 	0031 DIGIT ONE
 * 3252 CIRCLED NUMBER TWENTY TWO
 * 	0032 DIGIT TWO
 * 	0032 DIGIT TWO
 * 3253 CIRCLED NUMBER TWENTY THREE
 * 	0032 DIGIT TWO
 * 	0033 DIGIT THREE
 * 3254 CIRCLED NUMBER TWENTY FOUR
 * 	0032 DIGIT TWO
 * 	0034 DIGIT FOUR
 * 3255 CIRCLED NUMBER TWENTY FIVE
 * 	0032 DIGIT TWO
 * 	0035 DIGIT FIVE
 * 3256 CIRCLED NUMBER TWENTY SIX
 * 	0032 DIGIT TWO
 * 	0036 DIGIT SIX
 * 3257 CIRCLED NUMBER TWENTY SEVEN
 * 	0032 DIGIT TWO
 * 	0037 DIGIT SEVEN
 * 3258 CIRCLED NUMBER TWENTY EIGHT
 * 	0032 DIGIT TWO
 * 	0038 DIGIT EIGHT
 * 3259 CIRCLED NUMBER TWENTY NINE
 * 	0032 DIGIT TWO
 * 	0039 DIGIT NINE
 * 325A CIRCLED NUMBER THIRTY
 * 	0033 DIGIT THREE
 * 	0030 DIGIT ZERO
 * 325B CIRCLED NUMBER THIRTY ONE
 * 	0033 DIGIT THREE
 * 	0031 DIGIT ONE
 * 325C CIRCLED NUMBER THIRTY TWO
 * 	0033 DIGIT THREE
 * 	0032 DIGIT TWO
 * 325D CIRCLED NUMBER THIRTY THREE
 * 	0033 DIGIT THREE
 * 	0033 DIGIT THREE
 * 325E CIRCLED NUMBER THIRTY FOUR
 * 	0033 DIGIT THREE
 * 	0034 DIGIT FOUR
 * 325F CIRCLED NUMBER THIRTY FIVE
 * 	0033 DIGIT THREE
 * 	0035 DIGIT FIVE
 * 3260 CIRCLED HANGUL KIYEOK
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 3261 CIRCLED HANGUL NIEUN
 * 	1102 HANGUL CHOSEONG NIEUN
 * 3262 CIRCLED HANGUL TIKEUT
 * 	1103 HANGUL CHOSEONG TIKEUT
 * 3263 CIRCLED HANGUL RIEUL
 * 	1105 HANGUL CHOSEONG RIEUL
 * 3264 CIRCLED HANGUL MIEUM
 * 	1106 HANGUL CHOSEONG MIEUM
 * 3265 CIRCLED HANGUL PIEUP
 * 	1107 HANGUL CHOSEONG PIEUP
 * 3266 CIRCLED HANGUL SIOS
 * 	1109 HANGUL CHOSEONG SIOS
 * 3267 CIRCLED HANGUL IEUNG
 * 	110B HANGUL CHOSEONG IEUNG
 * 3268 CIRCLED HANGUL CIEUC
 * 	110C HANGUL CHOSEONG CIEUC
 * 3269 CIRCLED HANGUL CHIEUCH
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 326A CIRCLED HANGUL KHIEUKH
 * 	110F HANGUL CHOSEONG KHIEUKH
 * 326B CIRCLED HANGUL THIEUTH
 * 	1110 HANGUL CHOSEONG THIEUTH
 * 326C CIRCLED HANGUL PHIEUPH
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * 326D CIRCLED HANGUL HIEUH
 * 	1112 HANGUL CHOSEONG HIEUH
 * 326E CIRCLED HANGUL KIYEOK A
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 	1161 HANGUL JUNGSEONG A
 * 326F CIRCLED HANGUL NIEUN A
 * 	1102 HANGUL CHOSEONG NIEUN
 * 	1161 HANGUL JUNGSEONG A
 * 3270 CIRCLED HANGUL TIKEUT A
 * 	1103 HANGUL CHOSEONG TIKEUT
 * 	1161 HANGUL JUNGSEONG A
 * 3271 CIRCLED HANGUL RIEUL A
 * 	1105 HANGUL CHOSEONG RIEUL
 * 	1161 HANGUL JUNGSEONG A
 * 3272 CIRCLED HANGUL MIEUM A
 * 	1106 HANGUL CHOSEONG MIEUM
 * 	1161 HANGUL JUNGSEONG A
 * 3273 CIRCLED HANGUL PIEUP A
 * 	1107 HANGUL CHOSEONG PIEUP
 * 	1161 HANGUL JUNGSEONG A
 * 3274 CIRCLED HANGUL SIOS A
 * 	1109 HANGUL CHOSEONG SIOS
 * 	1161 HANGUL JUNGSEONG A
 * 3275 CIRCLED HANGUL IEUNG A
 * 	110B HANGUL CHOSEONG IEUNG
 * 	1161 HANGUL JUNGSEONG A
 * 3276 CIRCLED HANGUL CIEUC A
 * 	110C HANGUL CHOSEONG CIEUC
 * 	1161 HANGUL JUNGSEONG A
 * 3277 CIRCLED HANGUL CHIEUCH A
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 	1161 HANGUL JUNGSEONG A
 * 3278 CIRCLED HANGUL KHIEUKH A
 * 	110F HANGUL CHOSEONG KHIEUKH
 * 	1161 HANGUL JUNGSEONG A
 * 3279 CIRCLED HANGUL THIEUTH A
 * 	1110 HANGUL CHOSEONG THIEUTH
 * 	1161 HANGUL JUNGSEONG A
 * 327A CIRCLED HANGUL PHIEUPH A
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * 	1161 HANGUL JUNGSEONG A
 * 327B CIRCLED HANGUL HIEUH A
 * 	1112 HANGUL CHOSEONG HIEUH
 * 	1161 HANGUL JUNGSEONG A
 * 327C CIRCLED KOREAN CHARACTER CHAMKO
 * 	110E HANGUL CHOSEONG CHIEUCH
 * 	1161 HANGUL JUNGSEONG A
 * 	11B7 HANGUL JONGSEONG MIEUM
 * 	1100 HANGUL CHOSEONG KIYEOK
 * 	1169 HANGUL JUNGSEONG O
 * 327D CIRCLED KOREAN CHARACTER JUEUI
 * 	110C HANGUL CHOSEONG CIEUC
 * 	116E HANGUL JUNGSEONG U
 * 	110B HANGUL CHOSEONG IEUNG
 * 	1174 HANGUL JUNGSEONG YI
 * 327E CIRCLED HANGUL IEUNG U
 * 	110B HANGUL CHOSEONG IEUNG
 * 	116E HANGUL JUNGSEONG U
 * 3280 CIRCLED IDEOGRAPH ONE
 * 	4E00 
 * 3281 CIRCLED IDEOGRAPH TWO
 * 	4E8C ??
 * 3282 CIRCLED IDEOGRAPH THREE
 * 	4E09 ??
 * 3283 CIRCLED IDEOGRAPH FOUR
 * 	56DB ??
 * 3284 CIRCLED IDEOGRAPH FIVE
 * 	4E94 ??
 * 3285 CIRCLED IDEOGRAPH SIX
 * 	516D ??
 * 3286 CIRCLED IDEOGRAPH SEVEN
 * 	4E03 ??
 * 3287 CIRCLED IDEOGRAPH EIGHT
 * 	516B ??
 * 3288 CIRCLED IDEOGRAPH NINE
 * 	4E5D ??
 * 3289 CIRCLED IDEOGRAPH TEN
 * 	5341 ??
 * 328A CIRCLED IDEOGRAPH MOON
 * 	6708 ??
 * 328B CIRCLED IDEOGRAPH FIRE
 * 	706B ??
 * 328C CIRCLED IDEOGRAPH WATER
 * 	6C34 ??
 * 328D CIRCLED IDEOGRAPH WOOD
 * 	6728 ??
 * 328E CIRCLED IDEOGRAPH METAL
 * 	91D1 ??
 * 328F CIRCLED IDEOGRAPH EARTH
 * 	571F ??
 * 3290 CIRCLED IDEOGRAPH SUN
 * 	65E5 ??
 * 3291 CIRCLED IDEOGRAPH STOCK
 * 	682A ??
 * 3292 CIRCLED IDEOGRAPH HAVE
 * 	6709 ??
 * 3293 CIRCLED IDEOGRAPH SOCIETY
 * 	793E ??
 * 3294 CIRCLED IDEOGRAPH NAME
 * 	540D ??
 * 3295 CIRCLED IDEOGRAPH SPECIAL
 * 	7279 ??
 * 3296 CIRCLED IDEOGRAPH FINANCIAL
 * 	8CA1 ??
 * 3297 CIRCLED IDEOGRAPH CONGRATULATION
 * 	795D ??
 * 3298 CIRCLED IDEOGRAPH LABOR
 * 	52B4 ??
 * 3299 CIRCLED IDEOGRAPH SECRET
 * 	79D8 ??
 * 329A CIRCLED IDEOGRAPH MALE
 * 	7537 ??
 * 329B CIRCLED IDEOGRAPH FEMALE
 * 	5973 ??
 * 329C CIRCLED IDEOGRAPH SUITABLE
 * 	9069 ??
 * 329D CIRCLED IDEOGRAPH EXCELLENT
 * 	512A ??
 * 329E CIRCLED IDEOGRAPH PRINT
 * 	5370 ??
 * 329F CIRCLED IDEOGRAPH ATTENTION
 * 	6CE8 ??
 * 32A0 CIRCLED IDEOGRAPH ITEM
 * 	9805 ??
 * 32A1 CIRCLED IDEOGRAPH REST
 * 	4F11 ??
 * 32A2 CIRCLED IDEOGRAPH COPY
 * 	5199 ??
 * 32A3 CIRCLED IDEOGRAPH CORRECT
 * 	6B63 ??
 * 32A4 CIRCLED IDEOGRAPH HIGH
 * 	4E0A ??
 * 32A5 CIRCLED IDEOGRAPH CENTRE
 * 	4E2D ??
 * 32A6 CIRCLED IDEOGRAPH LOW
 * 	4E0B ??
 * 32A7 CIRCLED IDEOGRAPH LEFT
 * 	5DE6 ??
 * 32A8 CIRCLED IDEOGRAPH RIGHT
 * 	53F3 ??
 * 32A9 CIRCLED IDEOGRAPH MEDICINE
 * 	533B ??
 * 32AA CIRCLED IDEOGRAPH RELIGION
 * 	5B97 ??
 * 32AB CIRCLED IDEOGRAPH STUDY
 * 	5B66 ??
 * 32AC CIRCLED IDEOGRAPH SUPERVISE
 * 	76E3 ??
 * 32AD CIRCLED IDEOGRAPH ENTERPRISE
 * 	4F01 ??
 * 32AE CIRCLED IDEOGRAPH RESOURCE
 * 	8CC7 ??
 * 32AF CIRCLED IDEOGRAPH ALLIANCE
 * 	5354 ??
 * 32B0 CIRCLED IDEOGRAPH NIGHT
 * 	591C ??
 * 32B1 CIRCLED NUMBER THIRTY SIX
 * 	0033 DIGIT THREE
 * 	0036 DIGIT SIX
 * 32B2 CIRCLED NUMBER THIRTY SEVEN
 * 	0033 DIGIT THREE
 * 	0037 DIGIT SEVEN
 * 32B3 CIRCLED NUMBER THIRTY EIGHT
 * 	0033 DIGIT THREE
 * 	0038 DIGIT EIGHT
 * 32B4 CIRCLED NUMBER THIRTY NINE
 * 	0033 DIGIT THREE
 * 	0039 DIGIT NINE
 * 32B5 CIRCLED NUMBER FORTY
 * 	0034 DIGIT FOUR
 * 	0030 DIGIT ZERO
 * 32B6 CIRCLED NUMBER FORTY ONE
 * 	0034 DIGIT FOUR
 * 	0031 DIGIT ONE
 * 32B7 CIRCLED NUMBER FORTY TWO
 * 	0034 DIGIT FOUR
 * 	0032 DIGIT TWO
 * 32B8 CIRCLED NUMBER FORTY THREE
 * 	0034 DIGIT FOUR
 * 	0033 DIGIT THREE
 * 32B9 CIRCLED NUMBER FORTY FOUR
 * 	0034 DIGIT FOUR
 * 	0034 DIGIT FOUR
 * 32BA CIRCLED NUMBER FORTY FIVE
 * 	0034 DIGIT FOUR
 * 	0035 DIGIT FIVE
 * 32BB CIRCLED NUMBER FORTY SIX
 * 	0034 DIGIT FOUR
 * 	0036 DIGIT SIX
 * 32BC CIRCLED NUMBER FORTY SEVEN
 * 	0034 DIGIT FOUR
 * 	0037 DIGIT SEVEN
 * 32BD CIRCLED NUMBER FORTY EIGHT
 * 	0034 DIGIT FOUR
 * 	0038 DIGIT EIGHT
 * 32BE CIRCLED NUMBER FORTY NINE
 * 	0034 DIGIT FOUR
 * 	0039 DIGIT NINE
 * 32BF CIRCLED NUMBER FIFTY
 * 	0035 DIGIT FIVE
 * 	0030 DIGIT ZERO
 * 32C0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JANUARY
 * 	0031 DIGIT ONE
 * 	6708 ??
 * 32C1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR FEBRUARY
 * 	0032 DIGIT TWO
 * 	6708 ??
 * 32C2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR MARCH
 * 	0033 DIGIT THREE
 * 	6708 ??
 * 32C3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR APRIL
 * 	0034 DIGIT FOUR
 * 	6708 ??
 * 32C4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR MAY
 * 	0035 DIGIT FIVE
 * 	6708 ??
 * 32C5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JUNE
 * 	0036 DIGIT SIX
 * 	6708 ??
 * 32C6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR JULY
 * 	0037 DIGIT SEVEN
 * 	6708 ??
 * 32C7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR AUGUST
 * 	0038 DIGIT EIGHT
 * 	6708 ??
 * 32C8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR SEPTEMBER
 * 	0039 DIGIT NINE
 * 	6708 ??
 * 32C9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR OCTOBER
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 	6708 ??
 * 32CA IDEOGRAPHIC TELEGRAPH SYMBOL FOR NOVEMBER
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 	6708 ??
 * 32CB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DECEMBER
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 	6708 ??
 * 32CC SQUARE HG
 * 	0048 LATIN CAPITAL LETTER H
 * 	0067 LATIN SMALL LETTER G
 * 32CD SQUARE ERG
 * 	0065 LATIN SMALL LETTER E
 * 	0072 LATIN SMALL LETTER R
 * 	0067 LATIN SMALL LETTER G
 * 32CE SQUARE EV
 * 	0065 LATIN SMALL LETTER E
 * 	0056 LATIN CAPITAL LETTER V
 * 32CF LIMITED LIABILITY SIGN
 * 	004C LATIN CAPITAL LETTER L
 * 	0054 LATIN CAPITAL LETTER T
 * 	0044 LATIN CAPITAL LETTER D
 * 32D0 CIRCLED KATAKANA A
 * 	30A2 KATAKANA LETTER A
 * 32D1 CIRCLED KATAKANA I
 * 	30A4 KATAKANA LETTER I
 * 32D2 CIRCLED KATAKANA U
 * 	30A6 KATAKANA LETTER U
 * 32D3 CIRCLED KATAKANA E
 * 	30A8 KATAKANA LETTER E
 * 32D4 CIRCLED KATAKANA O
 * 	30AA KATAKANA LETTER O
 * 32D5 CIRCLED KATAKANA KA
 * 	30AB KATAKANA LETTER KA
 * 32D6 CIRCLED KATAKANA KI
 * 	30AD KATAKANA LETTER KI
 * 32D7 CIRCLED KATAKANA KU
 * 	30AF KATAKANA LETTER KU
 * 32D8 CIRCLED KATAKANA KE
 * 	30B1 KATAKANA LETTER KE
 * 32D9 CIRCLED KATAKANA KO
 * 	30B3 KATAKANA LETTER KO
 * 32DA CIRCLED KATAKANA SA
 * 	30B5 KATAKANA LETTER SA
 * 32DB CIRCLED KATAKANA SI
 * 	30B7 KATAKANA LETTER SI
 * 32DC CIRCLED KATAKANA SU
 * 	30B9 KATAKANA LETTER SU
 * 32DD CIRCLED KATAKANA SE
 * 	30BB KATAKANA LETTER SE
 * 32DE CIRCLED KATAKANA SO
 * 	30BD KATAKANA LETTER SO
 * 32DF CIRCLED KATAKANA TA
 * 	30BF KATAKANA LETTER TA
 * 32E0 CIRCLED KATAKANA TI
 * 	30C1 KATAKANA LETTER TI
 * 32E1 CIRCLED KATAKANA TU
 * 	30C4 KATAKANA LETTER TU
 * 32E2 CIRCLED KATAKANA TE
 * 	30C6 KATAKANA LETTER TE
 * 32E3 CIRCLED KATAKANA TO
 * 	30C8 KATAKANA LETTER TO
 * 32E4 CIRCLED KATAKANA NA
 * 	30CA KATAKANA LETTER NA
 * 32E5 CIRCLED KATAKANA NI
 * 	30CB KATAKANA LETTER NI
 * 32E6 CIRCLED KATAKANA NU
 * 	30CC KATAKANA LETTER NU
 * 32E7 CIRCLED KATAKANA NE
 * 	30CD KATAKANA LETTER NE
 * 32E8 CIRCLED KATAKANA NO
 * 	30CE KATAKANA LETTER NO
 * 32E9 CIRCLED KATAKANA HA
 * 	30CF KATAKANA LETTER HA
 * 32EA CIRCLED KATAKANA HI
 * 	30D2 KATAKANA LETTER HI
 * 32EB CIRCLED KATAKANA HU
 * 	30D5 KATAKANA LETTER HU
 * 32EC CIRCLED KATAKANA HE
 * 	30D8 KATAKANA LETTER HE
 * 32ED CIRCLED KATAKANA HO
 * 	30DB KATAKANA LETTER HO
 * 32EE CIRCLED KATAKANA MA
 * 	30DE KATAKANA LETTER MA
 * 32EF CIRCLED KATAKANA MI
 * 	30DF KATAKANA LETTER MI
 * 32F0 CIRCLED KATAKANA MU
 * 	30E0 KATAKANA LETTER MU
 * 32F1 CIRCLED KATAKANA ME
 * 	30E1 KATAKANA LETTER ME
 * 32F2 CIRCLED KATAKANA MO
 * 	30E2 KATAKANA LETTER MO
 * 32F3 CIRCLED KATAKANA YA
 * 	30E4 KATAKANA LETTER YA
 * 32F4 CIRCLED KATAKANA YU
 * 	30E6 KATAKANA LETTER YU
 * 32F5 CIRCLED KATAKANA YO
 * 	30E8 KATAKANA LETTER YO
 * 32F6 CIRCLED KATAKANA RA
 * 	30E9 KATAKANA LETTER RA
 * 32F7 CIRCLED KATAKANA RI
 * 	30EA KATAKANA LETTER RI
 * 32F8 CIRCLED KATAKANA RU
 * 	30EB KATAKANA LETTER RU
 * 32F9 CIRCLED KATAKANA RE
 * 	30EC KATAKANA LETTER RE
 * 32FA CIRCLED KATAKANA RO
 * 	30ED KATAKANA LETTER RO
 * 32FB CIRCLED KATAKANA WA
 * 	30EF KATAKANA LETTER WA
 * 32FC CIRCLED KATAKANA WI
 * 	30F0 KATAKANA LETTER WI
 * 32FD CIRCLED KATAKANA WE
 * 	30F1 KATAKANA LETTER WE
 * 32FE CIRCLED KATAKANA WO
 * 	30F2 KATAKANA LETTER WO
 * 3300 SQUARE APAATO
 * 	30A2 KATAKANA LETTER A
 * 	30D1 KATAKANA LETTER PA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C8 KATAKANA LETTER TO
 * 3301 SQUARE ARUHUA
 * 	30A2 KATAKANA LETTER A
 * 	30EB KATAKANA LETTER RU
 * 	30D5 KATAKANA LETTER HU
 * 	30A1 KATAKANA LETTER SMALL A
 * 3302 SQUARE ANPEA
 * 	30A2 KATAKANA LETTER A
 * 	30F3 KATAKANA LETTER N
 * 	30DA KATAKANA LETTER PE
 * 	30A2 KATAKANA LETTER A
 * 3303 SQUARE AARU
 * 	30A2 KATAKANA LETTER A
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EB KATAKANA LETTER RU
 * 3304 SQUARE ININGU
 * 	30A4 KATAKANA LETTER I
 * 	30CB KATAKANA LETTER NI
 * 	30F3 KATAKANA LETTER N
 * 	30B0 KATAKANA LETTER GU
 * 3305 SQUARE INTI
 * 	30A4 KATAKANA LETTER I
 * 	30F3 KATAKANA LETTER N
 * 	30C1 KATAKANA LETTER TI
 * 3306 SQUARE UON
 * 	30A6 KATAKANA LETTER U
 * 	30A9 KATAKANA LETTER SMALL O
 * 	30F3 KATAKANA LETTER N
 * 3307 SQUARE ESUKUUDO
 * 	30A8 KATAKANA LETTER E
 * 	30B9 KATAKANA LETTER SU
 * 	30AF KATAKANA LETTER KU
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C9 KATAKANA LETTER DO
 * 3308 SQUARE EEKAA
 * 	30A8 KATAKANA LETTER E
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30AB KATAKANA LETTER KA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 3309 SQUARE ONSU
 * 	30AA KATAKANA LETTER O
 * 	30F3 KATAKANA LETTER N
 * 	30B9 KATAKANA LETTER SU
 * 330A SQUARE OOMU
 * 	30AA KATAKANA LETTER O
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30E0 KATAKANA LETTER MU
 * 330B SQUARE KAIRI
 * 	30AB KATAKANA LETTER KA
 * 	30A4 KATAKANA LETTER I
 * 	30EA KATAKANA LETTER RI
 * 330C SQUARE KARATTO
 * 	30AB KATAKANA LETTER KA
 * 	30E9 KATAKANA LETTER RA
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C8 KATAKANA LETTER TO
 * 330D SQUARE KARORII
 * 	30AB KATAKANA LETTER KA
 * 	30ED KATAKANA LETTER RO
 * 	30EA KATAKANA LETTER RI
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 330E SQUARE GARON
 * 	30AC KATAKANA LETTER GA
 * 	30ED KATAKANA LETTER RO
 * 	30F3 KATAKANA LETTER N
 * 330F SQUARE GANMA
 * 	30AC KATAKANA LETTER GA
 * 	30F3 KATAKANA LETTER N
 * 	30DE KATAKANA LETTER MA
 * 3310 SQUARE GIGA
 * 	30AE KATAKANA LETTER GI
 * 	30AC KATAKANA LETTER GA
 * 3311 SQUARE GINII
 * 	30AE KATAKANA LETTER GI
 * 	30CB KATAKANA LETTER NI
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 3312 SQUARE KYURII
 * 	30AD KATAKANA LETTER KI
 * 	30E5 KATAKANA LETTER SMALL YU
 * 	30EA KATAKANA LETTER RI
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 3313 SQUARE GIRUDAA
 * 	30AE KATAKANA LETTER GI
 * 	30EB KATAKANA LETTER RU
 * 	30C0 KATAKANA LETTER DA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 3314 SQUARE KIRO
 * 	30AD KATAKANA LETTER KI
 * 	30ED KATAKANA LETTER RO
 * 3315 SQUARE KIROGURAMU
 * 	30AD KATAKANA LETTER KI
 * 	30ED KATAKANA LETTER RO
 * 	30B0 KATAKANA LETTER GU
 * 	30E9 KATAKANA LETTER RA
 * 	30E0 KATAKANA LETTER MU
 * 3316 SQUARE KIROMEETORU
 * 	30AD KATAKANA LETTER KI
 * 	30ED KATAKANA LETTER RO
 * 	30E1 KATAKANA LETTER ME
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C8 KATAKANA LETTER TO
 * 	30EB KATAKANA LETTER RU
 * 3317 SQUARE KIROWATTO
 * 	30AD KATAKANA LETTER KI
 * 	30ED KATAKANA LETTER RO
 * 	30EF KATAKANA LETTER WA
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C8 KATAKANA LETTER TO
 * 3318 SQUARE GURAMU
 * 	30B0 KATAKANA LETTER GU
 * 	30E9 KATAKANA LETTER RA
 * 	30E0 KATAKANA LETTER MU
 * 3319 SQUARE GURAMUTON
 * 	30B0 KATAKANA LETTER GU
 * 	30E9 KATAKANA LETTER RA
 * 	30E0 KATAKANA LETTER MU
 * 	30C8 KATAKANA LETTER TO
 * 	30F3 KATAKANA LETTER N
 * 331A SQUARE KURUZEIRO
 * 	30AF KATAKANA LETTER KU
 * 	30EB KATAKANA LETTER RU
 * 	30BC KATAKANA LETTER ZE
 * 	30A4 KATAKANA LETTER I
 * 	30ED KATAKANA LETTER RO
 * 331B SQUARE KUROONE
 * 	30AF KATAKANA LETTER KU
 * 	30ED KATAKANA LETTER RO
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30CD KATAKANA LETTER NE
 * 331C SQUARE KEESU
 * 	30B1 KATAKANA LETTER KE
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30B9 KATAKANA LETTER SU
 * 331D SQUARE KORUNA
 * 	30B3 KATAKANA LETTER KO
 * 	30EB KATAKANA LETTER RU
 * 	30CA KATAKANA LETTER NA
 * 331E SQUARE KOOPO
 * 	30B3 KATAKANA LETTER KO
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30DD KATAKANA LETTER PO
 * 331F SQUARE SAIKURU
 * 	30B5 KATAKANA LETTER SA
 * 	30A4 KATAKANA LETTER I
 * 	30AF KATAKANA LETTER KU
 * 	30EB KATAKANA LETTER RU
 * 3320 SQUARE SANTIIMU
 * 	30B5 KATAKANA LETTER SA
 * 	30F3 KATAKANA LETTER N
 * 	30C1 KATAKANA LETTER TI
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30E0 KATAKANA LETTER MU
 * 3321 SQUARE SIRINGU
 * 	30B7 KATAKANA LETTER SI
 * 	30EA KATAKANA LETTER RI
 * 	30F3 KATAKANA LETTER N
 * 	30B0 KATAKANA LETTER GU
 * 3322 SQUARE SENTI
 * 	30BB KATAKANA LETTER SE
 * 	30F3 KATAKANA LETTER N
 * 	30C1 KATAKANA LETTER TI
 * 3323 SQUARE SENTO
 * 	30BB KATAKANA LETTER SE
 * 	30F3 KATAKANA LETTER N
 * 	30C8 KATAKANA LETTER TO
 * 3324 SQUARE DAASU
 * 	30C0 KATAKANA LETTER DA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30B9 KATAKANA LETTER SU
 * 3325 SQUARE DESI
 * 	30C7 KATAKANA LETTER DE
 * 	30B7 KATAKANA LETTER SI
 * 3326 SQUARE DORU
 * 	30C9 KATAKANA LETTER DO
 * 	30EB KATAKANA LETTER RU
 * 3327 SQUARE TON
 * 	30C8 KATAKANA LETTER TO
 * 	30F3 KATAKANA LETTER N
 * 3328 SQUARE NANO
 * 	30CA KATAKANA LETTER NA
 * 	30CE KATAKANA LETTER NO
 * 3329 SQUARE NOTTO
 * 	30CE KATAKANA LETTER NO
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C8 KATAKANA LETTER TO
 * 332A SQUARE HAITU
 * 	30CF KATAKANA LETTER HA
 * 	30A4 KATAKANA LETTER I
 * 	30C4 KATAKANA LETTER TU
 * 332B SQUARE PAASENTO
 * 	30D1 KATAKANA LETTER PA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30BB KATAKANA LETTER SE
 * 	30F3 KATAKANA LETTER N
 * 	30C8 KATAKANA LETTER TO
 * 332C SQUARE PAATU
 * 	30D1 KATAKANA LETTER PA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C4 KATAKANA LETTER TU
 * 332D SQUARE BAARERU
 * 	30D0 KATAKANA LETTER BA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EC KATAKANA LETTER RE
 * 	30EB KATAKANA LETTER RU
 * 332E SQUARE PIASUTORU
 * 	30D4 KATAKANA LETTER PI
 * 	30A2 KATAKANA LETTER A
 * 	30B9 KATAKANA LETTER SU
 * 	30C8 KATAKANA LETTER TO
 * 	30EB KATAKANA LETTER RU
 * 332F SQUARE PIKURU
 * 	30D4 KATAKANA LETTER PI
 * 	30AF KATAKANA LETTER KU
 * 	30EB KATAKANA LETTER RU
 * 3330 SQUARE PIKO
 * 	30D4 KATAKANA LETTER PI
 * 	30B3 KATAKANA LETTER KO
 * 3331 SQUARE BIRU
 * 	30D3 KATAKANA LETTER BI
 * 	30EB KATAKANA LETTER RU
 * 3332 SQUARE HUARADDO
 * 	30D5 KATAKANA LETTER HU
 * 	30A1 KATAKANA LETTER SMALL A
 * 	30E9 KATAKANA LETTER RA
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C9 KATAKANA LETTER DO
 * 3333 SQUARE HUIITO
 * 	30D5 KATAKANA LETTER HU
 * 	30A3 KATAKANA LETTER SMALL I
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C8 KATAKANA LETTER TO
 * 3334 SQUARE BUSSYERU
 * 	30D6 KATAKANA LETTER BU
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30B7 KATAKANA LETTER SI
 * 	30A7 KATAKANA LETTER SMALL E
 * 	30EB KATAKANA LETTER RU
 * 3335 SQUARE HURAN
 * 	30D5 KATAKANA LETTER HU
 * 	30E9 KATAKANA LETTER RA
 * 	30F3 KATAKANA LETTER N
 * 3336 SQUARE HEKUTAARU
 * 	30D8 KATAKANA LETTER HE
 * 	30AF KATAKANA LETTER KU
 * 	30BF KATAKANA LETTER TA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EB KATAKANA LETTER RU
 * 3337 SQUARE PESO
 * 	30DA KATAKANA LETTER PE
 * 	30BD KATAKANA LETTER SO
 * 3338 SQUARE PENIHI
 * 	30DA KATAKANA LETTER PE
 * 	30CB KATAKANA LETTER NI
 * 	30D2 KATAKANA LETTER HI
 * 3339 SQUARE HERUTU
 * 	30D8 KATAKANA LETTER HE
 * 	30EB KATAKANA LETTER RU
 * 	30C4 KATAKANA LETTER TU
 * 333A SQUARE PENSU
 * 	30DA KATAKANA LETTER PE
 * 	30F3 KATAKANA LETTER N
 * 	30B9 KATAKANA LETTER SU
 * 333B SQUARE PEEZI
 * 	30DA KATAKANA LETTER PE
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30B8 KATAKANA LETTER ZI
 * 333C SQUARE BEETA
 * 	30D9 KATAKANA LETTER BE
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30BF KATAKANA LETTER TA
 * 333D SQUARE POINTO
 * 	30DD KATAKANA LETTER PO
 * 	30A4 KATAKANA LETTER I
 * 	30F3 KATAKANA LETTER N
 * 	30C8 KATAKANA LETTER TO
 * 333E SQUARE BORUTO
 * 	30DC KATAKANA LETTER BO
 * 	30EB KATAKANA LETTER RU
 * 	30C8 KATAKANA LETTER TO
 * 333F SQUARE HON
 * 	30DB KATAKANA LETTER HO
 * 	30F3 KATAKANA LETTER N
 * 3340 SQUARE PONDO
 * 	30DD KATAKANA LETTER PO
 * 	30F3 KATAKANA LETTER N
 * 	30C9 KATAKANA LETTER DO
 * 3341 SQUARE HOORU
 * 	30DB KATAKANA LETTER HO
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EB KATAKANA LETTER RU
 * 3342 SQUARE HOON
 * 	30DB KATAKANA LETTER HO
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30F3 KATAKANA LETTER N
 * 3343 SQUARE MAIKURO
 * 	30DE KATAKANA LETTER MA
 * 	30A4 KATAKANA LETTER I
 * 	30AF KATAKANA LETTER KU
 * 	30ED KATAKANA LETTER RO
 * 3344 SQUARE MAIRU
 * 	30DE KATAKANA LETTER MA
 * 	30A4 KATAKANA LETTER I
 * 	30EB KATAKANA LETTER RU
 * 3345 SQUARE MAHHA
 * 	30DE KATAKANA LETTER MA
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30CF KATAKANA LETTER HA
 * 3346 SQUARE MARUKU
 * 	30DE KATAKANA LETTER MA
 * 	30EB KATAKANA LETTER RU
 * 	30AF KATAKANA LETTER KU
 * 3347 SQUARE MANSYON
 * 	30DE KATAKANA LETTER MA
 * 	30F3 KATAKANA LETTER N
 * 	30B7 KATAKANA LETTER SI
 * 	30E7 KATAKANA LETTER SMALL YO
 * 	30F3 KATAKANA LETTER N
 * 3348 SQUARE MIKURON
 * 	30DF KATAKANA LETTER MI
 * 	30AF KATAKANA LETTER KU
 * 	30ED KATAKANA LETTER RO
 * 	30F3 KATAKANA LETTER N
 * 3349 SQUARE MIRI
 * 	30DF KATAKANA LETTER MI
 * 	30EA KATAKANA LETTER RI
 * 334A SQUARE MIRIBAARU
 * 	30DF KATAKANA LETTER MI
 * 	30EA KATAKANA LETTER RI
 * 	30D0 KATAKANA LETTER BA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EB KATAKANA LETTER RU
 * 334B SQUARE MEGA
 * 	30E1 KATAKANA LETTER ME
 * 	30AC KATAKANA LETTER GA
 * 334C SQUARE MEGATON
 * 	30E1 KATAKANA LETTER ME
 * 	30AC KATAKANA LETTER GA
 * 	30C8 KATAKANA LETTER TO
 * 	30F3 KATAKANA LETTER N
 * 334D SQUARE MEETORU
 * 	30E1 KATAKANA LETTER ME
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C8 KATAKANA LETTER TO
 * 	30EB KATAKANA LETTER RU
 * 334E SQUARE YAADO
 * 	30E4 KATAKANA LETTER YA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30C9 KATAKANA LETTER DO
 * 334F SQUARE YAARU
 * 	30E4 KATAKANA LETTER YA
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30EB KATAKANA LETTER RU
 * 3350 SQUARE YUAN
 * 	30E6 KATAKANA LETTER YU
 * 	30A2 KATAKANA LETTER A
 * 	30F3 KATAKANA LETTER N
 * 3351 SQUARE RITTORU
 * 	30EA KATAKANA LETTER RI
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C8 KATAKANA LETTER TO
 * 	30EB KATAKANA LETTER RU
 * 3352 SQUARE RIRA
 * 	30EA KATAKANA LETTER RI
 * 	30E9 KATAKANA LETTER RA
 * 3353 SQUARE RUPII
 * 	30EB KATAKANA LETTER RU
 * 	30D4 KATAKANA LETTER PI
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 3354 SQUARE RUUBURU
 * 	30EB KATAKANA LETTER RU
 * 	30FC KATAKANA-HIRAGANA PROLONGED SOUND MARK
 * 	30D6 KATAKANA LETTER BU
 * 	30EB KATAKANA LETTER RU
 * 3355 SQUARE REMU
 * 	30EC KATAKANA LETTER RE
 * 	30E0 KATAKANA LETTER MU
 * 3356 SQUARE RENTOGEN
 * 	30EC KATAKANA LETTER RE
 * 	30F3 KATAKANA LETTER N
 * 	30C8 KATAKANA LETTER TO
 * 	30B2 KATAKANA LETTER GE
 * 	30F3 KATAKANA LETTER N
 * 3357 SQUARE WATTO
 * 	30EF KATAKANA LETTER WA
 * 	30C3 KATAKANA LETTER SMALL TU
 * 	30C8 KATAKANA LETTER TO
 * 3358 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ZERO
 * 	0030 DIGIT ZERO
 * 	70B9 ??
 * 3359 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ONE
 * 	0031 DIGIT ONE
 * 	70B9 ??
 * 335A IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWO
 * 	0032 DIGIT TWO
 * 	70B9 ??
 * 335B IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR THREE
 * 	0033 DIGIT THREE
 * 	70B9 ??
 * 335C IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FOUR
 * 	0034 DIGIT FOUR
 * 	70B9 ??
 * 335D IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FIVE
 * 	0035 DIGIT FIVE
 * 	70B9 ??
 * 335E IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SIX
 * 	0036 DIGIT SIX
 * 	70B9 ??
 * 335F IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SEVEN
 * 	0037 DIGIT SEVEN
 * 	70B9 ??
 * 3360 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR EIGHT
 * 	0038 DIGIT EIGHT
 * 	70B9 ??
 * 3361 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR NINE
 * 	0039 DIGIT NINE
 * 	70B9 ??
 * 3362 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TEN
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 	70B9 ??
 * 3363 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR ELEVEN
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 	70B9 ??
 * 3364 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWELVE
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 	70B9 ??
 * 3365 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR THIRTEEN
 * 	0031 DIGIT ONE
 * 	0033 DIGIT THREE
 * 	70B9 ??
 * 3366 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FOURTEEN
 * 	0031 DIGIT ONE
 * 	0034 DIGIT FOUR
 * 	70B9 ??
 * 3367 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR FIFTEEN
 * 	0031 DIGIT ONE
 * 	0035 DIGIT FIVE
 * 	70B9 ??
 * 3368 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SIXTEEN
 * 	0031 DIGIT ONE
 * 	0036 DIGIT SIX
 * 	70B9 ??
 * 3369 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR SEVENTEEN
 * 	0031 DIGIT ONE
 * 	0037 DIGIT SEVEN
 * 	70B9 ??
 * 336A IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR EIGHTEEN
 * 	0031 DIGIT ONE
 * 	0038 DIGIT EIGHT
 * 	70B9 ??
 * 336B IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR NINETEEN
 * 	0031 DIGIT ONE
 * 	0039 DIGIT NINE
 * 	70B9 ??
 * 336C IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY
 * 	0032 DIGIT TWO
 * 	0030 DIGIT ZERO
 * 	70B9 ??
 * 336D IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-ONE
 * 	0032 DIGIT TWO
 * 	0031 DIGIT ONE
 * 	70B9 ??
 * 336E IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-TWO
 * 	0032 DIGIT TWO
 * 	0032 DIGIT TWO
 * 	70B9 ??
 * 336F IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-THREE
 * 	0032 DIGIT TWO
 * 	0033 DIGIT THREE
 * 	70B9 ??
 * 3370 IDEOGRAPHIC TELEGRAPH SYMBOL FOR HOUR TWENTY-FOUR
 * 	0032 DIGIT TWO
 * 	0034 DIGIT FOUR
 * 	70B9 ??
 * 3371 SQUARE HPA
 * 	0068 LATIN SMALL LETTER H
 * 	0050 LATIN CAPITAL LETTER P
 * 	0061 LATIN SMALL LETTER A
 * 3372 SQUARE DA
 * 	0064 LATIN SMALL LETTER D
 * 	0061 LATIN SMALL LETTER A
 * 3373 SQUARE AU
 * 	0041 LATIN CAPITAL LETTER A
 * 	0055 LATIN CAPITAL LETTER U
 * 3374 SQUARE BAR
 * 	0062 LATIN SMALL LETTER B
 * 	0061 LATIN SMALL LETTER A
 * 	0072 LATIN SMALL LETTER R
 * 3375 SQUARE OV
 * 	006F LATIN SMALL LETTER O
 * 	0056 LATIN CAPITAL LETTER V
 * 3376 SQUARE PC
 * 	0070 LATIN SMALL LETTER P
 * 	0063 LATIN SMALL LETTER C
 * 3377 SQUARE DM
 * 	0064 LATIN SMALL LETTER D
 * 	006D LATIN SMALL LETTER M
 * 3378 SQUARE DM SQUARED
 * 	0064 LATIN SMALL LETTER D
 * 	006D LATIN SMALL LETTER M
 * 	0032 DIGIT TWO
 * 3379 SQUARE DM CUBED
 * 	0064 LATIN SMALL LETTER D
 * 	006D LATIN SMALL LETTER M
 * 	0033 DIGIT THREE
 * 337A SQUARE IU
 * 	0049 LATIN CAPITAL LETTER I
 * 	0055 LATIN CAPITAL LETTER U
 * 337B SQUARE ERA NAME HEISEI
 * 	5E73 ??
 * 	6210 ??
 * 337C SQUARE ERA NAME SYOUWA
 * 	662D ??
 * 	548C ??
 * 337D SQUARE ERA NAME TAISYOU
 * 	5927 ??
 * 	6B63 ??
 * 337E SQUARE ERA NAME MEIZI
 * 	660E ??
 * 	6CBB ??
 * 337F SQUARE CORPORATION
 * 	682A ??
 * 	5F0F ??
 * 	4F1A ??
 * 	793E ??
 * 3380 SQUARE PA AMPS
 * 	0070 LATIN SMALL LETTER P
 * 	0041 LATIN CAPITAL LETTER A
 * 3381 SQUARE NA
 * 	006E LATIN SMALL LETTER N
 * 	0041 LATIN CAPITAL LETTER A
 * 3382 SQUARE MU A
 * 	03BC GREEK SMALL LETTER MU
 * 	0041 LATIN CAPITAL LETTER A
 * 3383 SQUARE MA
 * 	006D LATIN SMALL LETTER M
 * 	0041 LATIN CAPITAL LETTER A
 * 3384 SQUARE KA
 * 	006B LATIN SMALL LETTER K
 * 	0041 LATIN CAPITAL LETTER A
 * 3385 SQUARE KB
 * 	004B LATIN CAPITAL LETTER K
 * 	0042 LATIN CAPITAL LETTER B
 * 3386 SQUARE MB
 * 	004D LATIN CAPITAL LETTER M
 * 	0042 LATIN CAPITAL LETTER B
 * 3387 SQUARE GB
 * 	0047 LATIN CAPITAL LETTER G
 * 	0042 LATIN CAPITAL LETTER B
 * 3388 SQUARE CAL
 * 	0063 LATIN SMALL LETTER C
 * 	0061 LATIN SMALL LETTER A
 * 	006C LATIN SMALL LETTER L
 * 3389 SQUARE KCAL
 * 	006B LATIN SMALL LETTER K
 * 	0063 LATIN SMALL LETTER C
 * 	0061 LATIN SMALL LETTER A
 * 	006C LATIN SMALL LETTER L
 * 338A SQUARE PF
 * 	0070 LATIN SMALL LETTER P
 * 	0046 LATIN CAPITAL LETTER F
 * 338B SQUARE NF
 * 	006E LATIN SMALL LETTER N
 * 	0046 LATIN CAPITAL LETTER F
 * 338C SQUARE MU F
 * 	03BC GREEK SMALL LETTER MU
 * 	0046 LATIN CAPITAL LETTER F
 * 338D SQUARE MU G
 * 	03BC GREEK SMALL LETTER MU
 * 	0067 LATIN SMALL LETTER G
 * 338E SQUARE MG
 * 	006D LATIN SMALL LETTER M
 * 	0067 LATIN SMALL LETTER G
 * 338F SQUARE KG
 * 	006B LATIN SMALL LETTER K
 * 	0067 LATIN SMALL LETTER G
 * 3390 SQUARE HZ
 * 	0048 LATIN CAPITAL LETTER H
 * 	007A LATIN SMALL LETTER Z
 * 3391 SQUARE KHZ
 * 	006B LATIN SMALL LETTER K
 * 	0048 LATIN CAPITAL LETTER H
 * 	007A LATIN SMALL LETTER Z
 * 3392 SQUARE MHZ
 * 	004D LATIN CAPITAL LETTER M
 * 	0048 LATIN CAPITAL LETTER H
 * 	007A LATIN SMALL LETTER Z
 * 3393 SQUARE GHZ
 * 	0047 LATIN CAPITAL LETTER G
 * 	0048 LATIN CAPITAL LETTER H
 * 	007A LATIN SMALL LETTER Z
 * 3394 SQUARE THZ
 * 	0054 LATIN CAPITAL LETTER T
 * 	0048 LATIN CAPITAL LETTER H
 * 	007A LATIN SMALL LETTER Z
 * 3395 SQUARE MU L
 * 	03BC GREEK SMALL LETTER MU
 * 	006C LATIN SMALL LETTER L
 * 3396 SQUARE ML
 * 	006D LATIN SMALL LETTER M
 * 	006C LATIN SMALL LETTER L
 * 3397 SQUARE DL
 * 	0064 LATIN SMALL LETTER D
 * 	006C LATIN SMALL LETTER L
 * 3398 SQUARE KL
 * 	006B LATIN SMALL LETTER K
 * 	006C LATIN SMALL LETTER L
 * 3399 SQUARE FM
 * 	0066 LATIN SMALL LETTER F
 * 	006D LATIN SMALL LETTER M
 * 339A SQUARE NM
 * 	006E LATIN SMALL LETTER N
 * 	006D LATIN SMALL LETTER M
 * 339B SQUARE MU M
 * 	03BC GREEK SMALL LETTER MU
 * 	006D LATIN SMALL LETTER M
 * 339C SQUARE MM
 * 	006D LATIN SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * 339D SQUARE CM
 * 	0063 LATIN SMALL LETTER C
 * 	006D LATIN SMALL LETTER M
 * 339E SQUARE KM
 * 	006B LATIN SMALL LETTER K
 * 	006D LATIN SMALL LETTER M
 * 339F SQUARE MM SQUARED
 * 	006D LATIN SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * 	0032 DIGIT TWO
 * 33A0 SQUARE CM SQUARED
 * 	0063 LATIN SMALL LETTER C
 * 	006D LATIN SMALL LETTER M
 * 	0032 DIGIT TWO
 * 33A1 SQUARE M SQUARED
 * 	006D LATIN SMALL LETTER M
 * 	0032 DIGIT TWO
 * 33A2 SQUARE KM SQUARED
 * 	006B LATIN SMALL LETTER K
 * 	006D LATIN SMALL LETTER M
 * 	0032 DIGIT TWO
 * 33A3 SQUARE MM CUBED
 * 	006D LATIN SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * 	0033 DIGIT THREE
 * 33A4 SQUARE CM CUBED
 * 	0063 LATIN SMALL LETTER C
 * 	006D LATIN SMALL LETTER M
 * 	0033 DIGIT THREE
 * 33A5 SQUARE M CUBED
 * 	006D LATIN SMALL LETTER M
 * 	0033 DIGIT THREE
 * 33A6 SQUARE KM CUBED
 * 	006B LATIN SMALL LETTER K
 * 	006D LATIN SMALL LETTER M
 * 	0033 DIGIT THREE
 * 33A7 SQUARE M OVER S
 * 	006D LATIN SMALL LETTER M
 * 	2215 DIVISION SLASH
 * 	0073 LATIN SMALL LETTER S
 * 33A8 SQUARE M OVER S SQUARED
 * 	006D LATIN SMALL LETTER M
 * 	2215 DIVISION SLASH
 * 	0073 LATIN SMALL LETTER S
 * 	0032 DIGIT TWO
 * 33A9 SQUARE PA
 * 	0050 LATIN CAPITAL LETTER P
 * 	0061 LATIN SMALL LETTER A
 * 33AA SQUARE KPA
 * 	006B LATIN SMALL LETTER K
 * 	0050 LATIN CAPITAL LETTER P
 * 	0061 LATIN SMALL LETTER A
 * 33AB SQUARE MPA
 * 	004D LATIN CAPITAL LETTER M
 * 	0050 LATIN CAPITAL LETTER P
 * 	0061 LATIN SMALL LETTER A
 * 33AC SQUARE GPA
 * 	0047 LATIN CAPITAL LETTER G
 * 	0050 LATIN CAPITAL LETTER P
 * 	0061 LATIN SMALL LETTER A
 * 33AD SQUARE RAD
 * 	0072 LATIN SMALL LETTER R
 * 	0061 LATIN SMALL LETTER A
 * 	0064 LATIN SMALL LETTER D
 * 33AE SQUARE RAD OVER S
 * 	0072 LATIN SMALL LETTER R
 * 	0061 LATIN SMALL LETTER A
 * 	0064 LATIN SMALL LETTER D
 * 	2215 DIVISION SLASH
 * 	0073 LATIN SMALL LETTER S
 * 33AF SQUARE RAD OVER S SQUARED
 * 	0072 LATIN SMALL LETTER R
 * 	0061 LATIN SMALL LETTER A
 * 	0064 LATIN SMALL LETTER D
 * 	2215 DIVISION SLASH
 * 	0073 LATIN SMALL LETTER S
 * 	0032 DIGIT TWO
 * 33B0 SQUARE PS
 * 	0070 LATIN SMALL LETTER P
 * 	0073 LATIN SMALL LETTER S
 * 33B1 SQUARE NS
 * 	006E LATIN SMALL LETTER N
 * 	0073 LATIN SMALL LETTER S
 * 33B2 SQUARE MU S
 * 	03BC GREEK SMALL LETTER MU
 * 	0073 LATIN SMALL LETTER S
 * 33B3 SQUARE MS
 * 	006D LATIN SMALL LETTER M
 * 	0073 LATIN SMALL LETTER S
 * 33B4 SQUARE PV
 * 	0070 LATIN SMALL LETTER P
 * 	0056 LATIN CAPITAL LETTER V
 * 33B5 SQUARE NV
 * 	006E LATIN SMALL LETTER N
 * 	0056 LATIN CAPITAL LETTER V
 * 33B6 SQUARE MU V
 * 	03BC GREEK SMALL LETTER MU
 * 	0056 LATIN CAPITAL LETTER V
 * 33B7 SQUARE MV
 * 	006D LATIN SMALL LETTER M
 * 	0056 LATIN CAPITAL LETTER V
 * 33B8 SQUARE KV
 * 	006B LATIN SMALL LETTER K
 * 	0056 LATIN CAPITAL LETTER V
 * 33B9 SQUARE MV MEGA
 * 	004D LATIN CAPITAL LETTER M
 * 	0056 LATIN CAPITAL LETTER V
 * 33BA SQUARE PW
 * 	0070 LATIN SMALL LETTER P
 * 	0057 LATIN CAPITAL LETTER W
 * 33BB SQUARE NW
 * 	006E LATIN SMALL LETTER N
 * 	0057 LATIN CAPITAL LETTER W
 * 33BC SQUARE MU W
 * 	03BC GREEK SMALL LETTER MU
 * 	0057 LATIN CAPITAL LETTER W
 * 33BD SQUARE MW
 * 	006D LATIN SMALL LETTER M
 * 	0057 LATIN CAPITAL LETTER W
 * 33BE SQUARE KW
 * 	006B LATIN SMALL LETTER K
 * 	0057 LATIN CAPITAL LETTER W
 * 33BF SQUARE MW MEGA
 * 	004D LATIN CAPITAL LETTER M
 * 	0057 LATIN CAPITAL LETTER W
 * 33C0 SQUARE K OHM
 * 	006B LATIN SMALL LETTER K
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 33C1 SQUARE M OHM
 * 	004D LATIN CAPITAL LETTER M
 * 	03A9 GREEK CAPITAL LETTER OMEGA
 * 33C2 SQUARE AM
 * 	0061 LATIN SMALL LETTER A
 * 	002E FULL STOP
 * 	006D LATIN SMALL LETTER M
 * 	002E FULL STOP
 * 33C3 SQUARE BQ
 * 	0042 LATIN CAPITAL LETTER B
 * 	0071 LATIN SMALL LETTER Q
 * 33C4 SQUARE CC
 * 	0063 LATIN SMALL LETTER C
 * 	0063 LATIN SMALL LETTER C
 * 33C5 SQUARE CD
 * 	0063 LATIN SMALL LETTER C
 * 	0064 LATIN SMALL LETTER D
 * 33C6 SQUARE C OVER KG
 * 	0043 LATIN CAPITAL LETTER C
 * 	2215 DIVISION SLASH
 * 	006B LATIN SMALL LETTER K
 * 	0067 LATIN SMALL LETTER G
 * 33C7 SQUARE CO
 * 	0043 LATIN CAPITAL LETTER C
 * 	006F LATIN SMALL LETTER O
 * 	002E FULL STOP
 * 33C8 SQUARE DB
 * 	0064 LATIN SMALL LETTER D
 * 	0042 LATIN CAPITAL LETTER B
 * 33C9 SQUARE GY
 * 	0047 LATIN CAPITAL LETTER G
 * 	0079 LATIN SMALL LETTER Y
 * 33CA SQUARE HA
 * 	0068 LATIN SMALL LETTER H
 * 	0061 LATIN SMALL LETTER A
 * 33CB SQUARE HP
 * 	0048 LATIN CAPITAL LETTER H
 * 	0050 LATIN CAPITAL LETTER P
 * 33CC SQUARE IN
 * 	0069 LATIN SMALL LETTER I
 * 	006E LATIN SMALL LETTER N
 * 33CD SQUARE KK
 * 	004B LATIN CAPITAL LETTER K
 * 	004B LATIN CAPITAL LETTER K
 * 33CE SQUARE KM CAPITAL
 * 	004B LATIN CAPITAL LETTER K
 * 	004D LATIN CAPITAL LETTER M
 * 33CF SQUARE KT
 * 	006B LATIN SMALL LETTER K
 * 	0074 LATIN SMALL LETTER T
 * 33D0 SQUARE LM
 * 	006C LATIN SMALL LETTER L
 * 	006D LATIN SMALL LETTER M
 * 33D1 SQUARE LN
 * 	006C LATIN SMALL LETTER L
 * 	006E LATIN SMALL LETTER N
 * 33D2 SQUARE LOG
 * 	006C LATIN SMALL LETTER L
 * 	006F LATIN SMALL LETTER O
 * 	0067 LATIN SMALL LETTER G
 * 33D3 SQUARE LX
 * 	006C LATIN SMALL LETTER L
 * 	0078 LATIN SMALL LETTER X
 * 33D4 SQUARE MB SMALL
 * 	006D LATIN SMALL LETTER M
 * 	0062 LATIN SMALL LETTER B
 * 33D5 SQUARE MIL
 * 	006D LATIN SMALL LETTER M
 * 	0069 LATIN SMALL LETTER I
 * 	006C LATIN SMALL LETTER L
 * 33D6 SQUARE MOL
 * 	006D LATIN SMALL LETTER M
 * 	006F LATIN SMALL LETTER O
 * 	006C LATIN SMALL LETTER L
 * 33D7 SQUARE PH
 * 	0050 LATIN CAPITAL LETTER P
 * 	0048 LATIN CAPITAL LETTER H
 * 33D8 SQUARE PM
 * 	0070 LATIN SMALL LETTER P
 * 	002E FULL STOP
 * 	006D LATIN SMALL LETTER M
 * 	002E FULL STOP
 * 33D9 SQUARE PPM
 * 	0050 LATIN CAPITAL LETTER P
 * 	0050 LATIN CAPITAL LETTER P
 * 	004D LATIN CAPITAL LETTER M
 * 33DA SQUARE PR
 * 	0050 LATIN CAPITAL LETTER P
 * 	0052 LATIN CAPITAL LETTER R
 * 33DB SQUARE SR
 * 	0073 LATIN SMALL LETTER S
 * 	0072 LATIN SMALL LETTER R
 * 33DC SQUARE SV
 * 	0053 LATIN CAPITAL LETTER S
 * 	0076 LATIN SMALL LETTER V
 * 33DD SQUARE WB
 * 	0057 LATIN CAPITAL LETTER W
 * 	0062 LATIN SMALL LETTER B
 * 33DE SQUARE V OVER M
 * 	0056 LATIN CAPITAL LETTER V
 * 	2215 DIVISION SLASH
 * 	006D LATIN SMALL LETTER M
 * 33DF SQUARE A OVER M
 * 	0041 LATIN CAPITAL LETTER A
 * 	2215 DIVISION SLASH
 * 	006D LATIN SMALL LETTER M
 * 33E0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY ONE
 * 	0031 DIGIT ONE
 * 	65E5 ??
 * 33E1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWO
 * 	0032 DIGIT TWO
 * 	65E5 ??
 * 33E2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THREE
 * 	0033 DIGIT THREE
 * 	65E5 ??
 * 33E3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FOUR
 * 	0034 DIGIT FOUR
 * 	65E5 ??
 * 33E4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FIVE
 * 	0035 DIGIT FIVE
 * 	65E5 ??
 * 33E5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SIX
 * 	0036 DIGIT SIX
 * 	65E5 ??
 * 33E6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SEVEN
 * 	0037 DIGIT SEVEN
 * 	65E5 ??
 * 33E7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY EIGHT
 * 	0038 DIGIT EIGHT
 * 	65E5 ??
 * 33E8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY NINE
 * 	0039 DIGIT NINE
 * 	65E5 ??
 * 33E9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TEN
 * 	0031 DIGIT ONE
 * 	0030 DIGIT ZERO
 * 	65E5 ??
 * 33EA IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY ELEVEN
 * 	0031 DIGIT ONE
 * 	0031 DIGIT ONE
 * 	65E5 ??
 * 33EB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWELVE
 * 	0031 DIGIT ONE
 * 	0032 DIGIT TWO
 * 	65E5 ??
 * 33EC IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTEEN
 * 	0031 DIGIT ONE
 * 	0033 DIGIT THREE
 * 	65E5 ??
 * 33ED IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FOURTEEN
 * 	0031 DIGIT ONE
 * 	0034 DIGIT FOUR
 * 	65E5 ??
 * 33EE IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY FIFTEEN
 * 	0031 DIGIT ONE
 * 	0035 DIGIT FIVE
 * 	65E5 ??
 * 33EF IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SIXTEEN
 * 	0031 DIGIT ONE
 * 	0036 DIGIT SIX
 * 	65E5 ??
 * 33F0 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY SEVENTEEN
 * 	0031 DIGIT ONE
 * 	0037 DIGIT SEVEN
 * 	65E5 ??
 * 33F1 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY EIGHTEEN
 * 	0031 DIGIT ONE
 * 	0038 DIGIT EIGHT
 * 	65E5 ??
 * 33F2 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY NINETEEN
 * 	0031 DIGIT ONE
 * 	0039 DIGIT NINE
 * 	65E5 ??
 * 33F3 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY
 * 	0032 DIGIT TWO
 * 	0030 DIGIT ZERO
 * 	65E5 ??
 * 33F4 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-ONE
 * 	0032 DIGIT TWO
 * 	0031 DIGIT ONE
 * 	65E5 ??
 * 33F5 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-TWO
 * 	0032 DIGIT TWO
 * 	0032 DIGIT TWO
 * 	65E5 ??
 * 33F6 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-THREE
 * 	0032 DIGIT TWO
 * 	0033 DIGIT THREE
 * 	65E5 ??
 * 33F7 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-FOUR
 * 	0032 DIGIT TWO
 * 	0034 DIGIT FOUR
 * 	65E5 ??
 * 33F8 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-FIVE
 * 	0032 DIGIT TWO
 * 	0035 DIGIT FIVE
 * 	65E5 ??
 * 33F9 IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-SIX
 * 	0032 DIGIT TWO
 * 	0036 DIGIT SIX
 * 	65E5 ??
 * 33FA IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-SEVEN
 * 	0032 DIGIT TWO
 * 	0037 DIGIT SEVEN
 * 	65E5 ??
 * 33FB IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-EIGHT
 * 	0032 DIGIT TWO
 * 	0038 DIGIT EIGHT
 * 	65E5 ??
 * 33FC IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY TWENTY-NINE
 * 	0032 DIGIT TWO
 * 	0039 DIGIT NINE
 * 	65E5 ??
 * 33FD IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTY
 * 	0033 DIGIT THREE
 * 	0030 DIGIT ZERO
 * 	65E5 ??
 * 33FE IDEOGRAPHIC TELEGRAPH SYMBOL FOR DAY THIRTY-ONE
 * 	0033 DIGIT THREE
 * 	0031 DIGIT ONE
 * 	65E5 ??
 * 33FF SQUARE GAL
 * 	0067 LATIN SMALL LETTER G
 * 	0061 LATIN SMALL LETTER A
 * 	006C LATIN SMALL LETTER L
 * A66F COMBINING CYRILLIC VZMET
 * 	0000 
 * A670 COMBINING CYRILLIC TEN MILLIONS SIGN
 * 	0000 
 * A671 COMBINING CYRILLIC HUNDRED MILLIONS SIGN
 * 	0000 
 * A672 COMBINING CYRILLIC THOUSAND MILLIONS SIGN
 * 	0000 
 * A674 COMBINING CYRILLIC LETTER UKRAINIAN IE
 * 	0000 
 * A675 COMBINING CYRILLIC LETTER I
 * 	0000 
 * A676 COMBINING CYRILLIC LETTER YI
 * 	0000 
 * A677 COMBINING CYRILLIC LETTER U
 * 	0000 
 * A678 COMBINING CYRILLIC LETTER HARD SIGN
 * 	0000 
 * A679 COMBINING CYRILLIC LETTER YERU
 * 	0000 
 * A67A COMBINING CYRILLIC LETTER SOFT SIGN
 * 	0000 
 * A67B COMBINING CYRILLIC LETTER OMEGA
 * 	0000 
 * A67C COMBINING CYRILLIC KAVYKA
 * 	0000 
 * A67D COMBINING CYRILLIC PAYEROK
 * 	0000 
 * A69F COMBINING CYRILLIC LETTER IOTIFIED E
 * 	0000 
 * A6F0 BAMUM COMBINING MARK KOQNDON
 * 	0000 
 * A6F1 BAMUM COMBINING MARK TUKWENTIS
 * 	0000 
 * A770 MODIFIER LETTER US
 * 	A76F LATIN SMALL LETTER CON
 * A7F8 MODIFIER LETTER CAPITAL H WITH STROKE
 * 	0126 LATIN CAPITAL LETTER H WITH STROKE
 * A7F9 MODIFIER LETTER SMALL LIGATURE OE
 * 	0153 LATIN SMALL LIGATURE OE
 * A802 SYLOTI NAGRI SIGN DVISVARA
 * 	0000 
 * A806 SYLOTI NAGRI SIGN HASANTA
 * 	0000 
 * A80B SYLOTI NAGRI SIGN ANUSVARA
 * 	0000 
 * A823 SYLOTI NAGRI VOWEL SIGN A
 * 	0000 
 * A824 SYLOTI NAGRI VOWEL SIGN I
 * 	0000 
 * A825 SYLOTI NAGRI VOWEL SIGN U
 * 	0000 
 * A826 SYLOTI NAGRI VOWEL SIGN E
 * 	0000 
 * A827 SYLOTI NAGRI VOWEL SIGN OO
 * 	0000 
 * A880 SAURASHTRA SIGN ANUSVARA
 * 	0000 
 * A881 SAURASHTRA SIGN VISARGA
 * 	0000 
 * A8B4 SAURASHTRA CONSONANT SIGN HAARU
 * 	0000 
 * A8B5 SAURASHTRA VOWEL SIGN AA
 * 	0000 
 * A8B6 SAURASHTRA VOWEL SIGN I
 * 	0000 
 * A8B7 SAURASHTRA VOWEL SIGN II
 * 	0000 
 * A8B8 SAURASHTRA VOWEL SIGN U
 * 	0000 
 * A8B9 SAURASHTRA VOWEL SIGN UU
 * 	0000 
 * A8BA SAURASHTRA VOWEL SIGN VOCALIC R
 * 	0000 
 * A8BB SAURASHTRA VOWEL SIGN VOCALIC RR
 * 	0000 
 * A8BC SAURASHTRA VOWEL SIGN VOCALIC L
 * 	0000 
 * A8BD SAURASHTRA VOWEL SIGN VOCALIC LL
 * 	0000 
 * A8BE SAURASHTRA VOWEL SIGN E
 * 	0000 
 * A8BF SAURASHTRA VOWEL SIGN EE
 * 	0000 
 * A8C0 SAURASHTRA VOWEL SIGN AI
 * 	0000 
 * A8C1 SAURASHTRA VOWEL SIGN O
 * 	0000 
 * A8C2 SAURASHTRA VOWEL SIGN OO
 * 	0000 
 * A8C3 SAURASHTRA VOWEL SIGN AU
 * 	0000 
 * A8C4 SAURASHTRA SIGN VIRAMA
 * 	0000 
 * A926 KAYAH LI VOWEL UE
 * 	0000 
 * A927 KAYAH LI VOWEL E
 * 	0000 
 * A928 KAYAH LI VOWEL U
 * 	0000 
 * A929 KAYAH LI VOWEL EE
 * 	0000 
 * A92A KAYAH LI VOWEL O
 * 	0000 
 * A92B KAYAH LI TONE PLOPHU
 * 	0000 
 * A92C KAYAH LI TONE CALYA
 * 	0000 
 * A92D KAYAH LI TONE CALYA PLOPHU
 * 	0000 
 * A947 REJANG VOWEL SIGN I
 * 	0000 
 * A948 REJANG VOWEL SIGN U
 * 	0000 
 * A949 REJANG VOWEL SIGN E
 * 	0000 
 * A94A REJANG VOWEL SIGN AI
 * 	0000 
 * A94B REJANG VOWEL SIGN O
 * 	0000 
 * A94C REJANG VOWEL SIGN AU
 * 	0000 
 * A94D REJANG VOWEL SIGN EU
 * 	0000 
 * A94E REJANG VOWEL SIGN EA
 * 	0000 
 * A94F REJANG CONSONANT SIGN NG
 * 	0000 
 * A950 REJANG CONSONANT SIGN N
 * 	0000 
 * A951 REJANG CONSONANT SIGN R
 * 	0000 
 * A952 REJANG CONSONANT SIGN H
 * 	0000 
 * A953 REJANG VIRAMA
 * 	0000 
 * A980 JAVANESE SIGN PANYANGGA
 * 	0000 
 * A981 JAVANESE SIGN CECAK
 * 	0000 
 * A982 JAVANESE SIGN LAYAR
 * 	0000 
 * A983 JAVANESE SIGN WIGNYAN
 * 	0000 
 * A9B3 JAVANESE SIGN CECAK TELU
 * 	0000 
 * A9B4 JAVANESE VOWEL SIGN TARUNG
 * 	0000 
 * A9B5 JAVANESE VOWEL SIGN TOLONG
 * 	0000 
 * A9B6 JAVANESE VOWEL SIGN WULU
 * 	0000 
 * A9B7 JAVANESE VOWEL SIGN WULU MELIK
 * 	0000 
 * A9B8 JAVANESE VOWEL SIGN SUKU
 * 	0000 
 * A9B9 JAVANESE VOWEL SIGN SUKU MENDUT
 * 	0000 
 * A9BA JAVANESE VOWEL SIGN TALING
 * 	0000 
 * A9BB JAVANESE VOWEL SIGN DIRGA MURE
 * 	0000 
 * A9BC JAVANESE VOWEL SIGN PEPET
 * 	0000 
 * A9BD JAVANESE CONSONANT SIGN KERET
 * 	0000 
 * A9BE JAVANESE CONSONANT SIGN PENGKAL
 * 	0000 
 * A9BF JAVANESE CONSONANT SIGN CAKRA
 * 	0000 
 * A9C0 JAVANESE PANGKON
 * 	0000 
 * AA29 CHAM VOWEL SIGN AA
 * 	0000 
 * AA2A CHAM VOWEL SIGN I
 * 	0000 
 * AA2B CHAM VOWEL SIGN II
 * 	0000 
 * AA2C CHAM VOWEL SIGN EI
 * 	0000 
 * AA2D CHAM VOWEL SIGN U
 * 	0000 
 * AA2E CHAM VOWEL SIGN OE
 * 	0000 
 * AA2F CHAM VOWEL SIGN O
 * 	0000 
 * AA30 CHAM VOWEL SIGN AI
 * 	0000 
 * AA31 CHAM VOWEL SIGN AU
 * 	0000 
 * AA32 CHAM VOWEL SIGN UE
 * 	0000 
 * AA33 CHAM CONSONANT SIGN YA
 * 	0000 
 * AA34 CHAM CONSONANT SIGN RA
 * 	0000 
 * AA35 CHAM CONSONANT SIGN LA
 * 	0000 
 * AA36 CHAM CONSONANT SIGN WA
 * 	0000 
 * AA43 CHAM CONSONANT SIGN FINAL NG
 * 	0000 
 * AA4C CHAM CONSONANT SIGN FINAL M
 * 	0000 
 * AA4D CHAM CONSONANT SIGN FINAL H
 * 	0000 
 * AA7B MYANMAR SIGN PAO KAREN TONE
 * 	0000 
 * AAB0 TAI VIET MAI KANG
 * 	0000 
 * AAB2 TAI VIET VOWEL I
 * 	0000 
 * AAB3 TAI VIET VOWEL UE
 * 	0000 
 * AAB4 TAI VIET VOWEL U
 * 	0000 
 * AAB7 TAI VIET MAI KHIT
 * 	0000 
 * AAB8 TAI VIET VOWEL IA
 * 	0000 
 * AABE TAI VIET VOWEL AM
 * 	0000 
 * AABF TAI VIET TONE MAI EK
 * 	0000 
 * AAC1 TAI VIET TONE MAI THO
 * 	0000 
 * AAEB MEETEI MAYEK VOWEL SIGN II
 * 	0000 
 * AAEC MEETEI MAYEK VOWEL SIGN UU
 * 	0000 
 * AAED MEETEI MAYEK VOWEL SIGN AAI
 * 	0000 
 * AAEE MEETEI MAYEK VOWEL SIGN AU
 * 	0000 
 * AAEF MEETEI MAYEK VOWEL SIGN AAU
 * 	0000 
 * AAF5 MEETEI MAYEK VOWEL SIGN VISARGA
 * 	0000 
 * AAF6 MEETEI MAYEK VIRAMA
 * 	0000 
 * ABE3 MEETEI MAYEK VOWEL SIGN ONAP
 * 	0000 
 * ABE4 MEETEI MAYEK VOWEL SIGN INAP
 * 	0000 
 * ABE5 MEETEI MAYEK VOWEL SIGN ANAP
 * 	0000 
 * ABE6 MEETEI MAYEK VOWEL SIGN YENAP
 * 	0000 
 * ABE7 MEETEI MAYEK VOWEL SIGN SOUNAP
 * 	0000 
 * ABE8 MEETEI MAYEK VOWEL SIGN UNAP
 * 	0000 
 * ABE9 MEETEI MAYEK VOWEL SIGN CHEINAP
 * 	0000 
 * ABEA MEETEI MAYEK VOWEL SIGN NUNG
 * 	0000 
 * ABEC MEETEI MAYEK LUM IYEK
 * 	0000 
 * ABED MEETEI MAYEK APUN IYEK
 * 	0000 
 * F900 CJK COMPATIBILITY IDEOGRAPH-F900
 * 	8C48 ??
 * F901 CJK COMPATIBILITY IDEOGRAPH-F901
 * 	66F4 ??
 * F902 CJK COMPATIBILITY IDEOGRAPH-F902
 * 	8ECA ??
 * F903 CJK COMPATIBILITY IDEOGRAPH-F903
 * 	8CC8 ??
 * F904 CJK COMPATIBILITY IDEOGRAPH-F904
 * 	6ED1 ??
 * F905 CJK COMPATIBILITY IDEOGRAPH-F905
 * 	4E32 ??
 * F906 CJK COMPATIBILITY IDEOGRAPH-F906
 * 	53E5 ??
 * F907 CJK COMPATIBILITY IDEOGRAPH-F907
 * 	9F9C ??
 * F908 CJK COMPATIBILITY IDEOGRAPH-F908
 * 	9F9C ??
 * F909 CJK COMPATIBILITY IDEOGRAPH-F909
 * 	5951 ??
 * F90A CJK COMPATIBILITY IDEOGRAPH-F90A
 * 	91D1 ??
 * F90B CJK COMPATIBILITY IDEOGRAPH-F90B
 * 	5587 ??
 * F90C CJK COMPATIBILITY IDEOGRAPH-F90C
 * 	5948 ??
 * F90D CJK COMPATIBILITY IDEOGRAPH-F90D
 * 	61F6 ??
 * F90E CJK COMPATIBILITY IDEOGRAPH-F90E
 * 	7669 ??
 * F90F CJK COMPATIBILITY IDEOGRAPH-F90F
 * 	7F85 ??
 * F910 CJK COMPATIBILITY IDEOGRAPH-F910
 * 	863F ??
 * F911 CJK COMPATIBILITY IDEOGRAPH-F911
 * 	87BA ??
 * F912 CJK COMPATIBILITY IDEOGRAPH-F912
 * 	88F8 ??
 * F913 CJK COMPATIBILITY IDEOGRAPH-F913
 * 	908F ??
 * F914 CJK COMPATIBILITY IDEOGRAPH-F914
 * 	6A02 ??
 * F915 CJK COMPATIBILITY IDEOGRAPH-F915
 * 	6D1B ??
 * F916 CJK COMPATIBILITY IDEOGRAPH-F916
 * 	70D9 ??
 * F917 CJK COMPATIBILITY IDEOGRAPH-F917
 * 	73DE ??
 * F918 CJK COMPATIBILITY IDEOGRAPH-F918
 * 	843D ??
 * F919 CJK COMPATIBILITY IDEOGRAPH-F919
 * 	916A ??
 * F91A CJK COMPATIBILITY IDEOGRAPH-F91A
 * 	99F1 ??
 * F91B CJK COMPATIBILITY IDEOGRAPH-F91B
 * 	4E82 ??
 * F91C CJK COMPATIBILITY IDEOGRAPH-F91C
 * 	5375 ??
 * F91D CJK COMPATIBILITY IDEOGRAPH-F91D
 * 	6B04 ??
 * F91E CJK COMPATIBILITY IDEOGRAPH-F91E
 * 	721B ??
 * F91F CJK COMPATIBILITY IDEOGRAPH-F91F
 * 	862D ??
 * F920 CJK COMPATIBILITY IDEOGRAPH-F920
 * 	9E1E ??
 * F921 CJK COMPATIBILITY IDEOGRAPH-F921
 * 	5D50 ??
 * F922 CJK COMPATIBILITY IDEOGRAPH-F922
 * 	6FEB ??
 * F923 CJK COMPATIBILITY IDEOGRAPH-F923
 * 	85CD ??
 * F924 CJK COMPATIBILITY IDEOGRAPH-F924
 * 	8964 ??
 * F925 CJK COMPATIBILITY IDEOGRAPH-F925
 * 	62C9 ??
 * F926 CJK COMPATIBILITY IDEOGRAPH-F926
 * 	81D8 ??
 * F927 CJK COMPATIBILITY IDEOGRAPH-F927
 * 	881F ??
 * F928 CJK COMPATIBILITY IDEOGRAPH-F928
 * 	5ECA ??
 * F929 CJK COMPATIBILITY IDEOGRAPH-F929
 * 	6717 ??
 * F92A CJK COMPATIBILITY IDEOGRAPH-F92A
 * 	6D6A ??
 * F92B CJK COMPATIBILITY IDEOGRAPH-F92B
 * 	72FC ??
 * F92C CJK COMPATIBILITY IDEOGRAPH-F92C
 * 	90CE ??
 * F92D CJK COMPATIBILITY IDEOGRAPH-F92D
 * 	4F86 ??
 * F92E CJK COMPATIBILITY IDEOGRAPH-F92E
 * 	51B7 ??
 * F92F CJK COMPATIBILITY IDEOGRAPH-F92F
 * 	52DE ??
 * F930 CJK COMPATIBILITY IDEOGRAPH-F930
 * 	64C4 ??
 * F931 CJK COMPATIBILITY IDEOGRAPH-F931
 * 	6AD3 ??
 * F932 CJK COMPATIBILITY IDEOGRAPH-F932
 * 	7210 ??
 * F933 CJK COMPATIBILITY IDEOGRAPH-F933
 * 	76E7 ??
 * F934 CJK COMPATIBILITY IDEOGRAPH-F934
 * 	8001 ??
 * F935 CJK COMPATIBILITY IDEOGRAPH-F935
 * 	8606 ??
 * F936 CJK COMPATIBILITY IDEOGRAPH-F936
 * 	865C ??
 * F937 CJK COMPATIBILITY IDEOGRAPH-F937
 * 	8DEF ??
 * F938 CJK COMPATIBILITY IDEOGRAPH-F938
 * 	9732 ??
 * F939 CJK COMPATIBILITY IDEOGRAPH-F939
 * 	9B6F ??
 * F93A CJK COMPATIBILITY IDEOGRAPH-F93A
 * 	9DFA ??
 * F93B CJK COMPATIBILITY IDEOGRAPH-F93B
 * 	788C ??
 * F93C CJK COMPATIBILITY IDEOGRAPH-F93C
 * 	797F ??
 * F93D CJK COMPATIBILITY IDEOGRAPH-F93D
 * 	7DA0 ??
 * F93E CJK COMPATIBILITY IDEOGRAPH-F93E
 * 	83C9 ??
 * F93F CJK COMPATIBILITY IDEOGRAPH-F93F
 * 	9304 ??
 * F940 CJK COMPATIBILITY IDEOGRAPH-F940
 * 	9E7F ??
 * F941 CJK COMPATIBILITY IDEOGRAPH-F941
 * 	8AD6 ??
 * F942 CJK COMPATIBILITY IDEOGRAPH-F942
 * 	58DF ??
 * F943 CJK COMPATIBILITY IDEOGRAPH-F943
 * 	5F04 ??
 * F944 CJK COMPATIBILITY IDEOGRAPH-F944
 * 	7C60 ??
 * F945 CJK COMPATIBILITY IDEOGRAPH-F945
 * 	807E ??
 * F946 CJK COMPATIBILITY IDEOGRAPH-F946
 * 	7262 ??
 * F947 CJK COMPATIBILITY IDEOGRAPH-F947
 * 	78CA ??
 * F948 CJK COMPATIBILITY IDEOGRAPH-F948
 * 	8CC2 ??
 * F949 CJK COMPATIBILITY IDEOGRAPH-F949
 * 	96F7 ??
 * F94A CJK COMPATIBILITY IDEOGRAPH-F94A
 * 	58D8 ??
 * F94B CJK COMPATIBILITY IDEOGRAPH-F94B
 * 	5C62 ??
 * F94C CJK COMPATIBILITY IDEOGRAPH-F94C
 * 	6A13 ??
 * F94D CJK COMPATIBILITY IDEOGRAPH-F94D
 * 	6DDA ??
 * F94E CJK COMPATIBILITY IDEOGRAPH-F94E
 * 	6F0F ??
 * F94F CJK COMPATIBILITY IDEOGRAPH-F94F
 * 	7D2F ??
 * F950 CJK COMPATIBILITY IDEOGRAPH-F950
 * 	7E37 ??
 * F951 CJK COMPATIBILITY IDEOGRAPH-F951
 * 	964B ??
 * F952 CJK COMPATIBILITY IDEOGRAPH-F952
 * 	52D2 ??
 * F953 CJK COMPATIBILITY IDEOGRAPH-F953
 * 	808B ??
 * F954 CJK COMPATIBILITY IDEOGRAPH-F954
 * 	51DC ??
 * F955 CJK COMPATIBILITY IDEOGRAPH-F955
 * 	51CC ??
 * F956 CJK COMPATIBILITY IDEOGRAPH-F956
 * 	7A1C ??
 * F957 CJK COMPATIBILITY IDEOGRAPH-F957
 * 	7DBE ??
 * F958 CJK COMPATIBILITY IDEOGRAPH-F958
 * 	83F1 ??
 * F959 CJK COMPATIBILITY IDEOGRAPH-F959
 * 	9675 ??
 * F95A CJK COMPATIBILITY IDEOGRAPH-F95A
 * 	8B80 ??
 * F95B CJK COMPATIBILITY IDEOGRAPH-F95B
 * 	62CF ??
 * F95C CJK COMPATIBILITY IDEOGRAPH-F95C
 * 	6A02 ??
 * F95D CJK COMPATIBILITY IDEOGRAPH-F95D
 * 	8AFE ??
 * F95E CJK COMPATIBILITY IDEOGRAPH-F95E
 * 	4E39 ??
 * F95F CJK COMPATIBILITY IDEOGRAPH-F95F
 * 	5BE7 ??
 * F960 CJK COMPATIBILITY IDEOGRAPH-F960
 * 	6012 ??
 * F961 CJK COMPATIBILITY IDEOGRAPH-F961
 * 	7387 ??
 * F962 CJK COMPATIBILITY IDEOGRAPH-F962
 * 	7570 ??
 * F963 CJK COMPATIBILITY IDEOGRAPH-F963
 * 	5317 ??
 * F964 CJK COMPATIBILITY IDEOGRAPH-F964
 * 	78FB ??
 * F965 CJK COMPATIBILITY IDEOGRAPH-F965
 * 	4FBF ??
 * F966 CJK COMPATIBILITY IDEOGRAPH-F966
 * 	5FA9 ??
 * F967 CJK COMPATIBILITY IDEOGRAPH-F967
 * 	4E0D ??
 * F968 CJK COMPATIBILITY IDEOGRAPH-F968
 * 	6CCC ??
 * F969 CJK COMPATIBILITY IDEOGRAPH-F969
 * 	6578 ??
 * F96A CJK COMPATIBILITY IDEOGRAPH-F96A
 * 	7D22 ??
 * F96B CJK COMPATIBILITY IDEOGRAPH-F96B
 * 	53C3 ??
 * F96C CJK COMPATIBILITY IDEOGRAPH-F96C
 * 	585E ??
 * F96D CJK COMPATIBILITY IDEOGRAPH-F96D
 * 	7701 ??
 * F96E CJK COMPATIBILITY IDEOGRAPH-F96E
 * 	8449 ??
 * F96F CJK COMPATIBILITY IDEOGRAPH-F96F
 * 	8AAA ??
 * F970 CJK COMPATIBILITY IDEOGRAPH-F970
 * 	6BBA ??
 * F971 CJK COMPATIBILITY IDEOGRAPH-F971
 * 	8FB0 ??
 * F972 CJK COMPATIBILITY IDEOGRAPH-F972
 * 	6C88 ??
 * F973 CJK COMPATIBILITY IDEOGRAPH-F973
 * 	62FE ??
 * F974 CJK COMPATIBILITY IDEOGRAPH-F974
 * 	82E5 ??
 * F975 CJK COMPATIBILITY IDEOGRAPH-F975
 * 	63A0 ??
 * F976 CJK COMPATIBILITY IDEOGRAPH-F976
 * 	7565 ??
 * F977 CJK COMPATIBILITY IDEOGRAPH-F977
 * 	4EAE ??
 * F978 CJK COMPATIBILITY IDEOGRAPH-F978
 * 	5169 ??
 * F979 CJK COMPATIBILITY IDEOGRAPH-F979
 * 	51C9 ??
 * F97A CJK COMPATIBILITY IDEOGRAPH-F97A
 * 	6881 ??
 * F97B CJK COMPATIBILITY IDEOGRAPH-F97B
 * 	7CE7 ??
 * F97C CJK COMPATIBILITY IDEOGRAPH-F97C
 * 	826F ??
 * F97D CJK COMPATIBILITY IDEOGRAPH-F97D
 * 	8AD2 ??
 * F97E CJK COMPATIBILITY IDEOGRAPH-F97E
 * 	91CF ??
 * F97F CJK COMPATIBILITY IDEOGRAPH-F97F
 * 	52F5 ??
 * F980 CJK COMPATIBILITY IDEOGRAPH-F980
 * 	5442 ??
 * F981 CJK COMPATIBILITY IDEOGRAPH-F981
 * 	5973 ??
 * F982 CJK COMPATIBILITY IDEOGRAPH-F982
 * 	5EEC ??
 * F983 CJK COMPATIBILITY IDEOGRAPH-F983
 * 	65C5 ??
 * F984 CJK COMPATIBILITY IDEOGRAPH-F984
 * 	6FFE ??
 * F985 CJK COMPATIBILITY IDEOGRAPH-F985
 * 	792A ??
 * F986 CJK COMPATIBILITY IDEOGRAPH-F986
 * 	95AD ??
 * F987 CJK COMPATIBILITY IDEOGRAPH-F987
 * 	9A6A ??
 * F988 CJK COMPATIBILITY IDEOGRAPH-F988
 * 	9E97 ??
 * F989 CJK COMPATIBILITY IDEOGRAPH-F989
 * 	9ECE ??
 * F98A CJK COMPATIBILITY IDEOGRAPH-F98A
 * 	529B ??
 * F98B CJK COMPATIBILITY IDEOGRAPH-F98B
 * 	66C6 ??
 * F98C CJK COMPATIBILITY IDEOGRAPH-F98C
 * 	6B77 ??
 * F98D CJK COMPATIBILITY IDEOGRAPH-F98D
 * 	8F62 ??
 * F98E CJK COMPATIBILITY IDEOGRAPH-F98E
 * 	5E74 ??
 * F98F CJK COMPATIBILITY IDEOGRAPH-F98F
 * 	6190 ??
 * F990 CJK COMPATIBILITY IDEOGRAPH-F990
 * 	6200 ??
 * F991 CJK COMPATIBILITY IDEOGRAPH-F991
 * 	649A ??
 * F992 CJK COMPATIBILITY IDEOGRAPH-F992
 * 	6F23 ??
 * F993 CJK COMPATIBILITY IDEOGRAPH-F993
 * 	7149 ??
 * F994 CJK COMPATIBILITY IDEOGRAPH-F994
 * 	7489 ??
 * F995 CJK COMPATIBILITY IDEOGRAPH-F995
 * 	79CA ??
 * F996 CJK COMPATIBILITY IDEOGRAPH-F996
 * 	7DF4 ??
 * F997 CJK COMPATIBILITY IDEOGRAPH-F997
 * 	806F ??
 * F998 CJK COMPATIBILITY IDEOGRAPH-F998
 * 	8F26 ??
 * F999 CJK COMPATIBILITY IDEOGRAPH-F999
 * 	84EE ??
 * F99A CJK COMPATIBILITY IDEOGRAPH-F99A
 * 	9023 ??
 * F99B CJK COMPATIBILITY IDEOGRAPH-F99B
 * 	934A ??
 * F99C CJK COMPATIBILITY IDEOGRAPH-F99C
 * 	5217 ??
 * F99D CJK COMPATIBILITY IDEOGRAPH-F99D
 * 	52A3 ??
 * F99E CJK COMPATIBILITY IDEOGRAPH-F99E
 * 	54BD ??
 * F99F CJK COMPATIBILITY IDEOGRAPH-F99F
 * 	70C8 ??
 * F9A0 CJK COMPATIBILITY IDEOGRAPH-F9A0
 * 	88C2 ??
 * F9A1 CJK COMPATIBILITY IDEOGRAPH-F9A1
 * 	8AAA ??
 * F9A2 CJK COMPATIBILITY IDEOGRAPH-F9A2
 * 	5EC9 ??
 * F9A3 CJK COMPATIBILITY IDEOGRAPH-F9A3
 * 	5FF5 ??
 * F9A4 CJK COMPATIBILITY IDEOGRAPH-F9A4
 * 	637B ??
 * F9A5 CJK COMPATIBILITY IDEOGRAPH-F9A5
 * 	6BAE ??
 * F9A6 CJK COMPATIBILITY IDEOGRAPH-F9A6
 * 	7C3E ??
 * F9A7 CJK COMPATIBILITY IDEOGRAPH-F9A7
 * 	7375 ??
 * F9A8 CJK COMPATIBILITY IDEOGRAPH-F9A8
 * 	4EE4 ??
 * F9A9 CJK COMPATIBILITY IDEOGRAPH-F9A9
 * 	56F9 ??
 * F9AA CJK COMPATIBILITY IDEOGRAPH-F9AA
 * 	5BE7 ??
 * F9AB CJK COMPATIBILITY IDEOGRAPH-F9AB
 * 	5DBA ??
 * F9AC CJK COMPATIBILITY IDEOGRAPH-F9AC
 * 	601C ??
 * F9AD CJK COMPATIBILITY IDEOGRAPH-F9AD
 * 	73B2 ??
 * F9AE CJK COMPATIBILITY IDEOGRAPH-F9AE
 * 	7469 ??
 * F9AF CJK COMPATIBILITY IDEOGRAPH-F9AF
 * 	7F9A ??
 * F9B0 CJK COMPATIBILITY IDEOGRAPH-F9B0
 * 	8046 ??
 * F9B1 CJK COMPATIBILITY IDEOGRAPH-F9B1
 * 	9234 ??
 * F9B2 CJK COMPATIBILITY IDEOGRAPH-F9B2
 * 	96F6 ??
 * F9B3 CJK COMPATIBILITY IDEOGRAPH-F9B3
 * 	9748 ??
 * F9B4 CJK COMPATIBILITY IDEOGRAPH-F9B4
 * 	9818 ??
 * F9B5 CJK COMPATIBILITY IDEOGRAPH-F9B5
 * 	4F8B ??
 * F9B6 CJK COMPATIBILITY IDEOGRAPH-F9B6
 * 	79AE ??
 * F9B7 CJK COMPATIBILITY IDEOGRAPH-F9B7
 * 	91B4 ??
 * F9B8 CJK COMPATIBILITY IDEOGRAPH-F9B8
 * 	96B8 ??
 * F9B9 CJK COMPATIBILITY IDEOGRAPH-F9B9
 * 	60E1 ??
 * F9BA CJK COMPATIBILITY IDEOGRAPH-F9BA
 * 	4E86 ??
 * F9BB CJK COMPATIBILITY IDEOGRAPH-F9BB
 * 	50DA ??
 * F9BC CJK COMPATIBILITY IDEOGRAPH-F9BC
 * 	5BEE ??
 * F9BD CJK COMPATIBILITY IDEOGRAPH-F9BD
 * 	5C3F ??
 * F9BE CJK COMPATIBILITY IDEOGRAPH-F9BE
 * 	6599 ??
 * F9BF CJK COMPATIBILITY IDEOGRAPH-F9BF
 * 	6A02 ??
 * F9C0 CJK COMPATIBILITY IDEOGRAPH-F9C0
 * 	71CE ??
 * F9C1 CJK COMPATIBILITY IDEOGRAPH-F9C1
 * 	7642 ??
 * F9C2 CJK COMPATIBILITY IDEOGRAPH-F9C2
 * 	84FC ??
 * F9C3 CJK COMPATIBILITY IDEOGRAPH-F9C3
 * 	907C ??
 * F9C4 CJK COMPATIBILITY IDEOGRAPH-F9C4
 * 	9F8D ??
 * F9C5 CJK COMPATIBILITY IDEOGRAPH-F9C5
 * 	6688 ??
 * F9C6 CJK COMPATIBILITY IDEOGRAPH-F9C6
 * 	962E ??
 * F9C7 CJK COMPATIBILITY IDEOGRAPH-F9C7
 * 	5289 ??
 * F9C8 CJK COMPATIBILITY IDEOGRAPH-F9C8
 * 	677B ??
 * F9C9 CJK COMPATIBILITY IDEOGRAPH-F9C9
 * 	67F3 ??
 * F9CA CJK COMPATIBILITY IDEOGRAPH-F9CA
 * 	6D41 ??
 * F9CB CJK COMPATIBILITY IDEOGRAPH-F9CB
 * 	6E9C ??
 * F9CC CJK COMPATIBILITY IDEOGRAPH-F9CC
 * 	7409 ??
 * F9CD CJK COMPATIBILITY IDEOGRAPH-F9CD
 * 	7559 ??
 * F9CE CJK COMPATIBILITY IDEOGRAPH-F9CE
 * 	786B ??
 * F9CF CJK COMPATIBILITY IDEOGRAPH-F9CF
 * 	7D10 ??
 * F9D0 CJK COMPATIBILITY IDEOGRAPH-F9D0
 * 	985E ??
 * F9D1 CJK COMPATIBILITY IDEOGRAPH-F9D1
 * 	516D ??
 * F9D2 CJK COMPATIBILITY IDEOGRAPH-F9D2
 * 	622E ??
 * F9D3 CJK COMPATIBILITY IDEOGRAPH-F9D3
 * 	9678 ??
 * F9D4 CJK COMPATIBILITY IDEOGRAPH-F9D4
 * 	502B ??
 * F9D5 CJK COMPATIBILITY IDEOGRAPH-F9D5
 * 	5D19 ??
 * F9D6 CJK COMPATIBILITY IDEOGRAPH-F9D6
 * 	6DEA ??
 * F9D7 CJK COMPATIBILITY IDEOGRAPH-F9D7
 * 	8F2A ??
 * F9D8 CJK COMPATIBILITY IDEOGRAPH-F9D8
 * 	5F8B ??
 * F9D9 CJK COMPATIBILITY IDEOGRAPH-F9D9
 * 	6144 ??
 * F9DA CJK COMPATIBILITY IDEOGRAPH-F9DA
 * 	6817 ??
 * F9DB CJK COMPATIBILITY IDEOGRAPH-F9DB
 * 	7387 ??
 * F9DC CJK COMPATIBILITY IDEOGRAPH-F9DC
 * 	9686 ??
 * F9DD CJK COMPATIBILITY IDEOGRAPH-F9DD
 * 	5229 ??
 * F9DE CJK COMPATIBILITY IDEOGRAPH-F9DE
 * 	540F ??
 * F9DF CJK COMPATIBILITY IDEOGRAPH-F9DF
 * 	5C65 ??
 * F9E0 CJK COMPATIBILITY IDEOGRAPH-F9E0
 * 	6613 ??
 * F9E1 CJK COMPATIBILITY IDEOGRAPH-F9E1
 * 	674E ??
 * F9E2 CJK COMPATIBILITY IDEOGRAPH-F9E2
 * 	68A8 ??
 * F9E3 CJK COMPATIBILITY IDEOGRAPH-F9E3
 * 	6CE5 ??
 * F9E4 CJK COMPATIBILITY IDEOGRAPH-F9E4
 * 	7406 ??
 * F9E5 CJK COMPATIBILITY IDEOGRAPH-F9E5
 * 	75E2 ??
 * F9E6 CJK COMPATIBILITY IDEOGRAPH-F9E6
 * 	7F79 ??
 * F9E7 CJK COMPATIBILITY IDEOGRAPH-F9E7
 * 	88CF ??
 * F9E8 CJK COMPATIBILITY IDEOGRAPH-F9E8
 * 	88E1 ??
 * F9E9 CJK COMPATIBILITY IDEOGRAPH-F9E9
 * 	91CC ??
 * F9EA CJK COMPATIBILITY IDEOGRAPH-F9EA
 * 	96E2 ??
 * F9EB CJK COMPATIBILITY IDEOGRAPH-F9EB
 * 	533F ??
 * F9EC CJK COMPATIBILITY IDEOGRAPH-F9EC
 * 	6EBA ??
 * F9ED CJK COMPATIBILITY IDEOGRAPH-F9ED
 * 	541D ??
 * F9EE CJK COMPATIBILITY IDEOGRAPH-F9EE
 * 	71D0 ??
 * F9EF CJK COMPATIBILITY IDEOGRAPH-F9EF
 * 	7498 ??
 * F9F0 CJK COMPATIBILITY IDEOGRAPH-F9F0
 * 	85FA ??
 * F9F1 CJK COMPATIBILITY IDEOGRAPH-F9F1
 * 	96A3 ??
 * F9F2 CJK COMPATIBILITY IDEOGRAPH-F9F2
 * 	9C57 ??
 * F9F3 CJK COMPATIBILITY IDEOGRAPH-F9F3
 * 	9E9F ??
 * F9F4 CJK COMPATIBILITY IDEOGRAPH-F9F4
 * 	6797 ??
 * F9F5 CJK COMPATIBILITY IDEOGRAPH-F9F5
 * 	6DCB ??
 * F9F6 CJK COMPATIBILITY IDEOGRAPH-F9F6
 * 	81E8 ??
 * F9F7 CJK COMPATIBILITY IDEOGRAPH-F9F7
 * 	7ACB ??
 * F9F8 CJK COMPATIBILITY IDEOGRAPH-F9F8
 * 	7B20 ??
 * F9F9 CJK COMPATIBILITY IDEOGRAPH-F9F9
 * 	7C92 ??
 * F9FA CJK COMPATIBILITY IDEOGRAPH-F9FA
 * 	72C0 ??
 * F9FB CJK COMPATIBILITY IDEOGRAPH-F9FB
 * 	7099 ??
 * F9FC CJK COMPATIBILITY IDEOGRAPH-F9FC
 * 	8B58 ??
 * F9FD CJK COMPATIBILITY IDEOGRAPH-F9FD
 * 	4EC0 ??
 * F9FE CJK COMPATIBILITY IDEOGRAPH-F9FE
 * 	8336 ??
 * F9FF CJK COMPATIBILITY IDEOGRAPH-F9FF
 * 	523A ??
 * FA00 CJK COMPATIBILITY IDEOGRAPH-FA00
 * 	5207 ??
 * FA01 CJK COMPATIBILITY IDEOGRAPH-FA01
 * 	5EA6 ??
 * FA02 CJK COMPATIBILITY IDEOGRAPH-FA02
 * 	62D3 ??
 * FA03 CJK COMPATIBILITY IDEOGRAPH-FA03
 * 	7CD6 ??
 * FA04 CJK COMPATIBILITY IDEOGRAPH-FA04
 * 	5B85 ??
 * FA05 CJK COMPATIBILITY IDEOGRAPH-FA05
 * 	6D1E ??
 * FA06 CJK COMPATIBILITY IDEOGRAPH-FA06
 * 	66B4 ??
 * FA07 CJK COMPATIBILITY IDEOGRAPH-FA07
 * 	8F3B ??
 * FA08 CJK COMPATIBILITY IDEOGRAPH-FA08
 * 	884C ??
 * FA09 CJK COMPATIBILITY IDEOGRAPH-FA09
 * 	964D ??
 * FA0A CJK COMPATIBILITY IDEOGRAPH-FA0A
 * 	898B ??
 * FA0B CJK COMPATIBILITY IDEOGRAPH-FA0B
 * 	5ED3 ??
 * FA0C CJK COMPATIBILITY IDEOGRAPH-FA0C
 * 	5140 ??
 * FA0D CJK COMPATIBILITY IDEOGRAPH-FA0D
 * 	55C0 ??
 * FA10 CJK COMPATIBILITY IDEOGRAPH-FA10
 * 	585A ??
 * FA12 CJK COMPATIBILITY IDEOGRAPH-FA12
 * 	6674 ??
 * FA15 CJK COMPATIBILITY IDEOGRAPH-FA15
 * 	51DE ??
 * FA16 CJK COMPATIBILITY IDEOGRAPH-FA16
 * 	732A ??
 * FA17 CJK COMPATIBILITY IDEOGRAPH-FA17
 * 	76CA ??
 * FA18 CJK COMPATIBILITY IDEOGRAPH-FA18
 * 	793C ??
 * FA19 CJK COMPATIBILITY IDEOGRAPH-FA19
 * 	795E ??
 * FA1A CJK COMPATIBILITY IDEOGRAPH-FA1A
 * 	7965 ??
 * FA1B CJK COMPATIBILITY IDEOGRAPH-FA1B
 * 	798F ??
 * FA1C CJK COMPATIBILITY IDEOGRAPH-FA1C
 * 	9756 ??
 * FA1D CJK COMPATIBILITY IDEOGRAPH-FA1D
 * 	7CBE ??
 * FA1E CJK COMPATIBILITY IDEOGRAPH-FA1E
 * 	7FBD ??
 * FA20 CJK COMPATIBILITY IDEOGRAPH-FA20
 * 	8612 ??
 * FA22 CJK COMPATIBILITY IDEOGRAPH-FA22
 * 	8AF8 ??
 * FA25 CJK COMPATIBILITY IDEOGRAPH-FA25
 * 	9038 ??
 * FA26 CJK COMPATIBILITY IDEOGRAPH-FA26
 * 	90FD ??
 * FA2A CJK COMPATIBILITY IDEOGRAPH-FA2A
 * 	98EF ??
 * FA2B CJK COMPATIBILITY IDEOGRAPH-FA2B
 * 	98FC ??
 * FA2C CJK COMPATIBILITY IDEOGRAPH-FA2C
 * 	9928 ??
 * FA2D CJK COMPATIBILITY IDEOGRAPH-FA2D
 * 	9DB4 ??
 * FA2E CJK COMPATIBILITY IDEOGRAPH-FA2E
 * 	90DE ??
 * FA2F CJK COMPATIBILITY IDEOGRAPH-FA2F
 * 	96B7 ??
 * FA30 CJK COMPATIBILITY IDEOGRAPH-FA30
 * 	4FAE ??
 * FA31 CJK COMPATIBILITY IDEOGRAPH-FA31
 * 	50E7 ??
 * FA32 CJK COMPATIBILITY IDEOGRAPH-FA32
 * 	514D ??
 * FA33 CJK COMPATIBILITY IDEOGRAPH-FA33
 * 	52C9 ??
 * FA34 CJK COMPATIBILITY IDEOGRAPH-FA34
 * 	52E4 ??
 * FA35 CJK COMPATIBILITY IDEOGRAPH-FA35
 * 	5351 ??
 * FA36 CJK COMPATIBILITY IDEOGRAPH-FA36
 * 	559D ??
 * FA37 CJK COMPATIBILITY IDEOGRAPH-FA37
 * 	5606 ??
 * FA38 CJK COMPATIBILITY IDEOGRAPH-FA38
 * 	5668 ??
 * FA39 CJK COMPATIBILITY IDEOGRAPH-FA39
 * 	5840 ??
 * FA3A CJK COMPATIBILITY IDEOGRAPH-FA3A
 * 	58A8 ??
 * FA3B CJK COMPATIBILITY IDEOGRAPH-FA3B
 * 	5C64 ??
 * FA3C CJK COMPATIBILITY IDEOGRAPH-FA3C
 * 	5C6E ??
 * FA3D CJK COMPATIBILITY IDEOGRAPH-FA3D
 * 	6094 ??
 * FA3E CJK COMPATIBILITY IDEOGRAPH-FA3E
 * 	6168 ??
 * FA3F CJK COMPATIBILITY IDEOGRAPH-FA3F
 * 	618E ??
 * FA40 CJK COMPATIBILITY IDEOGRAPH-FA40
 * 	61F2 ??
 * FA41 CJK COMPATIBILITY IDEOGRAPH-FA41
 * 	654F ??
 * FA42 CJK COMPATIBILITY IDEOGRAPH-FA42
 * 	65E2 ??
 * FA43 CJK COMPATIBILITY IDEOGRAPH-FA43
 * 	6691 ??
 * FA44 CJK COMPATIBILITY IDEOGRAPH-FA44
 * 	6885 ??
 * FA45 CJK COMPATIBILITY IDEOGRAPH-FA45
 * 	6D77 ??
 * FA46 CJK COMPATIBILITY IDEOGRAPH-FA46
 * 	6E1A ??
 * FA47 CJK COMPATIBILITY IDEOGRAPH-FA47
 * 	6F22 ??
 * FA48 CJK COMPATIBILITY IDEOGRAPH-FA48
 * 	716E ??
 * FA49 CJK COMPATIBILITY IDEOGRAPH-FA49
 * 	722B ??
 * FA4A CJK COMPATIBILITY IDEOGRAPH-FA4A
 * 	7422 ??
 * FA4B CJK COMPATIBILITY IDEOGRAPH-FA4B
 * 	7891 ??
 * FA4C CJK COMPATIBILITY IDEOGRAPH-FA4C
 * 	793E ??
 * FA4D CJK COMPATIBILITY IDEOGRAPH-FA4D
 * 	7949 ??
 * FA4E CJK COMPATIBILITY IDEOGRAPH-FA4E
 * 	7948 ??
 * FA4F CJK COMPATIBILITY IDEOGRAPH-FA4F
 * 	7950 ??
 * FA50 CJK COMPATIBILITY IDEOGRAPH-FA50
 * 	7956 ??
 * FA51 CJK COMPATIBILITY IDEOGRAPH-FA51
 * 	795D ??
 * FA52 CJK COMPATIBILITY IDEOGRAPH-FA52
 * 	798D ??
 * FA53 CJK COMPATIBILITY IDEOGRAPH-FA53
 * 	798E ??
 * FA54 CJK COMPATIBILITY IDEOGRAPH-FA54
 * 	7A40 ??
 * FA55 CJK COMPATIBILITY IDEOGRAPH-FA55
 * 	7A81 ??
 * FA56 CJK COMPATIBILITY IDEOGRAPH-FA56
 * 	7BC0 ??
 * FA57 CJK COMPATIBILITY IDEOGRAPH-FA57
 * 	7DF4 ??
 * FA58 CJK COMPATIBILITY IDEOGRAPH-FA58
 * 	7E09 ??
 * FA59 CJK COMPATIBILITY IDEOGRAPH-FA59
 * 	7E41 ??
 * FA5A CJK COMPATIBILITY IDEOGRAPH-FA5A
 * 	7F72 ??
 * FA5B CJK COMPATIBILITY IDEOGRAPH-FA5B
 * 	8005 ??
 * FA5C CJK COMPATIBILITY IDEOGRAPH-FA5C
 * 	81ED ??
 * FA5D CJK COMPATIBILITY IDEOGRAPH-FA5D
 * 	8279 ??
 * FA5E CJK COMPATIBILITY IDEOGRAPH-FA5E
 * 	8279 ??
 * FA5F CJK COMPATIBILITY IDEOGRAPH-FA5F
 * 	8457 ??
 * FA60 CJK COMPATIBILITY IDEOGRAPH-FA60
 * 	8910 ??
 * FA61 CJK COMPATIBILITY IDEOGRAPH-FA61
 * 	8996 ??
 * FA62 CJK COMPATIBILITY IDEOGRAPH-FA62
 * 	8B01 ??
 * FA63 CJK COMPATIBILITY IDEOGRAPH-FA63
 * 	8B39 ??
 * FA64 CJK COMPATIBILITY IDEOGRAPH-FA64
 * 	8CD3 ??
 * FA65 CJK COMPATIBILITY IDEOGRAPH-FA65
 * 	8D08 ??
 * FA66 CJK COMPATIBILITY IDEOGRAPH-FA66
 * 	8FB6 ??
 * FA67 CJK COMPATIBILITY IDEOGRAPH-FA67
 * 	9038 ??
 * FA68 CJK COMPATIBILITY IDEOGRAPH-FA68
 * 	96E3 ??
 * FA69 CJK COMPATIBILITY IDEOGRAPH-FA69
 * 	97FF ??
 * FA6A CJK COMPATIBILITY IDEOGRAPH-FA6A
 * 	983B ??
 * FA6B CJK COMPATIBILITY IDEOGRAPH-FA6B
 * 	6075 ??
 * FA6D CJK COMPATIBILITY IDEOGRAPH-FA6D
 * 	8218 ??
 * FA70 CJK COMPATIBILITY IDEOGRAPH-FA70
 * 	4E26 ??
 * FA71 CJK COMPATIBILITY IDEOGRAPH-FA71
 * 	51B5 ??
 * FA72 CJK COMPATIBILITY IDEOGRAPH-FA72
 * 	5168 ??
 * FA73 CJK COMPATIBILITY IDEOGRAPH-FA73
 * 	4F80 ??
 * FA74 CJK COMPATIBILITY IDEOGRAPH-FA74
 * 	5145 ??
 * FA75 CJK COMPATIBILITY IDEOGRAPH-FA75
 * 	5180 ??
 * FA76 CJK COMPATIBILITY IDEOGRAPH-FA76
 * 	52C7 ??
 * FA77 CJK COMPATIBILITY IDEOGRAPH-FA77
 * 	52FA ??
 * FA78 CJK COMPATIBILITY IDEOGRAPH-FA78
 * 	559D ??
 * FA79 CJK COMPATIBILITY IDEOGRAPH-FA79
 * 	5555 ??
 * FA7A CJK COMPATIBILITY IDEOGRAPH-FA7A
 * 	5599 ??
 * FA7B CJK COMPATIBILITY IDEOGRAPH-FA7B
 * 	55E2 ??
 * FA7C CJK COMPATIBILITY IDEOGRAPH-FA7C
 * 	585A ??
 * FA7D CJK COMPATIBILITY IDEOGRAPH-FA7D
 * 	58B3 ??
 * FA7E CJK COMPATIBILITY IDEOGRAPH-FA7E
 * 	5944 ??
 * FA7F CJK COMPATIBILITY IDEOGRAPH-FA7F
 * 	5954 ??
 * FA80 CJK COMPATIBILITY IDEOGRAPH-FA80
 * 	5A62 ??
 * FA81 CJK COMPATIBILITY IDEOGRAPH-FA81
 * 	5B28 ??
 * FA82 CJK COMPATIBILITY IDEOGRAPH-FA82
 * 	5ED2 ??
 * FA83 CJK COMPATIBILITY IDEOGRAPH-FA83
 * 	5ED9 ??
 * FA84 CJK COMPATIBILITY IDEOGRAPH-FA84
 * 	5F69 ??
 * FA85 CJK COMPATIBILITY IDEOGRAPH-FA85
 * 	5FAD ??
 * FA86 CJK COMPATIBILITY IDEOGRAPH-FA86
 * 	60D8 ??
 * FA87 CJK COMPATIBILITY IDEOGRAPH-FA87
 * 	614E ??
 * FA88 CJK COMPATIBILITY IDEOGRAPH-FA88
 * 	6108 ??
 * FA89 CJK COMPATIBILITY IDEOGRAPH-FA89
 * 	618E ??
 * FA8A CJK COMPATIBILITY IDEOGRAPH-FA8A
 * 	6160 ??
 * FA8B CJK COMPATIBILITY IDEOGRAPH-FA8B
 * 	61F2 ??
 * FA8C CJK COMPATIBILITY IDEOGRAPH-FA8C
 * 	6234 ??
 * FA8D CJK COMPATIBILITY IDEOGRAPH-FA8D
 * 	63C4 ??
 * FA8E CJK COMPATIBILITY IDEOGRAPH-FA8E
 * 	641C ??
 * FA8F CJK COMPATIBILITY IDEOGRAPH-FA8F
 * 	6452 ??
 * FA90 CJK COMPATIBILITY IDEOGRAPH-FA90
 * 	6556 ??
 * FA91 CJK COMPATIBILITY IDEOGRAPH-FA91
 * 	6674 ??
 * FA92 CJK COMPATIBILITY IDEOGRAPH-FA92
 * 	6717 ??
 * FA93 CJK COMPATIBILITY IDEOGRAPH-FA93
 * 	671B ??
 * FA94 CJK COMPATIBILITY IDEOGRAPH-FA94
 * 	6756 ??
 * FA95 CJK COMPATIBILITY IDEOGRAPH-FA95
 * 	6B79 ??
 * FA96 CJK COMPATIBILITY IDEOGRAPH-FA96
 * 	6BBA ??
 * FA97 CJK COMPATIBILITY IDEOGRAPH-FA97
 * 	6D41 ??
 * FA98 CJK COMPATIBILITY IDEOGRAPH-FA98
 * 	6EDB ??
 * FA99 CJK COMPATIBILITY IDEOGRAPH-FA99
 * 	6ECB ??
 * FA9A CJK COMPATIBILITY IDEOGRAPH-FA9A
 * 	6F22 ??
 * FA9B CJK COMPATIBILITY IDEOGRAPH-FA9B
 * 	701E ??
 * FA9C CJK COMPATIBILITY IDEOGRAPH-FA9C
 * 	716E ??
 * FA9D CJK COMPATIBILITY IDEOGRAPH-FA9D
 * 	77A7 ??
 * FA9E CJK COMPATIBILITY IDEOGRAPH-FA9E
 * 	7235 ??
 * FA9F CJK COMPATIBILITY IDEOGRAPH-FA9F
 * 	72AF ??
 * FAA0 CJK COMPATIBILITY IDEOGRAPH-FAA0
 * 	732A ??
 * FAA1 CJK COMPATIBILITY IDEOGRAPH-FAA1
 * 	7471 ??
 * FAA2 CJK COMPATIBILITY IDEOGRAPH-FAA2
 * 	7506 ??
 * FAA3 CJK COMPATIBILITY IDEOGRAPH-FAA3
 * 	753B ??
 * FAA4 CJK COMPATIBILITY IDEOGRAPH-FAA4
 * 	761D ??
 * FAA5 CJK COMPATIBILITY IDEOGRAPH-FAA5
 * 	761F ??
 * FAA6 CJK COMPATIBILITY IDEOGRAPH-FAA6
 * 	76CA ??
 * FAA7 CJK COMPATIBILITY IDEOGRAPH-FAA7
 * 	76DB ??
 * FAA8 CJK COMPATIBILITY IDEOGRAPH-FAA8
 * 	76F4 ??
 * FAA9 CJK COMPATIBILITY IDEOGRAPH-FAA9
 * 	774A ??
 * FAAA CJK COMPATIBILITY IDEOGRAPH-FAAA
 * 	7740 ??
 * FAAB CJK COMPATIBILITY IDEOGRAPH-FAAB
 * 	78CC ??
 * FAAC CJK COMPATIBILITY IDEOGRAPH-FAAC
 * 	7AB1 ??
 * FAAD CJK COMPATIBILITY IDEOGRAPH-FAAD
 * 	7BC0 ??
 * FAAE CJK COMPATIBILITY IDEOGRAPH-FAAE
 * 	7C7B ??
 * FAAF CJK COMPATIBILITY IDEOGRAPH-FAAF
 * 	7D5B ??
 * FAB0 CJK COMPATIBILITY IDEOGRAPH-FAB0
 * 	7DF4 ??
 * FAB1 CJK COMPATIBILITY IDEOGRAPH-FAB1
 * 	7F3E ??
 * FAB2 CJK COMPATIBILITY IDEOGRAPH-FAB2
 * 	8005 ??
 * FAB3 CJK COMPATIBILITY IDEOGRAPH-FAB3
 * 	8352 ??
 * FAB4 CJK COMPATIBILITY IDEOGRAPH-FAB4
 * 	83EF ??
 * FAB5 CJK COMPATIBILITY IDEOGRAPH-FAB5
 * 	8779 ??
 * FAB6 CJK COMPATIBILITY IDEOGRAPH-FAB6
 * 	8941 ??
 * FAB7 CJK COMPATIBILITY IDEOGRAPH-FAB7
 * 	8986 ??
 * FAB8 CJK COMPATIBILITY IDEOGRAPH-FAB8
 * 	8996 ??
 * FAB9 CJK COMPATIBILITY IDEOGRAPH-FAB9
 * 	8ABF ??
 * FABA CJK COMPATIBILITY IDEOGRAPH-FABA
 * 	8AF8 ??
 * FABB CJK COMPATIBILITY IDEOGRAPH-FABB
 * 	8ACB ??
 * FABC CJK COMPATIBILITY IDEOGRAPH-FABC
 * 	8B01 ??
 * FABD CJK COMPATIBILITY IDEOGRAPH-FABD
 * 	8AFE ??
 * FABE CJK COMPATIBILITY IDEOGRAPH-FABE
 * 	8AED ??
 * FABF CJK COMPATIBILITY IDEOGRAPH-FABF
 * 	8B39 ??
 * FAC0 CJK COMPATIBILITY IDEOGRAPH-FAC0
 * 	8B8A ??
 * FAC1 CJK COMPATIBILITY IDEOGRAPH-FAC1
 * 	8D08 ??
 * FAC2 CJK COMPATIBILITY IDEOGRAPH-FAC2
 * 	8F38 ??
 * FAC3 CJK COMPATIBILITY IDEOGRAPH-FAC3
 * 	9072 ??
 * FAC4 CJK COMPATIBILITY IDEOGRAPH-FAC4
 * 	9199 ??
 * FAC5 CJK COMPATIBILITY IDEOGRAPH-FAC5
 * 	9276 ??
 * FAC6 CJK COMPATIBILITY IDEOGRAPH-FAC6
 * 	967C ??
 * FAC7 CJK COMPATIBILITY IDEOGRAPH-FAC7
 * 	96E3 ??
 * FAC8 CJK COMPATIBILITY IDEOGRAPH-FAC8
 * 	9756 ??
 * FAC9 CJK COMPATIBILITY IDEOGRAPH-FAC9
 * 	97DB ??
 * FACA CJK COMPATIBILITY IDEOGRAPH-FACA
 * 	97FF ??
 * FACB CJK COMPATIBILITY IDEOGRAPH-FACB
 * 	980B ??
 * FACC CJK COMPATIBILITY IDEOGRAPH-FACC
 * 	983B ??
 * FACD CJK COMPATIBILITY IDEOGRAPH-FACD
 * 	9B12 ??
 * FACE CJK COMPATIBILITY IDEOGRAPH-FACE
 * 	9F9C ??
 * FAD2 CJK COMPATIBILITY IDEOGRAPH-FAD2
 * 	3B9D ??
 * FAD3 CJK COMPATIBILITY IDEOGRAPH-FAD3
 * 	4018 ??
 * FAD4 CJK COMPATIBILITY IDEOGRAPH-FAD4
 * 	4039 ??
 * FAD8 CJK COMPATIBILITY IDEOGRAPH-FAD8
 * 	9F43 ??
 * FAD9 CJK COMPATIBILITY IDEOGRAPH-FAD9
 * 	9F8E ??
 * FB00 LATIN SMALL LIGATURE FF
 * 	0066 LATIN SMALL LETTER F
 * 	0066 LATIN SMALL LETTER F
 * FB01 LATIN SMALL LIGATURE FI
 * 	0066 LATIN SMALL LETTER F
 * 	0069 LATIN SMALL LETTER I
 * FB02 LATIN SMALL LIGATURE FL
 * 	0066 LATIN SMALL LETTER F
 * 	006C LATIN SMALL LETTER L
 * FB03 LATIN SMALL LIGATURE FFI
 * 	0066 LATIN SMALL LETTER F
 * 	0066 LATIN SMALL LETTER F
 * 	0069 LATIN SMALL LETTER I
 * FB04 LATIN SMALL LIGATURE FFL
 * 	0066 LATIN SMALL LETTER F
 * 	0066 LATIN SMALL LETTER F
 * 	006C LATIN SMALL LETTER L
 * FB05 LATIN SMALL LIGATURE LONG S T
 * 	0074 LATIN SMALL LETTER T
 * 	0073 LATIN SMALL LETTER S
 * FB06 LATIN SMALL LIGATURE ST
 * 	0073 LATIN SMALL LETTER S
 * 	0074 LATIN SMALL LETTER T
 * FB13 ARMENIAN SMALL LIGATURE MEN NOW
 * 	0574 ARMENIAN SMALL LETTER MEN
 * 	0576 ARMENIAN SMALL LETTER NOW
 * FB14 ARMENIAN SMALL LIGATURE MEN ECH
 * 	0574 ARMENIAN SMALL LETTER MEN
 * 	0565 ARMENIAN SMALL LETTER ECH
 * FB15 ARMENIAN SMALL LIGATURE MEN INI
 * 	0574 ARMENIAN SMALL LETTER MEN
 * 	056B ARMENIAN SMALL LETTER INI
 * FB16 ARMENIAN SMALL LIGATURE VEW NOW
 * 	057E ARMENIAN SMALL LETTER VEW
 * 	0576 ARMENIAN SMALL LETTER NOW
 * FB17 ARMENIAN SMALL LIGATURE MEN XEH
 * 	0574 ARMENIAN SMALL LETTER MEN
 * 	056D ARMENIAN SMALL LETTER XEH
 * FB1D HEBREW LETTER YOD WITH HIRIQ
 * 	05D9 HEBREW LETTER YOD
 * FB1E HEBREW POINT JUDEO-SPANISH VARIKA
 * 	0000 
 * FB1F HEBREW LIGATURE YIDDISH YOD YOD PATAH
 * 	05F2 HEBREW LIGATURE YIDDISH DOUBLE YOD
 * FB20 HEBREW LETTER ALTERNATIVE AYIN
 * 	05E2 HEBREW LETTER AYIN
 * FB21 HEBREW LETTER WIDE ALEF
 * 	05D0 HEBREW LETTER ALEF
 * FB22 HEBREW LETTER WIDE DALET
 * 	05D3 HEBREW LETTER DALET
 * FB23 HEBREW LETTER WIDE HE
 * 	05D4 HEBREW LETTER HE
 * FB24 HEBREW LETTER WIDE KAF
 * 	05DB HEBREW LETTER KAF
 * FB25 HEBREW LETTER WIDE LAMED
 * 	05DC HEBREW LETTER LAMED
 * FB26 HEBREW LETTER WIDE FINAL MEM
 * 	05DD HEBREW LETTER FINAL MEM
 * FB27 HEBREW LETTER WIDE RESH
 * 	05E8 HEBREW LETTER RESH
 * FB28 HEBREW LETTER WIDE TAV
 * 	05EA HEBREW LETTER TAV
 * FB29 HEBREW LETTER ALTERNATIVE PLUS SIGN
 * 	002B PLUS SIGN
 * FB2A HEBREW LETTER SHIN WITH SHIN DOT
 * 	05E9 HEBREW LETTER SHIN
 * FB2B HEBREW LETTER SHIN WITH SIN DOT
 * 	05E9 HEBREW LETTER SHIN
 * FB2C HEBREW LETTER SHIN WITH DAGESH AND SHIN DOT
 * 	05E9 HEBREW LETTER SHIN
 * FB2D HEBREW LETTER SHIN WITH DAGESH AND SIN DOT
 * 	05E9 HEBREW LETTER SHIN
 * FB2E HEBREW LETTER ALEF WITH PATAH
 * 	05D0 HEBREW LETTER ALEF
 * FB2F HEBREW LETTER ALEF WITH QAMATS
 * 	05D0 HEBREW LETTER ALEF
 * FB30 HEBREW LETTER ALEF WITH MAPIQ
 * 	05D0 HEBREW LETTER ALEF
 * FB31 HEBREW LETTER BET WITH DAGESH
 * 	05D1 HEBREW LETTER BET
 * FB32 HEBREW LETTER GIMEL WITH DAGESH
 * 	05D2 HEBREW LETTER GIMEL
 * FB33 HEBREW LETTER DALET WITH DAGESH
 * 	05D3 HEBREW LETTER DALET
 * FB34 HEBREW LETTER HE WITH MAPIQ
 * 	05D4 HEBREW LETTER HE
 * FB35 HEBREW LETTER VAV WITH DAGESH
 * 	05D5 HEBREW LETTER VAV
 * FB36 HEBREW LETTER ZAYIN WITH DAGESH
 * 	05D6 HEBREW LETTER ZAYIN
 * FB38 HEBREW LETTER TET WITH DAGESH
 * 	05D8 HEBREW LETTER TET
 * FB39 HEBREW LETTER YOD WITH DAGESH
 * 	05D9 HEBREW LETTER YOD
 * FB3A HEBREW LETTER FINAL KAF WITH DAGESH
 * 	05DA HEBREW LETTER FINAL KAF
 * FB3B HEBREW LETTER KAF WITH DAGESH
 * 	05DB HEBREW LETTER KAF
 * FB3C HEBREW LETTER LAMED WITH DAGESH
 * 	05DC HEBREW LETTER LAMED
 * FB3E HEBREW LETTER MEM WITH DAGESH
 * 	05DE HEBREW LETTER MEM
 * FB40 HEBREW LETTER NUN WITH DAGESH
 * 	05E0 HEBREW LETTER NUN
 * FB41 HEBREW LETTER SAMEKH WITH DAGESH
 * 	05E1 HEBREW LETTER SAMEKH
 * FB43 HEBREW LETTER FINAL PE WITH DAGESH
 * 	05E3 HEBREW LETTER FINAL PE
 * FB44 HEBREW LETTER PE WITH DAGESH
 * 	05E4 HEBREW LETTER PE
 * FB46 HEBREW LETTER TSADI WITH DAGESH
 * 	05E6 HEBREW LETTER TSADI
 * FB47 HEBREW LETTER QOF WITH DAGESH
 * 	05E7 HEBREW LETTER QOF
 * FB48 HEBREW LETTER RESH WITH DAGESH
 * 	05E8 HEBREW LETTER RESH
 * FB49 HEBREW LETTER SHIN WITH DAGESH
 * 	05E9 HEBREW LETTER SHIN
 * FB4A HEBREW LETTER TAV WITH DAGESH
 * 	05EA HEBREW LETTER TAV
 * FB4B HEBREW LETTER VAV WITH HOLAM
 * 	05D5 HEBREW LETTER VAV
 * FB4C HEBREW LETTER BET WITH RAFE
 * 	05D1 HEBREW LETTER BET
 * FB4D HEBREW LETTER KAF WITH RAFE
 * 	05DB HEBREW LETTER KAF
 * FB4E HEBREW LETTER PE WITH RAFE
 * 	05E4 HEBREW LETTER PE
 * FB4F HEBREW LIGATURE ALEF LAMED
 * 	05D0 HEBREW LETTER ALEF
 * 	05DC HEBREW LETTER LAMED
 * FB50 ARABIC LETTER ALEF WASLA ISOLATED FORM
 * 	0671 ARABIC LETTER ALEF WASLA
 * FB51 ARABIC LETTER ALEF WASLA FINAL FORM
 * 	0671 ARABIC LETTER ALEF WASLA
 * FB52 ARABIC LETTER BEEH ISOLATED FORM
 * 	067B ARABIC LETTER BEEH
 * FB53 ARABIC LETTER BEEH FINAL FORM
 * 	067B ARABIC LETTER BEEH
 * FB54 ARABIC LETTER BEEH INITIAL FORM
 * 	067B ARABIC LETTER BEEH
 * FB55 ARABIC LETTER BEEH MEDIAL FORM
 * 	067B ARABIC LETTER BEEH
 * FB56 ARABIC LETTER PEH ISOLATED FORM
 * 	067E ARABIC LETTER PEH
 * FB57 ARABIC LETTER PEH FINAL FORM
 * 	067E ARABIC LETTER PEH
 * FB58 ARABIC LETTER PEH INITIAL FORM
 * 	067E ARABIC LETTER PEH
 * FB59 ARABIC LETTER PEH MEDIAL FORM
 * 	067E ARABIC LETTER PEH
 * FB5A ARABIC LETTER BEHEH ISOLATED FORM
 * 	0680 ARABIC LETTER BEHEH
 * FB5B ARABIC LETTER BEHEH FINAL FORM
 * 	0680 ARABIC LETTER BEHEH
 * FB5C ARABIC LETTER BEHEH INITIAL FORM
 * 	0680 ARABIC LETTER BEHEH
 * FB5D ARABIC LETTER BEHEH MEDIAL FORM
 * 	0680 ARABIC LETTER BEHEH
 * FB5E ARABIC LETTER TTEHEH ISOLATED FORM
 * 	067A ARABIC LETTER TTEHEH
 * FB5F ARABIC LETTER TTEHEH FINAL FORM
 * 	067A ARABIC LETTER TTEHEH
 * FB60 ARABIC LETTER TTEHEH INITIAL FORM
 * 	067A ARABIC LETTER TTEHEH
 * FB61 ARABIC LETTER TTEHEH MEDIAL FORM
 * 	067A ARABIC LETTER TTEHEH
 * FB62 ARABIC LETTER TEHEH ISOLATED FORM
 * 	067F ARABIC LETTER TEHEH
 * FB63 ARABIC LETTER TEHEH FINAL FORM
 * 	067F ARABIC LETTER TEHEH
 * FB64 ARABIC LETTER TEHEH INITIAL FORM
 * 	067F ARABIC LETTER TEHEH
 * FB65 ARABIC LETTER TEHEH MEDIAL FORM
 * 	067F ARABIC LETTER TEHEH
 * FB66 ARABIC LETTER TTEH ISOLATED FORM
 * 	0679 ARABIC LETTER TTEH
 * FB67 ARABIC LETTER TTEH FINAL FORM
 * 	0679 ARABIC LETTER TTEH
 * FB68 ARABIC LETTER TTEH INITIAL FORM
 * 	0679 ARABIC LETTER TTEH
 * FB69 ARABIC LETTER TTEH MEDIAL FORM
 * 	0679 ARABIC LETTER TTEH
 * FB6A ARABIC LETTER VEH ISOLATED FORM
 * 	06A4 ARABIC LETTER VEH
 * FB6B ARABIC LETTER VEH FINAL FORM
 * 	06A4 ARABIC LETTER VEH
 * FB6C ARABIC LETTER VEH INITIAL FORM
 * 	06A4 ARABIC LETTER VEH
 * FB6D ARABIC LETTER VEH MEDIAL FORM
 * 	06A4 ARABIC LETTER VEH
 * FB6E ARABIC LETTER PEHEH ISOLATED FORM
 * 	06A6 ARABIC LETTER PEHEH
 * FB6F ARABIC LETTER PEHEH FINAL FORM
 * 	06A6 ARABIC LETTER PEHEH
 * FB70 ARABIC LETTER PEHEH INITIAL FORM
 * 	06A6 ARABIC LETTER PEHEH
 * FB71 ARABIC LETTER PEHEH MEDIAL FORM
 * 	06A6 ARABIC LETTER PEHEH
 * FB72 ARABIC LETTER DYEH ISOLATED FORM
 * 	0684 ARABIC LETTER DYEH
 * FB73 ARABIC LETTER DYEH FINAL FORM
 * 	0684 ARABIC LETTER DYEH
 * FB74 ARABIC LETTER DYEH INITIAL FORM
 * 	0684 ARABIC LETTER DYEH
 * FB75 ARABIC LETTER DYEH MEDIAL FORM
 * 	0684 ARABIC LETTER DYEH
 * FB76 ARABIC LETTER NYEH ISOLATED FORM
 * 	0683 ARABIC LETTER NYEH
 * FB77 ARABIC LETTER NYEH FINAL FORM
 * 	0683 ARABIC LETTER NYEH
 * FB78 ARABIC LETTER NYEH INITIAL FORM
 * 	0683 ARABIC LETTER NYEH
 * FB79 ARABIC LETTER NYEH MEDIAL FORM
 * 	0683 ARABIC LETTER NYEH
 * FB7A ARABIC LETTER TCHEH ISOLATED FORM
 * 	0686 ARABIC LETTER TCHEH
 * FB7B ARABIC LETTER TCHEH FINAL FORM
 * 	0686 ARABIC LETTER TCHEH
 * FB7C ARABIC LETTER TCHEH INITIAL FORM
 * 	0686 ARABIC LETTER TCHEH
 * FB7D ARABIC LETTER TCHEH MEDIAL FORM
 * 	0686 ARABIC LETTER TCHEH
 * FB7E ARABIC LETTER TCHEHEH ISOLATED FORM
 * 	0687 ARABIC LETTER TCHEHEH
 * FB7F ARABIC LETTER TCHEHEH FINAL FORM
 * 	0687 ARABIC LETTER TCHEHEH
 * FB80 ARABIC LETTER TCHEHEH INITIAL FORM
 * 	0687 ARABIC LETTER TCHEHEH
 * FB81 ARABIC LETTER TCHEHEH MEDIAL FORM
 * 	0687 ARABIC LETTER TCHEHEH
 * FB82 ARABIC LETTER DDAHAL ISOLATED FORM
 * 	068D ARABIC LETTER DDAHAL
 * FB83 ARABIC LETTER DDAHAL FINAL FORM
 * 	068D ARABIC LETTER DDAHAL
 * FB84 ARABIC LETTER DAHAL ISOLATED FORM
 * 	068C ARABIC LETTER DAHAL
 * FB85 ARABIC LETTER DAHAL FINAL FORM
 * 	068C ARABIC LETTER DAHAL
 * FB86 ARABIC LETTER DUL ISOLATED FORM
 * 	068E ARABIC LETTER DUL
 * FB87 ARABIC LETTER DUL FINAL FORM
 * 	068E ARABIC LETTER DUL
 * FB88 ARABIC LETTER DDAL ISOLATED FORM
 * 	0688 ARABIC LETTER DDAL
 * FB89 ARABIC LETTER DDAL FINAL FORM
 * 	0688 ARABIC LETTER DDAL
 * FB8A ARABIC LETTER JEH ISOLATED FORM
 * 	0698 ARABIC LETTER JEH
 * FB8B ARABIC LETTER JEH FINAL FORM
 * 	0698 ARABIC LETTER JEH
 * FB8C ARABIC LETTER RREH ISOLATED FORM
 * 	0691 ARABIC LETTER RREH
 * FB8D ARABIC LETTER RREH FINAL FORM
 * 	0691 ARABIC LETTER RREH
 * FB8E ARABIC LETTER KEHEH ISOLATED FORM
 * 	06A9 ARABIC LETTER KEHEH
 * FB8F ARABIC LETTER KEHEH FINAL FORM
 * 	06A9 ARABIC LETTER KEHEH
 * FB90 ARABIC LETTER KEHEH INITIAL FORM
 * 	06A9 ARABIC LETTER KEHEH
 * FB91 ARABIC LETTER KEHEH MEDIAL FORM
 * 	06A9 ARABIC LETTER KEHEH
 * FB92 ARABIC LETTER GAF ISOLATED FORM
 * 	06AF ARABIC LETTER GAF
 * FB93 ARABIC LETTER GAF FINAL FORM
 * 	06AF ARABIC LETTER GAF
 * FB94 ARABIC LETTER GAF INITIAL FORM
 * 	06AF ARABIC LETTER GAF
 * FB95 ARABIC LETTER GAF MEDIAL FORM
 * 	06AF ARABIC LETTER GAF
 * FB96 ARABIC LETTER GUEH ISOLATED FORM
 * 	06B3 ARABIC LETTER GUEH
 * FB97 ARABIC LETTER GUEH FINAL FORM
 * 	06B3 ARABIC LETTER GUEH
 * FB98 ARABIC LETTER GUEH INITIAL FORM
 * 	06B3 ARABIC LETTER GUEH
 * FB99 ARABIC LETTER GUEH MEDIAL FORM
 * 	06B3 ARABIC LETTER GUEH
 * FB9A ARABIC LETTER NGOEH ISOLATED FORM
 * 	06B1 ARABIC LETTER NGOEH
 * FB9B ARABIC LETTER NGOEH FINAL FORM
 * 	06B1 ARABIC LETTER NGOEH
 * FB9C ARABIC LETTER NGOEH INITIAL FORM
 * 	06B1 ARABIC LETTER NGOEH
 * FB9D ARABIC LETTER NGOEH MEDIAL FORM
 * 	06B1 ARABIC LETTER NGOEH
 * FB9E ARABIC LETTER NOON GHUNNA ISOLATED FORM
 * 	06BA ARABIC LETTER NOON GHUNNA
 * FB9F ARABIC LETTER NOON GHUNNA FINAL FORM
 * 	06BA ARABIC LETTER NOON GHUNNA
 * FBA0 ARABIC LETTER RNOON ISOLATED FORM
 * 	06BB ARABIC LETTER RNOON
 * FBA1 ARABIC LETTER RNOON FINAL FORM
 * 	06BB ARABIC LETTER RNOON
 * FBA2 ARABIC LETTER RNOON INITIAL FORM
 * 	06BB ARABIC LETTER RNOON
 * FBA3 ARABIC LETTER RNOON MEDIAL FORM
 * 	06BB ARABIC LETTER RNOON
 * FBA4 ARABIC LETTER HEH WITH YEH ABOVE ISOLATED FORM
 * 	06D5 ARABIC LETTER AE
 * FBA5 ARABIC LETTER HEH WITH YEH ABOVE FINAL FORM
 * 	06D5 ARABIC LETTER AE
 * FBA6 ARABIC LETTER HEH GOAL ISOLATED FORM
 * 	06C1 ARABIC LETTER HEH GOAL
 * FBA7 ARABIC LETTER HEH GOAL FINAL FORM
 * 	06C1 ARABIC LETTER HEH GOAL
 * FBA8 ARABIC LETTER HEH GOAL INITIAL FORM
 * 	06C1 ARABIC LETTER HEH GOAL
 * FBA9 ARABIC LETTER HEH GOAL MEDIAL FORM
 * 	06C1 ARABIC LETTER HEH GOAL
 * FBAA ARABIC LETTER HEH DOACHASHMEE ISOLATED FORM
 * 	06BE ARABIC LETTER HEH DOACHASHMEE
 * FBAB ARABIC LETTER HEH DOACHASHMEE FINAL FORM
 * 	06BE ARABIC LETTER HEH DOACHASHMEE
 * FBAC ARABIC LETTER HEH DOACHASHMEE INITIAL FORM
 * 	06BE ARABIC LETTER HEH DOACHASHMEE
 * FBAD ARABIC LETTER HEH DOACHASHMEE MEDIAL FORM
 * 	06BE ARABIC LETTER HEH DOACHASHMEE
 * FBAE ARABIC LETTER YEH BARREE ISOLATED FORM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FBAF ARABIC LETTER YEH BARREE FINAL FORM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FBB0 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE ISOLATED FORM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FBB1 ARABIC LETTER YEH BARREE WITH HAMZA ABOVE FINAL FORM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FBD3 ARABIC LETTER NG ISOLATED FORM
 * 	06AD ARABIC LETTER NG
 * FBD4 ARABIC LETTER NG FINAL FORM
 * 	06AD ARABIC LETTER NG
 * FBD5 ARABIC LETTER NG INITIAL FORM
 * 	06AD ARABIC LETTER NG
 * FBD6 ARABIC LETTER NG MEDIAL FORM
 * 	06AD ARABIC LETTER NG
 * FBD7 ARABIC LETTER U ISOLATED FORM
 * 	06C7 ARABIC LETTER U
 * FBD8 ARABIC LETTER U FINAL FORM
 * 	06C7 ARABIC LETTER U
 * FBD9 ARABIC LETTER OE ISOLATED FORM
 * 	06C6 ARABIC LETTER OE
 * FBDA ARABIC LETTER OE FINAL FORM
 * 	06C6 ARABIC LETTER OE
 * FBDB ARABIC LETTER YU ISOLATED FORM
 * 	06C8 ARABIC LETTER YU
 * FBDC ARABIC LETTER YU FINAL FORM
 * 	06C8 ARABIC LETTER YU
 * FBDD ARABIC LETTER U WITH HAMZA ABOVE ISOLATED FORM
 * 	06C7 ARABIC LETTER U
 * 	0674 ARABIC LETTER HIGH HAMZA
 * FBDE ARABIC LETTER VE ISOLATED FORM
 * 	06CB ARABIC LETTER VE
 * FBDF ARABIC LETTER VE FINAL FORM
 * 	06CB ARABIC LETTER VE
 * FBE0 ARABIC LETTER KIRGHIZ OE ISOLATED FORM
 * 	06C5 ARABIC LETTER KIRGHIZ OE
 * FBE1 ARABIC LETTER KIRGHIZ OE FINAL FORM
 * 	06C5 ARABIC LETTER KIRGHIZ OE
 * FBE2 ARABIC LETTER KIRGHIZ YU ISOLATED FORM
 * 	06C9 ARABIC LETTER KIRGHIZ YU
 * FBE3 ARABIC LETTER KIRGHIZ YU FINAL FORM
 * 	06C9 ARABIC LETTER KIRGHIZ YU
 * FBE4 ARABIC LETTER E ISOLATED FORM
 * 	06D0 ARABIC LETTER E
 * FBE5 ARABIC LETTER E FINAL FORM
 * 	06D0 ARABIC LETTER E
 * FBE6 ARABIC LETTER E INITIAL FORM
 * 	06D0 ARABIC LETTER E
 * FBE7 ARABIC LETTER E MEDIAL FORM
 * 	06D0 ARABIC LETTER E
 * FBE8 ARABIC LETTER UIGHUR KAZAKH KIRGHIZ ALEF MAKSURA INITIAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FBE9 ARABIC LETTER UIGHUR KAZAKH KIRGHIZ ALEF MAKSURA MEDIAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FBEA ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * 	064A ARABIC LETTER YEH
 * FBEB ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * 	064A ARABIC LETTER YEH
 * FBEC ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH AE ISOLATED FORM
 * 	06D5 ARABIC LETTER AE
 * 	064A ARABIC LETTER YEH
 * FBED ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH AE FINAL FORM
 * 	06D5 ARABIC LETTER AE
 * 	064A ARABIC LETTER YEH
 * FBEE ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH WAW ISOLATED FORM
 * 	0648 ARABIC LETTER WAW
 * 	064A ARABIC LETTER YEH
 * FBEF ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH WAW FINAL FORM
 * 	0648 ARABIC LETTER WAW
 * 	064A ARABIC LETTER YEH
 * FBF0 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH U ISOLATED FORM
 * 	06C7 ARABIC LETTER U
 * 	064A ARABIC LETTER YEH
 * FBF1 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH U FINAL FORM
 * 	06C7 ARABIC LETTER U
 * 	064A ARABIC LETTER YEH
 * FBF2 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH OE ISOLATED FORM
 * 	06C6 ARABIC LETTER OE
 * 	064A ARABIC LETTER YEH
 * FBF3 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH OE FINAL FORM
 * 	06C6 ARABIC LETTER OE
 * 	064A ARABIC LETTER YEH
 * FBF4 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YU ISOLATED FORM
 * 	06C8 ARABIC LETTER YU
 * 	064A ARABIC LETTER YEH
 * FBF5 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YU FINAL FORM
 * 	06C8 ARABIC LETTER YU
 * 	064A ARABIC LETTER YEH
 * FBF6 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E ISOLATED FORM
 * 	06D0 ARABIC LETTER E
 * 	064A ARABIC LETTER YEH
 * FBF7 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E FINAL FORM
 * 	06D0 ARABIC LETTER E
 * 	064A ARABIC LETTER YEH
 * FBF8 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH E INITIAL FORM
 * 	06D0 ARABIC LETTER E
 * 	064A ARABIC LETTER YEH
 * FBF9 ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF MAKSURA ISOLATED FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	064A ARABIC LETTER YEH
 * FBFA ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF MAKSURA FINAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	064A ARABIC LETTER YEH
 * FBFB ARABIC LIGATURE UIGHUR KIRGHIZ YEH WITH HAMZA ABOVE WITH ALEF MAKSURA INITIAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	064A ARABIC LETTER YEH
 * FBFC ARABIC LETTER FARSI YEH ISOLATED FORM
 * 	06CC ARABIC LETTER FARSI YEH
 * FBFD ARABIC LETTER FARSI YEH FINAL FORM
 * 	06CC ARABIC LETTER FARSI YEH
 * FBFE ARABIC LETTER FARSI YEH INITIAL FORM
 * 	06CC ARABIC LETTER FARSI YEH
 * FBFF ARABIC LETTER FARSI YEH MEDIAL FORM
 * 	06CC ARABIC LETTER FARSI YEH
 * FC00 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH JEEM ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FC01 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HAH ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FC02 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FC03 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF MAKSURA ISOLATED FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	064A ARABIC LETTER YEH
 * FC04 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YEH ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	064A ARABIC LETTER YEH
 * FC05 ARABIC LIGATURE BEH WITH JEEM ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	062C ARABIC LETTER JEEM
 * FC06 ARABIC LIGATURE BEH WITH HAH ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	062D ARABIC LETTER HAH
 * FC07 ARABIC LIGATURE BEH WITH KHAH ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	062E ARABIC LETTER KHAH
 * FC08 ARABIC LIGATURE BEH WITH MEEM ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	0645 ARABIC LETTER MEEM
 * FC09 ARABIC LIGATURE BEH WITH ALEF MAKSURA ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC0A ARABIC LIGATURE BEH WITH YEH ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * 	064A ARABIC LETTER YEH
 * FC0B ARABIC LIGATURE TEH WITH JEEM ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	062C ARABIC LETTER JEEM
 * FC0C ARABIC LIGATURE TEH WITH HAH ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	062D ARABIC LETTER HAH
 * FC0D ARABIC LIGATURE TEH WITH KHAH ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	062E ARABIC LETTER KHAH
 * FC0E ARABIC LIGATURE TEH WITH MEEM ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * FC0F ARABIC LIGATURE TEH WITH ALEF MAKSURA ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC10 ARABIC LIGATURE TEH WITH YEH ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * 	064A ARABIC LETTER YEH
 * FC11 ARABIC LIGATURE THEH WITH JEEM ISOLATED FORM
 * 	062B ARABIC LETTER THEH
 * 	062C ARABIC LETTER JEEM
 * FC12 ARABIC LIGATURE THEH WITH MEEM ISOLATED FORM
 * 	062B ARABIC LETTER THEH
 * 	0645 ARABIC LETTER MEEM
 * FC13 ARABIC LIGATURE THEH WITH ALEF MAKSURA ISOLATED FORM
 * 	062B ARABIC LETTER THEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC14 ARABIC LIGATURE THEH WITH YEH ISOLATED FORM
 * 	062B ARABIC LETTER THEH
 * 	064A ARABIC LETTER YEH
 * FC15 ARABIC LIGATURE JEEM WITH HAH ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FC16 ARABIC LIGATURE JEEM WITH MEEM ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FC17 ARABIC LIGATURE HAH WITH JEEM ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FC18 ARABIC LIGATURE HAH WITH MEEM ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FC19 ARABIC LIGATURE KHAH WITH JEEM ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * 	062C ARABIC LETTER JEEM
 * FC1A ARABIC LIGATURE KHAH WITH HAH ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * 	062D ARABIC LETTER HAH
 * FC1B ARABIC LIGATURE KHAH WITH MEEM ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FC1C ARABIC LIGATURE SEEN WITH JEEM ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062C ARABIC LETTER JEEM
 * FC1D ARABIC LIGATURE SEEN WITH HAH ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062D ARABIC LETTER HAH
 * FC1E ARABIC LIGATURE SEEN WITH KHAH ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062E ARABIC LETTER KHAH
 * FC1F ARABIC LIGATURE SEEN WITH MEEM ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * FC20 ARABIC LIGATURE SAD WITH HAH ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	062D ARABIC LETTER HAH
 * FC21 ARABIC LIGATURE SAD WITH MEEM ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0645 ARABIC LETTER MEEM
 * FC22 ARABIC LIGATURE DAD WITH JEEM ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	062C ARABIC LETTER JEEM
 * FC23 ARABIC LIGATURE DAD WITH HAH ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	062D ARABIC LETTER HAH
 * FC24 ARABIC LIGATURE DAD WITH KHAH ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	062E ARABIC LETTER KHAH
 * FC25 ARABIC LIGATURE DAD WITH MEEM ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	0645 ARABIC LETTER MEEM
 * FC26 ARABIC LIGATURE TAH WITH HAH ISOLATED FORM
 * 	0637 ARABIC LETTER TAH
 * 	062D ARABIC LETTER HAH
 * FC27 ARABIC LIGATURE TAH WITH MEEM ISOLATED FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * FC28 ARABIC LIGATURE ZAH WITH MEEM ISOLATED FORM
 * 	0638 ARABIC LETTER ZAH
 * 	0645 ARABIC LETTER MEEM
 * FC29 ARABIC LIGATURE AIN WITH JEEM ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * 	062C ARABIC LETTER JEEM
 * FC2A ARABIC LIGATURE AIN WITH MEEM ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * FC2B ARABIC LIGATURE GHAIN WITH JEEM ISOLATED FORM
 * 	063A ARABIC LETTER GHAIN
 * 	062C ARABIC LETTER JEEM
 * FC2C ARABIC LIGATURE GHAIN WITH MEEM ISOLATED FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0645 ARABIC LETTER MEEM
 * FC2D ARABIC LIGATURE FEH WITH JEEM ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	062C ARABIC LETTER JEEM
 * FC2E ARABIC LIGATURE FEH WITH HAH ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	062D ARABIC LETTER HAH
 * FC2F ARABIC LIGATURE FEH WITH KHAH ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	062E ARABIC LETTER KHAH
 * FC30 ARABIC LIGATURE FEH WITH MEEM ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	0645 ARABIC LETTER MEEM
 * FC31 ARABIC LIGATURE FEH WITH ALEF MAKSURA ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC32 ARABIC LIGATURE FEH WITH YEH ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * 	064A ARABIC LETTER YEH
 * FC33 ARABIC LIGATURE QAF WITH HAH ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * 	062D ARABIC LETTER HAH
 * FC34 ARABIC LIGATURE QAF WITH MEEM ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * FC35 ARABIC LIGATURE QAF WITH ALEF MAKSURA ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC36 ARABIC LIGATURE QAF WITH YEH ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * 	064A ARABIC LETTER YEH
 * FC37 ARABIC LIGATURE KAF WITH ALEF ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	0627 ARABIC LETTER ALEF
 * FC38 ARABIC LIGATURE KAF WITH JEEM ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	062C ARABIC LETTER JEEM
 * FC39 ARABIC LIGATURE KAF WITH HAH ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	062D ARABIC LETTER HAH
 * FC3A ARABIC LIGATURE KAF WITH KHAH ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	062E ARABIC LETTER KHAH
 * FC3B ARABIC LIGATURE KAF WITH LAM ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	0644 ARABIC LETTER LAM
 * FC3C ARABIC LIGATURE KAF WITH MEEM ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * FC3D ARABIC LIGATURE KAF WITH ALEF MAKSURA ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC3E ARABIC LIGATURE KAF WITH YEH ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * 	064A ARABIC LETTER YEH
 * FC3F ARABIC LIGATURE LAM WITH JEEM ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * FC40 ARABIC LIGATURE LAM WITH HAH ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * FC41 ARABIC LIGATURE LAM WITH KHAH ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	062E ARABIC LETTER KHAH
 * FC42 ARABIC LIGATURE LAM WITH MEEM ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FC43 ARABIC LIGATURE LAM WITH ALEF MAKSURA ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC44 ARABIC LIGATURE LAM WITH YEH ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	064A ARABIC LETTER YEH
 * FC45 ARABIC LIGATURE MEEM WITH JEEM ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * FC46 ARABIC LIGATURE MEEM WITH HAH ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FC47 ARABIC LIGATURE MEEM WITH KHAH ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * FC48 ARABIC LIGATURE MEEM WITH MEEM ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FC49 ARABIC LIGATURE MEEM WITH ALEF MAKSURA ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC4A ARABIC LIGATURE MEEM WITH YEH ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FC4B ARABIC LIGATURE NOON WITH JEEM ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * FC4C ARABIC LIGATURE NOON WITH HAH ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	062D ARABIC LETTER HAH
 * FC4D ARABIC LIGATURE NOON WITH KHAH ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	062E ARABIC LETTER KHAH
 * FC4E ARABIC LIGATURE NOON WITH MEEM ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * FC4F ARABIC LIGATURE NOON WITH ALEF MAKSURA ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC50 ARABIC LIGATURE NOON WITH YEH ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * 	064A ARABIC LETTER YEH
 * FC51 ARABIC LIGATURE HEH WITH JEEM ISOLATED FORM
 * 	0647 ARABIC LETTER HEH
 * 	062C ARABIC LETTER JEEM
 * FC52 ARABIC LIGATURE HEH WITH MEEM ISOLATED FORM
 * 	0647 ARABIC LETTER HEH
 * 	0645 ARABIC LETTER MEEM
 * FC53 ARABIC LIGATURE HEH WITH ALEF MAKSURA ISOLATED FORM
 * 	0647 ARABIC LETTER HEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC54 ARABIC LIGATURE HEH WITH YEH ISOLATED FORM
 * 	0647 ARABIC LETTER HEH
 * 	064A ARABIC LETTER YEH
 * FC55 ARABIC LIGATURE YEH WITH JEEM ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	062C ARABIC LETTER JEEM
 * FC56 ARABIC LIGATURE YEH WITH HAH ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	062D ARABIC LETTER HAH
 * FC57 ARABIC LIGATURE YEH WITH KHAH ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	062E ARABIC LETTER KHAH
 * FC58 ARABIC LIGATURE YEH WITH MEEM ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * FC59 ARABIC LIGATURE YEH WITH ALEF MAKSURA ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC5A ARABIC LIGATURE YEH WITH YEH ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * 	064A ARABIC LETTER YEH
 * FC5B ARABIC LIGATURE THAL WITH SUPERSCRIPT ALEF ISOLATED FORM
 * 	0630 ARABIC LETTER THAL
 * FC5C ARABIC LIGATURE REH WITH SUPERSCRIPT ALEF ISOLATED FORM
 * 	0631 ARABIC LETTER REH
 * FC5D ARABIC LIGATURE ALEF MAKSURA WITH SUPERSCRIPT ALEF ISOLATED FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC5E ARABIC LIGATURE SHADDA WITH DAMMATAN ISOLATED FORM
 * 	0020 SPACE
 * FC5F ARABIC LIGATURE SHADDA WITH KASRATAN ISOLATED FORM
 * 	0020 SPACE
 * FC60 ARABIC LIGATURE SHADDA WITH FATHA ISOLATED FORM
 * 	0020 SPACE
 * FC61 ARABIC LIGATURE SHADDA WITH DAMMA ISOLATED FORM
 * 	0020 SPACE
 * FC62 ARABIC LIGATURE SHADDA WITH KASRA ISOLATED FORM
 * 	0020 SPACE
 * FC63 ARABIC LIGATURE SHADDA WITH SUPERSCRIPT ALEF ISOLATED FORM
 * 	0020 SPACE
 * FC64 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH REH FINAL FORM
 * 	0631 ARABIC LETTER REH
 * 	064A ARABIC LETTER YEH
 * FC65 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ZAIN FINAL FORM
 * 	0632 ARABIC LETTER ZAIN
 * 	064A ARABIC LETTER YEH
 * FC66 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FC67 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH NOON FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	064A ARABIC LETTER YEH
 * FC68 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH ALEF MAKSURA FINAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	064A ARABIC LETTER YEH
 * FC69 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	064A ARABIC LETTER YEH
 * FC6A ARABIC LIGATURE BEH WITH REH FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0631 ARABIC LETTER REH
 * FC6B ARABIC LIGATURE BEH WITH ZAIN FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0632 ARABIC LETTER ZAIN
 * FC6C ARABIC LIGATURE BEH WITH MEEM FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0645 ARABIC LETTER MEEM
 * FC6D ARABIC LIGATURE BEH WITH NOON FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0646 ARABIC LETTER NOON
 * FC6E ARABIC LIGATURE BEH WITH ALEF MAKSURA FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC6F ARABIC LIGATURE BEH WITH YEH FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	064A ARABIC LETTER YEH
 * FC70 ARABIC LIGATURE TEH WITH REH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0631 ARABIC LETTER REH
 * FC71 ARABIC LIGATURE TEH WITH ZAIN FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0632 ARABIC LETTER ZAIN
 * FC72 ARABIC LIGATURE TEH WITH MEEM FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * FC73 ARABIC LIGATURE TEH WITH NOON FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0646 ARABIC LETTER NOON
 * FC74 ARABIC LIGATURE TEH WITH ALEF MAKSURA FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC75 ARABIC LIGATURE TEH WITH YEH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	064A ARABIC LETTER YEH
 * FC76 ARABIC LIGATURE THEH WITH REH FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0631 ARABIC LETTER REH
 * FC77 ARABIC LIGATURE THEH WITH ZAIN FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0632 ARABIC LETTER ZAIN
 * FC78 ARABIC LIGATURE THEH WITH MEEM FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0645 ARABIC LETTER MEEM
 * FC79 ARABIC LIGATURE THEH WITH NOON FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0646 ARABIC LETTER NOON
 * FC7A ARABIC LIGATURE THEH WITH ALEF MAKSURA FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC7B ARABIC LIGATURE THEH WITH YEH FINAL FORM
 * 	062B ARABIC LETTER THEH
 * 	064A ARABIC LETTER YEH
 * FC7C ARABIC LIGATURE FEH WITH ALEF MAKSURA FINAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC7D ARABIC LIGATURE FEH WITH YEH FINAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	064A ARABIC LETTER YEH
 * FC7E ARABIC LIGATURE QAF WITH ALEF MAKSURA FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC7F ARABIC LIGATURE QAF WITH YEH FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	064A ARABIC LETTER YEH
 * FC80 ARABIC LIGATURE KAF WITH ALEF FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0627 ARABIC LETTER ALEF
 * FC81 ARABIC LIGATURE KAF WITH LAM FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0644 ARABIC LETTER LAM
 * FC82 ARABIC LIGATURE KAF WITH MEEM FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * FC83 ARABIC LIGATURE KAF WITH ALEF MAKSURA FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC84 ARABIC LIGATURE KAF WITH YEH FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	064A ARABIC LETTER YEH
 * FC85 ARABIC LIGATURE LAM WITH MEEM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FC86 ARABIC LIGATURE LAM WITH ALEF MAKSURA FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC87 ARABIC LIGATURE LAM WITH YEH FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	064A ARABIC LETTER YEH
 * FC88 ARABIC LIGATURE MEEM WITH ALEF FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0627 ARABIC LETTER ALEF
 * FC89 ARABIC LIGATURE MEEM WITH MEEM FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FC8A ARABIC LIGATURE NOON WITH REH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0631 ARABIC LETTER REH
 * FC8B ARABIC LIGATURE NOON WITH ZAIN FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0632 ARABIC LETTER ZAIN
 * FC8C ARABIC LIGATURE NOON WITH MEEM FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * FC8D ARABIC LIGATURE NOON WITH NOON FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0646 ARABIC LETTER NOON
 * FC8E ARABIC LIGATURE NOON WITH ALEF MAKSURA FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC8F ARABIC LIGATURE NOON WITH YEH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	064A ARABIC LETTER YEH
 * FC90 ARABIC LIGATURE ALEF MAKSURA WITH SUPERSCRIPT ALEF FINAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC91 ARABIC LIGATURE YEH WITH REH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0631 ARABIC LETTER REH
 * FC92 ARABIC LIGATURE YEH WITH ZAIN FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0632 ARABIC LETTER ZAIN
 * FC93 ARABIC LIGATURE YEH WITH MEEM FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * FC94 ARABIC LIGATURE YEH WITH NOON FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0646 ARABIC LETTER NOON
 * FC95 ARABIC LIGATURE YEH WITH ALEF MAKSURA FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FC96 ARABIC LIGATURE YEH WITH YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	064A ARABIC LETTER YEH
 * FC97 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH JEEM INITIAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FC98 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HAH INITIAL FORM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FC99 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH KHAH INITIAL FORM
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FC9A ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FC9B ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HEH INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	064A ARABIC LETTER YEH
 * FC9C ARABIC LIGATURE BEH WITH JEEM INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	062C ARABIC LETTER JEEM
 * FC9D ARABIC LIGATURE BEH WITH HAH INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	062D ARABIC LETTER HAH
 * FC9E ARABIC LIGATURE BEH WITH KHAH INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	062E ARABIC LETTER KHAH
 * FC9F ARABIC LIGATURE BEH WITH MEEM INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0645 ARABIC LETTER MEEM
 * FCA0 ARABIC LIGATURE BEH WITH HEH INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0647 ARABIC LETTER HEH
 * FCA1 ARABIC LIGATURE TEH WITH JEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062C ARABIC LETTER JEEM
 * FCA2 ARABIC LIGATURE TEH WITH HAH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062D ARABIC LETTER HAH
 * FCA3 ARABIC LIGATURE TEH WITH KHAH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062E ARABIC LETTER KHAH
 * FCA4 ARABIC LIGATURE TEH WITH MEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * FCA5 ARABIC LIGATURE TEH WITH HEH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0647 ARABIC LETTER HEH
 * FCA6 ARABIC LIGATURE THEH WITH MEEM INITIAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0645 ARABIC LETTER MEEM
 * FCA7 ARABIC LIGATURE JEEM WITH HAH INITIAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FCA8 ARABIC LIGATURE JEEM WITH MEEM INITIAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FCA9 ARABIC LIGATURE HAH WITH JEEM INITIAL FORM
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FCAA ARABIC LIGATURE HAH WITH MEEM INITIAL FORM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FCAB ARABIC LIGATURE KHAH WITH JEEM INITIAL FORM
 * 	062E ARABIC LETTER KHAH
 * 	062C ARABIC LETTER JEEM
 * FCAC ARABIC LIGATURE KHAH WITH MEEM INITIAL FORM
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FCAD ARABIC LIGATURE SEEN WITH JEEM INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062C ARABIC LETTER JEEM
 * FCAE ARABIC LIGATURE SEEN WITH HAH INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062D ARABIC LETTER HAH
 * FCAF ARABIC LIGATURE SEEN WITH KHAH INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062E ARABIC LETTER KHAH
 * FCB0 ARABIC LIGATURE SEEN WITH MEEM INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * FCB1 ARABIC LIGATURE SAD WITH HAH INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	062D ARABIC LETTER HAH
 * FCB2 ARABIC LIGATURE SAD WITH KHAH INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	062E ARABIC LETTER KHAH
 * FCB3 ARABIC LIGATURE SAD WITH MEEM INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	0645 ARABIC LETTER MEEM
 * FCB4 ARABIC LIGATURE DAD WITH JEEM INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062C ARABIC LETTER JEEM
 * FCB5 ARABIC LIGATURE DAD WITH HAH INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062D ARABIC LETTER HAH
 * FCB6 ARABIC LIGATURE DAD WITH KHAH INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062E ARABIC LETTER KHAH
 * FCB7 ARABIC LIGATURE DAD WITH MEEM INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	0645 ARABIC LETTER MEEM
 * FCB8 ARABIC LIGATURE TAH WITH HAH INITIAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	062D ARABIC LETTER HAH
 * FCB9 ARABIC LIGATURE ZAH WITH MEEM INITIAL FORM
 * 	0638 ARABIC LETTER ZAH
 * 	0645 ARABIC LETTER MEEM
 * FCBA ARABIC LIGATURE AIN WITH JEEM INITIAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	062C ARABIC LETTER JEEM
 * FCBB ARABIC LIGATURE AIN WITH MEEM INITIAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * FCBC ARABIC LIGATURE GHAIN WITH JEEM INITIAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	062C ARABIC LETTER JEEM
 * FCBD ARABIC LIGATURE GHAIN WITH MEEM INITIAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0645 ARABIC LETTER MEEM
 * FCBE ARABIC LIGATURE FEH WITH JEEM INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	062C ARABIC LETTER JEEM
 * FCBF ARABIC LIGATURE FEH WITH HAH INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	062D ARABIC LETTER HAH
 * FCC0 ARABIC LIGATURE FEH WITH KHAH INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	062E ARABIC LETTER KHAH
 * FCC1 ARABIC LIGATURE FEH WITH MEEM INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	0645 ARABIC LETTER MEEM
 * FCC2 ARABIC LIGATURE QAF WITH HAH INITIAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	062D ARABIC LETTER HAH
 * FCC3 ARABIC LIGATURE QAF WITH MEEM INITIAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * FCC4 ARABIC LIGATURE KAF WITH JEEM INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	062C ARABIC LETTER JEEM
 * FCC5 ARABIC LIGATURE KAF WITH HAH INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	062D ARABIC LETTER HAH
 * FCC6 ARABIC LIGATURE KAF WITH KHAH INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	062E ARABIC LETTER KHAH
 * FCC7 ARABIC LIGATURE KAF WITH LAM INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0644 ARABIC LETTER LAM
 * FCC8 ARABIC LIGATURE KAF WITH MEEM INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * FCC9 ARABIC LIGATURE LAM WITH JEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * FCCA ARABIC LIGATURE LAM WITH HAH INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * FCCB ARABIC LIGATURE LAM WITH KHAH INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062E ARABIC LETTER KHAH
 * FCCC ARABIC LIGATURE LAM WITH MEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FCCD ARABIC LIGATURE LAM WITH HEH INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0647 ARABIC LETTER HEH
 * FCCE ARABIC LIGATURE MEEM WITH JEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * FCCF ARABIC LIGATURE MEEM WITH HAH INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FCD0 ARABIC LIGATURE MEEM WITH KHAH INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * FCD1 ARABIC LIGATURE MEEM WITH MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FCD2 ARABIC LIGATURE NOON WITH JEEM INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * FCD3 ARABIC LIGATURE NOON WITH HAH INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062D ARABIC LETTER HAH
 * FCD4 ARABIC LIGATURE NOON WITH KHAH INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062E ARABIC LETTER KHAH
 * FCD5 ARABIC LIGATURE NOON WITH MEEM INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * FCD6 ARABIC LIGATURE NOON WITH HEH INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0647 ARABIC LETTER HEH
 * FCD7 ARABIC LIGATURE HEH WITH JEEM INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	062C ARABIC LETTER JEEM
 * FCD8 ARABIC LIGATURE HEH WITH MEEM INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	0645 ARABIC LETTER MEEM
 * FCD9 ARABIC LIGATURE HEH WITH SUPERSCRIPT ALEF INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * FCDA ARABIC LIGATURE YEH WITH JEEM INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	062C ARABIC LETTER JEEM
 * FCDB ARABIC LIGATURE YEH WITH HAH INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	062D ARABIC LETTER HAH
 * FCDC ARABIC LIGATURE YEH WITH KHAH INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	062E ARABIC LETTER KHAH
 * FCDD ARABIC LIGATURE YEH WITH MEEM INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * FCDE ARABIC LIGATURE YEH WITH HEH INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0647 ARABIC LETTER HEH
 * FCDF ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH MEEM MEDIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FCE0 ARABIC LIGATURE YEH WITH HAMZA ABOVE WITH HEH MEDIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	064A ARABIC LETTER YEH
 * FCE1 ARABIC LIGATURE BEH WITH MEEM MEDIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0645 ARABIC LETTER MEEM
 * FCE2 ARABIC LIGATURE BEH WITH HEH MEDIAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	0647 ARABIC LETTER HEH
 * FCE3 ARABIC LIGATURE TEH WITH MEEM MEDIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * FCE4 ARABIC LIGATURE TEH WITH HEH MEDIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0647 ARABIC LETTER HEH
 * FCE5 ARABIC LIGATURE THEH WITH MEEM MEDIAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0645 ARABIC LETTER MEEM
 * FCE6 ARABIC LIGATURE THEH WITH HEH MEDIAL FORM
 * 	062B ARABIC LETTER THEH
 * 	0647 ARABIC LETTER HEH
 * FCE7 ARABIC LIGATURE SEEN WITH MEEM MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * FCE8 ARABIC LIGATURE SEEN WITH HEH MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0647 ARABIC LETTER HEH
 * FCE9 ARABIC LIGATURE SHEEN WITH MEEM MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * FCEA ARABIC LIGATURE SHEEN WITH HEH MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0647 ARABIC LETTER HEH
 * FCEB ARABIC LIGATURE KAF WITH LAM MEDIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0644 ARABIC LETTER LAM
 * FCEC ARABIC LIGATURE KAF WITH MEEM MEDIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * FCED ARABIC LIGATURE LAM WITH MEEM MEDIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FCEE ARABIC LIGATURE NOON WITH MEEM MEDIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * FCEF ARABIC LIGATURE NOON WITH HEH MEDIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0647 ARABIC LETTER HEH
 * FCF0 ARABIC LIGATURE YEH WITH MEEM MEDIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * FCF1 ARABIC LIGATURE YEH WITH HEH MEDIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0647 ARABIC LETTER HEH
 * FCF2 ARABIC LIGATURE SHADDA WITH FATHA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FCF3 ARABIC LIGATURE SHADDA WITH DAMMA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FCF4 ARABIC LIGATURE SHADDA WITH KASRA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FCF5 ARABIC LIGATURE TAH WITH ALEF MAKSURA ISOLATED FORM
 * 	0637 ARABIC LETTER TAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FCF6 ARABIC LIGATURE TAH WITH YEH ISOLATED FORM
 * 	0637 ARABIC LETTER TAH
 * 	064A ARABIC LETTER YEH
 * FCF7 ARABIC LIGATURE AIN WITH ALEF MAKSURA ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FCF8 ARABIC LIGATURE AIN WITH YEH ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * 	064A ARABIC LETTER YEH
 * FCF9 ARABIC LIGATURE GHAIN WITH ALEF MAKSURA ISOLATED FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FCFA ARABIC LIGATURE GHAIN WITH YEH ISOLATED FORM
 * 	063A ARABIC LETTER GHAIN
 * 	064A ARABIC LETTER YEH
 * FCFB ARABIC LIGATURE SEEN WITH ALEF MAKSURA ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FCFC ARABIC LIGATURE SEEN WITH YEH ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	064A ARABIC LETTER YEH
 * FCFD ARABIC LIGATURE SHEEN WITH ALEF MAKSURA ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FCFE ARABIC LIGATURE SHEEN WITH YEH ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	064A ARABIC LETTER YEH
 * FCFF ARABIC LIGATURE HAH WITH ALEF MAKSURA ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD00 ARABIC LIGATURE HAH WITH YEH ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FD01 ARABIC LIGATURE JEEM WITH ALEF MAKSURA ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD02 ARABIC LIGATURE JEEM WITH YEH ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FD03 ARABIC LIGATURE KHAH WITH ALEF MAKSURA ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD04 ARABIC LIGATURE KHAH WITH YEH ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FD05 ARABIC LIGATURE SAD WITH ALEF MAKSURA ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD06 ARABIC LIGATURE SAD WITH YEH ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	064A ARABIC LETTER YEH
 * FD07 ARABIC LIGATURE DAD WITH ALEF MAKSURA ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD08 ARABIC LIGATURE DAD WITH YEH ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	064A ARABIC LETTER YEH
 * FD09 ARABIC LIGATURE SHEEN WITH JEEM ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062C ARABIC LETTER JEEM
 * FD0A ARABIC LIGATURE SHEEN WITH HAH ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * FD0B ARABIC LIGATURE SHEEN WITH KHAH ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062E ARABIC LETTER KHAH
 * FD0C ARABIC LIGATURE SHEEN WITH MEEM ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * FD0D ARABIC LIGATURE SHEEN WITH REH ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0631 ARABIC LETTER REH
 * FD0E ARABIC LIGATURE SEEN WITH REH ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0631 ARABIC LETTER REH
 * FD0F ARABIC LIGATURE SAD WITH REH ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0631 ARABIC LETTER REH
 * FD10 ARABIC LIGATURE DAD WITH REH ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * 	0631 ARABIC LETTER REH
 * FD11 ARABIC LIGATURE TAH WITH ALEF MAKSURA FINAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD12 ARABIC LIGATURE TAH WITH YEH FINAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	064A ARABIC LETTER YEH
 * FD13 ARABIC LIGATURE AIN WITH ALEF MAKSURA FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD14 ARABIC LIGATURE AIN WITH YEH FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	064A ARABIC LETTER YEH
 * FD15 ARABIC LIGATURE GHAIN WITH ALEF MAKSURA FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD16 ARABIC LIGATURE GHAIN WITH YEH FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	064A ARABIC LETTER YEH
 * FD17 ARABIC LIGATURE SEEN WITH ALEF MAKSURA FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD18 ARABIC LIGATURE SEEN WITH YEH FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	064A ARABIC LETTER YEH
 * FD19 ARABIC LIGATURE SHEEN WITH ALEF MAKSURA FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD1A ARABIC LIGATURE SHEEN WITH YEH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	064A ARABIC LETTER YEH
 * FD1B ARABIC LIGATURE HAH WITH ALEF MAKSURA FINAL FORM
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD1C ARABIC LIGATURE HAH WITH YEH FINAL FORM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FD1D ARABIC LIGATURE JEEM WITH ALEF MAKSURA FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD1E ARABIC LIGATURE JEEM WITH YEH FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FD1F ARABIC LIGATURE KHAH WITH ALEF MAKSURA FINAL FORM
 * 	062E ARABIC LETTER KHAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD20 ARABIC LIGATURE KHAH WITH YEH FINAL FORM
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FD21 ARABIC LIGATURE SAD WITH ALEF MAKSURA FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD22 ARABIC LIGATURE SAD WITH YEH FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	064A ARABIC LETTER YEH
 * FD23 ARABIC LIGATURE DAD WITH ALEF MAKSURA FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD24 ARABIC LIGATURE DAD WITH YEH FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	064A ARABIC LETTER YEH
 * FD25 ARABIC LIGATURE SHEEN WITH JEEM FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062C ARABIC LETTER JEEM
 * FD26 ARABIC LIGATURE SHEEN WITH HAH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * FD27 ARABIC LIGATURE SHEEN WITH KHAH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062E ARABIC LETTER KHAH
 * FD28 ARABIC LIGATURE SHEEN WITH MEEM FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * FD29 ARABIC LIGATURE SHEEN WITH REH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0631 ARABIC LETTER REH
 * FD2A ARABIC LIGATURE SEEN WITH REH FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0631 ARABIC LETTER REH
 * FD2B ARABIC LIGATURE SAD WITH REH FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	0631 ARABIC LETTER REH
 * FD2C ARABIC LIGATURE DAD WITH REH FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	0631 ARABIC LETTER REH
 * FD2D ARABIC LIGATURE SHEEN WITH JEEM INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062C ARABIC LETTER JEEM
 * FD2E ARABIC LIGATURE SHEEN WITH HAH INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * FD2F ARABIC LIGATURE SHEEN WITH KHAH INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062E ARABIC LETTER KHAH
 * FD30 ARABIC LIGATURE SHEEN WITH MEEM INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * FD31 ARABIC LIGATURE SEEN WITH HEH INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0647 ARABIC LETTER HEH
 * FD32 ARABIC LIGATURE SHEEN WITH HEH INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0647 ARABIC LETTER HEH
 * FD33 ARABIC LIGATURE TAH WITH MEEM INITIAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * FD34 ARABIC LIGATURE SEEN WITH JEEM MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062C ARABIC LETTER JEEM
 * FD35 ARABIC LIGATURE SEEN WITH HAH MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062D ARABIC LETTER HAH
 * FD36 ARABIC LIGATURE SEEN WITH KHAH MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062E ARABIC LETTER KHAH
 * FD37 ARABIC LIGATURE SHEEN WITH JEEM MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062C ARABIC LETTER JEEM
 * FD38 ARABIC LIGATURE SHEEN WITH HAH MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * FD39 ARABIC LIGATURE SHEEN WITH KHAH MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062E ARABIC LETTER KHAH
 * FD3A ARABIC LIGATURE TAH WITH MEEM MEDIAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * FD3B ARABIC LIGATURE ZAH WITH MEEM MEDIAL FORM
 * 	0638 ARABIC LETTER ZAH
 * 	0645 ARABIC LETTER MEEM
 * FD3C ARABIC LIGATURE ALEF WITH FATHATAN FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * FD3D ARABIC LIGATURE ALEF WITH FATHATAN ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * FD50 ARABIC LIGATURE TEH WITH JEEM WITH MEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FD51 ARABIC LIGATURE TEH WITH HAH WITH JEEM FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FD52 ARABIC LIGATURE TEH WITH HAH WITH JEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FD53 ARABIC LIGATURE TEH WITH HAH WITH MEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD54 ARABIC LIGATURE TEH WITH KHAH WITH MEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD55 ARABIC LIGATURE TEH WITH MEEM WITH JEEM INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * FD56 ARABIC LIGATURE TEH WITH MEEM WITH HAH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD57 ARABIC LIGATURE TEH WITH MEEM WITH KHAH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * FD58 ARABIC LIGATURE JEEM WITH MEEM WITH HAH FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD59 ARABIC LIGATURE JEEM WITH MEEM WITH HAH INITIAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD5A ARABIC LIGATURE HAH WITH MEEM WITH YEH FINAL FORM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FD5B ARABIC LIGATURE HAH WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD5C ARABIC LIGATURE SEEN WITH HAH WITH JEEM INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FD5D ARABIC LIGATURE SEEN WITH JEEM WITH HAH INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FD5E ARABIC LIGATURE SEEN WITH JEEM WITH ALEF MAKSURA FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062C ARABIC LETTER JEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD5F ARABIC LIGATURE SEEN WITH MEEM WITH HAH FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD60 ARABIC LIGATURE SEEN WITH MEEM WITH HAH INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD61 ARABIC LIGATURE SEEN WITH MEEM WITH JEEM INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * FD62 ARABIC LIGATURE SEEN WITH MEEM WITH MEEM FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD63 ARABIC LIGATURE SEEN WITH MEEM WITH MEEM INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD64 ARABIC LIGATURE SAD WITH HAH WITH HAH FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	062D ARABIC LETTER HAH
 * 	062D ARABIC LETTER HAH
 * FD65 ARABIC LIGATURE SAD WITH HAH WITH HAH INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	062D ARABIC LETTER HAH
 * 	062D ARABIC LETTER HAH
 * FD66 ARABIC LIGATURE SAD WITH MEEM WITH MEEM FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD67 ARABIC LIGATURE SHEEN WITH HAH WITH MEEM FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD68 ARABIC LIGATURE SHEEN WITH HAH WITH MEEM INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD69 ARABIC LIGATURE SHEEN WITH JEEM WITH YEH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FD6A ARABIC LIGATURE SHEEN WITH MEEM WITH KHAH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * FD6B ARABIC LIGATURE SHEEN WITH MEEM WITH KHAH INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * FD6C ARABIC LIGATURE SHEEN WITH MEEM WITH MEEM FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD6D ARABIC LIGATURE SHEEN WITH MEEM WITH MEEM INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD6E ARABIC LIGATURE DAD WITH HAH WITH ALEF MAKSURA FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD6F ARABIC LIGATURE DAD WITH KHAH WITH MEEM FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD70 ARABIC LIGATURE DAD WITH KHAH WITH MEEM INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD71 ARABIC LIGATURE TAH WITH MEEM WITH HAH FINAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD72 ARABIC LIGATURE TAH WITH MEEM WITH HAH INITIAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD73 ARABIC LIGATURE TAH WITH MEEM WITH MEEM INITIAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD74 ARABIC LIGATURE TAH WITH MEEM WITH YEH FINAL FORM
 * 	0637 ARABIC LETTER TAH
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FD75 ARABIC LIGATURE AIN WITH JEEM WITH MEEM FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FD76 ARABIC LIGATURE AIN WITH MEEM WITH MEEM FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD77 ARABIC LIGATURE AIN WITH MEEM WITH MEEM INITIAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD78 ARABIC LIGATURE AIN WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD79 ARABIC LIGATURE GHAIN WITH MEEM WITH MEEM FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD7A ARABIC LIGATURE GHAIN WITH MEEM WITH YEH FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FD7B ARABIC LIGATURE GHAIN WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD7C ARABIC LIGATURE FEH WITH KHAH WITH MEEM FINAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD7D ARABIC LIGATURE FEH WITH KHAH WITH MEEM INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD7E ARABIC LIGATURE QAF WITH MEEM WITH HAH FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD7F ARABIC LIGATURE QAF WITH MEEM WITH MEEM FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD80 ARABIC LIGATURE LAM WITH HAH WITH MEEM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD81 ARABIC LIGATURE LAM WITH HAH WITH YEH FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FD82 ARABIC LIGATURE LAM WITH HAH WITH ALEF MAKSURA FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD83 ARABIC LIGATURE LAM WITH JEEM WITH JEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * 	062C ARABIC LETTER JEEM
 * FD84 ARABIC LIGATURE LAM WITH JEEM WITH JEEM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * 	062C ARABIC LETTER JEEM
 * FD85 ARABIC LIGATURE LAM WITH KHAH WITH MEEM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD86 ARABIC LIGATURE LAM WITH KHAH WITH MEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD87 ARABIC LIGATURE LAM WITH MEEM WITH HAH FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD88 ARABIC LIGATURE LAM WITH MEEM WITH HAH INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FD89 ARABIC LIGATURE MEEM WITH HAH WITH JEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * FD8A ARABIC LIGATURE MEEM WITH HAH WITH MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD8B ARABIC LIGATURE MEEM WITH HAH WITH YEH FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FD8C ARABIC LIGATURE MEEM WITH JEEM WITH HAH INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FD8D ARABIC LIGATURE MEEM WITH JEEM WITH MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FD8E ARABIC LIGATURE MEEM WITH KHAH WITH JEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * 	062C ARABIC LETTER JEEM
 * FD8F ARABIC LIGATURE MEEM WITH KHAH WITH MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * 	0645 ARABIC LETTER MEEM
 * FD92 ARABIC LIGATURE MEEM WITH JEEM WITH KHAH INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * 	062E ARABIC LETTER KHAH
 * FD93 ARABIC LIGATURE HEH WITH MEEM WITH JEEM INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * FD94 ARABIC LIGATURE HEH WITH MEEM WITH MEEM INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD95 ARABIC LIGATURE NOON WITH HAH WITH MEEM INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FD96 ARABIC LIGATURE NOON WITH HAH WITH ALEF MAKSURA FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD97 ARABIC LIGATURE NOON WITH JEEM WITH MEEM FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FD98 ARABIC LIGATURE NOON WITH JEEM WITH MEEM INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FD99 ARABIC LIGATURE NOON WITH JEEM WITH ALEF MAKSURA FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD9A ARABIC LIGATURE NOON WITH MEEM WITH YEH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FD9B ARABIC LIGATURE NOON WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FD9C ARABIC LIGATURE YEH WITH MEEM WITH MEEM FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD9D ARABIC LIGATURE YEH WITH MEEM WITH MEEM INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FD9E ARABIC LIGATURE BEH WITH KHAH WITH YEH FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FD9F ARABIC LIGATURE TEH WITH JEEM WITH YEH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDA0 ARABIC LIGATURE TEH WITH JEEM WITH ALEF MAKSURA FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062C ARABIC LETTER JEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA1 ARABIC LIGATURE TEH WITH KHAH WITH YEH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FDA2 ARABIC LIGATURE TEH WITH KHAH WITH ALEF MAKSURA FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	062E ARABIC LETTER KHAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA3 ARABIC LIGATURE TEH WITH MEEM WITH YEH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDA4 ARABIC LIGATURE TEH WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	062A ARABIC LETTER TEH
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA5 ARABIC LIGATURE JEEM WITH MEEM WITH YEH FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDA6 ARABIC LIGATURE JEEM WITH HAH WITH ALEF MAKSURA FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA7 ARABIC LIGATURE JEEM WITH MEEM WITH ALEF MAKSURA FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA8 ARABIC LIGATURE SEEN WITH KHAH WITH ALEF MAKSURA FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062E ARABIC LETTER KHAH
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDA9 ARABIC LIGATURE SAD WITH HAH WITH YEH FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDAA ARABIC LIGATURE SHEEN WITH HAH WITH YEH FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDAB ARABIC LIGATURE DAD WITH HAH WITH YEH FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDAC ARABIC LIGATURE LAM WITH JEEM WITH YEH FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDAD ARABIC LIGATURE LAM WITH MEEM WITH YEH FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDAE ARABIC LIGATURE YEH WITH HAH WITH YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDAF ARABIC LIGATURE YEH WITH JEEM WITH YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDB0 ARABIC LIGATURE YEH WITH MEEM WITH YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDB1 ARABIC LIGATURE MEEM WITH MEEM WITH YEH FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDB2 ARABIC LIGATURE QAF WITH MEEM WITH YEH FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDB3 ARABIC LIGATURE NOON WITH HAH WITH YEH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDB4 ARABIC LIGATURE QAF WITH MEEM WITH HAH INITIAL FORM
 * 	0642 ARABIC LETTER QAF
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * FDB5 ARABIC LIGATURE LAM WITH HAH WITH MEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * FDB6 ARABIC LIGATURE AIN WITH MEEM WITH YEH FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDB7 ARABIC LIGATURE KAF WITH MEEM WITH YEH FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDB8 ARABIC LIGATURE NOON WITH JEEM WITH HAH INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FDB9 ARABIC LIGATURE MEEM WITH KHAH WITH YEH FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FDBA ARABIC LIGATURE LAM WITH JEEM WITH MEEM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FDBB ARABIC LIGATURE KAF WITH MEEM WITH MEEM FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FDBC ARABIC LIGATURE LAM WITH JEEM WITH MEEM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FDBD ARABIC LIGATURE NOON WITH JEEM WITH HAH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * FDBE ARABIC LIGATURE JEEM WITH HAH WITH YEH FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDBF ARABIC LIGATURE HAH WITH JEEM WITH YEH FINAL FORM
 * 	062D ARABIC LETTER HAH
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDC0 ARABIC LIGATURE MEEM WITH JEEM WITH YEH FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDC1 ARABIC LIGATURE FEH WITH MEEM WITH YEH FINAL FORM
 * 	0641 ARABIC LETTER FEH
 * 	0645 ARABIC LETTER MEEM
 * 	064A ARABIC LETTER YEH
 * FDC2 ARABIC LIGATURE BEH WITH HAH WITH YEH FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * 	062D ARABIC LETTER HAH
 * 	064A ARABIC LETTER YEH
 * FDC3 ARABIC LIGATURE KAF WITH MEEM WITH MEEM INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FDC4 ARABIC LIGATURE AIN WITH JEEM WITH MEEM INITIAL FORM
 * 	0639 ARABIC LETTER AIN
 * 	062C ARABIC LETTER JEEM
 * 	0645 ARABIC LETTER MEEM
 * FDC5 ARABIC LIGATURE SAD WITH MEEM WITH MEEM INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * 	0645 ARABIC LETTER MEEM
 * 	0645 ARABIC LETTER MEEM
 * FDC6 ARABIC LIGATURE SEEN WITH KHAH WITH YEH FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * 	062E ARABIC LETTER KHAH
 * 	064A ARABIC LETTER YEH
 * FDC7 ARABIC LIGATURE NOON WITH JEEM WITH YEH FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * 	062C ARABIC LETTER JEEM
 * 	064A ARABIC LETTER YEH
 * FDF0 ARABIC LIGATURE SALLA USED AS KORANIC STOP SIGN ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0644 ARABIC LETTER LAM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FDF1 ARABIC LIGATURE QALA USED AS KORANIC STOP SIGN ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * 	0644 ARABIC LETTER LAM
 * 	06D2 ARABIC LETTER YEH BARREE
 * FDF2 ARABIC LIGATURE ALLAH ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * 	0644 ARABIC LETTER LAM
 * 	0644 ARABIC LETTER LAM
 * 	0647 ARABIC LETTER HEH
 * FDF3 ARABIC LIGATURE AKBAR ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * 	0643 ARABIC LETTER KAF
 * 	0628 ARABIC LETTER BEH
 * 	0631 ARABIC LETTER REH
 * FDF4 ARABIC LIGATURE MOHAMMAD ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * 	062D ARABIC LETTER HAH
 * 	0645 ARABIC LETTER MEEM
 * 	062F ARABIC LETTER DAL
 * FDF5 ARABIC LIGATURE SALAM ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0644 ARABIC LETTER LAM
 * 	0639 ARABIC LETTER AIN
 * 	0645 ARABIC LETTER MEEM
 * FDF6 ARABIC LIGATURE RASOUL ISOLATED FORM
 * 	0631 ARABIC LETTER REH
 * 	0633 ARABIC LETTER SEEN
 * 	0648 ARABIC LETTER WAW
 * 	0644 ARABIC LETTER LAM
 * FDF7 ARABIC LIGATURE ALAYHE ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * 	0644 ARABIC LETTER LAM
 * 	064A ARABIC LETTER YEH
 * 	0647 ARABIC LETTER HEH
 * FDF8 ARABIC LIGATURE WASALLAM ISOLATED FORM
 * 	0648 ARABIC LETTER WAW
 * 	0633 ARABIC LETTER SEEN
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FDF9 ARABIC LIGATURE SALLA ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * 	0644 ARABIC LETTER LAM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FDFA ARABIC LIGATURE SALLALLAHOU ALAYHE WASALLAM
 * 	0635 ARABIC LETTER SAD
 * 	0644 ARABIC LETTER LAM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * 	0020 SPACE
 * 	0627 ARABIC LETTER ALEF
 * 	0644 ARABIC LETTER LAM
 * 	0644 ARABIC LETTER LAM
 * 	0647 ARABIC LETTER HEH
 * 	0020 SPACE
 * 	0639 ARABIC LETTER AIN
 * 	0644 ARABIC LETTER LAM
 * 	064A ARABIC LETTER YEH
 * 	0647 ARABIC LETTER HEH
 * 	0020 SPACE
 * 	0648 ARABIC LETTER WAW
 * 	0633 ARABIC LETTER SEEN
 * 	0644 ARABIC LETTER LAM
 * 	0645 ARABIC LETTER MEEM
 * FDFB ARABIC LIGATURE JALLAJALALOUHOU
 * 	062C ARABIC LETTER JEEM
 * 	0644 ARABIC LETTER LAM
 * 	0020 SPACE
 * 	062C ARABIC LETTER JEEM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * 	0644 ARABIC LETTER LAM
 * 	0647 ARABIC LETTER HEH
 * FDFC RIAL SIGN
 * 	0631 ARABIC LETTER REH
 * 	06CC ARABIC LETTER FARSI YEH
 * 	0627 ARABIC LETTER ALEF
 * 	0644 ARABIC LETTER LAM
 * FE00 VARIATION SELECTOR-1
 * 	0000 
 * FE01 VARIATION SELECTOR-2
 * 	0000 
 * FE02 VARIATION SELECTOR-3
 * 	0000 
 * FE03 VARIATION SELECTOR-4
 * 	0000 
 * FE04 VARIATION SELECTOR-5
 * 	0000 
 * FE05 VARIATION SELECTOR-6
 * 	0000 
 * FE06 VARIATION SELECTOR-7
 * 	0000 
 * FE07 VARIATION SELECTOR-8
 * 	0000 
 * FE08 VARIATION SELECTOR-9
 * 	0000 
 * FE09 VARIATION SELECTOR-10
 * 	0000 
 * FE0A VARIATION SELECTOR-11
 * 	0000 
 * FE0B VARIATION SELECTOR-12
 * 	0000 
 * FE0C VARIATION SELECTOR-13
 * 	0000 
 * FE0D VARIATION SELECTOR-14
 * 	0000 
 * FE0E VARIATION SELECTOR-15
 * 	0000 
 * FE0F VARIATION SELECTOR-16
 * 	0000 
 * FE10 PRESENTATION FORM FOR VERTICAL COMMA
 * 	002C COMMA
 * FE11 PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC COMMA
 * 	3001 IDEOGRAPHIC COMMA
 * FE12 PRESENTATION FORM FOR VERTICAL IDEOGRAPHIC FULL STOP
 * 	3002 IDEOGRAPHIC FULL STOP
 * FE13 PRESENTATION FORM FOR VERTICAL COLON
 * 	003A COLON
 * FE14 PRESENTATION FORM FOR VERTICAL SEMICOLON
 * 	003B SEMICOLON
 * FE15 PRESENTATION FORM FOR VERTICAL EXCLAMATION MARK
 * 	0021 EXCLAMATION MARK
 * FE16 PRESENTATION FORM FOR VERTICAL QUESTION MARK
 * 	003F QUESTION MARK
 * FE17 PRESENTATION FORM FOR VERTICAL LEFT WHITE LENTICULAR BRACKET
 * 	3016 LEFT WHITE LENTICULAR BRACKET
 * FE18 PRESENTATION FORM FOR VERTICAL RIGHT WHITE LENTICULAR BRAKCET
 * 	3017 RIGHT WHITE LENTICULAR BRACKET
 * FE19 PRESENTATION FORM FOR VERTICAL HORIZONTAL ELLIPSIS
 * 	002E FULL STOP
 * 	002E FULL STOP
 * 	002E FULL STOP
 * FE20 COMBINING LIGATURE LEFT HALF
 * 	0000 
 * FE21 COMBINING LIGATURE RIGHT HALF
 * 	0000 
 * FE22 COMBINING DOUBLE TILDE LEFT HALF
 * 	0000 
 * FE23 COMBINING DOUBLE TILDE RIGHT HALF
 * 	0000 
 * FE24 COMBINING MACRON LEFT HALF
 * 	0000 
 * FE25 COMBINING MACRON RIGHT HALF
 * 	0000 
 * FE26 COMBINING CONJOINING MACRON
 * 	0000 
 * FE30 PRESENTATION FORM FOR VERTICAL TWO DOT LEADER
 * 	002E FULL STOP
 * 	002E FULL STOP
 * FE31 PRESENTATION FORM FOR VERTICAL EM DASH
 * 	2014 EM DASH
 * FE32 PRESENTATION FORM FOR VERTICAL EN DASH
 * 	2013 EN DASH
 * FE33 PRESENTATION FORM FOR VERTICAL LOW LINE
 * 	005F LOW LINE
 * FE34 PRESENTATION FORM FOR VERTICAL WAVY LOW LINE
 * 	005F LOW LINE
 * FE35 PRESENTATION FORM FOR VERTICAL LEFT PARENTHESIS
 * 	0028 LEFT PARENTHESIS
 * FE36 PRESENTATION FORM FOR VERTICAL RIGHT PARENTHESIS
 * 	0029 RIGHT PARENTHESIS
 * FE37 PRESENTATION FORM FOR VERTICAL LEFT CURLY BRACKET
 * 	007B LEFT CURLY BRACKET
 * FE38 PRESENTATION FORM FOR VERTICAL RIGHT CURLY BRACKET
 * 	007D RIGHT CURLY BRACKET
 * FE39 PRESENTATION FORM FOR VERTICAL LEFT TORTOISE SHELL BRACKET
 * 	3014 LEFT TORTOISE SHELL BRACKET
 * FE3A PRESENTATION FORM FOR VERTICAL RIGHT TORTOISE SHELL BRACKET
 * 	3015 RIGHT TORTOISE SHELL BRACKET
 * FE3B PRESENTATION FORM FOR VERTICAL LEFT BLACK LENTICULAR BRACKET
 * 	3010 LEFT BLACK LENTICULAR BRACKET
 * FE3C PRESENTATION FORM FOR VERTICAL RIGHT BLACK LENTICULAR BRACKET
 * 	3011 RIGHT BLACK LENTICULAR BRACKET
 * FE3D PRESENTATION FORM FOR VERTICAL LEFT DOUBLE ANGLE BRACKET
 * 	300A LEFT DOUBLE ANGLE BRACKET
 * FE3E PRESENTATION FORM FOR VERTICAL RIGHT DOUBLE ANGLE BRACKET
 * 	300B RIGHT DOUBLE ANGLE BRACKET
 * FE3F PRESENTATION FORM FOR VERTICAL LEFT ANGLE BRACKET
 * 	3008 LEFT ANGLE BRACKET
 * FE40 PRESENTATION FORM FOR VERTICAL RIGHT ANGLE BRACKET
 * 	3009 RIGHT ANGLE BRACKET
 * FE41 PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET
 * 	300C LEFT CORNER BRACKET
 * FE42 PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET
 * 	300D RIGHT CORNER BRACKET
 * FE43 PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET
 * 	300E LEFT WHITE CORNER BRACKET
 * FE44 PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET
 * 	300F RIGHT WHITE CORNER BRACKET
 * FE47 PRESENTATION FORM FOR VERTICAL LEFT SQUARE BRACKET
 * 	005B LEFT SQUARE BRACKET
 * FE48 PRESENTATION FORM FOR VERTICAL RIGHT SQUARE BRACKET
 * 	005D RIGHT SQUARE BRACKET
 * FE49 DASHED OVERLINE
 * 	0020 SPACE
 * FE4A CENTRELINE OVERLINE
 * 	0020 SPACE
 * FE4B WAVY OVERLINE
 * 	0020 SPACE
 * FE4C DOUBLE WAVY OVERLINE
 * 	0020 SPACE
 * FE4D DASHED LOW LINE
 * 	005F LOW LINE
 * FE4E CENTRELINE LOW LINE
 * 	005F LOW LINE
 * FE4F WAVY LOW LINE
 * 	005F LOW LINE
 * FE50 SMALL COMMA
 * 	002C COMMA
 * FE51 SMALL IDEOGRAPHIC COMMA
 * 	3001 IDEOGRAPHIC COMMA
 * FE52 SMALL FULL STOP
 * 	002E FULL STOP
 * FE54 SMALL SEMICOLON
 * 	003B SEMICOLON
 * FE55 SMALL COLON
 * 	003A COLON
 * FE56 SMALL QUESTION MARK
 * 	003F QUESTION MARK
 * FE57 SMALL EXCLAMATION MARK
 * 	0021 EXCLAMATION MARK
 * FE58 SMALL EM DASH
 * 	2014 EM DASH
 * FE59 SMALL LEFT PARENTHESIS
 * 	0028 LEFT PARENTHESIS
 * FE5A SMALL RIGHT PARENTHESIS
 * 	0029 RIGHT PARENTHESIS
 * FE5B SMALL LEFT CURLY BRACKET
 * 	007B LEFT CURLY BRACKET
 * FE5C SMALL RIGHT CURLY BRACKET
 * 	007D RIGHT CURLY BRACKET
 * FE5D SMALL LEFT TORTOISE SHELL BRACKET
 * 	3014 LEFT TORTOISE SHELL BRACKET
 * FE5E SMALL RIGHT TORTOISE SHELL BRACKET
 * 	3015 RIGHT TORTOISE SHELL BRACKET
 * FE5F SMALL NUMBER SIGN
 * 	0023 NUMBER SIGN
 * FE60 SMALL AMPERSAND
 * 	0026 AMPERSAND
 * FE61 SMALL ASTERISK
 * 	002A ASTERISK
 * FE62 SMALL PLUS SIGN
 * 	002B PLUS SIGN
 * FE63 SMALL HYPHEN-MINUS
 * 	002D HYPHEN-MINUS
 * FE64 SMALL LESS-THAN SIGN
 * 	003C LESS-THAN SIGN
 * FE65 SMALL GREATER-THAN SIGN
 * 	003E GREATER-THAN SIGN
 * FE66 SMALL EQUALS SIGN
 * 	003D EQUALS SIGN
 * FE68 SMALL REVERSE SOLIDUS
 * 	005C REVERSE SOLIDUS
 * FE69 SMALL DOLLAR SIGN
 * 	0024 DOLLAR SIGN
 * FE6A SMALL PERCENT SIGN
 * 	0025 PERCENT SIGN
 * FE6B SMALL COMMERCIAL AT
 * 	0040 COMMERCIAL AT
 * FE70 ARABIC FATHATAN ISOLATED FORM
 * 	0020 SPACE
 * FE71 ARABIC TATWEEL WITH FATHATAN ABOVE
 * 	0640 ARABIC TATWEEL
 * FE72 ARABIC DAMMATAN ISOLATED FORM
 * 	0020 SPACE
 * FE74 ARABIC KASRATAN ISOLATED FORM
 * 	0020 SPACE
 * FE76 ARABIC FATHA ISOLATED FORM
 * 	0020 SPACE
 * FE77 ARABIC FATHA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FE78 ARABIC DAMMA ISOLATED FORM
 * 	0020 SPACE
 * FE79 ARABIC DAMMA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FE7A ARABIC KASRA ISOLATED FORM
 * 	0020 SPACE
 * FE7B ARABIC KASRA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FE7C ARABIC SHADDA ISOLATED FORM
 * 	0020 SPACE
 * FE7D ARABIC SHADDA MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FE7E ARABIC SUKUN ISOLATED FORM
 * 	0020 SPACE
 * FE7F ARABIC SUKUN MEDIAL FORM
 * 	0640 ARABIC TATWEEL
 * FE80 ARABIC LETTER HAMZA ISOLATED FORM
 * 	0621 ARABIC LETTER HAMZA
 * FE81 ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * FE82 ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * FE83 ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * FE84 ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * FE85 ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
 * 	0648 ARABIC LETTER WAW
 * FE86 ARABIC LETTER WAW WITH HAMZA ABOVE FINAL FORM
 * 	0648 ARABIC LETTER WAW
 * FE87 ARABIC LETTER ALEF WITH HAMZA BELOW ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * FE88 ARABIC LETTER ALEF WITH HAMZA BELOW FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * FE89 ARABIC LETTER YEH WITH HAMZA ABOVE ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * FE8A ARABIC LETTER YEH WITH HAMZA ABOVE FINAL FORM
 * 	064A ARABIC LETTER YEH
 * FE8B ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * FE8C ARABIC LETTER YEH WITH HAMZA ABOVE MEDIAL FORM
 * 	064A ARABIC LETTER YEH
 * FE8D ARABIC LETTER ALEF ISOLATED FORM
 * 	0627 ARABIC LETTER ALEF
 * FE8E ARABIC LETTER ALEF FINAL FORM
 * 	0627 ARABIC LETTER ALEF
 * FE8F ARABIC LETTER BEH ISOLATED FORM
 * 	0628 ARABIC LETTER BEH
 * FE90 ARABIC LETTER BEH FINAL FORM
 * 	0628 ARABIC LETTER BEH
 * FE91 ARABIC LETTER BEH INITIAL FORM
 * 	0628 ARABIC LETTER BEH
 * FE92 ARABIC LETTER BEH MEDIAL FORM
 * 	0628 ARABIC LETTER BEH
 * FE93 ARABIC LETTER TEH MARBUTA ISOLATED FORM
 * 	0629 ARABIC LETTER TEH MARBUTA
 * FE94 ARABIC LETTER TEH MARBUTA FINAL FORM
 * 	0629 ARABIC LETTER TEH MARBUTA
 * FE95 ARABIC LETTER TEH ISOLATED FORM
 * 	062A ARABIC LETTER TEH
 * FE96 ARABIC LETTER TEH FINAL FORM
 * 	062A ARABIC LETTER TEH
 * FE97 ARABIC LETTER TEH INITIAL FORM
 * 	062A ARABIC LETTER TEH
 * FE98 ARABIC LETTER TEH MEDIAL FORM
 * 	062A ARABIC LETTER TEH
 * FE99 ARABIC LETTER THEH ISOLATED FORM
 * 	062B ARABIC LETTER THEH
 * FE9A ARABIC LETTER THEH FINAL FORM
 * 	062B ARABIC LETTER THEH
 * FE9B ARABIC LETTER THEH INITIAL FORM
 * 	062B ARABIC LETTER THEH
 * FE9C ARABIC LETTER THEH MEDIAL FORM
 * 	062B ARABIC LETTER THEH
 * FE9D ARABIC LETTER JEEM ISOLATED FORM
 * 	062C ARABIC LETTER JEEM
 * FE9E ARABIC LETTER JEEM FINAL FORM
 * 	062C ARABIC LETTER JEEM
 * FE9F ARABIC LETTER JEEM INITIAL FORM
 * 	062C ARABIC LETTER JEEM
 * FEA0 ARABIC LETTER JEEM MEDIAL FORM
 * 	062C ARABIC LETTER JEEM
 * FEA1 ARABIC LETTER HAH ISOLATED FORM
 * 	062D ARABIC LETTER HAH
 * FEA2 ARABIC LETTER HAH FINAL FORM
 * 	062D ARABIC LETTER HAH
 * FEA3 ARABIC LETTER HAH INITIAL FORM
 * 	062D ARABIC LETTER HAH
 * FEA4 ARABIC LETTER HAH MEDIAL FORM
 * 	062D ARABIC LETTER HAH
 * FEA5 ARABIC LETTER KHAH ISOLATED FORM
 * 	062E ARABIC LETTER KHAH
 * FEA6 ARABIC LETTER KHAH FINAL FORM
 * 	062E ARABIC LETTER KHAH
 * FEA7 ARABIC LETTER KHAH INITIAL FORM
 * 	062E ARABIC LETTER KHAH
 * FEA8 ARABIC LETTER KHAH MEDIAL FORM
 * 	062E ARABIC LETTER KHAH
 * FEA9 ARABIC LETTER DAL ISOLATED FORM
 * 	062F ARABIC LETTER DAL
 * FEAA ARABIC LETTER DAL FINAL FORM
 * 	062F ARABIC LETTER DAL
 * FEAB ARABIC LETTER THAL ISOLATED FORM
 * 	0630 ARABIC LETTER THAL
 * FEAC ARABIC LETTER THAL FINAL FORM
 * 	0630 ARABIC LETTER THAL
 * FEAD ARABIC LETTER REH ISOLATED FORM
 * 	0631 ARABIC LETTER REH
 * FEAE ARABIC LETTER REH FINAL FORM
 * 	0631 ARABIC LETTER REH
 * FEAF ARABIC LETTER ZAIN ISOLATED FORM
 * 	0632 ARABIC LETTER ZAIN
 * FEB0 ARABIC LETTER ZAIN FINAL FORM
 * 	0632 ARABIC LETTER ZAIN
 * FEB1 ARABIC LETTER SEEN ISOLATED FORM
 * 	0633 ARABIC LETTER SEEN
 * FEB2 ARABIC LETTER SEEN FINAL FORM
 * 	0633 ARABIC LETTER SEEN
 * FEB3 ARABIC LETTER SEEN INITIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * FEB4 ARABIC LETTER SEEN MEDIAL FORM
 * 	0633 ARABIC LETTER SEEN
 * FEB5 ARABIC LETTER SHEEN ISOLATED FORM
 * 	0634 ARABIC LETTER SHEEN
 * FEB6 ARABIC LETTER SHEEN FINAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * FEB7 ARABIC LETTER SHEEN INITIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * FEB8 ARABIC LETTER SHEEN MEDIAL FORM
 * 	0634 ARABIC LETTER SHEEN
 * FEB9 ARABIC LETTER SAD ISOLATED FORM
 * 	0635 ARABIC LETTER SAD
 * FEBA ARABIC LETTER SAD FINAL FORM
 * 	0635 ARABIC LETTER SAD
 * FEBB ARABIC LETTER SAD INITIAL FORM
 * 	0635 ARABIC LETTER SAD
 * FEBC ARABIC LETTER SAD MEDIAL FORM
 * 	0635 ARABIC LETTER SAD
 * FEBD ARABIC LETTER DAD ISOLATED FORM
 * 	0636 ARABIC LETTER DAD
 * FEBE ARABIC LETTER DAD FINAL FORM
 * 	0636 ARABIC LETTER DAD
 * FEBF ARABIC LETTER DAD INITIAL FORM
 * 	0636 ARABIC LETTER DAD
 * FEC0 ARABIC LETTER DAD MEDIAL FORM
 * 	0636 ARABIC LETTER DAD
 * FEC1 ARABIC LETTER TAH ISOLATED FORM
 * 	0637 ARABIC LETTER TAH
 * FEC2 ARABIC LETTER TAH FINAL FORM
 * 	0637 ARABIC LETTER TAH
 * FEC3 ARABIC LETTER TAH INITIAL FORM
 * 	0637 ARABIC LETTER TAH
 * FEC4 ARABIC LETTER TAH MEDIAL FORM
 * 	0637 ARABIC LETTER TAH
 * FEC5 ARABIC LETTER ZAH ISOLATED FORM
 * 	0638 ARABIC LETTER ZAH
 * FEC6 ARABIC LETTER ZAH FINAL FORM
 * 	0638 ARABIC LETTER ZAH
 * FEC7 ARABIC LETTER ZAH INITIAL FORM
 * 	0638 ARABIC LETTER ZAH
 * FEC8 ARABIC LETTER ZAH MEDIAL FORM
 * 	0638 ARABIC LETTER ZAH
 * FEC9 ARABIC LETTER AIN ISOLATED FORM
 * 	0639 ARABIC LETTER AIN
 * FECA ARABIC LETTER AIN FINAL FORM
 * 	0639 ARABIC LETTER AIN
 * FECB ARABIC LETTER AIN INITIAL FORM
 * 	0639 ARABIC LETTER AIN
 * FECC ARABIC LETTER AIN MEDIAL FORM
 * 	0639 ARABIC LETTER AIN
 * FECD ARABIC LETTER GHAIN ISOLATED FORM
 * 	063A ARABIC LETTER GHAIN
 * FECE ARABIC LETTER GHAIN FINAL FORM
 * 	063A ARABIC LETTER GHAIN
 * FECF ARABIC LETTER GHAIN INITIAL FORM
 * 	063A ARABIC LETTER GHAIN
 * FED0 ARABIC LETTER GHAIN MEDIAL FORM
 * 	063A ARABIC LETTER GHAIN
 * FED1 ARABIC LETTER FEH ISOLATED FORM
 * 	0641 ARABIC LETTER FEH
 * FED2 ARABIC LETTER FEH FINAL FORM
 * 	0641 ARABIC LETTER FEH
 * FED3 ARABIC LETTER FEH INITIAL FORM
 * 	0641 ARABIC LETTER FEH
 * FED4 ARABIC LETTER FEH MEDIAL FORM
 * 	0641 ARABIC LETTER FEH
 * FED5 ARABIC LETTER QAF ISOLATED FORM
 * 	0642 ARABIC LETTER QAF
 * FED6 ARABIC LETTER QAF FINAL FORM
 * 	0642 ARABIC LETTER QAF
 * FED7 ARABIC LETTER QAF INITIAL FORM
 * 	0642 ARABIC LETTER QAF
 * FED8 ARABIC LETTER QAF MEDIAL FORM
 * 	0642 ARABIC LETTER QAF
 * FED9 ARABIC LETTER KAF ISOLATED FORM
 * 	0643 ARABIC LETTER KAF
 * FEDA ARABIC LETTER KAF FINAL FORM
 * 	0643 ARABIC LETTER KAF
 * FEDB ARABIC LETTER KAF INITIAL FORM
 * 	0643 ARABIC LETTER KAF
 * FEDC ARABIC LETTER KAF MEDIAL FORM
 * 	0643 ARABIC LETTER KAF
 * FEDD ARABIC LETTER LAM ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * FEDE ARABIC LETTER LAM FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * FEDF ARABIC LETTER LAM INITIAL FORM
 * 	0644 ARABIC LETTER LAM
 * FEE0 ARABIC LETTER LAM MEDIAL FORM
 * 	0644 ARABIC LETTER LAM
 * FEE1 ARABIC LETTER MEEM ISOLATED FORM
 * 	0645 ARABIC LETTER MEEM
 * FEE2 ARABIC LETTER MEEM FINAL FORM
 * 	0645 ARABIC LETTER MEEM
 * FEE3 ARABIC LETTER MEEM INITIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * FEE4 ARABIC LETTER MEEM MEDIAL FORM
 * 	0645 ARABIC LETTER MEEM
 * FEE5 ARABIC LETTER NOON ISOLATED FORM
 * 	0646 ARABIC LETTER NOON
 * FEE6 ARABIC LETTER NOON FINAL FORM
 * 	0646 ARABIC LETTER NOON
 * FEE7 ARABIC LETTER NOON INITIAL FORM
 * 	0646 ARABIC LETTER NOON
 * FEE8 ARABIC LETTER NOON MEDIAL FORM
 * 	0646 ARABIC LETTER NOON
 * FEE9 ARABIC LETTER HEH ISOLATED FORM
 * 	0647 ARABIC LETTER HEH
 * FEEA ARABIC LETTER HEH FINAL FORM
 * 	0647 ARABIC LETTER HEH
 * FEEB ARABIC LETTER HEH INITIAL FORM
 * 	0647 ARABIC LETTER HEH
 * FEEC ARABIC LETTER HEH MEDIAL FORM
 * 	0647 ARABIC LETTER HEH
 * FEED ARABIC LETTER WAW ISOLATED FORM
 * 	0648 ARABIC LETTER WAW
 * FEEE ARABIC LETTER WAW FINAL FORM
 * 	0648 ARABIC LETTER WAW
 * FEEF ARABIC LETTER ALEF MAKSURA ISOLATED FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FEF0 ARABIC LETTER ALEF MAKSURA FINAL FORM
 * 	0649 ARABIC LETTER ALEF MAKSURA
 * FEF1 ARABIC LETTER YEH ISOLATED FORM
 * 	064A ARABIC LETTER YEH
 * FEF2 ARABIC LETTER YEH FINAL FORM
 * 	064A ARABIC LETTER YEH
 * FEF3 ARABIC LETTER YEH INITIAL FORM
 * 	064A ARABIC LETTER YEH
 * FEF4 ARABIC LETTER YEH MEDIAL FORM
 * 	064A ARABIC LETTER YEH
 * FEF5 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEF6 ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEF7 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEF8 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEF9 ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEFA ARABIC LIGATURE LAM WITH ALEF WITH HAMZA BELOW FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEFB ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FEFC ARABIC LIGATURE LAM WITH ALEF FINAL FORM
 * 	0644 ARABIC LETTER LAM
 * 	0627 ARABIC LETTER ALEF
 * FF01 FULLWIDTH EXCLAMATION MARK
 * 	0021 EXCLAMATION MARK
 * FF02 FULLWIDTH QUOTATION MARK
 * 	0022 QUOTATION MARK
 * FF03 FULLWIDTH NUMBER SIGN
 * 	0023 NUMBER SIGN
 * FF04 FULLWIDTH DOLLAR SIGN
 * 	0024 DOLLAR SIGN
 * FF05 FULLWIDTH PERCENT SIGN
 * 	0025 PERCENT SIGN
 * FF06 FULLWIDTH AMPERSAND
 * 	0026 AMPERSAND
 * FF07 FULLWIDTH APOSTROPHE
 * 	0027 APOSTROPHE
 * FF08 FULLWIDTH LEFT PARENTHESIS
 * 	0028 LEFT PARENTHESIS
 * FF09 FULLWIDTH RIGHT PARENTHESIS
 * 	0029 RIGHT PARENTHESIS
 * FF0A FULLWIDTH ASTERISK
 * 	002A ASTERISK
 * FF0B FULLWIDTH PLUS SIGN
 * 	002B PLUS SIGN
 * FF0C FULLWIDTH COMMA
 * 	002C COMMA
 * FF0D FULLWIDTH HYPHEN-MINUS
 * 	002D HYPHEN-MINUS
 * FF0E FULLWIDTH FULL STOP
 * 	002E FULL STOP
 * FF0F FULLWIDTH SOLIDUS
 * 	002F SOLIDUS
 * FF10 FULLWIDTH DIGIT ZERO
 * 	0030 DIGIT ZERO
 * FF11 FULLWIDTH DIGIT ONE
 * 	0031 DIGIT ONE
 * FF12 FULLWIDTH DIGIT TWO
 * 	0032 DIGIT TWO
 * FF13 FULLWIDTH DIGIT THREE
 * 	0033 DIGIT THREE
 * FF14 FULLWIDTH DIGIT FOUR
 * 	0034 DIGIT FOUR
 * FF15 FULLWIDTH DIGIT FIVE
 * 	0035 DIGIT FIVE
 * FF16 FULLWIDTH DIGIT SIX
 * 	0036 DIGIT SIX
 * FF17 FULLWIDTH DIGIT SEVEN
 * 	0037 DIGIT SEVEN
 * FF18 FULLWIDTH DIGIT EIGHT
 * 	0038 DIGIT EIGHT
 * FF19 FULLWIDTH DIGIT NINE
 * 	0039 DIGIT NINE
 * FF1A FULLWIDTH COLON
 * 	003A COLON
 * FF1B FULLWIDTH SEMICOLON
 * 	003B SEMICOLON
 * FF1C FULLWIDTH LESS-THAN SIGN
 * 	003C LESS-THAN SIGN
 * FF1D FULLWIDTH EQUALS SIGN
 * 	003D EQUALS SIGN
 * FF1E FULLWIDTH GREATER-THAN SIGN
 * 	003E GREATER-THAN SIGN
 * FF1F FULLWIDTH QUESTION MARK
 * 	003F QUESTION MARK
 * FF20 FULLWIDTH COMMERCIAL AT
 * 	0040 COMMERCIAL AT
 * FF21 FULLWIDTH LATIN CAPITAL LETTER A
 * 	0041 LATIN CAPITAL LETTER A
 * FF22 FULLWIDTH LATIN CAPITAL LETTER B
 * 	0042 LATIN CAPITAL LETTER B
 * FF23 FULLWIDTH LATIN CAPITAL LETTER C
 * 	0043 LATIN CAPITAL LETTER C
 * FF24 FULLWIDTH LATIN CAPITAL LETTER D
 * 	0044 LATIN CAPITAL LETTER D
 * FF25 FULLWIDTH LATIN CAPITAL LETTER E
 * 	0045 LATIN CAPITAL LETTER E
 * FF26 FULLWIDTH LATIN CAPITAL LETTER F
 * 	0046 LATIN CAPITAL LETTER F
 * FF27 FULLWIDTH LATIN CAPITAL LETTER G
 * 	0047 LATIN CAPITAL LETTER G
 * FF28 FULLWIDTH LATIN CAPITAL LETTER H
 * 	0048 LATIN CAPITAL LETTER H
 * FF29 FULLWIDTH LATIN CAPITAL LETTER I
 * 	0049 LATIN CAPITAL LETTER I
 * FF2A FULLWIDTH LATIN CAPITAL LETTER J
 * 	004A LATIN CAPITAL LETTER J
 * FF2B FULLWIDTH LATIN CAPITAL LETTER K
 * 	004B LATIN CAPITAL LETTER K
 * FF2C FULLWIDTH LATIN CAPITAL LETTER L
 * 	004C LATIN CAPITAL LETTER L
 * FF2D FULLWIDTH LATIN CAPITAL LETTER M
 * 	004D LATIN CAPITAL LETTER M
 * FF2E FULLWIDTH LATIN CAPITAL LETTER N
 * 	004E LATIN CAPITAL LETTER N
 * FF2F FULLWIDTH LATIN CAPITAL LETTER O
 * 	004F LATIN CAPITAL LETTER O
 * FF30 FULLWIDTH LATIN CAPITAL LETTER P
 * 	0050 LATIN CAPITAL LETTER P
 * FF31 FULLWIDTH LATIN CAPITAL LETTER Q
 * 	0051 LATIN CAPITAL LETTER Q
 * FF32 FULLWIDTH LATIN CAPITAL LETTER R
 * 	0052 LATIN CAPITAL LETTER R
 * FF33 FULLWIDTH LATIN CAPITAL LETTER S
 * 	0053 LATIN CAPITAL LETTER S
 * FF34 FULLWIDTH LATIN CAPITAL LETTER T
 * 	0054 LATIN CAPITAL LETTER T
 * FF35 FULLWIDTH LATIN CAPITAL LETTER U
 * 	0055 LATIN CAPITAL LETTER U
 * FF36 FULLWIDTH LATIN CAPITAL LETTER V
 * 	0056 LATIN CAPITAL LETTER V
 * FF37 FULLWIDTH LATIN CAPITAL LETTER W
 * 	0057 LATIN CAPITAL LETTER W
 * FF38 FULLWIDTH LATIN CAPITAL LETTER X
 * 	0058 LATIN CAPITAL LETTER X
 * FF39 FULLWIDTH LATIN CAPITAL LETTER Y
 * 	0059 LATIN CAPITAL LETTER Y
 * FF3A FULLWIDTH LATIN CAPITAL LETTER Z
 * 	005A LATIN CAPITAL LETTER Z
 * FF3B FULLWIDTH LEFT SQUARE BRACKET
 * 	005B LEFT SQUARE BRACKET
 * FF3C FULLWIDTH REVERSE SOLIDUS
 * 	005C REVERSE SOLIDUS
 * FF3D FULLWIDTH RIGHT SQUARE BRACKET
 * 	005D RIGHT SQUARE BRACKET
 * FF3E FULLWIDTH CIRCUMFLEX ACCENT
 * 	005E CIRCUMFLEX ACCENT
 * FF3F FULLWIDTH LOW LINE
 * 	005F LOW LINE
 * FF40 FULLWIDTH GRAVE ACCENT
 * 	0060 GRAVE ACCENT
 * FF41 FULLWIDTH LATIN SMALL LETTER A
 * 	0061 LATIN SMALL LETTER A
 * FF42 FULLWIDTH LATIN SMALL LETTER B
 * 	0062 LATIN SMALL LETTER B
 * FF43 FULLWIDTH LATIN SMALL LETTER C
 * 	0063 LATIN SMALL LETTER C
 * FF44 FULLWIDTH LATIN SMALL LETTER D
 * 	0064 LATIN SMALL LETTER D
 * FF45 FULLWIDTH LATIN SMALL LETTER E
 * 	0065 LATIN SMALL LETTER E
 * FF46 FULLWIDTH LATIN SMALL LETTER F
 * 	0066 LATIN SMALL LETTER F
 * FF47 FULLWIDTH LATIN SMALL LETTER G
 * 	0067 LATIN SMALL LETTER G
 * FF48 FULLWIDTH LATIN SMALL LETTER H
 * 	0068 LATIN SMALL LETTER H
 * FF49 FULLWIDTH LATIN SMALL LETTER I
 * 	0069 LATIN SMALL LETTER I
 * FF4A FULLWIDTH LATIN SMALL LETTER J
 * 	006A LATIN SMALL LETTER J
 * FF4B FULLWIDTH LATIN SMALL LETTER K
 * 	006B LATIN SMALL LETTER K
 * FF4C FULLWIDTH LATIN SMALL LETTER L
 * 	006C LATIN SMALL LETTER L
 * FF4D FULLWIDTH LATIN SMALL LETTER M
 * 	006D LATIN SMALL LETTER M
 * FF4E FULLWIDTH LATIN SMALL LETTER N
 * 	006E LATIN SMALL LETTER N
 * FF4F FULLWIDTH LATIN SMALL LETTER O
 * 	006F LATIN SMALL LETTER O
 * FF50 FULLWIDTH LATIN SMALL LETTER P
 * 	0070 LATIN SMALL LETTER P
 * FF51 FULLWIDTH LATIN SMALL LETTER Q
 * 	0071 LATIN SMALL LETTER Q
 * FF52 FULLWIDTH LATIN SMALL LETTER R
 * 	0072 LATIN SMALL LETTER R
 * FF53 FULLWIDTH LATIN SMALL LETTER S
 * 	0073 LATIN SMALL LETTER S
 * FF54 FULLWIDTH LATIN SMALL LETTER T
 * 	0074 LATIN SMALL LETTER T
 * FF55 FULLWIDTH LATIN SMALL LETTER U
 * 	0075 LATIN SMALL LETTER U
 * FF56 FULLWIDTH LATIN SMALL LETTER V
 * 	0076 LATIN SMALL LETTER V
 * FF57 FULLWIDTH LATIN SMALL LETTER W
 * 	0077 LATIN SMALL LETTER W
 * FF58 FULLWIDTH LATIN SMALL LETTER X
 * 	0078 LATIN SMALL LETTER X
 * FF59 FULLWIDTH LATIN SMALL LETTER Y
 * 	0079 LATIN SMALL LETTER Y
 * FF5A FULLWIDTH LATIN SMALL LETTER Z
 * 	007A LATIN SMALL LETTER Z
 * FF5B FULLWIDTH LEFT CURLY BRACKET
 * 	007B LEFT CURLY BRACKET
 * FF5C FULLWIDTH VERTICAL LINE
 * 	007C VERTICAL LINE
 * FF5D FULLWIDTH RIGHT CURLY BRACKET
 * 	007D RIGHT CURLY BRACKET
 * FF5E FULLWIDTH TILDE
 * 	007E TILDE
 * FF5F FULLWIDTH LEFT WHITE PARENTHESIS
 * 	2985 LEFT WHITE PARENTHESIS
 * FF60 FULLWIDTH RIGHT WHITE PARENTHESIS
 * 	2986 RIGHT WHITE PARENTHESIS
 * FF61 HALFWIDTH IDEOGRAPHIC FULL STOP
 * 	3002 IDEOGRAPHIC FULL STOP
 * FF62 HALFWIDTH LEFT CORNER BRACKET
 * 	300C LEFT CORNER BRACKET
 * FF63 HALFWIDTH RIGHT CORNER BRACKET
 * 	300D RIGHT CORNER BRACKET
 * FF64 HALFWIDTH IDEOGRAPHIC COMMA
 * 	3001 IDEOGRAPHIC COMMA
 * FFA0 HALFWIDTH HANGUL FILLER
 * 	1160 HANGUL JUNGSEONG FILLER
 * FFA1 HALFWIDTH HANGUL LETTER KIYEOK
 * 	1100 HANGUL CHOSEONG KIYEOK
 * FFA2 HALFWIDTH HANGUL LETTER SSANGKIYEOK
 * 	1101 HANGUL CHOSEONG SSANGKIYEOK
 * FFA3 HALFWIDTH HANGUL LETTER KIYEOK-SIOS
 * 	11AA HANGUL JONGSEONG KIYEOK-SIOS
 * FFA4 HALFWIDTH HANGUL LETTER NIEUN
 * 	1102 HANGUL CHOSEONG NIEUN
 * FFA5 HALFWIDTH HANGUL LETTER NIEUN-CIEUC
 * 	11AC HANGUL JONGSEONG NIEUN-CIEUC
 * FFA6 HALFWIDTH HANGUL LETTER NIEUN-HIEUH
 * 	11AD HANGUL JONGSEONG NIEUN-HIEUH
 * FFA7 HALFWIDTH HANGUL LETTER TIKEUT
 * 	1103 HANGUL CHOSEONG TIKEUT
 * FFA8 HALFWIDTH HANGUL LETTER SSANGTIKEUT
 * 	1104 HANGUL CHOSEONG SSANGTIKEUT
 * FFA9 HALFWIDTH HANGUL LETTER RIEUL
 * 	1105 HANGUL CHOSEONG RIEUL
 * FFAA HALFWIDTH HANGUL LETTER RIEUL-KIYEOK
 * 	11B0 HANGUL JONGSEONG RIEUL-KIYEOK
 * FFAB HALFWIDTH HANGUL LETTER RIEUL-MIEUM
 * 	11B1 HANGUL JONGSEONG RIEUL-MIEUM
 * FFAC HALFWIDTH HANGUL LETTER RIEUL-PIEUP
 * 	11B2 HANGUL JONGSEONG RIEUL-PIEUP
 * FFAD HALFWIDTH HANGUL LETTER RIEUL-SIOS
 * 	11B3 HANGUL JONGSEONG RIEUL-SIOS
 * FFAE HALFWIDTH HANGUL LETTER RIEUL-THIEUTH
 * 	11B4 HANGUL JONGSEONG RIEUL-THIEUTH
 * FFAF HALFWIDTH HANGUL LETTER RIEUL-PHIEUPH
 * 	11B5 HANGUL JONGSEONG RIEUL-PHIEUPH
 * FFB0 HALFWIDTH HANGUL LETTER RIEUL-HIEUH
 * 	111A HANGUL CHOSEONG RIEUL-HIEUH
 * FFB1 HALFWIDTH HANGUL LETTER MIEUM
 * 	1106 HANGUL CHOSEONG MIEUM
 * FFB2 HALFWIDTH HANGUL LETTER PIEUP
 * 	1107 HANGUL CHOSEONG PIEUP
 * FFB3 HALFWIDTH HANGUL LETTER SSANGPIEUP
 * 	1108 HANGUL CHOSEONG SSANGPIEUP
 * FFB4 HALFWIDTH HANGUL LETTER PIEUP-SIOS
 * 	1121 HANGUL CHOSEONG PIEUP-SIOS
 * FFB5 HALFWIDTH HANGUL LETTER SIOS
 * 	1109 HANGUL CHOSEONG SIOS
 * FFB6 HALFWIDTH HANGUL LETTER SSANGSIOS
 * 	110A HANGUL CHOSEONG SSANGSIOS
 * FFB7 HALFWIDTH HANGUL LETTER IEUNG
 * 	110B HANGUL CHOSEONG IEUNG
 * FFB8 HALFWIDTH HANGUL LETTER CIEUC
 * 	110C HANGUL CHOSEONG CIEUC
 * FFB9 HALFWIDTH HANGUL LETTER SSANGCIEUC
 * 	110D HANGUL CHOSEONG SSANGCIEUC
 * FFBA HALFWIDTH HANGUL LETTER CHIEUCH
 * 	110E HANGUL CHOSEONG CHIEUCH
 * FFBB HALFWIDTH HANGUL LETTER KHIEUKH
 * 	110F HANGUL CHOSEONG KHIEUKH
 * FFBC HALFWIDTH HANGUL LETTER THIEUTH
 * 	1110 HANGUL CHOSEONG THIEUTH
 * FFBD HALFWIDTH HANGUL LETTER PHIEUPH
 * 	1111 HANGUL CHOSEONG PHIEUPH
 * FFBE HALFWIDTH HANGUL LETTER HIEUH
 * 	1112 HANGUL CHOSEONG HIEUH
 * FFC2 HALFWIDTH HANGUL LETTER A
 * 	1161 HANGUL JUNGSEONG A
 * FFC3 HALFWIDTH HANGUL LETTER AE
 * 	1162 HANGUL JUNGSEONG AE
 * FFC4 HALFWIDTH HANGUL LETTER YA
 * 	1163 HANGUL JUNGSEONG YA
 * FFC5 HALFWIDTH HANGUL LETTER YAE
 * 	1164 HANGUL JUNGSEONG YAE
 * FFC6 HALFWIDTH HANGUL LETTER EO
 * 	1165 HANGUL JUNGSEONG EO
 * FFC7 HALFWIDTH HANGUL LETTER E
 * 	1166 HANGUL JUNGSEONG E
 * FFCA HALFWIDTH HANGUL LETTER YEO
 * 	1167 HANGUL JUNGSEONG YEO
 * FFCB HALFWIDTH HANGUL LETTER YE
 * 	1168 HANGUL JUNGSEONG YE
 * FFCC HALFWIDTH HANGUL LETTER O
 * 	1169 HANGUL JUNGSEONG O
 * FFCD HALFWIDTH HANGUL LETTER WA
 * 	116A HANGUL JUNGSEONG WA
 * FFCE HALFWIDTH HANGUL LETTER WAE
 * 	116B HANGUL JUNGSEONG WAE
 * FFCF HALFWIDTH HANGUL LETTER OE
 * 	116C HANGUL JUNGSEONG OE
 * FFD2 HALFWIDTH HANGUL LETTER YO
 * 	116D HANGUL JUNGSEONG YO
 * FFD3 HALFWIDTH HANGUL LETTER U
 * 	116E HANGUL JUNGSEONG U
 * FFD4 HALFWIDTH HANGUL LETTER WEO
 * 	116F HANGUL JUNGSEONG WEO
 * FFD5 HALFWIDTH HANGUL LETTER WE
 * 	1170 HANGUL JUNGSEONG WE
 * FFD6 HALFWIDTH HANGUL LETTER WI
 * 	1171 HANGUL JUNGSEONG WI
 * FFD7 HALFWIDTH HANGUL LETTER YU
 * 	1172 HANGUL JUNGSEONG YU
 * FFDA HALFWIDTH HANGUL LETTER EU
 * 	1173 HANGUL JUNGSEONG EU
 * FFDB HALFWIDTH HANGUL LETTER YI
 * 	1174 HANGUL JUNGSEONG YI
 * FFDC HALFWIDTH HANGUL LETTER I
 * 	1175 HANGUL JUNGSEONG I
 * FFE0 FULLWIDTH CENT SIGN
 * 	00A2 CENT SIGN
 * FFE1 FULLWIDTH POUND SIGN
 * 	00A3 POUND SIGN
 * FFE2 FULLWIDTH NOT SIGN
 * 	00AC NOT SIGN
 * FFE3 FULLWIDTH MACRON
 * 	0020 SPACE
 * FFE4 FULLWIDTH BROKEN BAR
 * 	00A6 BROKEN BAR
 * FFE5 FULLWIDTH YEN SIGN
 * 	00A5 YEN SIGN
 * FFE6 FULLWIDTH WON SIGN
 * 	20A9 WON SIGN
 * FFE8 HALFWIDTH FORMS LIGHT VERTICAL
 * 	2502 BOX DRAWINGS LIGHT VERTICAL
 * FFE9 HALFWIDTH LEFTWARDS ARROW
 * 	2190 LEFTWARDS ARROW
 * FFEA HALFWIDTH UPWARDS ARROW
 * 	2191 UPWARDS ARROW
 * FFEB HALFWIDTH RIGHTWARDS ARROW
 * 	2192 RIGHTWARDS ARROW
 * FFEC HALFWIDTH DOWNWARDS ARROW
 * 	2193 DOWNWARDS ARROW
 * FFED HALFWIDTH BLACK SQUARE
 * 	25A0 BLACK SQUARE
 * FFEE HALFWIDTH WHITE CIRCLE
 * 	25CB WHITE CIRCLE
*/
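
/*
 * unac_indexes below is the first level of a two-level lookup: the high
 * bits of a 16-bit code unit select an entry here, and that entry names a
 * small block of per-character decomposition data (the mappings documented
 * in the generated comment above). A minimal sketch of how such a table is
 * typically consulted follows. It is illustrative only: the block shift of
 * 4 (16-character blocks), the companion table names unac_positions /
 * unac_data_table, and their types are assumptions made for the sketch,
 * not necessarily what the real unac accessor macros use -- see the unac
 * sources for the actual definitions.
 */
#if 0 /* Illustrative sketch only; never compiled. */

/* Assumed companion tables (names and types are illustrative). */
extern unsigned short unac_indexes[];
extern const unsigned short unac_positions[][17];
extern const unsigned short *const unac_data_table[];

/* Find the unaccented expansion of UTF-16 code unit c: a pointer to the
 * replacement code units and their count (an empty range would mean that
 * no replacement is recorded for this character). */
static void unac_lookup_sketch(unsigned short c,
                               const unsigned short **out, int *out_len)
{
    unsigned short block = unac_indexes[c >> 4];  /* assumed shift: 16-char blocks */
    unsigned short pos   = (unsigned short)(c & 0x0F);
    unsigned short start = unac_positions[block][pos];
    unsigned short end   = unac_positions[block][pos + 1];

    *out     = unac_data_table[block] + start;
    *out_len = (int)(end - start);
}
#endif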

unsigned short unac_indexes[UNAC_INDEXES_SIZE] = {
   0,   0,   0,   0,   0,   0,   0,   0,   1,   2,   3,   4,   0,   0,   0,
   0,   0,   0,   0,   0,   5,   6,   7,   8,   9,  10,  11,  12,  13,  14,
  15,  16,  17,  18,  19,  20,  21,  22,  23,  24,  25,  26,  27,  28,  29,
  30,  31,  32,  33,  34,  35,  36,  37,  38,  39,  40,  41,  42,  43,  44,
  45,  46,  47,  48,  49,  50,  51,  52,  53,  54,  55,  56,  57,  58,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  59,  60,   0,   0,
   0,  61,  62,   0,   0,   0,  63,  63,  63,  63,  63,  63,  63,  63,  64,
  63,  63,  63,  63,  63,  65,  66,  67,  68,  69,  70,  71,  72,  73,   0,
  74,  75,  76,  77,  78,  79,  80,  81,  82,  83,  84,  85,  86,  87,   0,
  88,   0,   0,  89,  90,  91,  92,  93,  94,  95,  96,  97,  98,  99, 100,
 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115,
   0, 116, 117, 118, 119, 120,   0,   0,   0,   0,   0, 121,   0, 122,  63,
  63,  63,  63, 123, 124,   0,   0,   0,   0,   0,   0,   0,   0,   0,  63,
 125, 126,   0,   0,   0,   0, 127,  63,  63,   0,   0, 128, 129,   0,   0,
   0,   0,   0,   0,   0,   0, 130,   0, 131, 132, 132, 133,   0,   0,   0,
   0, 134,   0,   0,   0,  63,  63,  63, 125,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0, 135,  63, 136,   0,   0,   0,   0,   0,   0, 127, 137,
   0,   0,   0, 135, 138, 139, 140,   0,   0,   0,   0,   0, 141,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 142,
  63,  63, 143,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0, 141,   0,   0,   0,   0,   0, 144, 145, 146, 147,
 134, 148,   0,   0, 149,   0, 141,   0,   0,   0,   0,   0,   0, 145, 123,
 150,   0,   0, 151,   0,   0,   0, 141,   0,   0,   0,   0,   0,   0, 145,
 132, 147, 135, 152, 151,   0,   0,   0, 153,   0, 154,   0,   0,   0,   0,
 135, 155, 133, 156,   0,   0,   0,   0,   0, 141,   0,   0,   0,   0,   0,
   0, 135, 157, 133, 158,   0, 151,   0,   0,   0, 151,   0,   0,   0,   0,
   0,   0, 145, 157, 133, 158,   0, 151,   0,   0,   0, 151,   0,   0,   0,
   0,   0,   0, 135, 157, 133, 156,   0, 151,   0,   0,   0, 151,   0,   0,
   0,   0,   0,   0,   0,   0, 159, 160,  63,   0,   0, 151,   0,   0,   0,
   0,   0,   0,   0, 161, 125, 156, 143,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0, 162, 163,   0, 164,   0, 165,   0,   0,   0,   0,
   0, 166,   0, 167,   0,   0, 168, 169, 170, 171, 172, 173,   0, 174, 122,
  63, 157, 175,  63, 122,  63,  63,  63, 176, 177,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0, 178, 127,  63, 143,   0,   0, 135, 179, 180,
 164, 181,   0, 182, 123,   0, 183, 184, 185, 186, 187, 188, 189,   0,   0,
   0,   0,   0, 190,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0, 175,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0, 191,   0,   0,   0, 191,   0,   0,   0, 151,   0,   0,   0,
 151,   0,   0,   0,   0,   0,   0,   0, 142,  63,  63,  63, 137, 192,   0,
   0,   0,   0,   0, 193,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0, 134,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,  63, 137,  63, 137,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  63,  63, 136,
 167,   0,   0,   0,   0,   0,   0,   0,   0, 156, 137,   0,   0,   0,   0,
   0,   0, 175, 143,  63,  63,  63, 132,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0, 194, 195, 196,   0,   0,   0,
 142,  63, 176,   0,   0,   0,   0, 127, 137,   0, 125,   0,   0,   0, 122,
 164,   0,   0,   0,   0,   0,   0, 135,  63, 137,   0,   0,   0,   0,   0,
 142,  63,  63,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0, 197,  63,  63, 198, 191,   0,   0,   0,
   0,   0,   0, 199, 200, 201, 202, 203, 204, 205, 206, 207,   0, 208,   0,
   0,   0, 209, 210, 211, 212, 213,  63,  63,  63,  63, 143,   0,   0, 142,
 214, 215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243,
 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258,
 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, 273,
 274, 275, 276, 277, 278, 279, 280,   0, 281, 282, 283, 284, 285, 286, 287,
 282,   0,   0, 288, 289, 290, 291, 292, 293,   0, 294,   0,   0,   0,   0,
  63,  63,  63,  63, 136,   0, 295, 296, 297, 298, 299, 300, 301, 302, 303,
 304, 305, 306, 307, 308, 309, 310, 311, 312,   0, 313,   0, 314,   0,   0,
   0, 315,   0,   0,   0,   0,   0,   0, 316, 317,   0,   0, 318, 319, 320,
   0, 321, 322,   0,   0, 323, 324, 325, 326, 327, 328,   0,   0,   0, 329,
   0,   0,   0,   0,   0,   0, 330, 331,   0,   0,   0,   0,   0,   0,   0,
 332,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0, 333, 334, 335, 336, 337, 338,
 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 351,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0, 352,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0, 353,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 354, 355,
 356, 357, 358, 359,   0,   0,   0,   0,   0,   0, 360, 361, 362, 363, 364,
 365, 366, 367, 368, 369, 370, 371, 372, 373, 374, 375, 376, 377, 378,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 379,   0,
 156,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,  63,  63,
  63,  63,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0, 380,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0, 381,   0, 382, 383, 384, 385, 386, 387, 388, 389, 390, 391, 392,
 393, 394, 395, 396, 397, 398, 399, 400, 401, 402, 403, 404, 405, 406, 407,
 408,   0,   0,   0,   0,   0,   5,   0,   0,   0,   0, 182, 409, 410,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 411,
 412, 413, 414, 415, 416, 417, 418, 419, 420, 421, 422, 423, 424,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 425, 426, 427, 428, 429,
 430, 431, 432, 433,   0, 434, 435, 436, 437, 438, 439, 440, 441, 442, 443,
 444, 445, 446, 447, 448, 449, 450, 451, 452, 453, 454, 455, 456, 457, 458,
 459, 460, 461, 462, 463, 464, 465, 466, 467, 468, 469, 470, 471, 472, 473,
 474, 475, 476, 477, 478, 479, 480, 481, 482, 483, 484, 485, 486, 487,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 488, 489, 490, 491, 492,
 493, 197, 164, 494, 495, 496, 156,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0, 167,   0,   0,   0,   0,   0, 497, 498, 499, 500, 501, 502, 503,
 504, 505, 506, 507, 508, 509, 510, 511,   0, 512, 513,   0,   0,   0,   0,
   0,   0,   0,   0,   0, 514, 515, 516,   0,   0, 127,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0, 167,   0,   0,   0,   0,   0, 142,  63,
 176,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 135, 164,   0,
   0, 156,  63, 137,   0,   0,   0,   0,   0, 137,   0,   0,   0,   0,   0,
 127,  63, 136,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
 122, 143,   0, 516, 517,   0,   0,   0,   0,   0, 516,   0,   0,   0,   0,
   0,   0, 180, 518, 134,   0,   0,   0,   0, 127, 158,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0, 127, 519,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,   0,
   0,   0,   0, 520, 521, 522, 523, 524, 525, 526, 527, 528, 529, 530, 531,
 532, 533, 534, 535, 536, 537, 538, 539, 540, 541, 542, 543, 544, 545, 546,
 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, 557, 558, 559, 560, 561,
 562, 563, 564, 565, 566, 567, 568, 569, 570, 571, 572, 573, 574, 575, 576,
 577, 578, 579,   0,   0,   0,   0, 580,   0, 581, 582, 583, 584, 585, 586,
 587, 588, 589, 590, 591, 592, 593, 594, 595, 596, 597, 598, 599, 600, 601,
   0,   0,   0, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613,
 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, 624, 625, 626, 627, 628,
 629, 630, 631, 632, 633, 634, 635, 636, 637, 638, 639, 640, 641, 642, 643,
 644, 645, 646, 647,   0,   0, 648, 649, 650, 651, 652, 653, 654, 655, 656,
 657, 658, 659, 660, 661, 662,   0,   0,   0,   0,   0, 663, 664,  63,  63,
 665, 666, 143,   0, 667, 668, 669, 670, 671, 672, 673, 674, 675, 676, 677,
 678, 679, 680, 681, 682, 683, 684, 685, 686, 687, 688, 689, 690, 691, 692,
 693, 694, 290, 695, 696, 697, 698, 699, 700, 701, 702, 703, 704,   0,   0,
   0,   0,   0,   0,   0, 705, 412, 413, 706, 707, 708, 709, 710, 711, 712,
   0,   0
};

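/* Hedged reading of the generated table below (an assumption inferred from its
   shape, not taken from the table generator): each row of unac_positions seems
   to hold 3*UNAC_BLOCK_SIZE + 1 cumulative offsets delimiting the replacement
   segments for the characters of one Unicode block, presumably one segment per
   character for each of three transform variants (accent removal, case
   folding, and their combination would be a plausible set). Under that
   reading, a row of 0, 1, 2, ... 24 means every segment is one unit long,
   i.e. the block's characters translate to a single output unit each. */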
unsigned char unac_positions[UNAC_BLOCK_COUNT][3*UNAC_BLOCK_SIZE + 1] = {
/* 0 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 1 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 2 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 3 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 4 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 5 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 6 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 7 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 8 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 18, 19, 22, 25, 26, 29, 32, 33, 34, 35, 36 },
/* 9 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 10 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 11 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 12 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 24, 26 },
/* 13 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 14 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 15 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 16 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 17 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 18 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 19 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 20 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 21 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 22 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 23 */ { 0, 1, 2, 4, 5, 6, 7, 9, 11, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29 },
/* 24 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26 },
/* 25 */ { 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 26 */ { 0, 1, 2, 3, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27 },
/* 27 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 28 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 29 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 30 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 31 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 32 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 33 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 34 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 35 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 36 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 37 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 38 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 39 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 40 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 41 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 19, 21, 22, 24, 26, 27, 29, 31, 32 },
/* 42 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34 },
/* 43 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 44 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 45 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 46 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 47 */ { 0, 1, 2, 4, 6, 8, 9, 11, 13, 14, 16, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31 },
/* 48 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 49 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 50 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 51 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 52 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 53 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 54 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 55 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 56 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 57 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 58 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 59 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 60 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 61 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 62 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 63 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 64 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 65 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 66 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 67 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 68 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 69 */ { 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 70 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 71 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 72 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 73 */ { 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 74 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 75 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 76 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 77 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 78 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 79 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 80 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 81 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 82 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 83 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 84 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 85 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 86 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 87 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 88 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 89 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 90 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 91 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 92 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 93 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 94 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 95 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 96 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 97 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 98 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 99 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 100 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 101 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 102 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 103 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 104 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 105 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 106 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 107 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 108 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 109 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 110 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 111 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 112 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 113 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 114 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 115 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 116 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 117 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 118 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 119 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 120 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 121 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 27 },
/* 122 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 123 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 124 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 125 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 126 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 127 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 128 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30 },
/* 129 */ { 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 130 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 131 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 132 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 133 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 134 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 135 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 136 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 137 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 138 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 139 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 140 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 141 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 142 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 143 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 144 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 145 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 146 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 147 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 148 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 149 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 150 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 151 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 152 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 153 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 154 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 155 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 156 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 157 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 158 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 159 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 160 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 161 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 162 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 163 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 164 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 165 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 19, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 166 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 167 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 168 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 169 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 170 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 171 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 172 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 173 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 174 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 175 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 176 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 177 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 178 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 179 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 180 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 181 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 182 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 183 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 184 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 185 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 186 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 187 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 188 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 189 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 190 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 191 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 192 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 193 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 194 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 195 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 196 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 197 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 198 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 199 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 200 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 201 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 202 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 203 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 204 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 205 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 206 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 207 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 208 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 209 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 210 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 211 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 212 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 213 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 214 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 215 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 216 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 217 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 218 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 219 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 220 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 221 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 222 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 223 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 224 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 225 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 226 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 227 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 228 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 229 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 230 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 231 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 232 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 22, 23, 24, 26 },
/* 233 */ { 0, 1, 2, 4, 5, 6, 8, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 28, 29, 30, 31 },
/* 234 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 235 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 236 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 237 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 238 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 239 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 240 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 241 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 242 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 243 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 244 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 245 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 246 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 247 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 248 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 249 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 250 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 251 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 252 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 253 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 254 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 255 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 256 */ { 0, 1, 2, 4, 5, 6, 7, 8, 9, 12, 13, 14, 15, 16, 17, 20, 21, 22, 23, 24, 25, 28, 29, 30, 31 },
/* 257 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 258 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 259 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 260 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 261 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 262 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 263 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 264 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 265 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 266 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 267 */ { 0, 1, 2, 4, 5, 6, 8, 9, 10, 12, 13, 14, 16, 17, 18, 20, 21, 22, 24, 25, 26, 28, 29, 30, 32 },
/* 268 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23, 25, 26, 27, 30 },
/* 269 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
/* 270 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23, 25, 26, 27, 30 },
/* 271 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
/* 272 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 27, 28, 31 },
/* 273 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 274 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 11, 12, 13, 16, 17, 18, 20, 21, 22, 23, 24, 25, 27, 28, 29, 32 },
/* 275 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 276 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 14, 15, 16, 18, 19, 20, 21, 22, 23, 25, 26, 27, 30 },
/* 277 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
/* 278 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 279 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 280 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 281 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 20, 23, 26, 27, 28, 29, 30 },
/* 282 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 283 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 14, 17, 20, 21, 22, 23, 24, 26, 28, 29, 32, 35, 36 },
/* 284 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 285 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26 },
/* 286 */ { 0, 2, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 287 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 25, 29, 30 },
/* 288 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 289 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 290 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 291 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 292 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 293 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 294 */ { 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 295 */ { 0, 3, 6, 7, 10, 13, 14, 15, 16, 17, 19, 21, 22, 23, 24, 25, 28, 31, 32, 35, 38, 39, 40, 41, 42 },
/* 296 */ { 0, 1, 2, 3, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 297 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 23, 24, 25, 26 },
/* 298 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 299 */ { 0, 2, 4, 5, 8, 11, 12, 14, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 },
/* 300 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 301 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 302 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 12, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 303 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 304 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 305 */ { 0, 3, 6, 7, 10, 13, 14, 18, 22, 23, 26, 29, 30, 33, 36, 37, 40, 43, 44, 47, 50, 51, 54, 57, 58 },
/* 306 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 51, 53, 54 },
/* 307 */ { 0, 1, 2, 3, 5, 7, 8, 11, 14, 15, 17, 19, 20, 21, 22, 23, 25, 27, 28, 31, 34, 35, 39, 43, 44 },
/* 308 */ { 0, 2, 4, 5, 6, 7, 8, 10, 12, 13, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 },
/* 309 */ { 0, 1, 2, 3, 5, 7, 8, 11, 14, 15, 17, 19, 20, 21, 22, 23, 25, 27, 28, 31, 34, 35, 39, 43, 44 },
/* 310 */ { 0, 2, 4, 5, 6, 7, 8, 10, 12, 13, 16, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 },
/* 311 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 312 */ { 0, 1, 2, 3, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 313 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 314 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 315 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 316 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 317 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 318 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 319 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 20, 23, 24, 25, 26, 27, 29, 31, 32 },
/* 320 */ { 0, 3, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 321 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 322 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 323 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 324 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 325 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 326 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 327 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 328 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 329 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 330 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 331 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 332 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 333 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 334 */ { 0, 1, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 20, 22, 23, 25, 27, 28, 30, 32, 33, 35, 37, 38 },
/* 335 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 23, 26, 27, 30, 33, 34, 37, 40, 41, 44, 47, 48 },
/* 336 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 39, 43, 44, 48, 52, 53, 57, 61, 62 },
/* 337 */ { 0, 4, 8, 9, 13, 17, 18, 22, 26, 27, 31, 35, 36, 40, 44, 45, 49, 53, 54, 58, 62, 63, 67, 71, 72 },
/* 338 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 339 */ { 0, 2, 4, 5, 8, 11, 12, 15, 18, 19, 22, 25, 26, 29, 32, 33, 36, 39, 40, 43, 46, 47, 50, 53, 54 },
/* 340 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 341 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 342 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 343 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 43, 44, 45, 46, 47, 48 },
/* 344 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 345 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 346 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 347 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 348 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 349 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 350 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 351 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
/* 352 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 15, 18, 19, 21, 23, 24, 27, 30, 31, 32, 33, 34 },
/* 353 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 354 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 355 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 356 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 357 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 358 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 359 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 360 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 361 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 362 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 363 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 364 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 365 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 366 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 367 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 368 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 369 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 370 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 371 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 372 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 373 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 374 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 375 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 376 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 377 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 378 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 379 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 380 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 381 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 382 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 383 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 384 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 385 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 386 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 387 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 388 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 389 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 390 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 391 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 392 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 393 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 394 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 395 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 396 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 397 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 398 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 399 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 400 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 401 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 402 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 403 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 404 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 405 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 406 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 407 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 408 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 409 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 410 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 411 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 412 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 413 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 414 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 415 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 416 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 417 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 418 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 419 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 420 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 421 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 422 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 423 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 424 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 425 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 426 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 46, 50, 51, 55, 59, 60 },
/* 427 */ { 0, 4, 8, 9, 13, 17, 18, 22, 26, 27, 31, 35, 36, 40, 44, 45, 49, 53, 54, 58, 62, 63, 67, 71, 72 },
/* 428 */ { 0, 4, 8, 9, 13, 17, 18, 22, 26, 27, 31, 35, 36, 40, 44, 45, 52, 59, 60, 66, 72, 73, 74, 75, 76 },
/* 429 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 430 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 431 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 432 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 433 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 },
/* 434 */ { 0, 3, 6, 7, 9, 11, 12, 14, 16, 17, 19, 21, 22, 24, 26, 27, 29, 31, 32, 34, 36, 37, 39, 41, 42 },
/* 435 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 436 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 437 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 22, 23, 25, 27, 28 },
/* 438 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 439 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 25, 30, 31, 35, 39, 40, 42, 44, 45, 46, 47, 48 },
/* 440 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 441 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 442 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 443 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 444 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 445 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 446 */ { 0, 1, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 20, 22, 23, 25, 27, 28, 30, 32, 33, 35, 37, 38 },
/* 447 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 448 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 449 */ { 0, 2, 4, 5, 8, 11, 12, 15, 18, 19, 22, 25, 26, 28, 30, 31, 34, 37, 38, 40, 42, 43, 46, 49, 50 },
/* 450 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 451 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 452 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 453 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 454 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 455 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 456 */ { 0, 4, 8, 9, 13, 17, 18, 22, 26, 27, 30, 33, 34, 38, 42, 43, 46, 49, 50, 53, 56, 57, 62, 67, 68 },
/* 457 */ { 0, 4, 8, 9, 12, 15, 16, 19, 22, 23, 26, 29, 30, 34, 38, 39, 43, 47, 48, 51, 54, 55, 58, 61, 62 },
/* 458 */ { 0, 2, 4, 5, 8, 11, 12, 16, 20, 21, 25, 29, 30, 32, 34, 35, 40, 45, 46, 52, 58, 59, 64, 69, 70 },
/* 459 */ { 0, 3, 6, 7, 12, 17, 18, 23, 28, 29, 33, 37, 38, 41, 44, 45, 48, 51, 52, 55, 58, 59, 63, 67, 68 },
/* 460 */ { 0, 5, 10, 11, 15, 19, 20, 23, 26, 27, 30, 33, 34, 37, 40, 41, 43, 45, 46, 48, 50, 51, 53, 55, 56 },
/* 461 */ { 0, 2, 4, 5, 8, 11, 12, 15, 18, 19, 24, 29, 30, 33, 36, 37, 41, 45, 46, 51, 56, 57, 60, 63, 64 },
/* 462 */ { 0, 2, 4, 5, 7, 9, 10, 15, 20, 21, 25, 29, 30, 35, 40, 41, 44, 47, 48, 53, 58, 59, 61, 63, 64 },
/* 463 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 39, 43, 44, 47, 50, 51, 53, 55, 56 },
/* 464 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 25, 29, 30, 33, 36, 37, 40, 43, 44, 47, 50, 51, 56, 61, 62 },
/* 465 */ { 0, 4, 8, 9, 11, 13, 14, 19, 24, 25, 27, 29, 30, 34, 38, 39, 43, 47, 48, 51, 54, 55, 58, 61, 62 },
/* 466 */ { 0, 3, 6, 7, 11, 15, 16, 18, 20, 21, 24, 27, 28, 32, 36, 37, 39, 41, 42, 47, 52, 53, 56, 59, 60 },
/* 467 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 468 */ { 0, 2, 4, 5, 7, 9, 10, 13, 16, 17, 20, 23, 24, 27, 30, 31, 34, 37, 38, 41, 44, 45, 48, 51, 52 },
/* 469 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 470 */ { 0, 3, 6, 7, 10, 13, 14, 16, 18, 19, 21, 23, 24, 27, 30, 31, 33, 35, 36, 38, 40, 41, 43, 45, 46 },
/* 471 */ { 0, 3, 6, 7, 10, 13, 14, 16, 18, 19, 21, 23, 24, 26, 28, 29, 31, 33, 34, 36, 38, 39, 43, 47, 48 },
/* 472 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 473 */ { 0, 3, 6, 7, 11, 15, 16, 18, 20, 21, 23, 25, 26, 28, 30, 31, 33, 35, 36, 38, 40, 41, 43, 45, 46 },
/* 474 */ { 0, 2, 4, 5, 8, 11, 12, 15, 18, 19, 22, 25, 26, 29, 32, 33, 35, 37, 38, 40, 42, 43, 45, 47, 48 },
/* 475 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 38, 41, 42 },
/* 476 */ { 0, 3, 6, 7, 9, 11, 12, 15, 18, 19, 22, 25, 26, 29, 32, 33, 35, 37, 38, 41, 44, 45, 48, 51, 52 },
/* 477 */ { 0, 4, 8, 9, 11, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 47, 52, 53, 59, 65, 66 },
/* 478 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 479 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 480 */ { 0, 2, 4, 5, 7, 9, 10, 14, 18, 19, 21, 23, 24, 26, 28, 29, 31, 33, 34, 38, 42, 43, 46, 49, 50 },
/* 481 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 482 */ { 0, 2, 4, 5, 7, 9, 10, 13, 16, 17, 19, 21, 22, 24, 26, 27, 30, 33, 34, 37, 40, 41, 43, 45, 46 },
/* 483 */ { 0, 4, 8, 9, 12, 15, 16, 18, 20, 21, 23, 25, 26, 28, 30, 31, 33, 35, 36, 39, 42, 43, 46, 49, 50 },
/* 484 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 485 */ { 0, 2, 4, 5, 8, 11, 12, 15, 18, 19, 22, 25, 26, 29, 32, 33, 36, 39, 40, 43, 46, 47, 50, 53, 54 },
/* 486 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 487 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 488 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 489 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 490 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 491 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 492 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 493 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 494 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 495 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 496 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 497 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 498 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 499 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 500 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 501 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 502 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 503 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 504 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 505 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 506 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 507 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 508 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 509 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 510 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 511 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 512 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 513 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 514 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 515 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 516 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 517 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 518 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 519 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 520 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 521 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 522 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 523 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 524 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 525 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 526 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 527 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 528 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 529 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 530 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 531 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 532 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 533 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 534 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 535 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 536 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 537 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 538 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 539 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 540 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 541 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 542 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 543 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 544 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 545 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 546 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 547 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 548 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 549 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 550 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 551 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 552 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 553 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 554 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 555 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 556 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 557 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 558 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 559 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 560 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 561 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 562 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 563 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 564 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 565 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 566 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 567 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 568 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 569 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 570 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 571 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 572 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 573 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 574 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 575 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 576 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 577 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 578 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 579 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 580 */ { 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 21, 24, 27, 30, 33, 36, 38, 40, 42, 44, 46, 48, 49, 50, 51 },
/* 581 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39 },
/* 582 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 583 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 584 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 585 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 586 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 587 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 588 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 25, 26 },
/* 589 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 590 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 591 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 592 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 593 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 594 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 595 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 596 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 597 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 598 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 599 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 600 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 601 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 602 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 603 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 604 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 605 */ { 0, 1, 2, 3, 4, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, 26, 28, 30, 31, 33, 35, 36 },
/* 606 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 607 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 },
/* 608 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 609 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 610 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 611 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 612 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 613 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 614 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 615 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 616 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 617 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 618 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 619 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30 },
/* 620 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 16, 17, 19, 21, 22, 24, 26, 27, 29, 31, 32 },
/* 621 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 622 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 623 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 624 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 625 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 626 */ { 0, 1, 2, 3, 5, 7, 8, 10, 12, 13, 15, 17, 18, 20, 22, 23, 25, 27, 28, 30, 32, 33, 35, 37, 38 },
/* 627 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 628 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 629 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 630 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 631 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 632 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 633 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 634 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 635 */ { 0, 2, 4, 5, 6, 7, 8, 10, 12, 13, 15, 17, 18, 20, 22, 23, 25, 27, 28, 30, 32, 33, 35, 37, 38 },
/* 636 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 637 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 638 */ { 0, 2, 4, 5, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 23, 24, 26, 28, 29, 31, 33, 34 },
/* 639 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 640 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 641 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 642 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 643 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 644 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 645 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 646 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30, 32, 34, 35, 37, 39, 40 },
/* 647 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32 },
/* 648 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 649 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 650 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 651 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 652 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 653 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 654 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 655 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 656 */ { 0, 1, 2, 3, 4, 5, 6, 9, 12, 13, 16, 19, 20, 23, 26, 27, 30, 33, 34, 37, 40, 41, 44, 47, 48 },
/* 657 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 658 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 659 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 660 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 661 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 662 */ { 0, 3, 6, 7, 10, 13, 14, 17, 20, 21, 24, 27, 28, 31, 34, 35, 38, 41, 42, 45, 48, 49, 52, 55, 56 },
/* 663 */ { 0, 3, 6, 7, 10, 13, 14, 18, 22, 23, 27, 31, 32, 36, 40, 41, 45, 49, 50, 54, 58, 59, 63, 67, 68 },
/* 664 */ { 0, 4, 8, 9, 12, 15, 16, 34, 52, 53, 61, 69, 70, 74, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88 },
/* 665 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 666 */ { 0, 1, 2, 3, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28 },
/* 667 */ { 0, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26 },
/* 668 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 669 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 670 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 671 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 672 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 673 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 674 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 675 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 676 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 677 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 678 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 679 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 680 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 681 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 682 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 683 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 684 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 685 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 686 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 687 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 688 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 689 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 690 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 691 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 19, 20, 22, 24, 25, 27, 29, 30 },
/* 692 */ { 0, 2, 4, 5, 7, 9, 10, 12, 14, 15, 17, 19, 20, 22, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34 },
/* 693 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 694 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 695 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 696 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 697 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 698 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 699 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 700 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 701 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 702 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 703 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 704 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 705 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 706 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 707 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 708 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 709 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 710 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 711 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 },
/* 712 */ { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24 }
};

unsigned short unac_data0[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data1[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0062, 0x0062, 0xFFFF, 0x0063, 0x0063, 0xFFFF, 0x0064, 0x0064, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0066, 0x0066, 0xFFFF, 0x0067, 0x0067 };
unsigned short unac_data2[] = { 0xFFFF, 0x0068, 0x0068, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0x006B, 0x006B, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0x006D, 0x006D, 0xFFFF, 0x006E, 0x006E, 0xFFFF, 0x006F, 0x006F };
unsigned short unac_data3[] = { 0xFFFF, 0x0070, 0x0070, 0xFFFF, 0x0071, 0x0071, 0xFFFF, 0x0072, 0x0072, 0xFFFF, 0x0073, 0x0073, 0xFFFF, 0x0074, 0x0074, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x0077, 0x0077 };
unsigned short unac_data4[] = { 0xFFFF, 0x0078, 0x0078, 0xFFFF, 0x0079, 0x0079, 0xFFFF, 0x007A, 0x007A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data5[] = { 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data6[] = { 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data7[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0032, 0x0032, 0xFFFF, 0x0033, 0x0033, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x03BC, 0x03BC, 0x03BC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data8[] = { 0x0020, 0x0020, 0xFFFF, 0x0031, 0x0031, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0031, 0x2044, 0x0034, 0x0031, 0x2044, 0x0034, 0xFFFF, 0x0031, 0x2044, 0x0032, 0x0031, 0x2044, 0x0032, 0xFFFF, 0x0033, 0x2044, 0x0034, 0x0033, 0x2044, 0x0034, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data9[] = { 0x0041, 0x0061, 0x00E0, 0x0041, 0x0061, 0x00E1, 0x0041, 0x0061, 0x00E2, 0x0041, 0x0061, 0x00E3, 0x0041, 0x0061, 0x00E4, 0x0041, 0x0061, 0x00E5, 0xFFFF, 0x00E6, 0x00E6, 0x0043, 0x0063, 0x00E7 };
unsigned short unac_data10[] = { 0x0045, 0x0065, 0x00E8, 0x0045, 0x0065, 0x00E9, 0x0045, 0x0065, 0x00EA, 0x0045, 0x0065, 0x00EB, 0x0049, 0x0069, 0x00EC, 0x0049, 0x0069, 0x00ED, 0x0049, 0x0069, 0x00EE, 0x0049, 0x0069, 0x00EF };
unsigned short unac_data11[] = { 0xFFFF, 0x00F0, 0x00F0, 0x004E, 0x006E, 0x00F1, 0x004F, 0x006F, 0x00F2, 0x004F, 0x006F, 0x00F3, 0x004F, 0x006F, 0x00F4, 0x004F, 0x006F, 0x00F5, 0x004F, 0x006F, 0x00F6, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data12[] = { 0xFFFF, 0x00F8, 0x00F8, 0x0055, 0x0075, 0x00F9, 0x0055, 0x0075, 0x00FA, 0x0055, 0x0075, 0x00FB, 0x0055, 0x0075, 0x00FC, 0x0059, 0x0079, 0x00FD, 0xFFFF, 0x00FE, 0x00FE, 0xFFFF, 0x0073, 0x0073, 0x0073, 0x0073 };
unsigned short unac_data13[] = { 0x0061, 0x0061, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0063, 0x0063, 0xFFFF };
unsigned short unac_data14[] = { 0x0065, 0x0065, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0069, 0xFFFF };
unsigned short unac_data15[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x006E, 0x006E, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data16[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0079, 0x0079, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0079, 0x0079, 0xFFFF };
unsigned short unac_data17[] = { 0x0041, 0x0061, 0x0101, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x0103, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x0105, 0x0061, 0x0061, 0xFFFF, 0x0043, 0x0063, 0x0107, 0x0063, 0x0063, 0xFFFF };
unsigned short unac_data18[] = { 0x0043, 0x0063, 0x0109, 0x0063, 0x0063, 0xFFFF, 0x0043, 0x0063, 0x010B, 0x0063, 0x0063, 0xFFFF, 0x0043, 0x0063, 0x010D, 0x0063, 0x0063, 0xFFFF, 0x0044, 0x0064, 0x010F, 0x0064, 0x0064, 0xFFFF };
unsigned short unac_data19[] = { 0xFFFF, 0x0111, 0x0111, 0xFFFF, 0xFFFF, 0xFFFF, 0x0045, 0x0065, 0x0113, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x0115, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x0117, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data20[] = { 0x0045, 0x0065, 0x0119, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x011B, 0x0065, 0x0065, 0xFFFF, 0x0047, 0x0067, 0x011D, 0x0067, 0x0067, 0xFFFF, 0x0047, 0x0067, 0x011F, 0x0067, 0x0067, 0xFFFF };
unsigned short unac_data21[] = { 0x0047, 0x0067, 0x0121, 0x0067, 0x0067, 0xFFFF, 0x0047, 0x0067, 0x0123, 0x0067, 0x0067, 0xFFFF, 0x0048, 0x0068, 0x0125, 0x0068, 0x0068, 0xFFFF, 0xFFFF, 0x0127, 0x0127, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data22[] = { 0x0049, 0x0069, 0x0129, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x012B, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x012D, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x012F, 0x0069, 0x0069, 0xFFFF };
unsigned short unac_data23[] = { 0x0049, 0x0069, 0x0069, 0x0307, 0xFFFF, 0xFFFF, 0xFFFF, 0x0049, 0x004A, 0x0069, 0x006A, 0x0133, 0x0069, 0x006A, 0x0069, 0x006A, 0xFFFF, 0x004A, 0x006A, 0x0135, 0x006A, 0x006A, 0xFFFF, 0x004B, 0x006B, 0x0137, 0x006B, 0x006B, 0xFFFF };
unsigned short unac_data24[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x004C, 0x006C, 0x013A, 0x006C, 0x006C, 0xFFFF, 0x004C, 0x006C, 0x013C, 0x006C, 0x006C, 0xFFFF, 0x004C, 0x006C, 0x013E, 0x006C, 0x006C, 0xFFFF, 0x004C, 0x00B7, 0x006C, 0x00B7, 0x0140 };
unsigned short unac_data25[] = { 0x006C, 0x00B7, 0x006C, 0x00B7, 0xFFFF, 0xFFFF, 0x0142, 0x0142, 0xFFFF, 0xFFFF, 0xFFFF, 0x004E, 0x006E, 0x0144, 0x006E, 0x006E, 0xFFFF, 0x004E, 0x006E, 0x0146, 0x006E, 0x006E, 0xFFFF, 0x004E, 0x006E, 0x0148 };
unsigned short unac_data26[] = { 0x006E, 0x006E, 0xFFFF, 0x02BC, 0x006E, 0x02BC, 0x006E, 0x02BC, 0x006E, 0xFFFF, 0x014B, 0x014B, 0xFFFF, 0xFFFF, 0xFFFF, 0x004F, 0x006F, 0x014D, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x014F, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data27[] = { 0x004F, 0x006F, 0x0151, 0x006F, 0x006F, 0xFFFF, 0xFFFF, 0x0153, 0x0153, 0xFFFF, 0xFFFF, 0xFFFF, 0x0052, 0x0072, 0x0155, 0x0072, 0x0072, 0xFFFF, 0x0052, 0x0072, 0x0157, 0x0072, 0x0072, 0xFFFF };
unsigned short unac_data28[] = { 0x0052, 0x0072, 0x0159, 0x0072, 0x0072, 0xFFFF, 0x0053, 0x0073, 0x015B, 0x0073, 0x0073, 0xFFFF, 0x0053, 0x0073, 0x015D, 0x0073, 0x0073, 0xFFFF, 0x0053, 0x0073, 0x015F, 0x0073, 0x0073, 0xFFFF };
unsigned short unac_data29[] = { 0x0053, 0x0073, 0x0161, 0x0073, 0x0073, 0xFFFF, 0x0054, 0x0074, 0x0163, 0x0074, 0x0074, 0xFFFF, 0x0054, 0x0074, 0x0165, 0x0074, 0x0074, 0xFFFF, 0xFFFF, 0x0167, 0x0167, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data30[] = { 0x0055, 0x0075, 0x0169, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x016B, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x016D, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x016F, 0x0075, 0x0075, 0xFFFF };
unsigned short unac_data31[] = { 0x0055, 0x0075, 0x0171, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x0173, 0x0075, 0x0075, 0xFFFF, 0x0057, 0x0077, 0x0175, 0x0077, 0x0077, 0xFFFF, 0x0059, 0x0079, 0x0177, 0x0079, 0x0079, 0xFFFF };
unsigned short unac_data32[] = { 0x0059, 0x0079, 0x00FF, 0x005A, 0x007A, 0x017A, 0x007A, 0x007A, 0xFFFF, 0x005A, 0x007A, 0x017C, 0x007A, 0x007A, 0xFFFF, 0x005A, 0x007A, 0x017E, 0x007A, 0x007A, 0xFFFF, 0x0073, 0x0073, 0x0073 };
unsigned short unac_data33[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0253, 0x0253, 0xFFFF, 0x0183, 0x0183, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0185, 0x0185, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0254, 0x0254, 0xFFFF, 0x0188, 0x0188 };
unsigned short unac_data34[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0256, 0x0256, 0xFFFF, 0x0257, 0x0257, 0xFFFF, 0x018C, 0x018C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x01DD, 0x01DD, 0xFFFF, 0x0259, 0x0259 };
unsigned short unac_data35[] = { 0xFFFF, 0x025B, 0x025B, 0xFFFF, 0x0192, 0x0192, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0260, 0x0260, 0xFFFF, 0x0263, 0x0263, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0269, 0x0269, 0xFFFF, 0x0268, 0x0268 };
unsigned short unac_data36[] = { 0xFFFF, 0x0199, 0x0199, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x026F, 0x026F, 0xFFFF, 0x0272, 0x0272, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0275, 0x0275 };
unsigned short unac_data37[] = { 0x004F, 0x006F, 0x01A1, 0x006F, 0x006F, 0xFFFF, 0xFFFF, 0x01A3, 0x01A3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x01A5, 0x01A5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0280, 0x0280, 0xFFFF, 0x01A8, 0x01A8 };
unsigned short unac_data38[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0283, 0x0283, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x01AD, 0x01AD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0288, 0x0288, 0x0055, 0x0075, 0x01B0 };
unsigned short unac_data39[] = { 0x0075, 0x0075, 0xFFFF, 0xFFFF, 0x028A, 0x028A, 0xFFFF, 0x028B, 0x028B, 0xFFFF, 0x01B4, 0x01B4, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x01B6, 0x01B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0292, 0x0292 };
unsigned short unac_data40[] = { 0xFFFF, 0x01B9, 0x01B9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x01BD, 0x01BD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data41[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0044, 0x005A, 0x0064, 0x007A, 0x01C6, 0x0044, 0x007A, 0x0064, 0x007A, 0x01C6, 0x0064, 0x007A, 0x0064, 0x007A, 0xFFFF, 0x004C, 0x004A, 0x006C, 0x006A, 0x01C9 };
unsigned short unac_data42[] = { 0x004C, 0x006A, 0x006C, 0x006A, 0x01C9, 0x006C, 0x006A, 0x006C, 0x006A, 0xFFFF, 0x004E, 0x004A, 0x006E, 0x006A, 0x01CC, 0x004E, 0x006A, 0x006E, 0x006A, 0x01CC, 0x006E, 0x006A, 0x006E, 0x006A, 0xFFFF, 0x0041, 0x0061, 0x01CE, 0x0061, 0x0061, 0xFFFF, 0x0049, 0x0069, 0x01D0 };
unsigned short unac_data43[] = { 0x0069, 0x0069, 0xFFFF, 0x004F, 0x006F, 0x01D2, 0x006F, 0x006F, 0xFFFF, 0x0055, 0x0075, 0x01D4, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x01D6, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x01D8 };
unsigned short unac_data44[] = { 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x01DA, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x01DC, 0x0075, 0x0075, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0041, 0x0061, 0x01DF, 0x0061, 0x0061, 0xFFFF };
unsigned short unac_data45[] = { 0x0041, 0x0061, 0x01E1, 0x0061, 0x0061, 0xFFFF, 0x00C6, 0x00E6, 0x01E3, 0x00E6, 0x00E6, 0xFFFF, 0xFFFF, 0x01E5, 0x01E5, 0xFFFF, 0xFFFF, 0xFFFF, 0x0047, 0x0067, 0x01E7, 0x0067, 0x0067, 0xFFFF };
unsigned short unac_data46[] = { 0x004B, 0x006B, 0x01E9, 0x006B, 0x006B, 0xFFFF, 0x004F, 0x006F, 0x01EB, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x01ED, 0x006F, 0x006F, 0xFFFF, 0x01B7, 0x0292, 0x01EF, 0x0292, 0x0292, 0xFFFF };
unsigned short unac_data47[] = { 0x006A, 0x006A, 0x006A, 0x030C, 0x0044, 0x005A, 0x0064, 0x007A, 0x01F3, 0x0044, 0x007A, 0x0064, 0x007A, 0x01F3, 0x0064, 0x007A, 0x0064, 0x007A, 0xFFFF, 0x0047, 0x0067, 0x01F5, 0x0067, 0x0067, 0xFFFF, 0xFFFF, 0x0195, 0x0195, 0xFFFF, 0x01BF, 0x01BF };
unsigned short unac_data48[] = { 0x004E, 0x006E, 0x01F9, 0x006E, 0x006E, 0xFFFF, 0x0041, 0x0061, 0x01FB, 0x0061, 0x0061, 0xFFFF, 0x00C6, 0x00E6, 0x01FD, 0x00E6, 0x00E6, 0xFFFF, 0x00D8, 0x00F8, 0x01FF, 0x00F8, 0x00F8, 0xFFFF };
unsigned short unac_data49[] = { 0x0041, 0x0061, 0x0201, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x0203, 0x0061, 0x0061, 0xFFFF, 0x0045, 0x0065, 0x0205, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x0207, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data50[] = { 0x0049, 0x0069, 0x0209, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x020B, 0x0069, 0x0069, 0xFFFF, 0x004F, 0x006F, 0x020D, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x020F, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data51[] = { 0x0052, 0x0072, 0x0211, 0x0072, 0x0072, 0xFFFF, 0x0052, 0x0072, 0x0213, 0x0072, 0x0072, 0xFFFF, 0x0055, 0x0075, 0x0215, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x0217, 0x0075, 0x0075, 0xFFFF };
unsigned short unac_data52[] = { 0x0053, 0x0073, 0x0219, 0x0073, 0x0073, 0xFFFF, 0x0054, 0x0074, 0x021B, 0x0074, 0x0074, 0xFFFF, 0xFFFF, 0x021D, 0x021D, 0xFFFF, 0xFFFF, 0xFFFF, 0x0048, 0x0068, 0x021F, 0x0068, 0x0068, 0xFFFF };
unsigned short unac_data53[] = { 0xFFFF, 0x019E, 0x019E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0223, 0x0223, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0225, 0x0225, 0xFFFF, 0xFFFF, 0xFFFF, 0x0041, 0x0061, 0x0227, 0x0061, 0x0061, 0xFFFF };
unsigned short unac_data54[] = { 0x0045, 0x0065, 0x0229, 0x0065, 0x0065, 0xFFFF, 0x004F, 0x006F, 0x022B, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x022D, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x022F, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data55[] = { 0x004F, 0x006F, 0x0231, 0x006F, 0x006F, 0xFFFF, 0x0059, 0x0079, 0x0233, 0x0079, 0x0079, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data56[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C65, 0x2C65, 0xFFFF, 0x023C, 0x023C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x019A, 0x019A, 0xFFFF, 0x2C66, 0x2C66, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data57[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0242, 0x0242, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0180, 0x0180, 0xFFFF, 0x0289, 0x0289, 0xFFFF, 0x028C, 0x028C, 0xFFFF, 0x0247, 0x0247, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data58[] = { 0xFFFF, 0x0249, 0x0249, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x024B, 0x024B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x024D, 0x024D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x024F, 0x024F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data59[] = { 0x0068, 0x0068, 0xFFFF, 0x0266, 0x0266, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0x0072, 0x0072, 0xFFFF, 0x0279, 0x0279, 0xFFFF, 0x027B, 0x027B, 0xFFFF, 0x0281, 0x0281, 0xFFFF, 0x0077, 0x0077, 0xFFFF };
unsigned short unac_data60[] = { 0x0079, 0x0079, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data61[] = { 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data62[] = { 0x0263, 0x0263, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0x0073, 0x0073, 0xFFFF, 0x0078, 0x0078, 0xFFFF, 0x0295, 0x0295, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data63[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data64[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0x03B9, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data65[] = { 0xFFFF, 0x0371, 0x0371, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0373, 0x0373, 0xFFFF, 0xFFFF, 0xFFFF, 0x02B9, 0x02B9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0377, 0x0377, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data66[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x003B, 0x003B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data67[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0391, 0x03B1, 0x03AC, 0x00B7, 0x00B7, 0xFFFF };
unsigned short unac_data68[] = { 0x0395, 0x03B5, 0x03AD, 0x0397, 0x03B7, 0x03AE, 0x0399, 0x03B9, 0x03AF, 0xFFFF, 0xFFFF, 0xFFFF, 0x039F, 0x03BF, 0x03CC, 0xFFFF, 0xFFFF, 0xFFFF, 0x03A5, 0x03C5, 0x03CD, 0x03A9, 0x03C9, 0x03CE };
unsigned short unac_data69[] = { 0x03B9, 0x03B9, 0x03B9, 0x0308, 0x0301, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B2, 0x03B2, 0xFFFF, 0x03B3, 0x03B3, 0xFFFF, 0x03B4, 0x03B4, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B6, 0x03B6, 0xFFFF, 0x03B7, 0x03B7 };
unsigned short unac_data70[] = { 0xFFFF, 0x03B8, 0x03B8, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03BA, 0x03BA, 0xFFFF, 0x03BB, 0x03BB, 0xFFFF, 0x03BC, 0x03BC, 0xFFFF, 0x03BD, 0x03BD, 0xFFFF, 0x03BE, 0x03BE, 0xFFFF, 0x03BF, 0x03BF };
unsigned short unac_data71[] = { 0xFFFF, 0x03C0, 0x03C0, 0xFFFF, 0x03C1, 0x03C1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03C3, 0x03C3, 0xFFFF, 0x03C4, 0x03C4, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03C6, 0x03C6, 0xFFFF, 0x03C7, 0x03C7 };
unsigned short unac_data72[] = { 0xFFFF, 0x03C8, 0x03C8, 0xFFFF, 0x03C9, 0x03C9, 0x0399, 0x03B9, 0x03CA, 0x03A5, 0x03C5, 0x03CB, 0x03B1, 0x03B1, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF };
unsigned short unac_data73[] = { 0x03C5, 0x03C5, 0x03C5, 0x0308, 0x0301, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data74[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03C3, 0x03C3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data75[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0xFFFF, 0x03D7, 0x03D7 };
unsigned short unac_data76[] = { 0x03B2, 0x03B2, 0x03B2, 0x03B8, 0x03B8, 0x03B8, 0x03A5, 0x03C5, 0xFFFF, 0x03A5, 0x03C5, 0xFFFF, 0x03A5, 0x03C5, 0xFFFF, 0x03C6, 0x03C6, 0x03C6, 0x03C0, 0x03C0, 0x03C0, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data77[] = { 0xFFFF, 0x03D9, 0x03D9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03DB, 0x03DB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03DD, 0x03DD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03DF, 0x03DF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data78[] = { 0xFFFF, 0x03E1, 0x03E1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03E3, 0x03E3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03E5, 0x03E5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03E7, 0x03E7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data79[] = { 0xFFFF, 0x03E9, 0x03E9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03EB, 0x03EB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03ED, 0x03ED, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03EF, 0x03EF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data80[] = { 0x03BA, 0x03BA, 0x03BA, 0x03C1, 0x03C1, 0x03C1, 0x03C2, 0x03C3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0398, 0x03B8, 0x03B8, 0x03B5, 0x03B5, 0x03B5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03F8, 0x03F8 };
unsigned short unac_data81[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x03A3, 0x03C3, 0x03F2, 0xFFFF, 0x03FB, 0x03FB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x037B, 0x037B, 0xFFFF, 0x037C, 0x037C, 0xFFFF, 0x037D, 0x037D };
unsigned short unac_data82[] = { 0x0415, 0x0435, 0x0450, 0x0415, 0x0435, 0x0451, 0xFFFF, 0x0452, 0x0452, 0x0413, 0x0433, 0x0453, 0xFFFF, 0x0454, 0x0454, 0xFFFF, 0x0455, 0x0455, 0xFFFF, 0x0456, 0x0456, 0x0406, 0x0456, 0x0457 };
unsigned short unac_data83[] = { 0xFFFF, 0x0458, 0x0458, 0xFFFF, 0x0459, 0x0459, 0xFFFF, 0x045A, 0x045A, 0xFFFF, 0x045B, 0x045B, 0x041A, 0x043A, 0x045C, 0x0418, 0x0438, 0x045D, 0x0423, 0x0443, 0x045E, 0xFFFF, 0x045F, 0x045F };
unsigned short unac_data84[] = { 0xFFFF, 0x0430, 0x0430, 0xFFFF, 0x0431, 0x0431, 0xFFFF, 0x0432, 0x0432, 0xFFFF, 0x0433, 0x0433, 0xFFFF, 0x0434, 0x0434, 0xFFFF, 0x0435, 0x0435, 0xFFFF, 0x0436, 0x0436, 0xFFFF, 0x0437, 0x0437 };
unsigned short unac_data85[] = { 0xFFFF, 0x0438, 0x0438, 0x0418, 0x0438, 0x0439, 0xFFFF, 0x043A, 0x043A, 0xFFFF, 0x043B, 0x043B, 0xFFFF, 0x043C, 0x043C, 0xFFFF, 0x043D, 0x043D, 0xFFFF, 0x043E, 0x043E, 0xFFFF, 0x043F, 0x043F };
unsigned short unac_data86[] = { 0xFFFF, 0x0440, 0x0440, 0xFFFF, 0x0441, 0x0441, 0xFFFF, 0x0442, 0x0442, 0xFFFF, 0x0443, 0x0443, 0xFFFF, 0x0444, 0x0444, 0xFFFF, 0x0445, 0x0445, 0xFFFF, 0x0446, 0x0446, 0xFFFF, 0x0447, 0x0447 };
unsigned short unac_data87[] = { 0xFFFF, 0x0448, 0x0448, 0xFFFF, 0x0449, 0x0449, 0xFFFF, 0x044A, 0x044A, 0xFFFF, 0x044B, 0x044B, 0xFFFF, 0x044C, 0x044C, 0xFFFF, 0x044D, 0x044D, 0xFFFF, 0x044E, 0x044E, 0xFFFF, 0x044F, 0x044F };
unsigned short unac_data88[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0438, 0x0438, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data89[] = { 0x0435, 0x0435, 0xFFFF, 0x0435, 0x0435, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0433, 0x0433, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0456, 0x0456, 0xFFFF };
unsigned short unac_data90[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x043A, 0x043A, 0xFFFF, 0x0438, 0x0438, 0xFFFF, 0x0443, 0x0443, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data91[] = { 0xFFFF, 0x0461, 0x0461, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0463, 0x0463, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0465, 0x0465, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0467, 0x0467, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data92[] = { 0xFFFF, 0x0469, 0x0469, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x046B, 0x046B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x046D, 0x046D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x046F, 0x046F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data93[] = { 0xFFFF, 0x0471, 0x0471, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0473, 0x0473, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0475, 0x0475, 0xFFFF, 0xFFFF, 0xFFFF, 0x0474, 0x0475, 0x0477, 0x0475, 0x0475, 0xFFFF };
unsigned short unac_data94[] = { 0xFFFF, 0x0479, 0x0479, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x047B, 0x047B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x047D, 0x047D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x047F, 0x047F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data95[] = { 0xFFFF, 0x0481, 0x0481, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data96[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x048B, 0x048B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x048D, 0x048D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x048F, 0x048F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data97[] = { 0xFFFF, 0x0491, 0x0491, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0493, 0x0493, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0495, 0x0495, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0497, 0x0497, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data98[] = { 0xFFFF, 0x0499, 0x0499, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x049B, 0x049B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x049D, 0x049D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x049F, 0x049F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data99[] = { 0xFFFF, 0x04A1, 0x04A1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04A3, 0x04A3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04A5, 0x04A5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04A7, 0x04A7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data100[] = { 0xFFFF, 0x04A9, 0x04A9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04AB, 0x04AB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04AD, 0x04AD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04AF, 0x04AF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data101[] = { 0xFFFF, 0x04B1, 0x04B1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04B3, 0x04B3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04B5, 0x04B5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04B7, 0x04B7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data102[] = { 0xFFFF, 0x04B9, 0x04B9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04BB, 0x04BB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04BD, 0x04BD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04BF, 0x04BF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data103[] = { 0xFFFF, 0x04CF, 0x04CF, 0x0416, 0x0436, 0x04C2, 0x0436, 0x0436, 0xFFFF, 0xFFFF, 0x04C4, 0x04C4, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04C6, 0x04C6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04C8, 0x04C8 };
unsigned short unac_data104[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04CA, 0x04CA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04CC, 0x04CC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04CE, 0x04CE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data105[] = { 0x0410, 0x0430, 0x04D1, 0x0430, 0x0430, 0xFFFF, 0x0410, 0x0430, 0x04D3, 0x0430, 0x0430, 0xFFFF, 0xFFFF, 0x04D5, 0x04D5, 0xFFFF, 0xFFFF, 0xFFFF, 0x0415, 0x0435, 0x04D7, 0x0435, 0x0435, 0xFFFF };
unsigned short unac_data106[] = { 0xFFFF, 0x04D9, 0x04D9, 0xFFFF, 0xFFFF, 0xFFFF, 0x04D8, 0x04D9, 0x04DB, 0x04D9, 0x04D9, 0xFFFF, 0x0416, 0x0436, 0x04DD, 0x0436, 0x0436, 0xFFFF, 0x0417, 0x0437, 0x04DF, 0x0437, 0x0437, 0xFFFF };
unsigned short unac_data107[] = { 0xFFFF, 0x04E1, 0x04E1, 0xFFFF, 0xFFFF, 0xFFFF, 0x0418, 0x0438, 0x04E3, 0x0438, 0x0438, 0xFFFF, 0x0418, 0x0438, 0x04E5, 0x0438, 0x0438, 0xFFFF, 0x041E, 0x043E, 0x04E7, 0x043E, 0x043E, 0xFFFF };
unsigned short unac_data108[] = { 0xFFFF, 0x04E9, 0x04E9, 0xFFFF, 0xFFFF, 0xFFFF, 0x04E8, 0x04E9, 0x04EB, 0x04E9, 0x04E9, 0xFFFF, 0x042D, 0x044D, 0x04ED, 0x044D, 0x044D, 0xFFFF, 0x0423, 0x0443, 0x04EF, 0x0443, 0x0443, 0xFFFF };
unsigned short unac_data109[] = { 0x0423, 0x0443, 0x04F1, 0x0443, 0x0443, 0xFFFF, 0x0423, 0x0443, 0x04F3, 0x0443, 0x0443, 0xFFFF, 0x0427, 0x0447, 0x04F5, 0x0447, 0x0447, 0xFFFF, 0xFFFF, 0x04F7, 0x04F7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data110[] = { 0x042B, 0x044B, 0x04F9, 0x044B, 0x044B, 0xFFFF, 0xFFFF, 0x04FB, 0x04FB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04FD, 0x04FD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x04FF, 0x04FF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data111[] = { 0xFFFF, 0x0501, 0x0501, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0503, 0x0503, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0505, 0x0505, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0507, 0x0507, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data112[] = { 0xFFFF, 0x0509, 0x0509, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x050B, 0x050B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x050D, 0x050D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x050F, 0x050F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data113[] = { 0xFFFF, 0x0511, 0x0511, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0513, 0x0513, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0515, 0x0515, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0517, 0x0517, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data114[] = { 0xFFFF, 0x0519, 0x0519, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x051B, 0x051B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x051D, 0x051D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x051F, 0x051F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data115[] = { 0xFFFF, 0x0521, 0x0521, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0523, 0x0523, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0525, 0x0525, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0527, 0x0527, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data116[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0561, 0x0561, 0xFFFF, 0x0562, 0x0562, 0xFFFF, 0x0563, 0x0563, 0xFFFF, 0x0564, 0x0564, 0xFFFF, 0x0565, 0x0565, 0xFFFF, 0x0566, 0x0566, 0xFFFF, 0x0567, 0x0567 };
unsigned short unac_data117[] = { 0xFFFF, 0x0568, 0x0568, 0xFFFF, 0x0569, 0x0569, 0xFFFF, 0x056A, 0x056A, 0xFFFF, 0x056B, 0x056B, 0xFFFF, 0x056C, 0x056C, 0xFFFF, 0x056D, 0x056D, 0xFFFF, 0x056E, 0x056E, 0xFFFF, 0x056F, 0x056F };
unsigned short unac_data118[] = { 0xFFFF, 0x0570, 0x0570, 0xFFFF, 0x0571, 0x0571, 0xFFFF, 0x0572, 0x0572, 0xFFFF, 0x0573, 0x0573, 0xFFFF, 0x0574, 0x0574, 0xFFFF, 0x0575, 0x0575, 0xFFFF, 0x0576, 0x0576, 0xFFFF, 0x0577, 0x0577 };
unsigned short unac_data119[] = { 0xFFFF, 0x0578, 0x0578, 0xFFFF, 0x0579, 0x0579, 0xFFFF, 0x057A, 0x057A, 0xFFFF, 0x057B, 0x057B, 0xFFFF, 0x057C, 0x057C, 0xFFFF, 0x057D, 0x057D, 0xFFFF, 0x057E, 0x057E, 0xFFFF, 0x057F, 0x057F };
unsigned short unac_data120[] = { 0xFFFF, 0x0580, 0x0580, 0xFFFF, 0x0581, 0x0581, 0xFFFF, 0x0582, 0x0582, 0xFFFF, 0x0583, 0x0583, 0xFFFF, 0x0584, 0x0584, 0xFFFF, 0x0585, 0x0585, 0xFFFF, 0x0586, 0x0586, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data121[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0565, 0x0582, 0x0565, 0x0582, 0x0565, 0x0582 };
unsigned short unac_data122[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data123[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data124[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data125[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data126[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0648, 0x0648, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data127[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data128[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0627, 0x0674, 0x0627, 0x0674, 0xFFFF, 0x0648, 0x0674, 0x0648, 0x0674, 0xFFFF, 0x06C7, 0x0674, 0x06C7, 0x0674, 0xFFFF };
unsigned short unac_data129[] = { 0x064A, 0x0674, 0x064A, 0x0674, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data130[] = { 0x06D5, 0x06D5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x06C1, 0x06C1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data131[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x06D2, 0x06D2, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data132[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data133[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data134[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data135[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data136[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data137[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data138[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data139[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data140[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data141[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data142[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data143[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data144[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0A32, 0x0A32, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0A38, 0x0A38, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data145[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data146[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data147[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data148[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0A16, 0x0A16, 0xFFFF, 0x0A17, 0x0A17, 0xFFFF, 0x0A1C, 0x0A1C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0A2B, 0x0A2B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data149[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data150[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data151[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data152[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0B21, 0x0B21, 0xFFFF, 0x0B22, 0x0B22, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data153[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data154[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0B92, 0x0B92, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data155[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data156[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data157[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data158[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data159[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data160[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data161[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0E32, 0x0E32, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data162[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0EB2, 0x0EB2, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data163[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data164[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data165[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0EAB, 0x0E99, 0x0EAB, 0x0E99, 0xFFFF, 0x0EAB, 0x0EA1, 0x0EAB, 0x0EA1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data166[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F0B, 0x0F0B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data167[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data168[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data169[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data170[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F42, 0x0F42, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data171[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F4C, 0x0F4C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data172[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F51, 0x0F51, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F56, 0x0F56, 0xFFFF };
unsigned short unac_data173[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0F5B, 0x0F5B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data174[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0F40, 0x0F40, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data175[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data176[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data177[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data178[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1025, 0x1025, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data179[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data180[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data181[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data182[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data183[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data184[] = { 0xFFFF, 0x2D00, 0x2D00, 0xFFFF, 0x2D01, 0x2D01, 0xFFFF, 0x2D02, 0x2D02, 0xFFFF, 0x2D03, 0x2D03, 0xFFFF, 0x2D04, 0x2D04, 0xFFFF, 0x2D05, 0x2D05, 0xFFFF, 0x2D06, 0x2D06, 0xFFFF, 0x2D07, 0x2D07 };
unsigned short unac_data185[] = { 0xFFFF, 0x2D08, 0x2D08, 0xFFFF, 0x2D09, 0x2D09, 0xFFFF, 0x2D0A, 0x2D0A, 0xFFFF, 0x2D0B, 0x2D0B, 0xFFFF, 0x2D0C, 0x2D0C, 0xFFFF, 0x2D0D, 0x2D0D, 0xFFFF, 0x2D0E, 0x2D0E, 0xFFFF, 0x2D0F, 0x2D0F };
unsigned short unac_data186[] = { 0xFFFF, 0x2D10, 0x2D10, 0xFFFF, 0x2D11, 0x2D11, 0xFFFF, 0x2D12, 0x2D12, 0xFFFF, 0x2D13, 0x2D13, 0xFFFF, 0x2D14, 0x2D14, 0xFFFF, 0x2D15, 0x2D15, 0xFFFF, 0x2D16, 0x2D16, 0xFFFF, 0x2D17, 0x2D17 };
unsigned short unac_data187[] = { 0xFFFF, 0x2D18, 0x2D18, 0xFFFF, 0x2D19, 0x2D19, 0xFFFF, 0x2D1A, 0x2D1A, 0xFFFF, 0x2D1B, 0x2D1B, 0xFFFF, 0x2D1C, 0x2D1C, 0xFFFF, 0x2D1D, 0x2D1D, 0xFFFF, 0x2D1E, 0x2D1E, 0xFFFF, 0x2D1F, 0x2D1F };
unsigned short unac_data188[] = { 0xFFFF, 0x2D20, 0x2D20, 0xFFFF, 0x2D21, 0x2D21, 0xFFFF, 0x2D22, 0x2D22, 0xFFFF, 0x2D23, 0x2D23, 0xFFFF, 0x2D24, 0x2D24, 0xFFFF, 0x2D25, 0x2D25, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2D27, 0x2D27 };
unsigned short unac_data189[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2D2D, 0x2D2D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data190[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x10DC, 0x10DC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data191[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data192[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data193[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data194[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1B05, 0x1B05, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data195[] = { 0x1B07, 0x1B07, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1B09, 0x1B09, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1B0B, 0x1B0B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1B0D, 0x1B0D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data196[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1B11, 0x1B11, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data197[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data198[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data199[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0041, 0x0061, 0xFFFF, 0x00C6, 0x00E6, 0xFFFF, 0x0042, 0x0062, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data200[] = { 0x0044, 0x0064, 0xFFFF, 0x0045, 0x0065, 0xFFFF, 0x018E, 0x01DD, 0xFFFF, 0x0047, 0x0067, 0xFFFF, 0x0048, 0x0068, 0xFFFF, 0x0049, 0x0069, 0xFFFF, 0x004A, 0x006A, 0xFFFF, 0x004B, 0x006B, 0xFFFF };
unsigned short unac_data201[] = { 0x004C, 0x006C, 0xFFFF, 0x004D, 0x006D, 0xFFFF, 0x004E, 0x006E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x004F, 0x006F, 0xFFFF, 0x0222, 0x0223, 0xFFFF, 0x0050, 0x0070, 0xFFFF, 0x0052, 0x0072, 0xFFFF };
unsigned short unac_data202[] = { 0x0054, 0x0074, 0xFFFF, 0x0055, 0x0075, 0xFFFF, 0x0057, 0x0077, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0250, 0x0250, 0xFFFF, 0x0251, 0x0251, 0xFFFF, 0x1D02, 0x1D02, 0xFFFF, 0x0062, 0x0062, 0xFFFF };
unsigned short unac_data203[] = { 0x0064, 0x0064, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0259, 0x0259, 0xFFFF, 0x025B, 0x025B, 0xFFFF, 0x025C, 0x025C, 0xFFFF, 0x0067, 0x0067, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x006B, 0x006B, 0xFFFF };
unsigned short unac_data204[] = { 0x006D, 0x006D, 0xFFFF, 0x014B, 0x014B, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x0254, 0x0254, 0xFFFF, 0x1D16, 0x1D16, 0xFFFF, 0x1D17, 0x1D17, 0xFFFF, 0x0070, 0x0070, 0xFFFF, 0x0074, 0x0074, 0xFFFF };
unsigned short unac_data205[] = { 0x0075, 0x0075, 0xFFFF, 0x1D1D, 0x1D1D, 0xFFFF, 0x026F, 0x026F, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x1D25, 0x1D25, 0xFFFF, 0x03B2, 0x03B2, 0xFFFF, 0x03B3, 0x03B3, 0xFFFF, 0x03B4, 0x03B4, 0xFFFF };
unsigned short unac_data206[] = { 0x03C6, 0x03C6, 0xFFFF, 0x03C7, 0x03C7, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x0072, 0x0072, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x03B2, 0x03B2, 0xFFFF, 0x03B3, 0x03B3, 0xFFFF };
unsigned short unac_data207[] = { 0x03C1, 0x03C1, 0xFFFF, 0x03C6, 0x03C6, 0xFFFF, 0x03C7, 0x03C7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data208[] = { 0x043D, 0x043D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data209[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0252, 0x0252, 0xFFFF, 0x0063, 0x0063, 0xFFFF, 0x0255, 0x0255, 0xFFFF, 0x00F0, 0x00F0, 0xFFFF, 0x025C, 0x025C, 0xFFFF };
unsigned short unac_data210[] = { 0x0066, 0x0066, 0xFFFF, 0x025F, 0x025F, 0xFFFF, 0x0261, 0x0261, 0xFFFF, 0x0265, 0x0265, 0xFFFF, 0x0268, 0x0268, 0xFFFF, 0x0269, 0x0269, 0xFFFF, 0x026A, 0x026A, 0xFFFF, 0x1D7B, 0x1D7B, 0xFFFF };
unsigned short unac_data211[] = { 0x029D, 0x029D, 0xFFFF, 0x026D, 0x026D, 0xFFFF, 0x1D85, 0x1D85, 0xFFFF, 0x029F, 0x029F, 0xFFFF, 0x0271, 0x0271, 0xFFFF, 0x0270, 0x0270, 0xFFFF, 0x0272, 0x0272, 0xFFFF, 0x0273, 0x0273, 0xFFFF };
unsigned short unac_data212[] = { 0x0274, 0x0274, 0xFFFF, 0x0275, 0x0275, 0xFFFF, 0x0278, 0x0278, 0xFFFF, 0x0282, 0x0282, 0xFFFF, 0x0283, 0x0283, 0xFFFF, 0x01AB, 0x01AB, 0xFFFF, 0x0289, 0x0289, 0xFFFF, 0x028A, 0x028A, 0xFFFF };
unsigned short unac_data213[] = { 0x1D1C, 0x1D1C, 0xFFFF, 0x028B, 0x028B, 0xFFFF, 0x028C, 0x028C, 0xFFFF, 0x007A, 0x007A, 0xFFFF, 0x0290, 0x0290, 0xFFFF, 0x0291, 0x0291, 0xFFFF, 0x0292, 0x0292, 0xFFFF, 0x03B8, 0x03B8, 0xFFFF };
unsigned short unac_data214[] = { 0x0041, 0x0061, 0x1E01, 0x0061, 0x0061, 0xFFFF, 0x0042, 0x0062, 0x1E03, 0x0062, 0x0062, 0xFFFF, 0x0042, 0x0062, 0x1E05, 0x0062, 0x0062, 0xFFFF, 0x0042, 0x0062, 0x1E07, 0x0062, 0x0062, 0xFFFF };
unsigned short unac_data215[] = { 0x0043, 0x0063, 0x1E09, 0x0063, 0x0063, 0xFFFF, 0x0044, 0x0064, 0x1E0B, 0x0064, 0x0064, 0xFFFF, 0x0044, 0x0064, 0x1E0D, 0x0064, 0x0064, 0xFFFF, 0x0044, 0x0064, 0x1E0F, 0x0064, 0x0064, 0xFFFF };
unsigned short unac_data216[] = { 0x0044, 0x0064, 0x1E11, 0x0064, 0x0064, 0xFFFF, 0x0044, 0x0064, 0x1E13, 0x0064, 0x0064, 0xFFFF, 0x0045, 0x0065, 0x1E15, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1E17, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data217[] = { 0x0045, 0x0065, 0x1E19, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1E1B, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1E1D, 0x0065, 0x0065, 0xFFFF, 0x0046, 0x0066, 0x1E1F, 0x0066, 0x0066, 0xFFFF };
unsigned short unac_data218[] = { 0x0047, 0x0067, 0x1E21, 0x0067, 0x0067, 0xFFFF, 0x0048, 0x0068, 0x1E23, 0x0068, 0x0068, 0xFFFF, 0x0048, 0x0068, 0x1E25, 0x0068, 0x0068, 0xFFFF, 0x0048, 0x0068, 0x1E27, 0x0068, 0x0068, 0xFFFF };
unsigned short unac_data219[] = { 0x0048, 0x0068, 0x1E29, 0x0068, 0x0068, 0xFFFF, 0x0048, 0x0068, 0x1E2B, 0x0068, 0x0068, 0xFFFF, 0x0049, 0x0069, 0x1E2D, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x1E2F, 0x0069, 0x0069, 0xFFFF };
unsigned short unac_data220[] = { 0x004B, 0x006B, 0x1E31, 0x006B, 0x006B, 0xFFFF, 0x004B, 0x006B, 0x1E33, 0x006B, 0x006B, 0xFFFF, 0x004B, 0x006B, 0x1E35, 0x006B, 0x006B, 0xFFFF, 0x004C, 0x006C, 0x1E37, 0x006C, 0x006C, 0xFFFF };
unsigned short unac_data221[] = { 0x004C, 0x006C, 0x1E39, 0x006C, 0x006C, 0xFFFF, 0x004C, 0x006C, 0x1E3B, 0x006C, 0x006C, 0xFFFF, 0x004C, 0x006C, 0x1E3D, 0x006C, 0x006C, 0xFFFF, 0x004D, 0x006D, 0x1E3F, 0x006D, 0x006D, 0xFFFF };
unsigned short unac_data222[] = { 0x004D, 0x006D, 0x1E41, 0x006D, 0x006D, 0xFFFF, 0x004D, 0x006D, 0x1E43, 0x006D, 0x006D, 0xFFFF, 0x004E, 0x006E, 0x1E45, 0x006E, 0x006E, 0xFFFF, 0x004E, 0x006E, 0x1E47, 0x006E, 0x006E, 0xFFFF };
unsigned short unac_data223[] = { 0x004E, 0x006E, 0x1E49, 0x006E, 0x006E, 0xFFFF, 0x004E, 0x006E, 0x1E4B, 0x006E, 0x006E, 0xFFFF, 0x004F, 0x006F, 0x1E4D, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1E4F, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data224[] = { 0x004F, 0x006F, 0x1E51, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1E53, 0x006F, 0x006F, 0xFFFF, 0x0050, 0x0070, 0x1E55, 0x0070, 0x0070, 0xFFFF, 0x0050, 0x0070, 0x1E57, 0x0070, 0x0070, 0xFFFF };
unsigned short unac_data225[] = { 0x0052, 0x0072, 0x1E59, 0x0072, 0x0072, 0xFFFF, 0x0052, 0x0072, 0x1E5B, 0x0072, 0x0072, 0xFFFF, 0x0052, 0x0072, 0x1E5D, 0x0072, 0x0072, 0xFFFF, 0x0052, 0x0072, 0x1E5F, 0x0072, 0x0072, 0xFFFF };
unsigned short unac_data226[] = { 0x0053, 0x0073, 0x1E61, 0x0073, 0x0073, 0xFFFF, 0x0053, 0x0073, 0x1E63, 0x0073, 0x0073, 0xFFFF, 0x0053, 0x0073, 0x1E65, 0x0073, 0x0073, 0xFFFF, 0x0053, 0x0073, 0x1E67, 0x0073, 0x0073, 0xFFFF };
unsigned short unac_data227[] = { 0x0053, 0x0073, 0x1E69, 0x0073, 0x0073, 0xFFFF, 0x0054, 0x0074, 0x1E6B, 0x0074, 0x0074, 0xFFFF, 0x0054, 0x0074, 0x1E6D, 0x0074, 0x0074, 0xFFFF, 0x0054, 0x0074, 0x1E6F, 0x0074, 0x0074, 0xFFFF };
unsigned short unac_data228[] = { 0x0054, 0x0074, 0x1E71, 0x0074, 0x0074, 0xFFFF, 0x0055, 0x0075, 0x1E73, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1E75, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1E77, 0x0075, 0x0075, 0xFFFF };
unsigned short unac_data229[] = { 0x0055, 0x0075, 0x1E79, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1E7B, 0x0075, 0x0075, 0xFFFF, 0x0056, 0x0076, 0x1E7D, 0x0076, 0x0076, 0xFFFF, 0x0056, 0x0076, 0x1E7F, 0x0076, 0x0076, 0xFFFF };
unsigned short unac_data230[] = { 0x0057, 0x0077, 0x1E81, 0x0077, 0x0077, 0xFFFF, 0x0057, 0x0077, 0x1E83, 0x0077, 0x0077, 0xFFFF, 0x0057, 0x0077, 0x1E85, 0x0077, 0x0077, 0xFFFF, 0x0057, 0x0077, 0x1E87, 0x0077, 0x0077, 0xFFFF };
unsigned short unac_data231[] = { 0x0057, 0x0077, 0x1E89, 0x0077, 0x0077, 0xFFFF, 0x0058, 0x0078, 0x1E8B, 0x0078, 0x0078, 0xFFFF, 0x0058, 0x0078, 0x1E8D, 0x0078, 0x0078, 0xFFFF, 0x0059, 0x0079, 0x1E8F, 0x0079, 0x0079, 0xFFFF };
unsigned short unac_data232[] = { 0x005A, 0x007A, 0x1E91, 0x007A, 0x007A, 0xFFFF, 0x005A, 0x007A, 0x1E93, 0x007A, 0x007A, 0xFFFF, 0x005A, 0x007A, 0x1E95, 0x007A, 0x007A, 0xFFFF, 0x0068, 0x0068, 0x0068, 0x0331, 0x0074, 0x0074, 0x0074, 0x0308 };
unsigned short unac_data233[] = { 0x0077, 0x0077, 0x0077, 0x030A, 0x0079, 0x0079, 0x0079, 0x030A, 0x0061, 0x02BE, 0x0061, 0x02BE, 0x0061, 0x02BE, 0x0073, 0x0073, 0x1E61, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0073, 0x0073, 0x0073, 0x0073, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data234[] = { 0x0041, 0x0061, 0x1EA1, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EA3, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EA5, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EA7, 0x0061, 0x0061, 0xFFFF };
unsigned short unac_data235[] = { 0x0041, 0x0061, 0x1EA9, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EAB, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EAD, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EAF, 0x0061, 0x0061, 0xFFFF };
unsigned short unac_data236[] = { 0x0041, 0x0061, 0x1EB1, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EB3, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EB5, 0x0061, 0x0061, 0xFFFF, 0x0041, 0x0061, 0x1EB7, 0x0061, 0x0061, 0xFFFF };
unsigned short unac_data237[] = { 0x0045, 0x0065, 0x1EB9, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EBB, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EBD, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EBF, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data238[] = { 0x0045, 0x0065, 0x1EC1, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EC3, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EC5, 0x0065, 0x0065, 0xFFFF, 0x0045, 0x0065, 0x1EC7, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data239[] = { 0x0049, 0x0069, 0x1EC9, 0x0069, 0x0069, 0xFFFF, 0x0049, 0x0069, 0x1ECB, 0x0069, 0x0069, 0xFFFF, 0x004F, 0x006F, 0x1ECD, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1ECF, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data240[] = { 0x004F, 0x006F, 0x1ED1, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1ED3, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1ED5, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1ED7, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data241[] = { 0x004F, 0x006F, 0x1ED9, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1EDB, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1EDD, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1EDF, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data242[] = { 0x004F, 0x006F, 0x1EE1, 0x006F, 0x006F, 0xFFFF, 0x004F, 0x006F, 0x1EE3, 0x006F, 0x006F, 0xFFFF, 0x0055, 0x0075, 0x1EE5, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1EE7, 0x0075, 0x0075, 0xFFFF };
unsigned short unac_data243[] = { 0x0055, 0x0075, 0x1EE9, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1EEB, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1EED, 0x0075, 0x0075, 0xFFFF, 0x0055, 0x0075, 0x1EEF, 0x0075, 0x0075, 0xFFFF };
unsigned short unac_data244[] = { 0x0055, 0x0075, 0x1EF1, 0x0075, 0x0075, 0xFFFF, 0x0059, 0x0079, 0x1EF3, 0x0079, 0x0079, 0xFFFF, 0x0059, 0x0079, 0x1EF5, 0x0079, 0x0079, 0xFFFF, 0x0059, 0x0079, 0x1EF7, 0x0079, 0x0079, 0xFFFF };
unsigned short unac_data245[] = { 0x0059, 0x0079, 0x1EF9, 0x0079, 0x0079, 0xFFFF, 0xFFFF, 0x1EFB, 0x1EFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1EFD, 0x1EFD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1EFF, 0x1EFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data246[] = { 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF };
unsigned short unac_data247[] = { 0x0391, 0x03B1, 0x1F00, 0x0391, 0x03B1, 0x1F01, 0x0391, 0x03B1, 0x1F02, 0x0391, 0x03B1, 0x1F03, 0x0391, 0x03B1, 0x1F04, 0x0391, 0x03B1, 0x1F05, 0x0391, 0x03B1, 0x1F06, 0x0391, 0x03B1, 0x1F07 };
unsigned short unac_data248[] = { 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data249[] = { 0x0395, 0x03B5, 0x1F10, 0x0395, 0x03B5, 0x1F11, 0x0395, 0x03B5, 0x1F12, 0x0395, 0x03B5, 0x1F13, 0x0395, 0x03B5, 0x1F14, 0x0395, 0x03B5, 0x1F15, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data250[] = { 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF };
unsigned short unac_data251[] = { 0x0397, 0x03B7, 0x1F20, 0x0397, 0x03B7, 0x1F21, 0x0397, 0x03B7, 0x1F22, 0x0397, 0x03B7, 0x1F23, 0x0397, 0x03B7, 0x1F24, 0x0397, 0x03B7, 0x1F25, 0x0397, 0x03B7, 0x1F26, 0x0397, 0x03B7, 0x1F27 };
unsigned short unac_data252[] = { 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF };
unsigned short unac_data253[] = { 0x0399, 0x03B9, 0x1F30, 0x0399, 0x03B9, 0x1F31, 0x0399, 0x03B9, 0x1F32, 0x0399, 0x03B9, 0x1F33, 0x0399, 0x03B9, 0x1F34, 0x0399, 0x03B9, 0x1F35, 0x0399, 0x03B9, 0x1F36, 0x0399, 0x03B9, 0x1F37 };
unsigned short unac_data254[] = { 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data255[] = { 0x039F, 0x03BF, 0x1F40, 0x039F, 0x03BF, 0x1F41, 0x039F, 0x03BF, 0x1F42, 0x039F, 0x03BF, 0x1F43, 0x039F, 0x03BF, 0x1F44, 0x039F, 0x03BF, 0x1F45, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data256[] = { 0x03C5, 0x03C5, 0x03C5, 0x0313, 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0x03C5, 0x0313, 0x0300, 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0x03C5, 0x0313, 0x0301, 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0x03C5, 0x0313, 0x0342, 0x03C5, 0x03C5, 0xFFFF };
unsigned short unac_data257[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x03A5, 0x03C5, 0x1F51, 0xFFFF, 0xFFFF, 0xFFFF, 0x03A5, 0x03C5, 0x1F53, 0xFFFF, 0xFFFF, 0xFFFF, 0x03A5, 0x03C5, 0x1F55, 0xFFFF, 0xFFFF, 0xFFFF, 0x03A5, 0x03C5, 0x1F57 };
unsigned short unac_data258[] = { 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF };
unsigned short unac_data259[] = { 0x03A9, 0x03C9, 0x1F60, 0x03A9, 0x03C9, 0x1F61, 0x03A9, 0x03C9, 0x1F62, 0x03A9, 0x03C9, 0x1F63, 0x03A9, 0x03C9, 0x1F64, 0x03A9, 0x03C9, 0x1F65, 0x03A9, 0x03C9, 0x1F66, 0x03A9, 0x03C9, 0x1F67 };
unsigned short unac_data260[] = { 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B5, 0x03B5, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B7, 0x03B7, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF };
unsigned short unac_data261[] = { 0x03BF, 0x03BF, 0xFFFF, 0x03BF, 0x03BF, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0x03C9, 0x03C9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data262[] = { 0x03B1, 0x03B1, 0x1F00, 0x03B9, 0x03B1, 0x03B1, 0x1F01, 0x03B9, 0x03B1, 0x03B1, 0x1F02, 0x03B9, 0x03B1, 0x03B1, 0x1F03, 0x03B9, 0x03B1, 0x03B1, 0x1F04, 0x03B9, 0x03B1, 0x03B1, 0x1F05, 0x03B9, 0x03B1, 0x03B1, 0x1F06, 0x03B9, 0x03B1, 0x03B1, 0x1F07, 0x03B9 };
unsigned short unac_data263[] = { 0x0391, 0x03B1, 0x1F00, 0x03B9, 0x0391, 0x03B1, 0x1F01, 0x03B9, 0x0391, 0x03B1, 0x1F02, 0x03B9, 0x0391, 0x03B1, 0x1F03, 0x03B9, 0x0391, 0x03B1, 0x1F04, 0x03B9, 0x0391, 0x03B1, 0x1F05, 0x03B9, 0x0391, 0x03B1, 0x1F06, 0x03B9, 0x0391, 0x03B1, 0x1F07, 0x03B9 };
unsigned short unac_data264[] = { 0x03B7, 0x03B7, 0x1F20, 0x03B9, 0x03B7, 0x03B7, 0x1F21, 0x03B9, 0x03B7, 0x03B7, 0x1F22, 0x03B9, 0x03B7, 0x03B7, 0x1F23, 0x03B9, 0x03B7, 0x03B7, 0x1F24, 0x03B9, 0x03B7, 0x03B7, 0x1F25, 0x03B9, 0x03B7, 0x03B7, 0x1F26, 0x03B9, 0x03B7, 0x03B7, 0x1F27, 0x03B9 };
unsigned short unac_data265[] = { 0x0397, 0x03B7, 0x1F20, 0x03B9, 0x0397, 0x03B7, 0x1F21, 0x03B9, 0x0397, 0x03B7, 0x1F22, 0x03B9, 0x0397, 0x03B7, 0x1F23, 0x03B9, 0x0397, 0x03B7, 0x1F24, 0x03B9, 0x0397, 0x03B7, 0x1F25, 0x03B9, 0x0397, 0x03B7, 0x1F26, 0x03B9, 0x0397, 0x03B7, 0x1F27, 0x03B9 };
unsigned short unac_data266[] = { 0x03C9, 0x03C9, 0x1F60, 0x03B9, 0x03C9, 0x03C9, 0x1F61, 0x03B9, 0x03C9, 0x03C9, 0x1F62, 0x03B9, 0x03C9, 0x03C9, 0x1F63, 0x03B9, 0x03C9, 0x03C9, 0x1F64, 0x03B9, 0x03C9, 0x03C9, 0x1F65, 0x03B9, 0x03C9, 0x03C9, 0x1F66, 0x03B9, 0x03C9, 0x03C9, 0x1F67, 0x03B9 };
unsigned short unac_data267[] = { 0x03A9, 0x03C9, 0x1F60, 0x03B9, 0x03A9, 0x03C9, 0x1F61, 0x03B9, 0x03A9, 0x03C9, 0x1F62, 0x03B9, 0x03A9, 0x03C9, 0x1F63, 0x03B9, 0x03A9, 0x03C9, 0x1F64, 0x03B9, 0x03A9, 0x03C9, 0x1F65, 0x03B9, 0x03A9, 0x03C9, 0x1F66, 0x03B9, 0x03A9, 0x03C9, 0x1F67, 0x03B9 };
unsigned short unac_data268[] = { 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0xFFFF, 0x03B1, 0x03B1, 0x1F70, 0x03B9, 0x03B1, 0x03B1, 0x03B1, 0x03B9, 0x03B1, 0x03B1, 0x03AC, 0x03B9, 0xFFFF, 0xFFFF, 0xFFFF, 0x03B1, 0x03B1, 0x03B1, 0x0342, 0x03B1, 0x03B1, 0x03B1, 0x0342, 0x03B9 };
unsigned short unac_data269[] = { 0x0391, 0x03B1, 0x1FB0, 0x0391, 0x03B1, 0x1FB1, 0x0391, 0x03B1, 0x1F70, 0x0391, 0x03B1, 0x1F71, 0x0391, 0x03B1, 0x03B1, 0x03B9, 0x0020, 0x0020, 0xFFFF, 0x03B9, 0x03B9, 0x03B9, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data270[] = { 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x03B7, 0x03B7, 0x1F74, 0x03B9, 0x03B7, 0x03B7, 0x03B7, 0x03B9, 0x03B7, 0x03B7, 0x03AE, 0x03B9, 0xFFFF, 0xFFFF, 0xFFFF, 0x03B7, 0x03B7, 0x03B7, 0x0342, 0x03B7, 0x03B7, 0x03B7, 0x0342, 0x03B9 };
unsigned short unac_data271[] = { 0x0395, 0x03B5, 0x1F72, 0x0395, 0x03B5, 0x1F73, 0x0397, 0x03B7, 0x1F74, 0x0397, 0x03B7, 0x1F75, 0x0397, 0x03B7, 0x03B7, 0x03B9, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data272[] = { 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0xFFFF, 0x03B9, 0x03B9, 0x03B9, 0x0308, 0x0300, 0x03B9, 0x03B9, 0x03B9, 0x0308, 0x0301, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03B9, 0x03B9, 0x03B9, 0x0342, 0x03B9, 0x03B9, 0x03B9, 0x0308, 0x0342 };
unsigned short unac_data273[] = { 0x0399, 0x03B9, 0x1FD0, 0x0399, 0x03B9, 0x1FD1, 0x0399, 0x03B9, 0x1F76, 0x0399, 0x03B9, 0x1F77, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data274[] = { 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0xFFFF, 0x03C5, 0x03C5, 0x03C5, 0x0308, 0x0300, 0x03C5, 0x03C5, 0x03C5, 0x0308, 0x0301, 0x03C1, 0x03C1, 0x03C1, 0x0313, 0x03C1, 0x03C1, 0xFFFF, 0x03C5, 0x03C5, 0x03C5, 0x0342, 0x03C5, 0x03C5, 0x03C5, 0x0308, 0x0342 };
unsigned short unac_data275[] = { 0x03A5, 0x03C5, 0x1FE0, 0x03A5, 0x03C5, 0x1FE1, 0x03A5, 0x03C5, 0x1F7A, 0x03A5, 0x03C5, 0x1F7B, 0x03A1, 0x03C1, 0x1FE5, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0060, 0x0060, 0xFFFF };
unsigned short unac_data276[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03C9, 0x03C9, 0x1F7C, 0x03B9, 0x03C9, 0x03C9, 0x03C9, 0x03B9, 0x03C9, 0x03C9, 0x03CE, 0x03B9, 0xFFFF, 0xFFFF, 0xFFFF, 0x03C9, 0x03C9, 0x03C9, 0x0342, 0x03C9, 0x03C9, 0x03C9, 0x0342, 0x03B9 };
unsigned short unac_data277[] = { 0x039F, 0x03BF, 0x1F78, 0x039F, 0x03BF, 0x1F79, 0x03A9, 0x03C9, 0x1F7C, 0x03A9, 0x03C9, 0x1F7D, 0x03A9, 0x03C9, 0x03C9, 0x03B9, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data278[] = { 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data279[] = { 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data280[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x2010, 0x2010, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data281[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x002E, 0x002E, 0xFFFF, 0x002E, 0x002E, 0x002E, 0x002E, 0xFFFF, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data282[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data283[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2032, 0x2032, 0x2032, 0x2032, 0xFFFF, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2035, 0x2035, 0x2035, 0x2035, 0xFFFF, 0x2035, 0x2035, 0x2035, 0x2035, 0x2035, 0x2035, 0xFFFF };
unsigned short unac_data284[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0021, 0x0021, 0x0021, 0x0021, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data285[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x003F, 0x003F, 0x003F, 0x003F, 0xFFFF };
unsigned short unac_data286[] = { 0x003F, 0x0021, 0x003F, 0x0021, 0xFFFF, 0x0021, 0x003F, 0x0021, 0x003F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data287[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0x2032, 0xFFFF };
unsigned short unac_data288[] = { 0x0030, 0x0030, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0034, 0x0034, 0xFFFF, 0x0035, 0x0035, 0xFFFF, 0x0036, 0x0036, 0xFFFF, 0x0037, 0x0037, 0xFFFF };
unsigned short unac_data289[] = { 0x0038, 0x0038, 0xFFFF, 0x0039, 0x0039, 0xFFFF, 0x002B, 0x002B, 0xFFFF, 0x2212, 0x2212, 0xFFFF, 0x003D, 0x003D, 0xFFFF, 0x0028, 0x0028, 0xFFFF, 0x0029, 0x0029, 0xFFFF, 0x006E, 0x006E, 0xFFFF };
unsigned short unac_data290[] = { 0x0030, 0x0030, 0xFFFF, 0x0031, 0x0031, 0xFFFF, 0x0032, 0x0032, 0xFFFF, 0x0033, 0x0033, 0xFFFF, 0x0034, 0x0034, 0xFFFF, 0x0035, 0x0035, 0xFFFF, 0x0036, 0x0036, 0xFFFF, 0x0037, 0x0037, 0xFFFF };
unsigned short unac_data291[] = { 0x0038, 0x0038, 0xFFFF, 0x0039, 0x0039, 0xFFFF, 0x002B, 0x002B, 0xFFFF, 0x2212, 0x2212, 0xFFFF, 0x003D, 0x003D, 0xFFFF, 0x0028, 0x0028, 0xFFFF, 0x0029, 0x0029, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data292[] = { 0x0061, 0x0061, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x0078, 0x0078, 0xFFFF, 0x0259, 0x0259, 0xFFFF, 0x0068, 0x0068, 0xFFFF, 0x006B, 0x006B, 0xFFFF, 0x006C, 0x006C, 0xFFFF };
unsigned short unac_data293[] = { 0x006D, 0x006D, 0xFFFF, 0x006E, 0x006E, 0xFFFF, 0x0070, 0x0070, 0xFFFF, 0x0073, 0x0073, 0xFFFF, 0x0074, 0x0074, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data294[] = { 0x0052, 0x0073, 0x0072, 0x0073, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data295[] = { 0x0061, 0x002F, 0x0063, 0x0061, 0x002F, 0x0063, 0xFFFF, 0x0061, 0x002F, 0x0073, 0x0061, 0x002F, 0x0073, 0xFFFF, 0x0043, 0x0063, 0xFFFF, 0x00B0, 0x0043, 0x00B0, 0x0063, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0063, 0x002F, 0x006F, 0x0063, 0x002F, 0x006F, 0xFFFF, 0x0063, 0x002F, 0x0075, 0x0063, 0x002F, 0x0075, 0xFFFF, 0x0190, 0x025B, 0xFFFF };
unsigned short unac_data296[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x00B0, 0x0046, 0x00B0, 0x0066, 0xFFFF, 0x0067, 0x0067, 0xFFFF, 0x0048, 0x0068, 0xFFFF, 0x0048, 0x0068, 0xFFFF, 0x0048, 0x0068, 0xFFFF, 0x0068, 0x0068, 0xFFFF, 0x0127, 0x0127, 0xFFFF };
unsigned short unac_data297[] = { 0x0049, 0x0069, 0xFFFF, 0x0049, 0x0069, 0xFFFF, 0x004C, 0x006C, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x004E, 0x006E, 0xFFFF, 0x004E, 0x006F, 0x006E, 0x006F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data298[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0050, 0x0070, 0xFFFF, 0x0051, 0x0071, 0xFFFF, 0x0052, 0x0072, 0xFFFF, 0x0052, 0x0072, 0xFFFF, 0x0052, 0x0072, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data299[] = { 0x0053, 0x004D, 0x0073, 0x006D, 0xFFFF, 0x0054, 0x0045, 0x004C, 0x0074, 0x0065, 0x006C, 0xFFFF, 0x0054, 0x004D, 0x0074, 0x006D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x005A, 0x007A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x03A9, 0x03C9, 0x03C9, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data300[] = { 0x005A, 0x007A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x004B, 0x006B, 0x006B, 0x0041, 0x0061, 0x00E5, 0x0042, 0x0062, 0xFFFF, 0x0043, 0x0063, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data301[] = { 0x0045, 0x0065, 0xFFFF, 0x0046, 0x0066, 0xFFFF, 0xFFFF, 0x214E, 0x214E, 0x004D, 0x006D, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x05D0, 0x05D0, 0xFFFF, 0x05D1, 0x05D1, 0xFFFF, 0x05D2, 0x05D2, 0xFFFF };
unsigned short unac_data302[] = { 0x05D3, 0x05D3, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0046, 0x0041, 0x0058, 0x0066, 0x0061, 0x0078, 0xFFFF, 0x03C0, 0x03C0, 0xFFFF, 0x03B3, 0x03B3, 0xFFFF, 0x0393, 0x03B3, 0xFFFF, 0x03A0, 0x03C0, 0xFFFF };
unsigned short unac_data303[] = { 0x2211, 0x2211, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0044, 0x0064, 0xFFFF, 0x0064, 0x0064, 0xFFFF, 0x0065, 0x0065, 0xFFFF };
unsigned short unac_data304[] = { 0x0069, 0x0069, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data305[] = { 0x0031, 0x2044, 0x0037, 0x0031, 0x2044, 0x0037, 0xFFFF, 0x0031, 0x2044, 0x0039, 0x0031, 0x2044, 0x0039, 0xFFFF, 0x0031, 0x2044, 0x0031, 0x0030, 0x0031, 0x2044, 0x0031, 0x0030, 0xFFFF, 0x0031, 0x2044, 0x0033, 0x0031, 0x2044, 0x0033, 0xFFFF, 0x0032, 0x2044, 0x0033, 0x0032, 0x2044, 0x0033, 0xFFFF, 0x0031, 0x2044, 0x0035, 0x0031, 0x2044, 0x0035, 0xFFFF, 0x0032, 0x2044, 0x0035, 0x0032, 0x2044, 0x0035, 0xFFFF, 0x0033, 0x2044, 0x0035, 0x0033, 0x2044, 0x0035, 0xFFFF };
unsigned short unac_data306[] = { 0x0034, 0x2044, 0x0035, 0x0034, 0x2044, 0x0035, 0xFFFF, 0x0031, 0x2044, 0x0036, 0x0031, 0x2044, 0x0036, 0xFFFF, 0x0035, 0x2044, 0x0036, 0x0035, 0x2044, 0x0036, 0xFFFF, 0x0031, 0x2044, 0x0038, 0x0031, 0x2044, 0x0038, 0xFFFF, 0x0033, 0x2044, 0x0038, 0x0033, 0x2044, 0x0038, 0xFFFF, 0x0035, 0x2044, 0x0038, 0x0035, 0x2044, 0x0038, 0xFFFF, 0x0037, 0x2044, 0x0038, 0x0037, 0x2044, 0x0038, 0xFFFF, 0x0031, 0x2044, 0x0031, 0x2044, 0xFFFF };
unsigned short unac_data307[] = { 0x0049, 0x0069, 0x2170, 0x0049, 0x0049, 0x0069, 0x0069, 0x2171, 0x0049, 0x0049, 0x0049, 0x0069, 0x0069, 0x0069, 0x2172, 0x0049, 0x0056, 0x0069, 0x0076, 0x2173, 0x0056, 0x0076, 0x2174, 0x0056, 0x0049, 0x0076, 0x0069, 0x2175, 0x0056, 0x0049, 0x0049, 0x0076, 0x0069, 0x0069, 0x2176, 0x0056, 0x0049, 0x0049, 0x0049, 0x0076, 0x0069, 0x0069, 0x0069, 0x2177 };
unsigned short unac_data308[] = { 0x0049, 0x0058, 0x0069, 0x0078, 0x2178, 0x0058, 0x0078, 0x2179, 0x0058, 0x0049, 0x0078, 0x0069, 0x217A, 0x0058, 0x0049, 0x0049, 0x0078, 0x0069, 0x0069, 0x217B, 0x004C, 0x006C, 0x217C, 0x0043, 0x0063, 0x217D, 0x0044, 0x0064, 0x217E, 0x004D, 0x006D, 0x217F };
unsigned short unac_data309[] = { 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0069, 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0069, 0x0069, 0x0069, 0x0069, 0x0069, 0xFFFF, 0x0069, 0x0076, 0x0069, 0x0076, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x0076, 0x0069, 0x0076, 0x0069, 0xFFFF, 0x0076, 0x0069, 0x0069, 0x0076, 0x0069, 0x0069, 0xFFFF, 0x0076, 0x0069, 0x0069, 0x0069, 0x0076, 0x0069, 0x0069, 0x0069, 0xFFFF };
unsigned short unac_data310[] = { 0x0069, 0x0078, 0x0069, 0x0078, 0xFFFF, 0x0078, 0x0078, 0xFFFF, 0x0078, 0x0069, 0x0078, 0x0069, 0xFFFF, 0x0078, 0x0069, 0x0069, 0x0078, 0x0069, 0x0069, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0x0063, 0x0063, 0xFFFF, 0x0064, 0x0064, 0xFFFF, 0x006D, 0x006D, 0xFFFF };
unsigned short unac_data311[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2184, 0x2184, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data312[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0030, 0x2044, 0x0033, 0x0030, 0x2044, 0x0033, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data313[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2190, 0x2190, 0xFFFF, 0x2192, 0x2192, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data314[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2194, 0x2194, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data315[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x21D0, 0x21D0, 0xFFFF, 0x21D4, 0x21D4, 0xFFFF, 0x21D2, 0x21D2, 0xFFFF };
unsigned short unac_data316[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2203, 0x2203, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data317[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x2208, 0x2208, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x220B, 0x220B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data318[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2223, 0x2223, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2225, 0x2225, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data319[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x222B, 0x222B, 0x222B, 0x222B, 0xFFFF, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x222E, 0x222E, 0x222E, 0x222E, 0xFFFF };
unsigned short unac_data320[] = { 0x222E, 0x222E, 0x222E, 0x222E, 0x222E, 0x222E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data321[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x223C, 0x223C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2243, 0x2243, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2245, 0x2245, 0xFFFF };
unsigned short unac_data322[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x2248, 0x2248, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data323[] = { 0x003D, 0x003D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2261, 0x2261, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data324[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x224D, 0x224D, 0xFFFF, 0x003C, 0x003C, 0xFFFF, 0x003E, 0x003E, 0xFFFF };
unsigned short unac_data325[] = { 0x2264, 0x2264, 0xFFFF, 0x2265, 0x2265, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2272, 0x2272, 0xFFFF, 0x2273, 0x2273, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data326[] = { 0x2276, 0x2276, 0xFFFF, 0x2277, 0x2277, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data327[] = { 0x227A, 0x227A, 0xFFFF, 0x227B, 0x227B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2282, 0x2282, 0xFFFF, 0x2283, 0x2283, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data328[] = { 0x2286, 0x2286, 0xFFFF, 0x2287, 0x2287, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data329[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x22A2, 0x22A2, 0xFFFF, 0x22A8, 0x22A8, 0xFFFF, 0x22A9, 0x22A9, 0xFFFF, 0x22AB, 0x22AB, 0xFFFF };
unsigned short unac_data330[] = { 0x227C, 0x227C, 0xFFFF, 0x227D, 0x227D, 0xFFFF, 0x2291, 0x2291, 0xFFFF, 0x2292, 0x2292, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data331[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x22B2, 0x22B2, 0xFFFF, 0x22B3, 0x22B3, 0xFFFF, 0x22B4, 0x22B4, 0xFFFF, 0x22B5, 0x22B5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data332[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x3008, 0x3008, 0xFFFF, 0x3009, 0x3009, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data333[] = { 0x0031, 0x0031, 0xFFFF, 0x0032, 0x0032, 0xFFFF, 0x0033, 0x0033, 0xFFFF, 0x0034, 0x0034, 0xFFFF, 0x0035, 0x0035, 0xFFFF, 0x0036, 0x0036, 0xFFFF, 0x0037, 0x0037, 0xFFFF, 0x0038, 0x0038, 0xFFFF };
unsigned short unac_data334[] = { 0x0039, 0x0039, 0xFFFF, 0x0031, 0x0030, 0x0031, 0x0030, 0xFFFF, 0x0031, 0x0031, 0x0031, 0x0031, 0xFFFF, 0x0031, 0x0032, 0x0031, 0x0032, 0xFFFF, 0x0031, 0x0033, 0x0031, 0x0033, 0xFFFF, 0x0031, 0x0034, 0x0031, 0x0034, 0xFFFF, 0x0031, 0x0035, 0x0031, 0x0035, 0xFFFF, 0x0031, 0x0036, 0x0031, 0x0036, 0xFFFF };
unsigned short unac_data335[] = { 0x0031, 0x0037, 0x0031, 0x0037, 0xFFFF, 0x0031, 0x0038, 0x0031, 0x0038, 0xFFFF, 0x0031, 0x0039, 0x0031, 0x0039, 0xFFFF, 0x0032, 0x0030, 0x0032, 0x0030, 0xFFFF, 0x0028, 0x0031, 0x0029, 0x0028, 0x0031, 0x0029, 0xFFFF, 0x0028, 0x0032, 0x0029, 0x0028, 0x0032, 0x0029, 0xFFFF, 0x0028, 0x0033, 0x0029, 0x0028, 0x0033, 0x0029, 0xFFFF, 0x0028, 0x0034, 0x0029, 0x0028, 0x0034, 0x0029, 0xFFFF };
unsigned short unac_data336[] = { 0x0028, 0x0035, 0x0029, 0x0028, 0x0035, 0x0029, 0xFFFF, 0x0028, 0x0036, 0x0029, 0x0028, 0x0036, 0x0029, 0xFFFF, 0x0028, 0x0037, 0x0029, 0x0028, 0x0037, 0x0029, 0xFFFF, 0x0028, 0x0038, 0x0029, 0x0028, 0x0038, 0x0029, 0xFFFF, 0x0028, 0x0039, 0x0029, 0x0028, 0x0039, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0030, 0x0029, 0x0028, 0x0031, 0x0030, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0031, 0x0029, 0x0028, 0x0031, 0x0031, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0032, 0x0029, 0x0028, 0x0031, 0x0032, 0x0029, 0xFFFF };
unsigned short unac_data337[] = { 0x0028, 0x0031, 0x0033, 0x0029, 0x0028, 0x0031, 0x0033, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0034, 0x0029, 0x0028, 0x0031, 0x0034, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0035, 0x0029, 0x0028, 0x0031, 0x0035, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0036, 0x0029, 0x0028, 0x0031, 0x0036, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0037, 0x0029, 0x0028, 0x0031, 0x0037, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0038, 0x0029, 0x0028, 0x0031, 0x0038, 0x0029, 0xFFFF, 0x0028, 0x0031, 0x0039, 0x0029, 0x0028, 0x0031, 0x0039, 0x0029, 0xFFFF, 0x0028, 0x0032, 0x0030, 0x0029, 0x0028, 0x0032, 0x0030, 0x0029, 0xFFFF };
unsigned short unac_data338[] = { 0x0031, 0x002E, 0x0031, 0x002E, 0xFFFF, 0x0032, 0x002E, 0x0032, 0x002E, 0xFFFF, 0x0033, 0x002E, 0x0033, 0x002E, 0xFFFF, 0x0034, 0x002E, 0x0034, 0x002E, 0xFFFF, 0x0035, 0x002E, 0x0035, 0x002E, 0xFFFF, 0x0036, 0x002E, 0x0036, 0x002E, 0xFFFF, 0x0037, 0x002E, 0x0037, 0x002E, 0xFFFF, 0x0038, 0x002E, 0x0038, 0x002E, 0xFFFF };
unsigned short unac_data339[] = { 0x0039, 0x002E, 0x0039, 0x002E, 0xFFFF, 0x0031, 0x0030, 0x002E, 0x0031, 0x0030, 0x002E, 0xFFFF, 0x0031, 0x0031, 0x002E, 0x0031, 0x0031, 0x002E, 0xFFFF, 0x0031, 0x0032, 0x002E, 0x0031, 0x0032, 0x002E, 0xFFFF, 0x0031, 0x0033, 0x002E, 0x0031, 0x0033, 0x002E, 0xFFFF, 0x0031, 0x0034, 0x002E, 0x0031, 0x0034, 0x002E, 0xFFFF, 0x0031, 0x0035, 0x002E, 0x0031, 0x0035, 0x002E, 0xFFFF, 0x0031, 0x0036, 0x002E, 0x0031, 0x0036, 0x002E, 0xFFFF };
unsigned short unac_data340[] = { 0x0031, 0x0037, 0x002E, 0x0031, 0x0037, 0x002E, 0xFFFF, 0x0031, 0x0038, 0x002E, 0x0031, 0x0038, 0x002E, 0xFFFF, 0x0031, 0x0039, 0x002E, 0x0031, 0x0039, 0x002E, 0xFFFF, 0x0032, 0x0030, 0x002E, 0x0032, 0x0030, 0x002E, 0xFFFF, 0x0028, 0x0061, 0x0029, 0x0028, 0x0061, 0x0029, 0xFFFF, 0x0028, 0x0062, 0x0029, 0x0028, 0x0062, 0x0029, 0xFFFF, 0x0028, 0x0063, 0x0029, 0x0028, 0x0063, 0x0029, 0xFFFF, 0x0028, 0x0064, 0x0029, 0x0028, 0x0064, 0x0029, 0xFFFF };
unsigned short unac_data341[] = { 0x0028, 0x0065, 0x0029, 0x0028, 0x0065, 0x0029, 0xFFFF, 0x0028, 0x0066, 0x0029, 0x0028, 0x0066, 0x0029, 0xFFFF, 0x0028, 0x0067, 0x0029, 0x0028, 0x0067, 0x0029, 0xFFFF, 0x0028, 0x0068, 0x0029, 0x0028, 0x0068, 0x0029, 0xFFFF, 0x0028, 0x0069, 0x0029, 0x0028, 0x0069, 0x0029, 0xFFFF, 0x0028, 0x006A, 0x0029, 0x0028, 0x006A, 0x0029, 0xFFFF, 0x0028, 0x006B, 0x0029, 0x0028, 0x006B, 0x0029, 0xFFFF, 0x0028, 0x006C, 0x0029, 0x0028, 0x006C, 0x0029, 0xFFFF };
unsigned short unac_data342[] = { 0x0028, 0x006D, 0x0029, 0x0028, 0x006D, 0x0029, 0xFFFF, 0x0028, 0x006E, 0x0029, 0x0028, 0x006E, 0x0029, 0xFFFF, 0x0028, 0x006F, 0x0029, 0x0028, 0x006F, 0x0029, 0xFFFF, 0x0028, 0x0070, 0x0029, 0x0028, 0x0070, 0x0029, 0xFFFF, 0x0028, 0x0071, 0x0029, 0x0028, 0x0071, 0x0029, 0xFFFF, 0x0028, 0x0072, 0x0029, 0x0028, 0x0072, 0x0029, 0xFFFF, 0x0028, 0x0073, 0x0029, 0x0028, 0x0073, 0x0029, 0xFFFF, 0x0028, 0x0074, 0x0029, 0x0028, 0x0074, 0x0029, 0xFFFF };
unsigned short unac_data343[] = { 0x0028, 0x0075, 0x0029, 0x0028, 0x0075, 0x0029, 0xFFFF, 0x0028, 0x0076, 0x0029, 0x0028, 0x0076, 0x0029, 0xFFFF, 0x0028, 0x0077, 0x0029, 0x0028, 0x0077, 0x0029, 0xFFFF, 0x0028, 0x0078, 0x0029, 0x0028, 0x0078, 0x0029, 0xFFFF, 0x0028, 0x0079, 0x0029, 0x0028, 0x0079, 0x0029, 0xFFFF, 0x0028, 0x007A, 0x0029, 0x0028, 0x007A, 0x0029, 0xFFFF, 0x0041, 0x0061, 0x24D0, 0x0042, 0x0062, 0x24D1 };
unsigned short unac_data344[] = { 0x0043, 0x0063, 0x24D2, 0x0044, 0x0064, 0x24D3, 0x0045, 0x0065, 0x24D4, 0x0046, 0x0066, 0x24D5, 0x0047, 0x0067, 0x24D6, 0x0048, 0x0068, 0x24D7, 0x0049, 0x0069, 0x24D8, 0x004A, 0x006A, 0x24D9 };
unsigned short unac_data345[] = { 0x004B, 0x006B, 0x24DA, 0x004C, 0x006C, 0x24DB, 0x004D, 0x006D, 0x24DC, 0x004E, 0x006E, 0x24DD, 0x004F, 0x006F, 0x24DE, 0x0050, 0x0070, 0x24DF, 0x0051, 0x0071, 0x24E0, 0x0052, 0x0072, 0x24E1 };
unsigned short unac_data346[] = { 0x0053, 0x0073, 0x24E2, 0x0054, 0x0074, 0x24E3, 0x0055, 0x0075, 0x24E4, 0x0056, 0x0076, 0x24E5, 0x0057, 0x0077, 0x24E6, 0x0058, 0x0078, 0x24E7, 0x0059, 0x0079, 0x24E8, 0x005A, 0x007A, 0x24E9 };
unsigned short unac_data347[] = { 0x0061, 0x0061, 0xFFFF, 0x0062, 0x0062, 0xFFFF, 0x0063, 0x0063, 0xFFFF, 0x0064, 0x0064, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0066, 0x0066, 0xFFFF, 0x0067, 0x0067, 0xFFFF, 0x0068, 0x0068, 0xFFFF };
unsigned short unac_data348[] = { 0x0069, 0x0069, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0x006B, 0x006B, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0x006D, 0x006D, 0xFFFF, 0x006E, 0x006E, 0xFFFF, 0x006F, 0x006F, 0xFFFF, 0x0070, 0x0070, 0xFFFF };
unsigned short unac_data349[] = { 0x0071, 0x0071, 0xFFFF, 0x0072, 0x0072, 0xFFFF, 0x0073, 0x0073, 0xFFFF, 0x0074, 0x0074, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x0077, 0x0077, 0xFFFF, 0x0078, 0x0078, 0xFFFF };
unsigned short unac_data350[] = { 0x0079, 0x0079, 0xFFFF, 0x007A, 0x007A, 0xFFFF, 0x0030, 0x0030, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data351[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0x222B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data352[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x003A, 0x003A, 0x003D, 0x003A, 0x003A, 0x003D, 0xFFFF, 0x003D, 0x003D, 0x003D, 0x003D, 0xFFFF, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0x003D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data353[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2ADD, 0x2ADD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data354[] = { 0xFFFF, 0x2C30, 0x2C30, 0xFFFF, 0x2C31, 0x2C31, 0xFFFF, 0x2C32, 0x2C32, 0xFFFF, 0x2C33, 0x2C33, 0xFFFF, 0x2C34, 0x2C34, 0xFFFF, 0x2C35, 0x2C35, 0xFFFF, 0x2C36, 0x2C36, 0xFFFF, 0x2C37, 0x2C37 };
unsigned short unac_data355[] = { 0xFFFF, 0x2C38, 0x2C38, 0xFFFF, 0x2C39, 0x2C39, 0xFFFF, 0x2C3A, 0x2C3A, 0xFFFF, 0x2C3B, 0x2C3B, 0xFFFF, 0x2C3C, 0x2C3C, 0xFFFF, 0x2C3D, 0x2C3D, 0xFFFF, 0x2C3E, 0x2C3E, 0xFFFF, 0x2C3F, 0x2C3F };
unsigned short unac_data356[] = { 0xFFFF, 0x2C40, 0x2C40, 0xFFFF, 0x2C41, 0x2C41, 0xFFFF, 0x2C42, 0x2C42, 0xFFFF, 0x2C43, 0x2C43, 0xFFFF, 0x2C44, 0x2C44, 0xFFFF, 0x2C45, 0x2C45, 0xFFFF, 0x2C46, 0x2C46, 0xFFFF, 0x2C47, 0x2C47 };
unsigned short unac_data357[] = { 0xFFFF, 0x2C48, 0x2C48, 0xFFFF, 0x2C49, 0x2C49, 0xFFFF, 0x2C4A, 0x2C4A, 0xFFFF, 0x2C4B, 0x2C4B, 0xFFFF, 0x2C4C, 0x2C4C, 0xFFFF, 0x2C4D, 0x2C4D, 0xFFFF, 0x2C4E, 0x2C4E, 0xFFFF, 0x2C4F, 0x2C4F };
unsigned short unac_data358[] = { 0xFFFF, 0x2C50, 0x2C50, 0xFFFF, 0x2C51, 0x2C51, 0xFFFF, 0x2C52, 0x2C52, 0xFFFF, 0x2C53, 0x2C53, 0xFFFF, 0x2C54, 0x2C54, 0xFFFF, 0x2C55, 0x2C55, 0xFFFF, 0x2C56, 0x2C56, 0xFFFF, 0x2C57, 0x2C57 };
unsigned short unac_data359[] = { 0xFFFF, 0x2C58, 0x2C58, 0xFFFF, 0x2C59, 0x2C59, 0xFFFF, 0x2C5A, 0x2C5A, 0xFFFF, 0x2C5B, 0x2C5B, 0xFFFF, 0x2C5C, 0x2C5C, 0xFFFF, 0x2C5D, 0x2C5D, 0xFFFF, 0x2C5E, 0x2C5E, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data360[] = { 0xFFFF, 0x2C61, 0x2C61, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x026B, 0x026B, 0xFFFF, 0x1D7D, 0x1D7D, 0xFFFF, 0x027D, 0x027D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C68, 0x2C68 };
unsigned short unac_data361[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C6A, 0x2C6A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C6C, 0x2C6C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0251, 0x0251, 0xFFFF, 0x0271, 0x0271, 0xFFFF, 0x0250, 0x0250 };
unsigned short unac_data362[] = { 0xFFFF, 0x0252, 0x0252, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C73, 0x2C73, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C76, 0x2C76, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data363[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0x0056, 0x0076, 0xFFFF, 0xFFFF, 0x023F, 0x023F, 0xFFFF, 0x0240, 0x0240 };
unsigned short unac_data364[] = { 0xFFFF, 0x2C81, 0x2C81, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C83, 0x2C83, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C85, 0x2C85, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C87, 0x2C87, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data365[] = { 0xFFFF, 0x2C89, 0x2C89, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C8B, 0x2C8B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C8D, 0x2C8D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C8F, 0x2C8F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data366[] = { 0xFFFF, 0x2C91, 0x2C91, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C93, 0x2C93, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C95, 0x2C95, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C97, 0x2C97, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data367[] = { 0xFFFF, 0x2C99, 0x2C99, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C9B, 0x2C9B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C9D, 0x2C9D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2C9F, 0x2C9F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data368[] = { 0xFFFF, 0x2CA1, 0x2CA1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CA3, 0x2CA3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CA5, 0x2CA5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CA7, 0x2CA7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data369[] = { 0xFFFF, 0x2CA9, 0x2CA9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CAB, 0x2CAB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CAD, 0x2CAD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CAF, 0x2CAF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data370[] = { 0xFFFF, 0x2CB1, 0x2CB1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CB3, 0x2CB3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CB5, 0x2CB5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CB7, 0x2CB7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data371[] = { 0xFFFF, 0x2CB9, 0x2CB9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CBB, 0x2CBB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CBD, 0x2CBD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CBF, 0x2CBF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data372[] = { 0xFFFF, 0x2CC1, 0x2CC1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CC3, 0x2CC3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CC5, 0x2CC5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CC7, 0x2CC7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data373[] = { 0xFFFF, 0x2CC9, 0x2CC9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CCB, 0x2CCB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CCD, 0x2CCD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CCF, 0x2CCF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data374[] = { 0xFFFF, 0x2CD1, 0x2CD1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CD3, 0x2CD3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CD5, 0x2CD5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CD7, 0x2CD7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data375[] = { 0xFFFF, 0x2CD9, 0x2CD9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CDB, 0x2CDB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CDD, 0x2CDD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CDF, 0x2CDF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data376[] = { 0xFFFF, 0x2CE1, 0x2CE1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CE3, 0x2CE3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data377[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CEC, 0x2CEC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2CEE, 0x2CEE, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data378[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0x2CF3, 0x2CF3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data379[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x2D61, 0x2D61, 0xFFFF };
unsigned short unac_data380[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x6BCD, 0x6BCD, 0xFFFF };
unsigned short unac_data381[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9F9F, 0x9F9F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data382[] = { 0x4E00, 0x4E00, 0xFFFF, 0x4E28, 0x4E28, 0xFFFF, 0x4E36, 0x4E36, 0xFFFF, 0x4E3F, 0x4E3F, 0xFFFF, 0x4E59, 0x4E59, 0xFFFF, 0x4E85, 0x4E85, 0xFFFF, 0x4E8C, 0x4E8C, 0xFFFF, 0x4EA0, 0x4EA0, 0xFFFF };
unsigned short unac_data383[] = { 0x4EBA, 0x4EBA, 0xFFFF, 0x513F, 0x513F, 0xFFFF, 0x5165, 0x5165, 0xFFFF, 0x516B, 0x516B, 0xFFFF, 0x5182, 0x5182, 0xFFFF, 0x5196, 0x5196, 0xFFFF, 0x51AB, 0x51AB, 0xFFFF, 0x51E0, 0x51E0, 0xFFFF };
unsigned short unac_data384[] = { 0x51F5, 0x51F5, 0xFFFF, 0x5200, 0x5200, 0xFFFF, 0x529B, 0x529B, 0xFFFF, 0x52F9, 0x52F9, 0xFFFF, 0x5315, 0x5315, 0xFFFF, 0x531A, 0x531A, 0xFFFF, 0x5338, 0x5338, 0xFFFF, 0x5341, 0x5341, 0xFFFF };
unsigned short unac_data385[] = { 0x535C, 0x535C, 0xFFFF, 0x5369, 0x5369, 0xFFFF, 0x5382, 0x5382, 0xFFFF, 0x53B6, 0x53B6, 0xFFFF, 0x53C8, 0x53C8, 0xFFFF, 0x53E3, 0x53E3, 0xFFFF, 0x56D7, 0x56D7, 0xFFFF, 0x571F, 0x571F, 0xFFFF };
unsigned short unac_data386[] = { 0x58EB, 0x58EB, 0xFFFF, 0x5902, 0x5902, 0xFFFF, 0x590A, 0x590A, 0xFFFF, 0x5915, 0x5915, 0xFFFF, 0x5927, 0x5927, 0xFFFF, 0x5973, 0x5973, 0xFFFF, 0x5B50, 0x5B50, 0xFFFF, 0x5B80, 0x5B80, 0xFFFF };
unsigned short unac_data387[] = { 0x5BF8, 0x5BF8, 0xFFFF, 0x5C0F, 0x5C0F, 0xFFFF, 0x5C22, 0x5C22, 0xFFFF, 0x5C38, 0x5C38, 0xFFFF, 0x5C6E, 0x5C6E, 0xFFFF, 0x5C71, 0x5C71, 0xFFFF, 0x5DDB, 0x5DDB, 0xFFFF, 0x5DE5, 0x5DE5, 0xFFFF };
unsigned short unac_data388[] = { 0x5DF1, 0x5DF1, 0xFFFF, 0x5DFE, 0x5DFE, 0xFFFF, 0x5E72, 0x5E72, 0xFFFF, 0x5E7A, 0x5E7A, 0xFFFF, 0x5E7F, 0x5E7F, 0xFFFF, 0x5EF4, 0x5EF4, 0xFFFF, 0x5EFE, 0x5EFE, 0xFFFF, 0x5F0B, 0x5F0B, 0xFFFF };
unsigned short unac_data389[] = { 0x5F13, 0x5F13, 0xFFFF, 0x5F50, 0x5F50, 0xFFFF, 0x5F61, 0x5F61, 0xFFFF, 0x5F73, 0x5F73, 0xFFFF, 0x5FC3, 0x5FC3, 0xFFFF, 0x6208, 0x6208, 0xFFFF, 0x6236, 0x6236, 0xFFFF, 0x624B, 0x624B, 0xFFFF };
unsigned short unac_data390[] = { 0x652F, 0x652F, 0xFFFF, 0x6534, 0x6534, 0xFFFF, 0x6587, 0x6587, 0xFFFF, 0x6597, 0x6597, 0xFFFF, 0x65A4, 0x65A4, 0xFFFF, 0x65B9, 0x65B9, 0xFFFF, 0x65E0, 0x65E0, 0xFFFF, 0x65E5, 0x65E5, 0xFFFF };
unsigned short unac_data391[] = { 0x66F0, 0x66F0, 0xFFFF, 0x6708, 0x6708, 0xFFFF, 0x6728, 0x6728, 0xFFFF, 0x6B20, 0x6B20, 0xFFFF, 0x6B62, 0x6B62, 0xFFFF, 0x6B79, 0x6B79, 0xFFFF, 0x6BB3, 0x6BB3, 0xFFFF, 0x6BCB, 0x6BCB, 0xFFFF };
unsigned short unac_data392[] = { 0x6BD4, 0x6BD4, 0xFFFF, 0x6BDB, 0x6BDB, 0xFFFF, 0x6C0F, 0x6C0F, 0xFFFF, 0x6C14, 0x6C14, 0xFFFF, 0x6C34, 0x6C34, 0xFFFF, 0x706B, 0x706B, 0xFFFF, 0x722A, 0x722A, 0xFFFF, 0x7236, 0x7236, 0xFFFF };
unsigned short unac_data393[] = { 0x723B, 0x723B, 0xFFFF, 0x723F, 0x723F, 0xFFFF, 0x7247, 0x7247, 0xFFFF, 0x7259, 0x7259, 0xFFFF, 0x725B, 0x725B, 0xFFFF, 0x72AC, 0x72AC, 0xFFFF, 0x7384, 0x7384, 0xFFFF, 0x7389, 0x7389, 0xFFFF };
unsigned short unac_data394[] = { 0x74DC, 0x74DC, 0xFFFF, 0x74E6, 0x74E6, 0xFFFF, 0x7518, 0x7518, 0xFFFF, 0x751F, 0x751F, 0xFFFF, 0x7528, 0x7528, 0xFFFF, 0x7530, 0x7530, 0xFFFF, 0x758B, 0x758B, 0xFFFF, 0x7592, 0x7592, 0xFFFF };
unsigned short unac_data395[] = { 0x7676, 0x7676, 0xFFFF, 0x767D, 0x767D, 0xFFFF, 0x76AE, 0x76AE, 0xFFFF, 0x76BF, 0x76BF, 0xFFFF, 0x76EE, 0x76EE, 0xFFFF, 0x77DB, 0x77DB, 0xFFFF, 0x77E2, 0x77E2, 0xFFFF, 0x77F3, 0x77F3, 0xFFFF };
unsigned short unac_data396[] = { 0x793A, 0x793A, 0xFFFF, 0x79B8, 0x79B8, 0xFFFF, 0x79BE, 0x79BE, 0xFFFF, 0x7A74, 0x7A74, 0xFFFF, 0x7ACB, 0x7ACB, 0xFFFF, 0x7AF9, 0x7AF9, 0xFFFF, 0x7C73, 0x7C73, 0xFFFF, 0x7CF8, 0x7CF8, 0xFFFF };
unsigned short unac_data397[] = { 0x7F36, 0x7F36, 0xFFFF, 0x7F51, 0x7F51, 0xFFFF, 0x7F8A, 0x7F8A, 0xFFFF, 0x7FBD, 0x7FBD, 0xFFFF, 0x8001, 0x8001, 0xFFFF, 0x800C, 0x800C, 0xFFFF, 0x8012, 0x8012, 0xFFFF, 0x8033, 0x8033, 0xFFFF };
unsigned short unac_data398[] = { 0x807F, 0x807F, 0xFFFF, 0x8089, 0x8089, 0xFFFF, 0x81E3, 0x81E3, 0xFFFF, 0x81EA, 0x81EA, 0xFFFF, 0x81F3, 0x81F3, 0xFFFF, 0x81FC, 0x81FC, 0xFFFF, 0x820C, 0x820C, 0xFFFF, 0x821B, 0x821B, 0xFFFF };
unsigned short unac_data399[] = { 0x821F, 0x821F, 0xFFFF, 0x826E, 0x826E, 0xFFFF, 0x8272, 0x8272, 0xFFFF, 0x8278, 0x8278, 0xFFFF, 0x864D, 0x864D, 0xFFFF, 0x866B, 0x866B, 0xFFFF, 0x8840, 0x8840, 0xFFFF, 0x884C, 0x884C, 0xFFFF };
unsigned short unac_data400[] = { 0x8863, 0x8863, 0xFFFF, 0x897E, 0x897E, 0xFFFF, 0x898B, 0x898B, 0xFFFF, 0x89D2, 0x89D2, 0xFFFF, 0x8A00, 0x8A00, 0xFFFF, 0x8C37, 0x8C37, 0xFFFF, 0x8C46, 0x8C46, 0xFFFF, 0x8C55, 0x8C55, 0xFFFF };
unsigned short unac_data401[] = { 0x8C78, 0x8C78, 0xFFFF, 0x8C9D, 0x8C9D, 0xFFFF, 0x8D64, 0x8D64, 0xFFFF, 0x8D70, 0x8D70, 0xFFFF, 0x8DB3, 0x8DB3, 0xFFFF, 0x8EAB, 0x8EAB, 0xFFFF, 0x8ECA, 0x8ECA, 0xFFFF, 0x8F9B, 0x8F9B, 0xFFFF };
unsigned short unac_data402[] = { 0x8FB0, 0x8FB0, 0xFFFF, 0x8FB5, 0x8FB5, 0xFFFF, 0x9091, 0x9091, 0xFFFF, 0x9149, 0x9149, 0xFFFF, 0x91C6, 0x91C6, 0xFFFF, 0x91CC, 0x91CC, 0xFFFF, 0x91D1, 0x91D1, 0xFFFF, 0x9577, 0x9577, 0xFFFF };
unsigned short unac_data403[] = { 0x9580, 0x9580, 0xFFFF, 0x961C, 0x961C, 0xFFFF, 0x96B6, 0x96B6, 0xFFFF, 0x96B9, 0x96B9, 0xFFFF, 0x96E8, 0x96E8, 0xFFFF, 0x9751, 0x9751, 0xFFFF, 0x975E, 0x975E, 0xFFFF, 0x9762, 0x9762, 0xFFFF };
unsigned short unac_data404[] = { 0x9769, 0x9769, 0xFFFF, 0x97CB, 0x97CB, 0xFFFF, 0x97ED, 0x97ED, 0xFFFF, 0x97F3, 0x97F3, 0xFFFF, 0x9801, 0x9801, 0xFFFF, 0x98A8, 0x98A8, 0xFFFF, 0x98DB, 0x98DB, 0xFFFF, 0x98DF, 0x98DF, 0xFFFF };
unsigned short unac_data405[] = { 0x9996, 0x9996, 0xFFFF, 0x9999, 0x9999, 0xFFFF, 0x99AC, 0x99AC, 0xFFFF, 0x9AA8, 0x9AA8, 0xFFFF, 0x9AD8, 0x9AD8, 0xFFFF, 0x9ADF, 0x9ADF, 0xFFFF, 0x9B25, 0x9B25, 0xFFFF, 0x9B2F, 0x9B2F, 0xFFFF };
unsigned short unac_data406[] = { 0x9B32, 0x9B32, 0xFFFF, 0x9B3C, 0x9B3C, 0xFFFF, 0x9B5A, 0x9B5A, 0xFFFF, 0x9CE5, 0x9CE5, 0xFFFF, 0x9E75, 0x9E75, 0xFFFF, 0x9E7F, 0x9E7F, 0xFFFF, 0x9EA5, 0x9EA5, 0xFFFF, 0x9EBB, 0x9EBB, 0xFFFF };
unsigned short unac_data407[] = { 0x9EC3, 0x9EC3, 0xFFFF, 0x9ECD, 0x9ECD, 0xFFFF, 0x9ED1, 0x9ED1, 0xFFFF, 0x9EF9, 0x9EF9, 0xFFFF, 0x9EFD, 0x9EFD, 0xFFFF, 0x9F0E, 0x9F0E, 0xFFFF, 0x9F13, 0x9F13, 0xFFFF, 0x9F20, 0x9F20, 0xFFFF };
unsigned short unac_data408[] = { 0x9F3B, 0x9F3B, 0xFFFF, 0x9F4A, 0x9F4A, 0xFFFF, 0x9F52, 0x9F52, 0xFFFF, 0x9F8D, 0x9F8D, 0xFFFF, 0x9F9C, 0x9F9C, 0xFFFF, 0x9FA0, 0x9FA0, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data409[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x3012, 0x3012, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data410[] = { 0x5341, 0x5341, 0xFFFF, 0x5344, 0x5344, 0xFFFF, 0x5345, 0x5345, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data411[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x1100, 0x1100, 0xFFFF, 0x1101, 0x1101, 0xFFFF, 0x11AA, 0x11AA, 0xFFFF, 0x1102, 0x1102, 0xFFFF, 0x11AC, 0x11AC, 0xFFFF, 0x11AD, 0x11AD, 0xFFFF, 0x1103, 0x1103, 0xFFFF };
unsigned short unac_data412[] = { 0x1104, 0x1104, 0xFFFF, 0x1105, 0x1105, 0xFFFF, 0x11B0, 0x11B0, 0xFFFF, 0x11B1, 0x11B1, 0xFFFF, 0x11B2, 0x11B2, 0xFFFF, 0x11B3, 0x11B3, 0xFFFF, 0x11B4, 0x11B4, 0xFFFF, 0x11B5, 0x11B5, 0xFFFF };
unsigned short unac_data413[] = { 0x111A, 0x111A, 0xFFFF, 0x1106, 0x1106, 0xFFFF, 0x1107, 0x1107, 0xFFFF, 0x1108, 0x1108, 0xFFFF, 0x1121, 0x1121, 0xFFFF, 0x1109, 0x1109, 0xFFFF, 0x110A, 0x110A, 0xFFFF, 0x110B, 0x110B, 0xFFFF };
unsigned short unac_data414[] = { 0x110C, 0x110C, 0xFFFF, 0x110D, 0x110D, 0xFFFF, 0x110E, 0x110E, 0xFFFF, 0x110F, 0x110F, 0xFFFF, 0x1110, 0x1110, 0xFFFF, 0x1111, 0x1111, 0xFFFF, 0x1112, 0x1112, 0xFFFF, 0x1161, 0x1161, 0xFFFF };
unsigned short unac_data415[] = { 0x1162, 0x1162, 0xFFFF, 0x1163, 0x1163, 0xFFFF, 0x1164, 0x1164, 0xFFFF, 0x1165, 0x1165, 0xFFFF, 0x1166, 0x1166, 0xFFFF, 0x1167, 0x1167, 0xFFFF, 0x1168, 0x1168, 0xFFFF, 0x1169, 0x1169, 0xFFFF };
unsigned short unac_data416[] = { 0x116A, 0x116A, 0xFFFF, 0x116B, 0x116B, 0xFFFF, 0x116C, 0x116C, 0xFFFF, 0x116D, 0x116D, 0xFFFF, 0x116E, 0x116E, 0xFFFF, 0x116F, 0x116F, 0xFFFF, 0x1170, 0x1170, 0xFFFF, 0x1171, 0x1171, 0xFFFF };
unsigned short unac_data417[] = { 0x1172, 0x1172, 0xFFFF, 0x1173, 0x1173, 0xFFFF, 0x1174, 0x1174, 0xFFFF, 0x1175, 0x1175, 0xFFFF, 0x1160, 0x1160, 0xFFFF, 0x1114, 0x1114, 0xFFFF, 0x1115, 0x1115, 0xFFFF, 0x11C7, 0x11C7, 0xFFFF };
unsigned short unac_data418[] = { 0x11C8, 0x11C8, 0xFFFF, 0x11CC, 0x11CC, 0xFFFF, 0x11CE, 0x11CE, 0xFFFF, 0x11D3, 0x11D3, 0xFFFF, 0x11D7, 0x11D7, 0xFFFF, 0x11D9, 0x11D9, 0xFFFF, 0x111C, 0x111C, 0xFFFF, 0x11DD, 0x11DD, 0xFFFF };
unsigned short unac_data419[] = { 0x11DF, 0x11DF, 0xFFFF, 0x111D, 0x111D, 0xFFFF, 0x111E, 0x111E, 0xFFFF, 0x1120, 0x1120, 0xFFFF, 0x1122, 0x1122, 0xFFFF, 0x1123, 0x1123, 0xFFFF, 0x1127, 0x1127, 0xFFFF, 0x1129, 0x1129, 0xFFFF };
unsigned short unac_data420[] = { 0x112B, 0x112B, 0xFFFF, 0x112C, 0x112C, 0xFFFF, 0x112D, 0x112D, 0xFFFF, 0x112E, 0x112E, 0xFFFF, 0x112F, 0x112F, 0xFFFF, 0x1132, 0x1132, 0xFFFF, 0x1136, 0x1136, 0xFFFF, 0x1140, 0x1140, 0xFFFF };
unsigned short unac_data421[] = { 0x1147, 0x1147, 0xFFFF, 0x114C, 0x114C, 0xFFFF, 0x11F1, 0x11F1, 0xFFFF, 0x11F2, 0x11F2, 0xFFFF, 0x1157, 0x1157, 0xFFFF, 0x1158, 0x1158, 0xFFFF, 0x1159, 0x1159, 0xFFFF, 0x1184, 0x1184, 0xFFFF };
unsigned short unac_data422[] = { 0x1185, 0x1185, 0xFFFF, 0x1188, 0x1188, 0xFFFF, 0x1191, 0x1191, 0xFFFF, 0x1192, 0x1192, 0xFFFF, 0x1194, 0x1194, 0xFFFF, 0x119E, 0x119E, 0xFFFF, 0x11A1, 0x11A1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data423[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4E00, 0x4E00, 0xFFFF, 0x4E8C, 0x4E8C, 0xFFFF, 0x4E09, 0x4E09, 0xFFFF, 0x56DB, 0x56DB, 0xFFFF, 0x4E0A, 0x4E0A, 0xFFFF, 0x4E2D, 0x4E2D, 0xFFFF };
unsigned short unac_data424[] = { 0x4E0B, 0x4E0B, 0xFFFF, 0x7532, 0x7532, 0xFFFF, 0x4E59, 0x4E59, 0xFFFF, 0x4E19, 0x4E19, 0xFFFF, 0x4E01, 0x4E01, 0xFFFF, 0x5929, 0x5929, 0xFFFF, 0x5730, 0x5730, 0xFFFF, 0x4EBA, 0x4EBA, 0xFFFF };
unsigned short unac_data425[] = { 0x0028, 0x1100, 0x0029, 0x0028, 0x1100, 0x0029, 0xFFFF, 0x0028, 0x1102, 0x0029, 0x0028, 0x1102, 0x0029, 0xFFFF, 0x0028, 0x1103, 0x0029, 0x0028, 0x1103, 0x0029, 0xFFFF, 0x0028, 0x1105, 0x0029, 0x0028, 0x1105, 0x0029, 0xFFFF, 0x0028, 0x1106, 0x0029, 0x0028, 0x1106, 0x0029, 0xFFFF, 0x0028, 0x1107, 0x0029, 0x0028, 0x1107, 0x0029, 0xFFFF, 0x0028, 0x1109, 0x0029, 0x0028, 0x1109, 0x0029, 0xFFFF, 0x0028, 0x110B, 0x0029, 0x0028, 0x110B, 0x0029, 0xFFFF };
unsigned short unac_data426[] = { 0x0028, 0x110C, 0x0029, 0x0028, 0x110C, 0x0029, 0xFFFF, 0x0028, 0x110E, 0x0029, 0x0028, 0x110E, 0x0029, 0xFFFF, 0x0028, 0x110F, 0x0029, 0x0028, 0x110F, 0x0029, 0xFFFF, 0x0028, 0x1110, 0x0029, 0x0028, 0x1110, 0x0029, 0xFFFF, 0x0028, 0x1111, 0x0029, 0x0028, 0x1111, 0x0029, 0xFFFF, 0x0028, 0x1112, 0x0029, 0x0028, 0x1112, 0x0029, 0xFFFF, 0x0028, 0x1100, 0x1161, 0x0029, 0x0028, 0x1100, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1102, 0x1161, 0x0029, 0x0028, 0x1102, 0x1161, 0x0029, 0xFFFF };
unsigned short unac_data427[] = { 0x0028, 0x1103, 0x1161, 0x0029, 0x0028, 0x1103, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1105, 0x1161, 0x0029, 0x0028, 0x1105, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1106, 0x1161, 0x0029, 0x0028, 0x1106, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1107, 0x1161, 0x0029, 0x0028, 0x1107, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1109, 0x1161, 0x0029, 0x0028, 0x1109, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x110B, 0x1161, 0x0029, 0x0028, 0x110B, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x110C, 0x1161, 0x0029, 0x0028, 0x110C, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x110E, 0x1161, 0x0029, 0x0028, 0x110E, 0x1161, 0x0029, 0xFFFF };
unsigned short unac_data428[] = { 0x0028, 0x110F, 0x1161, 0x0029, 0x0028, 0x110F, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1110, 0x1161, 0x0029, 0x0028, 0x1110, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1111, 0x1161, 0x0029, 0x0028, 0x1111, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x1112, 0x1161, 0x0029, 0x0028, 0x1112, 0x1161, 0x0029, 0xFFFF, 0x0028, 0x110C, 0x116E, 0x0029, 0x0028, 0x110C, 0x116E, 0x0029, 0xFFFF, 0x0028, 0x110B, 0x1169, 0x110C, 0x1165, 0x11AB, 0x0029, 0x0028, 0x110B, 0x1169, 0x110C, 0x1165, 0x11AB, 0x0029, 0xFFFF, 0x0028, 0x110B, 0x1169, 0x1112, 0x116E, 0x0029, 0x0028, 0x110B, 0x1169, 0x1112, 0x116E, 0x0029, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data429[] = { 0x0028, 0x4E00, 0x0029, 0x0028, 0x4E00, 0x0029, 0xFFFF, 0x0028, 0x4E8C, 0x0029, 0x0028, 0x4E8C, 0x0029, 0xFFFF, 0x0028, 0x4E09, 0x0029, 0x0028, 0x4E09, 0x0029, 0xFFFF, 0x0028, 0x56DB, 0x0029, 0x0028, 0x56DB, 0x0029, 0xFFFF, 0x0028, 0x4E94, 0x0029, 0x0028, 0x4E94, 0x0029, 0xFFFF, 0x0028, 0x516D, 0x0029, 0x0028, 0x516D, 0x0029, 0xFFFF, 0x0028, 0x4E03, 0x0029, 0x0028, 0x4E03, 0x0029, 0xFFFF, 0x0028, 0x516B, 0x0029, 0x0028, 0x516B, 0x0029, 0xFFFF };
unsigned short unac_data430[] = { 0x0028, 0x4E5D, 0x0029, 0x0028, 0x4E5D, 0x0029, 0xFFFF, 0x0028, 0x5341, 0x0029, 0x0028, 0x5341, 0x0029, 0xFFFF, 0x0028, 0x6708, 0x0029, 0x0028, 0x6708, 0x0029, 0xFFFF, 0x0028, 0x706B, 0x0029, 0x0028, 0x706B, 0x0029, 0xFFFF, 0x0028, 0x6C34, 0x0029, 0x0028, 0x6C34, 0x0029, 0xFFFF, 0x0028, 0x6728, 0x0029, 0x0028, 0x6728, 0x0029, 0xFFFF, 0x0028, 0x91D1, 0x0029, 0x0028, 0x91D1, 0x0029, 0xFFFF, 0x0028, 0x571F, 0x0029, 0x0028, 0x571F, 0x0029, 0xFFFF };
unsigned short unac_data431[] = { 0x0028, 0x65E5, 0x0029, 0x0028, 0x65E5, 0x0029, 0xFFFF, 0x0028, 0x682A, 0x0029, 0x0028, 0x682A, 0x0029, 0xFFFF, 0x0028, 0x6709, 0x0029, 0x0028, 0x6709, 0x0029, 0xFFFF, 0x0028, 0x793E, 0x0029, 0x0028, 0x793E, 0x0029, 0xFFFF, 0x0028, 0x540D, 0x0029, 0x0028, 0x540D, 0x0029, 0xFFFF, 0x0028, 0x7279, 0x0029, 0x0028, 0x7279, 0x0029, 0xFFFF, 0x0028, 0x8CA1, 0x0029, 0x0028, 0x8CA1, 0x0029, 0xFFFF, 0x0028, 0x795D, 0x0029, 0x0028, 0x795D, 0x0029, 0xFFFF };
unsigned short unac_data432[] = { 0x0028, 0x52B4, 0x0029, 0x0028, 0x52B4, 0x0029, 0xFFFF, 0x0028, 0x4EE3, 0x0029, 0x0028, 0x4EE3, 0x0029, 0xFFFF, 0x0028, 0x547C, 0x0029, 0x0028, 0x547C, 0x0029, 0xFFFF, 0x0028, 0x5B66, 0x0029, 0x0028, 0x5B66, 0x0029, 0xFFFF, 0x0028, 0x76E3, 0x0029, 0x0028, 0x76E3, 0x0029, 0xFFFF, 0x0028, 0x4F01, 0x0029, 0x0028, 0x4F01, 0x0029, 0xFFFF, 0x0028, 0x8CC7, 0x0029, 0x0028, 0x8CC7, 0x0029, 0xFFFF, 0x0028, 0x5354, 0x0029, 0x0028, 0x5354, 0x0029, 0xFFFF };
unsigned short unac_data433[] = { 0x0028, 0x796D, 0x0029, 0x0028, 0x796D, 0x0029, 0xFFFF, 0x0028, 0x4F11, 0x0029, 0x0028, 0x4F11, 0x0029, 0xFFFF, 0x0028, 0x81EA, 0x0029, 0x0028, 0x81EA, 0x0029, 0xFFFF, 0x0028, 0x81F3, 0x0029, 0x0028, 0x81F3, 0x0029, 0xFFFF, 0x554F, 0x554F, 0xFFFF, 0x5E7C, 0x5E7C, 0xFFFF, 0x6587, 0x6587, 0xFFFF, 0x7B8F, 0x7B8F, 0xFFFF };
unsigned short unac_data434[] = { 0x0050, 0x0054, 0x0045, 0x0070, 0x0074, 0x0065, 0xFFFF, 0x0032, 0x0031, 0x0032, 0x0031, 0xFFFF, 0x0032, 0x0032, 0x0032, 0x0032, 0xFFFF, 0x0032, 0x0033, 0x0032, 0x0033, 0xFFFF, 0x0032, 0x0034, 0x0032, 0x0034, 0xFFFF, 0x0032, 0x0035, 0x0032, 0x0035, 0xFFFF, 0x0032, 0x0036, 0x0032, 0x0036, 0xFFFF, 0x0032, 0x0037, 0x0032, 0x0037, 0xFFFF };
unsigned short unac_data435[] = { 0x0032, 0x0038, 0x0032, 0x0038, 0xFFFF, 0x0032, 0x0039, 0x0032, 0x0039, 0xFFFF, 0x0033, 0x0030, 0x0033, 0x0030, 0xFFFF, 0x0033, 0x0031, 0x0033, 0x0031, 0xFFFF, 0x0033, 0x0032, 0x0033, 0x0032, 0xFFFF, 0x0033, 0x0033, 0x0033, 0x0033, 0xFFFF, 0x0033, 0x0034, 0x0033, 0x0034, 0xFFFF, 0x0033, 0x0035, 0x0033, 0x0035, 0xFFFF };
unsigned short unac_data436[] = { 0x1100, 0x1100, 0xFFFF, 0x1102, 0x1102, 0xFFFF, 0x1103, 0x1103, 0xFFFF, 0x1105, 0x1105, 0xFFFF, 0x1106, 0x1106, 0xFFFF, 0x1107, 0x1107, 0xFFFF, 0x1109, 0x1109, 0xFFFF, 0x110B, 0x110B, 0xFFFF };
unsigned short unac_data437[] = { 0x110C, 0x110C, 0xFFFF, 0x110E, 0x110E, 0xFFFF, 0x110F, 0x110F, 0xFFFF, 0x1110, 0x1110, 0xFFFF, 0x1111, 0x1111, 0xFFFF, 0x1112, 0x1112, 0xFFFF, 0x1100, 0x1161, 0x1100, 0x1161, 0xFFFF, 0x1102, 0x1161, 0x1102, 0x1161, 0xFFFF };
unsigned short unac_data438[] = { 0x1103, 0x1161, 0x1103, 0x1161, 0xFFFF, 0x1105, 0x1161, 0x1105, 0x1161, 0xFFFF, 0x1106, 0x1161, 0x1106, 0x1161, 0xFFFF, 0x1107, 0x1161, 0x1107, 0x1161, 0xFFFF, 0x1109, 0x1161, 0x1109, 0x1161, 0xFFFF, 0x110B, 0x1161, 0x110B, 0x1161, 0xFFFF, 0x110C, 0x1161, 0x110C, 0x1161, 0xFFFF, 0x110E, 0x1161, 0x110E, 0x1161, 0xFFFF };
unsigned short unac_data439[] = { 0x110F, 0x1161, 0x110F, 0x1161, 0xFFFF, 0x1110, 0x1161, 0x1110, 0x1161, 0xFFFF, 0x1111, 0x1161, 0x1111, 0x1161, 0xFFFF, 0x1112, 0x1161, 0x1112, 0x1161, 0xFFFF, 0x110E, 0x1161, 0x11B7, 0x1100, 0x1169, 0x110E, 0x1161, 0x11B7, 0x1100, 0x1169, 0xFFFF, 0x110C, 0x116E, 0x110B, 0x1174, 0x110C, 0x116E, 0x110B, 0x1174, 0xFFFF, 0x110B, 0x116E, 0x110B, 0x116E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data440[] = { 0x4E00, 0x4E00, 0xFFFF, 0x4E8C, 0x4E8C, 0xFFFF, 0x4E09, 0x4E09, 0xFFFF, 0x56DB, 0x56DB, 0xFFFF, 0x4E94, 0x4E94, 0xFFFF, 0x516D, 0x516D, 0xFFFF, 0x4E03, 0x4E03, 0xFFFF, 0x516B, 0x516B, 0xFFFF };
unsigned short unac_data441[] = { 0x4E5D, 0x4E5D, 0xFFFF, 0x5341, 0x5341, 0xFFFF, 0x6708, 0x6708, 0xFFFF, 0x706B, 0x706B, 0xFFFF, 0x6C34, 0x6C34, 0xFFFF, 0x6728, 0x6728, 0xFFFF, 0x91D1, 0x91D1, 0xFFFF, 0x571F, 0x571F, 0xFFFF };
unsigned short unac_data442[] = { 0x65E5, 0x65E5, 0xFFFF, 0x682A, 0x682A, 0xFFFF, 0x6709, 0x6709, 0xFFFF, 0x793E, 0x793E, 0xFFFF, 0x540D, 0x540D, 0xFFFF, 0x7279, 0x7279, 0xFFFF, 0x8CA1, 0x8CA1, 0xFFFF, 0x795D, 0x795D, 0xFFFF };
unsigned short unac_data443[] = { 0x52B4, 0x52B4, 0xFFFF, 0x79D8, 0x79D8, 0xFFFF, 0x7537, 0x7537, 0xFFFF, 0x5973, 0x5973, 0xFFFF, 0x9069, 0x9069, 0xFFFF, 0x512A, 0x512A, 0xFFFF, 0x5370, 0x5370, 0xFFFF, 0x6CE8, 0x6CE8, 0xFFFF };
unsigned short unac_data444[] = { 0x9805, 0x9805, 0xFFFF, 0x4F11, 0x4F11, 0xFFFF, 0x5199, 0x5199, 0xFFFF, 0x6B63, 0x6B63, 0xFFFF, 0x4E0A, 0x4E0A, 0xFFFF, 0x4E2D, 0x4E2D, 0xFFFF, 0x4E0B, 0x4E0B, 0xFFFF, 0x5DE6, 0x5DE6, 0xFFFF };
unsigned short unac_data445[] = { 0x53F3, 0x53F3, 0xFFFF, 0x533B, 0x533B, 0xFFFF, 0x5B97, 0x5B97, 0xFFFF, 0x5B66, 0x5B66, 0xFFFF, 0x76E3, 0x76E3, 0xFFFF, 0x4F01, 0x4F01, 0xFFFF, 0x8CC7, 0x8CC7, 0xFFFF, 0x5354, 0x5354, 0xFFFF };
unsigned short unac_data446[] = { 0x591C, 0x591C, 0xFFFF, 0x0033, 0x0036, 0x0033, 0x0036, 0xFFFF, 0x0033, 0x0037, 0x0033, 0x0037, 0xFFFF, 0x0033, 0x0038, 0x0033, 0x0038, 0xFFFF, 0x0033, 0x0039, 0x0033, 0x0039, 0xFFFF, 0x0034, 0x0030, 0x0034, 0x0030, 0xFFFF, 0x0034, 0x0031, 0x0034, 0x0031, 0xFFFF, 0x0034, 0x0032, 0x0034, 0x0032, 0xFFFF };
unsigned short unac_data447[] = { 0x0034, 0x0033, 0x0034, 0x0033, 0xFFFF, 0x0034, 0x0034, 0x0034, 0x0034, 0xFFFF, 0x0034, 0x0035, 0x0034, 0x0035, 0xFFFF, 0x0034, 0x0036, 0x0034, 0x0036, 0xFFFF, 0x0034, 0x0037, 0x0034, 0x0037, 0xFFFF, 0x0034, 0x0038, 0x0034, 0x0038, 0xFFFF, 0x0034, 0x0039, 0x0034, 0x0039, 0xFFFF, 0x0035, 0x0030, 0x0035, 0x0030, 0xFFFF };
unsigned short unac_data448[] = { 0x0031, 0x6708, 0x0031, 0x6708, 0xFFFF, 0x0032, 0x6708, 0x0032, 0x6708, 0xFFFF, 0x0033, 0x6708, 0x0033, 0x6708, 0xFFFF, 0x0034, 0x6708, 0x0034, 0x6708, 0xFFFF, 0x0035, 0x6708, 0x0035, 0x6708, 0xFFFF, 0x0036, 0x6708, 0x0036, 0x6708, 0xFFFF, 0x0037, 0x6708, 0x0037, 0x6708, 0xFFFF, 0x0038, 0x6708, 0x0038, 0x6708, 0xFFFF };
unsigned short unac_data449[] = { 0x0039, 0x6708, 0x0039, 0x6708, 0xFFFF, 0x0031, 0x0030, 0x6708, 0x0031, 0x0030, 0x6708, 0xFFFF, 0x0031, 0x0031, 0x6708, 0x0031, 0x0031, 0x6708, 0xFFFF, 0x0031, 0x0032, 0x6708, 0x0031, 0x0032, 0x6708, 0xFFFF, 0x0048, 0x0067, 0x0068, 0x0067, 0xFFFF, 0x0065, 0x0072, 0x0067, 0x0065, 0x0072, 0x0067, 0xFFFF, 0x0065, 0x0056, 0x0065, 0x0076, 0xFFFF, 0x004C, 0x0054, 0x0044, 0x006C, 0x0074, 0x0064, 0xFFFF };
unsigned short unac_data450[] = { 0x30A2, 0x30A2, 0xFFFF, 0x30A4, 0x30A4, 0xFFFF, 0x30A6, 0x30A6, 0xFFFF, 0x30A8, 0x30A8, 0xFFFF, 0x30AA, 0x30AA, 0xFFFF, 0x30AB, 0x30AB, 0xFFFF, 0x30AD, 0x30AD, 0xFFFF, 0x30AF, 0x30AF, 0xFFFF };
unsigned short unac_data451[] = { 0x30B1, 0x30B1, 0xFFFF, 0x30B3, 0x30B3, 0xFFFF, 0x30B5, 0x30B5, 0xFFFF, 0x30B7, 0x30B7, 0xFFFF, 0x30B9, 0x30B9, 0xFFFF, 0x30BB, 0x30BB, 0xFFFF, 0x30BD, 0x30BD, 0xFFFF, 0x30BF, 0x30BF, 0xFFFF };
unsigned short unac_data452[] = { 0x30C1, 0x30C1, 0xFFFF, 0x30C4, 0x30C4, 0xFFFF, 0x30C6, 0x30C6, 0xFFFF, 0x30C8, 0x30C8, 0xFFFF, 0x30CA, 0x30CA, 0xFFFF, 0x30CB, 0x30CB, 0xFFFF, 0x30CC, 0x30CC, 0xFFFF, 0x30CD, 0x30CD, 0xFFFF };
unsigned short unac_data453[] = { 0x30CE, 0x30CE, 0xFFFF, 0x30CF, 0x30CF, 0xFFFF, 0x30D2, 0x30D2, 0xFFFF, 0x30D5, 0x30D5, 0xFFFF, 0x30D8, 0x30D8, 0xFFFF, 0x30DB, 0x30DB, 0xFFFF, 0x30DE, 0x30DE, 0xFFFF, 0x30DF, 0x30DF, 0xFFFF };
unsigned short unac_data454[] = { 0x30E0, 0x30E0, 0xFFFF, 0x30E1, 0x30E1, 0xFFFF, 0x30E2, 0x30E2, 0xFFFF, 0x30E4, 0x30E4, 0xFFFF, 0x30E6, 0x30E6, 0xFFFF, 0x30E8, 0x30E8, 0xFFFF, 0x30E9, 0x30E9, 0xFFFF, 0x30EA, 0x30EA, 0xFFFF };
unsigned short unac_data455[] = { 0x30EB, 0x30EB, 0xFFFF, 0x30EC, 0x30EC, 0xFFFF, 0x30ED, 0x30ED, 0xFFFF, 0x30EF, 0x30EF, 0xFFFF, 0x30F0, 0x30F0, 0xFFFF, 0x30F1, 0x30F1, 0xFFFF, 0x30F2, 0x30F2, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data456[] = { 0x30A2, 0x30D1, 0x30FC, 0x30C8, 0x30A2, 0x30D1, 0x30FC, 0x30C8, 0xFFFF, 0x30A2, 0x30EB, 0x30D5, 0x30A1, 0x30A2, 0x30EB, 0x30D5, 0x30A1, 0xFFFF, 0x30A2, 0x30F3, 0x30DA, 0x30A2, 0x30A2, 0x30F3, 0x30DA, 0x30A2, 0xFFFF, 0x30A2, 0x30FC, 0x30EB, 0x30A2, 0x30FC, 0x30EB, 0xFFFF, 0x30A4, 0x30CB, 0x30F3, 0x30B0, 0x30A4, 0x30CB, 0x30F3, 0x30B0, 0xFFFF, 0x30A4, 0x30F3, 0x30C1, 0x30A4, 0x30F3, 0x30C1, 0xFFFF, 0x30A6, 0x30A9, 0x30F3, 0x30A6, 0x30A9, 0x30F3, 0xFFFF, 0x30A8, 0x30B9, 0x30AF, 0x30FC, 0x30C9, 0x30A8, 0x30B9, 0x30AF, 0x30FC, 0x30C9, 0xFFFF };
unsigned short unac_data457[] = { 0x30A8, 0x30FC, 0x30AB, 0x30FC, 0x30A8, 0x30FC, 0x30AB, 0x30FC, 0xFFFF, 0x30AA, 0x30F3, 0x30B9, 0x30AA, 0x30F3, 0x30B9, 0xFFFF, 0x30AA, 0x30FC, 0x30E0, 0x30AA, 0x30FC, 0x30E0, 0xFFFF, 0x30AB, 0x30A4, 0x30EA, 0x30AB, 0x30A4, 0x30EA, 0xFFFF, 0x30AB, 0x30E9, 0x30C3, 0x30C8, 0x30AB, 0x30E9, 0x30C3, 0x30C8, 0xFFFF, 0x30AB, 0x30ED, 0x30EA, 0x30FC, 0x30AB, 0x30ED, 0x30EA, 0x30FC, 0xFFFF, 0x30AC, 0x30ED, 0x30F3, 0x30AC, 0x30ED, 0x30F3, 0xFFFF, 0x30AC, 0x30F3, 0x30DE, 0x30AC, 0x30F3, 0x30DE, 0xFFFF };
unsigned short unac_data458[] = { 0x30AE, 0x30AC, 0x30AE, 0x30AC, 0xFFFF, 0x30AE, 0x30CB, 0x30FC, 0x30AE, 0x30CB, 0x30FC, 0xFFFF, 0x30AD, 0x30E5, 0x30EA, 0x30FC, 0x30AD, 0x30E5, 0x30EA, 0x30FC, 0xFFFF, 0x30AE, 0x30EB, 0x30C0, 0x30FC, 0x30AE, 0x30EB, 0x30C0, 0x30FC, 0xFFFF, 0x30AD, 0x30ED, 0x30AD, 0x30ED, 0xFFFF, 0x30AD, 0x30ED, 0x30B0, 0x30E9, 0x30E0, 0x30AD, 0x30ED, 0x30B0, 0x30E9, 0x30E0, 0xFFFF, 0x30AD, 0x30ED, 0x30E1, 0x30FC, 0x30C8, 0x30EB, 0x30AD, 0x30ED, 0x30E1, 0x30FC, 0x30C8, 0x30EB, 0xFFFF, 0x30AD, 0x30ED, 0x30EF, 0x30C3, 0x30C8, 0x30AD, 0x30ED, 0x30EF, 0x30C3, 0x30C8, 0xFFFF };
unsigned short unac_data459[] = { 0x30B0, 0x30E9, 0x30E0, 0x30B0, 0x30E9, 0x30E0, 0xFFFF, 0x30B0, 0x30E9, 0x30E0, 0x30C8, 0x30F3, 0x30B0, 0x30E9, 0x30E0, 0x30C8, 0x30F3, 0xFFFF, 0x30AF, 0x30EB, 0x30BC, 0x30A4, 0x30ED, 0x30AF, 0x30EB, 0x30BC, 0x30A4, 0x30ED, 0xFFFF, 0x30AF, 0x30ED, 0x30FC, 0x30CD, 0x30AF, 0x30ED, 0x30FC, 0x30CD, 0xFFFF, 0x30B1, 0x30FC, 0x30B9, 0x30B1, 0x30FC, 0x30B9, 0xFFFF, 0x30B3, 0x30EB, 0x30CA, 0x30B3, 0x30EB, 0x30CA, 0xFFFF, 0x30B3, 0x30FC, 0x30DD, 0x30B3, 0x30FC, 0x30DD, 0xFFFF, 0x30B5, 0x30A4, 0x30AF, 0x30EB, 0x30B5, 0x30A4, 0x30AF, 0x30EB, 0xFFFF };
unsigned short unac_data460[] = { 0x30B5, 0x30F3, 0x30C1, 0x30FC, 0x30E0, 0x30B5, 0x30F3, 0x30C1, 0x30FC, 0x30E0, 0xFFFF, 0x30B7, 0x30EA, 0x30F3, 0x30B0, 0x30B7, 0x30EA, 0x30F3, 0x30B0, 0xFFFF, 0x30BB, 0x30F3, 0x30C1, 0x30BB, 0x30F3, 0x30C1, 0xFFFF, 0x30BB, 0x30F3, 0x30C8, 0x30BB, 0x30F3, 0x30C8, 0xFFFF, 0x30C0, 0x30FC, 0x30B9, 0x30C0, 0x30FC, 0x30B9, 0xFFFF, 0x30C7, 0x30B7, 0x30C7, 0x30B7, 0xFFFF, 0x30C9, 0x30EB, 0x30C9, 0x30EB, 0xFFFF, 0x30C8, 0x30F3, 0x30C8, 0x30F3, 0xFFFF };
unsigned short unac_data461[] = { 0x30CA, 0x30CE, 0x30CA, 0x30CE, 0xFFFF, 0x30CE, 0x30C3, 0x30C8, 0x30CE, 0x30C3, 0x30C8, 0xFFFF, 0x30CF, 0x30A4, 0x30C4, 0x30CF, 0x30A4, 0x30C4, 0xFFFF, 0x30D1, 0x30FC, 0x30BB, 0x30F3, 0x30C8, 0x30D1, 0x30FC, 0x30BB, 0x30F3, 0x30C8, 0xFFFF, 0x30D1, 0x30FC, 0x30C4, 0x30D1, 0x30FC, 0x30C4, 0xFFFF, 0x30D0, 0x30FC, 0x30EC, 0x30EB, 0x30D0, 0x30FC, 0x30EC, 0x30EB, 0xFFFF, 0x30D4, 0x30A2, 0x30B9, 0x30C8, 0x30EB, 0x30D4, 0x30A2, 0x30B9, 0x30C8, 0x30EB, 0xFFFF, 0x30D4, 0x30AF, 0x30EB, 0x30D4, 0x30AF, 0x30EB, 0xFFFF };
unsigned short unac_data462[] = { 0x30D4, 0x30B3, 0x30D4, 0x30B3, 0xFFFF, 0x30D3, 0x30EB, 0x30D3, 0x30EB, 0xFFFF, 0x30D5, 0x30A1, 0x30E9, 0x30C3, 0x30C9, 0x30D5, 0x30A1, 0x30E9, 0x30C3, 0x30C9, 0xFFFF, 0x30D5, 0x30A3, 0x30FC, 0x30C8, 0x30D5, 0x30A3, 0x30FC, 0x30C8, 0xFFFF, 0x30D6, 0x30C3, 0x30B7, 0x30A7, 0x30EB, 0x30D6, 0x30C3, 0x30B7, 0x30A7, 0x30EB, 0xFFFF, 0x30D5, 0x30E9, 0x30F3, 0x30D5, 0x30E9, 0x30F3, 0xFFFF, 0x30D8, 0x30AF, 0x30BF, 0x30FC, 0x30EB, 0x30D8, 0x30AF, 0x30BF, 0x30FC, 0x30EB, 0xFFFF, 0x30DA, 0x30BD, 0x30DA, 0x30BD, 0xFFFF };
unsigned short unac_data463[] = { 0x30DA, 0x30CB, 0x30D2, 0x30DA, 0x30CB, 0x30D2, 0xFFFF, 0x30D8, 0x30EB, 0x30C4, 0x30D8, 0x30EB, 0x30C4, 0xFFFF, 0x30DA, 0x30F3, 0x30B9, 0x30DA, 0x30F3, 0x30B9, 0xFFFF, 0x30DA, 0x30FC, 0x30B8, 0x30DA, 0x30FC, 0x30B8, 0xFFFF, 0x30D9, 0x30FC, 0x30BF, 0x30D9, 0x30FC, 0x30BF, 0xFFFF, 0x30DD, 0x30A4, 0x30F3, 0x30C8, 0x30DD, 0x30A4, 0x30F3, 0x30C8, 0xFFFF, 0x30DC, 0x30EB, 0x30C8, 0x30DC, 0x30EB, 0x30C8, 0xFFFF, 0x30DB, 0x30F3, 0x30DB, 0x30F3, 0xFFFF };
unsigned short unac_data464[] = { 0x30DD, 0x30F3, 0x30C9, 0x30DD, 0x30F3, 0x30C9, 0xFFFF, 0x30DB, 0x30FC, 0x30EB, 0x30DB, 0x30FC, 0x30EB, 0xFFFF, 0x30DB, 0x30FC, 0x30F3, 0x30DB, 0x30FC, 0x30F3, 0xFFFF, 0x30DE, 0x30A4, 0x30AF, 0x30ED, 0x30DE, 0x30A4, 0x30AF, 0x30ED, 0xFFFF, 0x30DE, 0x30A4, 0x30EB, 0x30DE, 0x30A4, 0x30EB, 0xFFFF, 0x30DE, 0x30C3, 0x30CF, 0x30DE, 0x30C3, 0x30CF, 0xFFFF, 0x30DE, 0x30EB, 0x30AF, 0x30DE, 0x30EB, 0x30AF, 0xFFFF, 0x30DE, 0x30F3, 0x30B7, 0x30E7, 0x30F3, 0x30DE, 0x30F3, 0x30B7, 0x30E7, 0x30F3, 0xFFFF };
unsigned short unac_data465[] = { 0x30DF, 0x30AF, 0x30ED, 0x30F3, 0x30DF, 0x30AF, 0x30ED, 0x30F3, 0xFFFF, 0x30DF, 0x30EA, 0x30DF, 0x30EA, 0xFFFF, 0x30DF, 0x30EA, 0x30D0, 0x30FC, 0x30EB, 0x30DF, 0x30EA, 0x30D0, 0x30FC, 0x30EB, 0xFFFF, 0x30E1, 0x30AC, 0x30E1, 0x30AC, 0xFFFF, 0x30E1, 0x30AC, 0x30C8, 0x30F3, 0x30E1, 0x30AC, 0x30C8, 0x30F3, 0xFFFF, 0x30E1, 0x30FC, 0x30C8, 0x30EB, 0x30E1, 0x30FC, 0x30C8, 0x30EB, 0xFFFF, 0x30E4, 0x30FC, 0x30C9, 0x30E4, 0x30FC, 0x30C9, 0xFFFF, 0x30E4, 0x30FC, 0x30EB, 0x30E4, 0x30FC, 0x30EB, 0xFFFF };
unsigned short unac_data466[] = { 0x30E6, 0x30A2, 0x30F3, 0x30E6, 0x30A2, 0x30F3, 0xFFFF, 0x30EA, 0x30C3, 0x30C8, 0x30EB, 0x30EA, 0x30C3, 0x30C8, 0x30EB, 0xFFFF, 0x30EA, 0x30E9, 0x30EA, 0x30E9, 0xFFFF, 0x30EB, 0x30D4, 0x30FC, 0x30EB, 0x30D4, 0x30FC, 0xFFFF, 0x30EB, 0x30FC, 0x30D6, 0x30EB, 0x30EB, 0x30FC, 0x30D6, 0x30EB, 0xFFFF, 0x30EC, 0x30E0, 0x30EC, 0x30E0, 0xFFFF, 0x30EC, 0x30F3, 0x30C8, 0x30B2, 0x30F3, 0x30EC, 0x30F3, 0x30C8, 0x30B2, 0x30F3, 0xFFFF, 0x30EF, 0x30C3, 0x30C8, 0x30EF, 0x30C3, 0x30C8, 0xFFFF };
unsigned short unac_data467[] = { 0x0030, 0x70B9, 0x0030, 0x70B9, 0xFFFF, 0x0031, 0x70B9, 0x0031, 0x70B9, 0xFFFF, 0x0032, 0x70B9, 0x0032, 0x70B9, 0xFFFF, 0x0033, 0x70B9, 0x0033, 0x70B9, 0xFFFF, 0x0034, 0x70B9, 0x0034, 0x70B9, 0xFFFF, 0x0035, 0x70B9, 0x0035, 0x70B9, 0xFFFF, 0x0036, 0x70B9, 0x0036, 0x70B9, 0xFFFF, 0x0037, 0x70B9, 0x0037, 0x70B9, 0xFFFF };
unsigned short unac_data468[] = { 0x0038, 0x70B9, 0x0038, 0x70B9, 0xFFFF, 0x0039, 0x70B9, 0x0039, 0x70B9, 0xFFFF, 0x0031, 0x0030, 0x70B9, 0x0031, 0x0030, 0x70B9, 0xFFFF, 0x0031, 0x0031, 0x70B9, 0x0031, 0x0031, 0x70B9, 0xFFFF, 0x0031, 0x0032, 0x70B9, 0x0031, 0x0032, 0x70B9, 0xFFFF, 0x0031, 0x0033, 0x70B9, 0x0031, 0x0033, 0x70B9, 0xFFFF, 0x0031, 0x0034, 0x70B9, 0x0031, 0x0034, 0x70B9, 0xFFFF, 0x0031, 0x0035, 0x70B9, 0x0031, 0x0035, 0x70B9, 0xFFFF };
unsigned short unac_data469[] = { 0x0031, 0x0036, 0x70B9, 0x0031, 0x0036, 0x70B9, 0xFFFF, 0x0031, 0x0037, 0x70B9, 0x0031, 0x0037, 0x70B9, 0xFFFF, 0x0031, 0x0038, 0x70B9, 0x0031, 0x0038, 0x70B9, 0xFFFF, 0x0031, 0x0039, 0x70B9, 0x0031, 0x0039, 0x70B9, 0xFFFF, 0x0032, 0x0030, 0x70B9, 0x0032, 0x0030, 0x70B9, 0xFFFF, 0x0032, 0x0031, 0x70B9, 0x0032, 0x0031, 0x70B9, 0xFFFF, 0x0032, 0x0032, 0x70B9, 0x0032, 0x0032, 0x70B9, 0xFFFF, 0x0032, 0x0033, 0x70B9, 0x0032, 0x0033, 0x70B9, 0xFFFF };
unsigned short unac_data470[] = { 0x0032, 0x0034, 0x70B9, 0x0032, 0x0034, 0x70B9, 0xFFFF, 0x0068, 0x0050, 0x0061, 0x0068, 0x0070, 0x0061, 0xFFFF, 0x0064, 0x0061, 0x0064, 0x0061, 0xFFFF, 0x0041, 0x0055, 0x0061, 0x0075, 0xFFFF, 0x0062, 0x0061, 0x0072, 0x0062, 0x0061, 0x0072, 0xFFFF, 0x006F, 0x0056, 0x006F, 0x0076, 0xFFFF, 0x0070, 0x0063, 0x0070, 0x0063, 0xFFFF, 0x0064, 0x006D, 0x0064, 0x006D, 0xFFFF };
unsigned short unac_data471[] = { 0x0064, 0x006D, 0x0032, 0x0064, 0x006D, 0x0032, 0xFFFF, 0x0064, 0x006D, 0x0033, 0x0064, 0x006D, 0x0033, 0xFFFF, 0x0049, 0x0055, 0x0069, 0x0075, 0xFFFF, 0x5E73, 0x6210, 0x5E73, 0x6210, 0xFFFF, 0x662D, 0x548C, 0x662D, 0x548C, 0xFFFF, 0x5927, 0x6B63, 0x5927, 0x6B63, 0xFFFF, 0x660E, 0x6CBB, 0x660E, 0x6CBB, 0xFFFF, 0x682A, 0x5F0F, 0x4F1A, 0x793E, 0x682A, 0x5F0F, 0x4F1A, 0x793E, 0xFFFF };
unsigned short unac_data472[] = { 0x0070, 0x0041, 0x0070, 0x0061, 0xFFFF, 0x006E, 0x0041, 0x006E, 0x0061, 0xFFFF, 0x03BC, 0x0041, 0x03BC, 0x0061, 0xFFFF, 0x006D, 0x0041, 0x006D, 0x0061, 0xFFFF, 0x006B, 0x0041, 0x006B, 0x0061, 0xFFFF, 0x004B, 0x0042, 0x006B, 0x0062, 0xFFFF, 0x004D, 0x0042, 0x006D, 0x0062, 0xFFFF, 0x0047, 0x0042, 0x0067, 0x0062, 0xFFFF };
unsigned short unac_data473[] = { 0x0063, 0x0061, 0x006C, 0x0063, 0x0061, 0x006C, 0xFFFF, 0x006B, 0x0063, 0x0061, 0x006C, 0x006B, 0x0063, 0x0061, 0x006C, 0xFFFF, 0x0070, 0x0046, 0x0070, 0x0066, 0xFFFF, 0x006E, 0x0046, 0x006E, 0x0066, 0xFFFF, 0x03BC, 0x0046, 0x03BC, 0x0066, 0xFFFF, 0x03BC, 0x0067, 0x03BC, 0x0067, 0xFFFF, 0x006D, 0x0067, 0x006D, 0x0067, 0xFFFF, 0x006B, 0x0067, 0x006B, 0x0067, 0xFFFF };
unsigned short unac_data474[] = { 0x0048, 0x007A, 0x0068, 0x007A, 0xFFFF, 0x006B, 0x0048, 0x007A, 0x006B, 0x0068, 0x007A, 0xFFFF, 0x004D, 0x0048, 0x007A, 0x006D, 0x0068, 0x007A, 0xFFFF, 0x0047, 0x0048, 0x007A, 0x0067, 0x0068, 0x007A, 0xFFFF, 0x0054, 0x0048, 0x007A, 0x0074, 0x0068, 0x007A, 0xFFFF, 0x03BC, 0x006C, 0x03BC, 0x006C, 0xFFFF, 0x006D, 0x006C, 0x006D, 0x006C, 0xFFFF, 0x0064, 0x006C, 0x0064, 0x006C, 0xFFFF };
unsigned short unac_data475[] = { 0x006B, 0x006C, 0x006B, 0x006C, 0xFFFF, 0x0066, 0x006D, 0x0066, 0x006D, 0xFFFF, 0x006E, 0x006D, 0x006E, 0x006D, 0xFFFF, 0x03BC, 0x006D, 0x03BC, 0x006D, 0xFFFF, 0x006D, 0x006D, 0x006D, 0x006D, 0xFFFF, 0x0063, 0x006D, 0x0063, 0x006D, 0xFFFF, 0x006B, 0x006D, 0x006B, 0x006D, 0xFFFF, 0x006D, 0x006D, 0x0032, 0x006D, 0x006D, 0x0032, 0xFFFF };
unsigned short unac_data476[] = { 0x0063, 0x006D, 0x0032, 0x0063, 0x006D, 0x0032, 0xFFFF, 0x006D, 0x0032, 0x006D, 0x0032, 0xFFFF, 0x006B, 0x006D, 0x0032, 0x006B, 0x006D, 0x0032, 0xFFFF, 0x006D, 0x006D, 0x0033, 0x006D, 0x006D, 0x0033, 0xFFFF, 0x0063, 0x006D, 0x0033, 0x0063, 0x006D, 0x0033, 0xFFFF, 0x006D, 0x0033, 0x006D, 0x0033, 0xFFFF, 0x006B, 0x006D, 0x0033, 0x006B, 0x006D, 0x0033, 0xFFFF, 0x006D, 0x2215, 0x0073, 0x006D, 0x2215, 0x0073, 0xFFFF };
unsigned short unac_data477[] = { 0x006D, 0x2215, 0x0073, 0x0032, 0x006D, 0x2215, 0x0073, 0x0032, 0xFFFF, 0x0050, 0x0061, 0x0070, 0x0061, 0xFFFF, 0x006B, 0x0050, 0x0061, 0x006B, 0x0070, 0x0061, 0xFFFF, 0x004D, 0x0050, 0x0061, 0x006D, 0x0070, 0x0061, 0xFFFF, 0x0047, 0x0050, 0x0061, 0x0067, 0x0070, 0x0061, 0xFFFF, 0x0072, 0x0061, 0x0064, 0x0072, 0x0061, 0x0064, 0xFFFF, 0x0072, 0x0061, 0x0064, 0x2215, 0x0073, 0x0072, 0x0061, 0x0064, 0x2215, 0x0073, 0xFFFF, 0x0072, 0x0061, 0x0064, 0x2215, 0x0073, 0x0032, 0x0072, 0x0061, 0x0064, 0x2215, 0x0073, 0x0032, 0xFFFF };
unsigned short unac_data478[] = { 0x0070, 0x0073, 0x0070, 0x0073, 0xFFFF, 0x006E, 0x0073, 0x006E, 0x0073, 0xFFFF, 0x03BC, 0x0073, 0x03BC, 0x0073, 0xFFFF, 0x006D, 0x0073, 0x006D, 0x0073, 0xFFFF, 0x0070, 0x0056, 0x0070, 0x0076, 0xFFFF, 0x006E, 0x0056, 0x006E, 0x0076, 0xFFFF, 0x03BC, 0x0056, 0x03BC, 0x0076, 0xFFFF, 0x006D, 0x0056, 0x006D, 0x0076, 0xFFFF };
unsigned short unac_data479[] = { 0x006B, 0x0056, 0x006B, 0x0076, 0xFFFF, 0x004D, 0x0056, 0x006D, 0x0076, 0xFFFF, 0x0070, 0x0057, 0x0070, 0x0077, 0xFFFF, 0x006E, 0x0057, 0x006E, 0x0077, 0xFFFF, 0x03BC, 0x0057, 0x03BC, 0x0077, 0xFFFF, 0x006D, 0x0057, 0x006D, 0x0077, 0xFFFF, 0x006B, 0x0057, 0x006B, 0x0077, 0xFFFF, 0x004D, 0x0057, 0x006D, 0x0077, 0xFFFF };
unsigned short unac_data480[] = { 0x006B, 0x03A9, 0x006B, 0x03C9, 0xFFFF, 0x004D, 0x03A9, 0x006D, 0x03C9, 0xFFFF, 0x0061, 0x002E, 0x006D, 0x002E, 0x0061, 0x002E, 0x006D, 0x002E, 0xFFFF, 0x0042, 0x0071, 0x0062, 0x0071, 0xFFFF, 0x0063, 0x0063, 0x0063, 0x0063, 0xFFFF, 0x0063, 0x0064, 0x0063, 0x0064, 0xFFFF, 0x0043, 0x2215, 0x006B, 0x0067, 0x0063, 0x2215, 0x006B, 0x0067, 0xFFFF, 0x0043, 0x006F, 0x002E, 0x0063, 0x006F, 0x002E, 0xFFFF };
unsigned short unac_data481[] = { 0x0064, 0x0042, 0x0064, 0x0062, 0xFFFF, 0x0047, 0x0079, 0x0067, 0x0079, 0xFFFF, 0x0068, 0x0061, 0x0068, 0x0061, 0xFFFF, 0x0048, 0x0050, 0x0068, 0x0070, 0xFFFF, 0x0069, 0x006E, 0x0069, 0x006E, 0xFFFF, 0x004B, 0x004B, 0x006B, 0x006B, 0xFFFF, 0x004B, 0x004D, 0x006B, 0x006D, 0xFFFF, 0x006B, 0x0074, 0x006B, 0x0074, 0xFFFF };
unsigned short unac_data482[] = { 0x006C, 0x006D, 0x006C, 0x006D, 0xFFFF, 0x006C, 0x006E, 0x006C, 0x006E, 0xFFFF, 0x006C, 0x006F, 0x0067, 0x006C, 0x006F, 0x0067, 0xFFFF, 0x006C, 0x0078, 0x006C, 0x0078, 0xFFFF, 0x006D, 0x0062, 0x006D, 0x0062, 0xFFFF, 0x006D, 0x0069, 0x006C, 0x006D, 0x0069, 0x006C, 0xFFFF, 0x006D, 0x006F, 0x006C, 0x006D, 0x006F, 0x006C, 0xFFFF, 0x0050, 0x0048, 0x0070, 0x0068, 0xFFFF };
unsigned short unac_data483[] = { 0x0070, 0x002E, 0x006D, 0x002E, 0x0070, 0x002E, 0x006D, 0x002E, 0xFFFF, 0x0050, 0x0050, 0x004D, 0x0070, 0x0070, 0x006D, 0xFFFF, 0x0050, 0x0052, 0x0070, 0x0072, 0xFFFF, 0x0073, 0x0072, 0x0073, 0x0072, 0xFFFF, 0x0053, 0x0076, 0x0073, 0x0076, 0xFFFF, 0x0057, 0x0062, 0x0077, 0x0062, 0xFFFF, 0x0056, 0x2215, 0x006D, 0x0076, 0x2215, 0x006D, 0xFFFF, 0x0041, 0x2215, 0x006D, 0x0061, 0x2215, 0x006D, 0xFFFF };
unsigned short unac_data484[] = { 0x0031, 0x65E5, 0x0031, 0x65E5, 0xFFFF, 0x0032, 0x65E5, 0x0032, 0x65E5, 0xFFFF, 0x0033, 0x65E5, 0x0033, 0x65E5, 0xFFFF, 0x0034, 0x65E5, 0x0034, 0x65E5, 0xFFFF, 0x0035, 0x65E5, 0x0035, 0x65E5, 0xFFFF, 0x0036, 0x65E5, 0x0036, 0x65E5, 0xFFFF, 0x0037, 0x65E5, 0x0037, 0x65E5, 0xFFFF, 0x0038, 0x65E5, 0x0038, 0x65E5, 0xFFFF };
unsigned short unac_data485[] = { 0x0039, 0x65E5, 0x0039, 0x65E5, 0xFFFF, 0x0031, 0x0030, 0x65E5, 0x0031, 0x0030, 0x65E5, 0xFFFF, 0x0031, 0x0031, 0x65E5, 0x0031, 0x0031, 0x65E5, 0xFFFF, 0x0031, 0x0032, 0x65E5, 0x0031, 0x0032, 0x65E5, 0xFFFF, 0x0031, 0x0033, 0x65E5, 0x0031, 0x0033, 0x65E5, 0xFFFF, 0x0031, 0x0034, 0x65E5, 0x0031, 0x0034, 0x65E5, 0xFFFF, 0x0031, 0x0035, 0x65E5, 0x0031, 0x0035, 0x65E5, 0xFFFF, 0x0031, 0x0036, 0x65E5, 0x0031, 0x0036, 0x65E5, 0xFFFF };
unsigned short unac_data486[] = { 0x0031, 0x0037, 0x65E5, 0x0031, 0x0037, 0x65E5, 0xFFFF, 0x0031, 0x0038, 0x65E5, 0x0031, 0x0038, 0x65E5, 0xFFFF, 0x0031, 0x0039, 0x65E5, 0x0031, 0x0039, 0x65E5, 0xFFFF, 0x0032, 0x0030, 0x65E5, 0x0032, 0x0030, 0x65E5, 0xFFFF, 0x0032, 0x0031, 0x65E5, 0x0032, 0x0031, 0x65E5, 0xFFFF, 0x0032, 0x0032, 0x65E5, 0x0032, 0x0032, 0x65E5, 0xFFFF, 0x0032, 0x0033, 0x65E5, 0x0032, 0x0033, 0x65E5, 0xFFFF, 0x0032, 0x0034, 0x65E5, 0x0032, 0x0034, 0x65E5, 0xFFFF };
unsigned short unac_data487[] = { 0x0032, 0x0035, 0x65E5, 0x0032, 0x0035, 0x65E5, 0xFFFF, 0x0032, 0x0036, 0x65E5, 0x0032, 0x0036, 0x65E5, 0xFFFF, 0x0032, 0x0037, 0x65E5, 0x0032, 0x0037, 0x65E5, 0xFFFF, 0x0032, 0x0038, 0x65E5, 0x0032, 0x0038, 0x65E5, 0xFFFF, 0x0032, 0x0039, 0x65E5, 0x0032, 0x0039, 0x65E5, 0xFFFF, 0x0033, 0x0030, 0x65E5, 0x0033, 0x0030, 0x65E5, 0xFFFF, 0x0033, 0x0031, 0x65E5, 0x0033, 0x0031, 0x65E5, 0xFFFF, 0x0067, 0x0061, 0x006C, 0x0067, 0x0061, 0x006C, 0xFFFF };
unsigned short unac_data488[] = { 0xFFFF, 0xA641, 0xA641, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA643, 0xA643, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA645, 0xA645, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA647, 0xA647, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data489[] = { 0xFFFF, 0xA649, 0xA649, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA64B, 0xA64B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA64D, 0xA64D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA64F, 0xA64F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data490[] = { 0xFFFF, 0xA651, 0xA651, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA653, 0xA653, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA655, 0xA655, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA657, 0xA657, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data491[] = { 0xFFFF, 0xA659, 0xA659, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA65B, 0xA65B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA65D, 0xA65D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA65F, 0xA65F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data492[] = { 0xFFFF, 0xA661, 0xA661, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA663, 0xA663, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA665, 0xA665, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA667, 0xA667, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data493[] = { 0xFFFF, 0xA669, 0xA669, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA66B, 0xA66B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA66D, 0xA66D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data494[] = { 0xFFFF, 0xA681, 0xA681, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA683, 0xA683, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA685, 0xA685, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA687, 0xA687, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data495[] = { 0xFFFF, 0xA689, 0xA689, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA68B, 0xA68B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA68D, 0xA68D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA68F, 0xA68F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data496[] = { 0xFFFF, 0xA691, 0xA691, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA693, 0xA693, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA695, 0xA695, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA697, 0xA697, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data497[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA723, 0xA723, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA725, 0xA725, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA727, 0xA727, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data498[] = { 0xFFFF, 0xA729, 0xA729, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA72B, 0xA72B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA72D, 0xA72D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA72F, 0xA72F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data499[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA733, 0xA733, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA735, 0xA735, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA737, 0xA737, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data500[] = { 0xFFFF, 0xA739, 0xA739, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA73B, 0xA73B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA73D, 0xA73D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA73F, 0xA73F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data501[] = { 0xFFFF, 0xA741, 0xA741, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA743, 0xA743, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA745, 0xA745, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA747, 0xA747, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data502[] = { 0xFFFF, 0xA749, 0xA749, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA74B, 0xA74B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA74D, 0xA74D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA74F, 0xA74F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data503[] = { 0xFFFF, 0xA751, 0xA751, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA753, 0xA753, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA755, 0xA755, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA757, 0xA757, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data504[] = { 0xFFFF, 0xA759, 0xA759, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA75B, 0xA75B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA75D, 0xA75D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA75F, 0xA75F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data505[] = { 0xFFFF, 0xA761, 0xA761, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA763, 0xA763, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA765, 0xA765, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA767, 0xA767, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data506[] = { 0xFFFF, 0xA769, 0xA769, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA76B, 0xA76B, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA76D, 0xA76D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA76F, 0xA76F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data507[] = { 0xA76F, 0xA76F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data508[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA77A, 0xA77A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA77C, 0xA77C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1D79, 0x1D79, 0xFFFF, 0xA77F, 0xA77F, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data509[] = { 0xFFFF, 0xA781, 0xA781, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA783, 0xA783, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA785, 0xA785, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA787, 0xA787, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data510[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA78C, 0xA78C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0265, 0x0265, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data511[] = { 0xFFFF, 0xA791, 0xA791, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA793, 0xA793, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data512[] = { 0xFFFF, 0xA7A1, 0xA7A1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA7A3, 0xA7A3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA7A5, 0xA7A5, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA7A7, 0xA7A7, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data513[] = { 0xFFFF, 0xA7A9, 0xA7A9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0266, 0x0266, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data514[] = { 0x0126, 0x0127, 0xFFFF, 0x0153, 0x0153, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data515[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data516[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data517[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data518[] = { 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF };
unsigned short unac_data519[] = { 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data520[] = { 0x8C48, 0x8C48, 0xFFFF, 0x66F4, 0x66F4, 0xFFFF, 0x8ECA, 0x8ECA, 0xFFFF, 0x8CC8, 0x8CC8, 0xFFFF, 0x6ED1, 0x6ED1, 0xFFFF, 0x4E32, 0x4E32, 0xFFFF, 0x53E5, 0x53E5, 0xFFFF, 0x9F9C, 0x9F9C, 0xFFFF };
unsigned short unac_data521[] = { 0x9F9C, 0x9F9C, 0xFFFF, 0x5951, 0x5951, 0xFFFF, 0x91D1, 0x91D1, 0xFFFF, 0x5587, 0x5587, 0xFFFF, 0x5948, 0x5948, 0xFFFF, 0x61F6, 0x61F6, 0xFFFF, 0x7669, 0x7669, 0xFFFF, 0x7F85, 0x7F85, 0xFFFF };
unsigned short unac_data522[] = { 0x863F, 0x863F, 0xFFFF, 0x87BA, 0x87BA, 0xFFFF, 0x88F8, 0x88F8, 0xFFFF, 0x908F, 0x908F, 0xFFFF, 0x6A02, 0x6A02, 0xFFFF, 0x6D1B, 0x6D1B, 0xFFFF, 0x70D9, 0x70D9, 0xFFFF, 0x73DE, 0x73DE, 0xFFFF };
unsigned short unac_data523[] = { 0x843D, 0x843D, 0xFFFF, 0x916A, 0x916A, 0xFFFF, 0x99F1, 0x99F1, 0xFFFF, 0x4E82, 0x4E82, 0xFFFF, 0x5375, 0x5375, 0xFFFF, 0x6B04, 0x6B04, 0xFFFF, 0x721B, 0x721B, 0xFFFF, 0x862D, 0x862D, 0xFFFF };
unsigned short unac_data524[] = { 0x9E1E, 0x9E1E, 0xFFFF, 0x5D50, 0x5D50, 0xFFFF, 0x6FEB, 0x6FEB, 0xFFFF, 0x85CD, 0x85CD, 0xFFFF, 0x8964, 0x8964, 0xFFFF, 0x62C9, 0x62C9, 0xFFFF, 0x81D8, 0x81D8, 0xFFFF, 0x881F, 0x881F, 0xFFFF };
unsigned short unac_data525[] = { 0x5ECA, 0x5ECA, 0xFFFF, 0x6717, 0x6717, 0xFFFF, 0x6D6A, 0x6D6A, 0xFFFF, 0x72FC, 0x72FC, 0xFFFF, 0x90CE, 0x90CE, 0xFFFF, 0x4F86, 0x4F86, 0xFFFF, 0x51B7, 0x51B7, 0xFFFF, 0x52DE, 0x52DE, 0xFFFF };
unsigned short unac_data526[] = { 0x64C4, 0x64C4, 0xFFFF, 0x6AD3, 0x6AD3, 0xFFFF, 0x7210, 0x7210, 0xFFFF, 0x76E7, 0x76E7, 0xFFFF, 0x8001, 0x8001, 0xFFFF, 0x8606, 0x8606, 0xFFFF, 0x865C, 0x865C, 0xFFFF, 0x8DEF, 0x8DEF, 0xFFFF };
unsigned short unac_data527[] = { 0x9732, 0x9732, 0xFFFF, 0x9B6F, 0x9B6F, 0xFFFF, 0x9DFA, 0x9DFA, 0xFFFF, 0x788C, 0x788C, 0xFFFF, 0x797F, 0x797F, 0xFFFF, 0x7DA0, 0x7DA0, 0xFFFF, 0x83C9, 0x83C9, 0xFFFF, 0x9304, 0x9304, 0xFFFF };
unsigned short unac_data528[] = { 0x9E7F, 0x9E7F, 0xFFFF, 0x8AD6, 0x8AD6, 0xFFFF, 0x58DF, 0x58DF, 0xFFFF, 0x5F04, 0x5F04, 0xFFFF, 0x7C60, 0x7C60, 0xFFFF, 0x807E, 0x807E, 0xFFFF, 0x7262, 0x7262, 0xFFFF, 0x78CA, 0x78CA, 0xFFFF };
unsigned short unac_data529[] = { 0x8CC2, 0x8CC2, 0xFFFF, 0x96F7, 0x96F7, 0xFFFF, 0x58D8, 0x58D8, 0xFFFF, 0x5C62, 0x5C62, 0xFFFF, 0x6A13, 0x6A13, 0xFFFF, 0x6DDA, 0x6DDA, 0xFFFF, 0x6F0F, 0x6F0F, 0xFFFF, 0x7D2F, 0x7D2F, 0xFFFF };
unsigned short unac_data530[] = { 0x7E37, 0x7E37, 0xFFFF, 0x964B, 0x964B, 0xFFFF, 0x52D2, 0x52D2, 0xFFFF, 0x808B, 0x808B, 0xFFFF, 0x51DC, 0x51DC, 0xFFFF, 0x51CC, 0x51CC, 0xFFFF, 0x7A1C, 0x7A1C, 0xFFFF, 0x7DBE, 0x7DBE, 0xFFFF };
unsigned short unac_data531[] = { 0x83F1, 0x83F1, 0xFFFF, 0x9675, 0x9675, 0xFFFF, 0x8B80, 0x8B80, 0xFFFF, 0x62CF, 0x62CF, 0xFFFF, 0x6A02, 0x6A02, 0xFFFF, 0x8AFE, 0x8AFE, 0xFFFF, 0x4E39, 0x4E39, 0xFFFF, 0x5BE7, 0x5BE7, 0xFFFF };
unsigned short unac_data532[] = { 0x6012, 0x6012, 0xFFFF, 0x7387, 0x7387, 0xFFFF, 0x7570, 0x7570, 0xFFFF, 0x5317, 0x5317, 0xFFFF, 0x78FB, 0x78FB, 0xFFFF, 0x4FBF, 0x4FBF, 0xFFFF, 0x5FA9, 0x5FA9, 0xFFFF, 0x4E0D, 0x4E0D, 0xFFFF };
unsigned short unac_data533[] = { 0x6CCC, 0x6CCC, 0xFFFF, 0x6578, 0x6578, 0xFFFF, 0x7D22, 0x7D22, 0xFFFF, 0x53C3, 0x53C3, 0xFFFF, 0x585E, 0x585E, 0xFFFF, 0x7701, 0x7701, 0xFFFF, 0x8449, 0x8449, 0xFFFF, 0x8AAA, 0x8AAA, 0xFFFF };
unsigned short unac_data534[] = { 0x6BBA, 0x6BBA, 0xFFFF, 0x8FB0, 0x8FB0, 0xFFFF, 0x6C88, 0x6C88, 0xFFFF, 0x62FE, 0x62FE, 0xFFFF, 0x82E5, 0x82E5, 0xFFFF, 0x63A0, 0x63A0, 0xFFFF, 0x7565, 0x7565, 0xFFFF, 0x4EAE, 0x4EAE, 0xFFFF };
unsigned short unac_data535[] = { 0x5169, 0x5169, 0xFFFF, 0x51C9, 0x51C9, 0xFFFF, 0x6881, 0x6881, 0xFFFF, 0x7CE7, 0x7CE7, 0xFFFF, 0x826F, 0x826F, 0xFFFF, 0x8AD2, 0x8AD2, 0xFFFF, 0x91CF, 0x91CF, 0xFFFF, 0x52F5, 0x52F5, 0xFFFF };
unsigned short unac_data536[] = { 0x5442, 0x5442, 0xFFFF, 0x5973, 0x5973, 0xFFFF, 0x5EEC, 0x5EEC, 0xFFFF, 0x65C5, 0x65C5, 0xFFFF, 0x6FFE, 0x6FFE, 0xFFFF, 0x792A, 0x792A, 0xFFFF, 0x95AD, 0x95AD, 0xFFFF, 0x9A6A, 0x9A6A, 0xFFFF };
unsigned short unac_data537[] = { 0x9E97, 0x9E97, 0xFFFF, 0x9ECE, 0x9ECE, 0xFFFF, 0x529B, 0x529B, 0xFFFF, 0x66C6, 0x66C6, 0xFFFF, 0x6B77, 0x6B77, 0xFFFF, 0x8F62, 0x8F62, 0xFFFF, 0x5E74, 0x5E74, 0xFFFF, 0x6190, 0x6190, 0xFFFF };
unsigned short unac_data538[] = { 0x6200, 0x6200, 0xFFFF, 0x649A, 0x649A, 0xFFFF, 0x6F23, 0x6F23, 0xFFFF, 0x7149, 0x7149, 0xFFFF, 0x7489, 0x7489, 0xFFFF, 0x79CA, 0x79CA, 0xFFFF, 0x7DF4, 0x7DF4, 0xFFFF, 0x806F, 0x806F, 0xFFFF };
unsigned short unac_data539[] = { 0x8F26, 0x8F26, 0xFFFF, 0x84EE, 0x84EE, 0xFFFF, 0x9023, 0x9023, 0xFFFF, 0x934A, 0x934A, 0xFFFF, 0x5217, 0x5217, 0xFFFF, 0x52A3, 0x52A3, 0xFFFF, 0x54BD, 0x54BD, 0xFFFF, 0x70C8, 0x70C8, 0xFFFF };
unsigned short unac_data540[] = { 0x88C2, 0x88C2, 0xFFFF, 0x8AAA, 0x8AAA, 0xFFFF, 0x5EC9, 0x5EC9, 0xFFFF, 0x5FF5, 0x5FF5, 0xFFFF, 0x637B, 0x637B, 0xFFFF, 0x6BAE, 0x6BAE, 0xFFFF, 0x7C3E, 0x7C3E, 0xFFFF, 0x7375, 0x7375, 0xFFFF };
unsigned short unac_data541[] = { 0x4EE4, 0x4EE4, 0xFFFF, 0x56F9, 0x56F9, 0xFFFF, 0x5BE7, 0x5BE7, 0xFFFF, 0x5DBA, 0x5DBA, 0xFFFF, 0x601C, 0x601C, 0xFFFF, 0x73B2, 0x73B2, 0xFFFF, 0x7469, 0x7469, 0xFFFF, 0x7F9A, 0x7F9A, 0xFFFF };
unsigned short unac_data542[] = { 0x8046, 0x8046, 0xFFFF, 0x9234, 0x9234, 0xFFFF, 0x96F6, 0x96F6, 0xFFFF, 0x9748, 0x9748, 0xFFFF, 0x9818, 0x9818, 0xFFFF, 0x4F8B, 0x4F8B, 0xFFFF, 0x79AE, 0x79AE, 0xFFFF, 0x91B4, 0x91B4, 0xFFFF };
unsigned short unac_data543[] = { 0x96B8, 0x96B8, 0xFFFF, 0x60E1, 0x60E1, 0xFFFF, 0x4E86, 0x4E86, 0xFFFF, 0x50DA, 0x50DA, 0xFFFF, 0x5BEE, 0x5BEE, 0xFFFF, 0x5C3F, 0x5C3F, 0xFFFF, 0x6599, 0x6599, 0xFFFF, 0x6A02, 0x6A02, 0xFFFF };
unsigned short unac_data544[] = { 0x71CE, 0x71CE, 0xFFFF, 0x7642, 0x7642, 0xFFFF, 0x84FC, 0x84FC, 0xFFFF, 0x907C, 0x907C, 0xFFFF, 0x9F8D, 0x9F8D, 0xFFFF, 0x6688, 0x6688, 0xFFFF, 0x962E, 0x962E, 0xFFFF, 0x5289, 0x5289, 0xFFFF };
unsigned short unac_data545[] = { 0x677B, 0x677B, 0xFFFF, 0x67F3, 0x67F3, 0xFFFF, 0x6D41, 0x6D41, 0xFFFF, 0x6E9C, 0x6E9C, 0xFFFF, 0x7409, 0x7409, 0xFFFF, 0x7559, 0x7559, 0xFFFF, 0x786B, 0x786B, 0xFFFF, 0x7D10, 0x7D10, 0xFFFF };
unsigned short unac_data546[] = { 0x985E, 0x985E, 0xFFFF, 0x516D, 0x516D, 0xFFFF, 0x622E, 0x622E, 0xFFFF, 0x9678, 0x9678, 0xFFFF, 0x502B, 0x502B, 0xFFFF, 0x5D19, 0x5D19, 0xFFFF, 0x6DEA, 0x6DEA, 0xFFFF, 0x8F2A, 0x8F2A, 0xFFFF };
unsigned short unac_data547[] = { 0x5F8B, 0x5F8B, 0xFFFF, 0x6144, 0x6144, 0xFFFF, 0x6817, 0x6817, 0xFFFF, 0x7387, 0x7387, 0xFFFF, 0x9686, 0x9686, 0xFFFF, 0x5229, 0x5229, 0xFFFF, 0x540F, 0x540F, 0xFFFF, 0x5C65, 0x5C65, 0xFFFF };
unsigned short unac_data548[] = { 0x6613, 0x6613, 0xFFFF, 0x674E, 0x674E, 0xFFFF, 0x68A8, 0x68A8, 0xFFFF, 0x6CE5, 0x6CE5, 0xFFFF, 0x7406, 0x7406, 0xFFFF, 0x75E2, 0x75E2, 0xFFFF, 0x7F79, 0x7F79, 0xFFFF, 0x88CF, 0x88CF, 0xFFFF };
unsigned short unac_data549[] = { 0x88E1, 0x88E1, 0xFFFF, 0x91CC, 0x91CC, 0xFFFF, 0x96E2, 0x96E2, 0xFFFF, 0x533F, 0x533F, 0xFFFF, 0x6EBA, 0x6EBA, 0xFFFF, 0x541D, 0x541D, 0xFFFF, 0x71D0, 0x71D0, 0xFFFF, 0x7498, 0x7498, 0xFFFF };
unsigned short unac_data550[] = { 0x85FA, 0x85FA, 0xFFFF, 0x96A3, 0x96A3, 0xFFFF, 0x9C57, 0x9C57, 0xFFFF, 0x9E9F, 0x9E9F, 0xFFFF, 0x6797, 0x6797, 0xFFFF, 0x6DCB, 0x6DCB, 0xFFFF, 0x81E8, 0x81E8, 0xFFFF, 0x7ACB, 0x7ACB, 0xFFFF };
unsigned short unac_data551[] = { 0x7B20, 0x7B20, 0xFFFF, 0x7C92, 0x7C92, 0xFFFF, 0x72C0, 0x72C0, 0xFFFF, 0x7099, 0x7099, 0xFFFF, 0x8B58, 0x8B58, 0xFFFF, 0x4EC0, 0x4EC0, 0xFFFF, 0x8336, 0x8336, 0xFFFF, 0x523A, 0x523A, 0xFFFF };
unsigned short unac_data552[] = { 0x5207, 0x5207, 0xFFFF, 0x5EA6, 0x5EA6, 0xFFFF, 0x62D3, 0x62D3, 0xFFFF, 0x7CD6, 0x7CD6, 0xFFFF, 0x5B85, 0x5B85, 0xFFFF, 0x6D1E, 0x6D1E, 0xFFFF, 0x66B4, 0x66B4, 0xFFFF, 0x8F3B, 0x8F3B, 0xFFFF };
unsigned short unac_data553[] = { 0x884C, 0x884C, 0xFFFF, 0x964D, 0x964D, 0xFFFF, 0x898B, 0x898B, 0xFFFF, 0x5ED3, 0x5ED3, 0xFFFF, 0x5140, 0x5140, 0xFFFF, 0x55C0, 0x55C0, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data554[] = { 0x585A, 0x585A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x6674, 0x6674, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x51DE, 0x51DE, 0xFFFF, 0x732A, 0x732A, 0xFFFF, 0x76CA, 0x76CA, 0xFFFF };
unsigned short unac_data555[] = { 0x793C, 0x793C, 0xFFFF, 0x795E, 0x795E, 0xFFFF, 0x7965, 0x7965, 0xFFFF, 0x798F, 0x798F, 0xFFFF, 0x9756, 0x9756, 0xFFFF, 0x7CBE, 0x7CBE, 0xFFFF, 0x7FBD, 0x7FBD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data556[] = { 0x8612, 0x8612, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8AF8, 0x8AF8, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9038, 0x9038, 0xFFFF, 0x90FD, 0x90FD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data557[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x98EF, 0x98EF, 0xFFFF, 0x98FC, 0x98FC, 0xFFFF, 0x9928, 0x9928, 0xFFFF, 0x9DB4, 0x9DB4, 0xFFFF, 0x90DE, 0x90DE, 0xFFFF, 0x96B7, 0x96B7, 0xFFFF };
unsigned short unac_data558[] = { 0x4FAE, 0x4FAE, 0xFFFF, 0x50E7, 0x50E7, 0xFFFF, 0x514D, 0x514D, 0xFFFF, 0x52C9, 0x52C9, 0xFFFF, 0x52E4, 0x52E4, 0xFFFF, 0x5351, 0x5351, 0xFFFF, 0x559D, 0x559D, 0xFFFF, 0x5606, 0x5606, 0xFFFF };
unsigned short unac_data559[] = { 0x5668, 0x5668, 0xFFFF, 0x5840, 0x5840, 0xFFFF, 0x58A8, 0x58A8, 0xFFFF, 0x5C64, 0x5C64, 0xFFFF, 0x5C6E, 0x5C6E, 0xFFFF, 0x6094, 0x6094, 0xFFFF, 0x6168, 0x6168, 0xFFFF, 0x618E, 0x618E, 0xFFFF };
unsigned short unac_data560[] = { 0x61F2, 0x61F2, 0xFFFF, 0x654F, 0x654F, 0xFFFF, 0x65E2, 0x65E2, 0xFFFF, 0x6691, 0x6691, 0xFFFF, 0x6885, 0x6885, 0xFFFF, 0x6D77, 0x6D77, 0xFFFF, 0x6E1A, 0x6E1A, 0xFFFF, 0x6F22, 0x6F22, 0xFFFF };
unsigned short unac_data561[] = { 0x716E, 0x716E, 0xFFFF, 0x722B, 0x722B, 0xFFFF, 0x7422, 0x7422, 0xFFFF, 0x7891, 0x7891, 0xFFFF, 0x793E, 0x793E, 0xFFFF, 0x7949, 0x7949, 0xFFFF, 0x7948, 0x7948, 0xFFFF, 0x7950, 0x7950, 0xFFFF };
unsigned short unac_data562[] = { 0x7956, 0x7956, 0xFFFF, 0x795D, 0x795D, 0xFFFF, 0x798D, 0x798D, 0xFFFF, 0x798E, 0x798E, 0xFFFF, 0x7A40, 0x7A40, 0xFFFF, 0x7A81, 0x7A81, 0xFFFF, 0x7BC0, 0x7BC0, 0xFFFF, 0x7DF4, 0x7DF4, 0xFFFF };
unsigned short unac_data563[] = { 0x7E09, 0x7E09, 0xFFFF, 0x7E41, 0x7E41, 0xFFFF, 0x7F72, 0x7F72, 0xFFFF, 0x8005, 0x8005, 0xFFFF, 0x81ED, 0x81ED, 0xFFFF, 0x8279, 0x8279, 0xFFFF, 0x8279, 0x8279, 0xFFFF, 0x8457, 0x8457, 0xFFFF };
unsigned short unac_data564[] = { 0x8910, 0x8910, 0xFFFF, 0x8996, 0x8996, 0xFFFF, 0x8B01, 0x8B01, 0xFFFF, 0x8B39, 0x8B39, 0xFFFF, 0x8CD3, 0x8CD3, 0xFFFF, 0x8D08, 0x8D08, 0xFFFF, 0x8FB6, 0x8FB6, 0xFFFF, 0x9038, 0x9038, 0xFFFF };
unsigned short unac_data565[] = { 0x96E3, 0x96E3, 0xFFFF, 0x97FF, 0x97FF, 0xFFFF, 0x983B, 0x983B, 0xFFFF, 0x6075, 0x6075, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8218, 0x8218, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data566[] = { 0x4E26, 0x4E26, 0xFFFF, 0x51B5, 0x51B5, 0xFFFF, 0x5168, 0x5168, 0xFFFF, 0x4F80, 0x4F80, 0xFFFF, 0x5145, 0x5145, 0xFFFF, 0x5180, 0x5180, 0xFFFF, 0x52C7, 0x52C7, 0xFFFF, 0x52FA, 0x52FA, 0xFFFF };
unsigned short unac_data567[] = { 0x559D, 0x559D, 0xFFFF, 0x5555, 0x5555, 0xFFFF, 0x5599, 0x5599, 0xFFFF, 0x55E2, 0x55E2, 0xFFFF, 0x585A, 0x585A, 0xFFFF, 0x58B3, 0x58B3, 0xFFFF, 0x5944, 0x5944, 0xFFFF, 0x5954, 0x5954, 0xFFFF };
unsigned short unac_data568[] = { 0x5A62, 0x5A62, 0xFFFF, 0x5B28, 0x5B28, 0xFFFF, 0x5ED2, 0x5ED2, 0xFFFF, 0x5ED9, 0x5ED9, 0xFFFF, 0x5F69, 0x5F69, 0xFFFF, 0x5FAD, 0x5FAD, 0xFFFF, 0x60D8, 0x60D8, 0xFFFF, 0x614E, 0x614E, 0xFFFF };
unsigned short unac_data569[] = { 0x6108, 0x6108, 0xFFFF, 0x618E, 0x618E, 0xFFFF, 0x6160, 0x6160, 0xFFFF, 0x61F2, 0x61F2, 0xFFFF, 0x6234, 0x6234, 0xFFFF, 0x63C4, 0x63C4, 0xFFFF, 0x641C, 0x641C, 0xFFFF, 0x6452, 0x6452, 0xFFFF };
unsigned short unac_data570[] = { 0x6556, 0x6556, 0xFFFF, 0x6674, 0x6674, 0xFFFF, 0x6717, 0x6717, 0xFFFF, 0x671B, 0x671B, 0xFFFF, 0x6756, 0x6756, 0xFFFF, 0x6B79, 0x6B79, 0xFFFF, 0x6BBA, 0x6BBA, 0xFFFF, 0x6D41, 0x6D41, 0xFFFF };
unsigned short unac_data571[] = { 0x6EDB, 0x6EDB, 0xFFFF, 0x6ECB, 0x6ECB, 0xFFFF, 0x6F22, 0x6F22, 0xFFFF, 0x701E, 0x701E, 0xFFFF, 0x716E, 0x716E, 0xFFFF, 0x77A7, 0x77A7, 0xFFFF, 0x7235, 0x7235, 0xFFFF, 0x72AF, 0x72AF, 0xFFFF };
unsigned short unac_data572[] = { 0x732A, 0x732A, 0xFFFF, 0x7471, 0x7471, 0xFFFF, 0x7506, 0x7506, 0xFFFF, 0x753B, 0x753B, 0xFFFF, 0x761D, 0x761D, 0xFFFF, 0x761F, 0x761F, 0xFFFF, 0x76CA, 0x76CA, 0xFFFF, 0x76DB, 0x76DB, 0xFFFF };
unsigned short unac_data573[] = { 0x76F4, 0x76F4, 0xFFFF, 0x774A, 0x774A, 0xFFFF, 0x7740, 0x7740, 0xFFFF, 0x78CC, 0x78CC, 0xFFFF, 0x7AB1, 0x7AB1, 0xFFFF, 0x7BC0, 0x7BC0, 0xFFFF, 0x7C7B, 0x7C7B, 0xFFFF, 0x7D5B, 0x7D5B, 0xFFFF };
unsigned short unac_data574[] = { 0x7DF4, 0x7DF4, 0xFFFF, 0x7F3E, 0x7F3E, 0xFFFF, 0x8005, 0x8005, 0xFFFF, 0x8352, 0x8352, 0xFFFF, 0x83EF, 0x83EF, 0xFFFF, 0x8779, 0x8779, 0xFFFF, 0x8941, 0x8941, 0xFFFF, 0x8986, 0x8986, 0xFFFF };
unsigned short unac_data575[] = { 0x8996, 0x8996, 0xFFFF, 0x8ABF, 0x8ABF, 0xFFFF, 0x8AF8, 0x8AF8, 0xFFFF, 0x8ACB, 0x8ACB, 0xFFFF, 0x8B01, 0x8B01, 0xFFFF, 0x8AFE, 0x8AFE, 0xFFFF, 0x8AED, 0x8AED, 0xFFFF, 0x8B39, 0x8B39, 0xFFFF };
unsigned short unac_data576[] = { 0x8B8A, 0x8B8A, 0xFFFF, 0x8D08, 0x8D08, 0xFFFF, 0x8F38, 0x8F38, 0xFFFF, 0x9072, 0x9072, 0xFFFF, 0x9199, 0x9199, 0xFFFF, 0x9276, 0x9276, 0xFFFF, 0x967C, 0x967C, 0xFFFF, 0x96E3, 0x96E3, 0xFFFF };
unsigned short unac_data577[] = { 0x9756, 0x9756, 0xFFFF, 0x97DB, 0x97DB, 0xFFFF, 0x97FF, 0x97FF, 0xFFFF, 0x980B, 0x980B, 0xFFFF, 0x983B, 0x983B, 0xFFFF, 0x9B12, 0x9B12, 0xFFFF, 0x9F9C, 0x9F9C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data578[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x3B9D, 0x3B9D, 0xFFFF, 0x4018, 0x4018, 0xFFFF, 0x4039, 0x4039, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data579[] = { 0x9F43, 0x9F43, 0xFFFF, 0x9F8E, 0x9F8E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data580[] = { 0x0066, 0x0066, 0x0066, 0x0066, 0x0066, 0x0066, 0x0066, 0x0069, 0x0066, 0x0069, 0x0066, 0x0069, 0x0066, 0x006C, 0x0066, 0x006C, 0x0066, 0x006C, 0x0066, 0x0066, 0x0069, 0x0066, 0x0066, 0x0069, 0x0066, 0x0066, 0x0069, 0x0066, 0x0066, 0x006C, 0x0066, 0x0066, 0x006C, 0x0066, 0x0066, 0x006C, 0x0074, 0x0073, 0x0074, 0x0073, 0x0073, 0x0074, 0x0073, 0x0074, 0x0073, 0x0074, 0x0073, 0x0074, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data581[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0574, 0x0576, 0x0574, 0x0576, 0x0574, 0x0576, 0x0574, 0x0565, 0x0574, 0x0565, 0x0574, 0x0565, 0x0574, 0x056B, 0x0574, 0x056B, 0x0574, 0x056B, 0x057E, 0x0576, 0x057E, 0x0576, 0x057E, 0x0576, 0x0574, 0x056D, 0x0574, 0x056D, 0x0574, 0x056D };
unsigned short unac_data582[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x05D9, 0x05D9, 0xFFFF, 0x0000, 0x0000, 0xFFFF, 0x05F2, 0x05F2, 0xFFFF };
unsigned short unac_data583[] = { 0x05E2, 0x05E2, 0xFFFF, 0x05D0, 0x05D0, 0xFFFF, 0x05D3, 0x05D3, 0xFFFF, 0x05D4, 0x05D4, 0xFFFF, 0x05DB, 0x05DB, 0xFFFF, 0x05DC, 0x05DC, 0xFFFF, 0x05DD, 0x05DD, 0xFFFF, 0x05E8, 0x05E8, 0xFFFF };
unsigned short unac_data584[] = { 0x05EA, 0x05EA, 0xFFFF, 0x002B, 0x002B, 0xFFFF, 0x05E9, 0x05E9, 0xFFFF, 0x05E9, 0x05E9, 0xFFFF, 0x05E9, 0x05E9, 0xFFFF, 0x05E9, 0x05E9, 0xFFFF, 0x05D0, 0x05D0, 0xFFFF, 0x05D0, 0x05D0, 0xFFFF };
unsigned short unac_data585[] = { 0x05D0, 0x05D0, 0xFFFF, 0x05D1, 0x05D1, 0xFFFF, 0x05D2, 0x05D2, 0xFFFF, 0x05D3, 0x05D3, 0xFFFF, 0x05D4, 0x05D4, 0xFFFF, 0x05D5, 0x05D5, 0xFFFF, 0x05D6, 0x05D6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data586[] = { 0x05D8, 0x05D8, 0xFFFF, 0x05D9, 0x05D9, 0xFFFF, 0x05DA, 0x05DA, 0xFFFF, 0x05DB, 0x05DB, 0xFFFF, 0x05DC, 0x05DC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x05DE, 0x05DE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data587[] = { 0x05E0, 0x05E0, 0xFFFF, 0x05E1, 0x05E1, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x05E3, 0x05E3, 0xFFFF, 0x05E4, 0x05E4, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x05E6, 0x05E6, 0xFFFF, 0x05E7, 0x05E7, 0xFFFF };
unsigned short unac_data588[] = { 0x05E8, 0x05E8, 0xFFFF, 0x05E9, 0x05E9, 0xFFFF, 0x05EA, 0x05EA, 0xFFFF, 0x05D5, 0x05D5, 0xFFFF, 0x05D1, 0x05D1, 0xFFFF, 0x05DB, 0x05DB, 0xFFFF, 0x05E4, 0x05E4, 0xFFFF, 0x05D0, 0x05DC, 0x05D0, 0x05DC, 0xFFFF };
unsigned short unac_data589[] = { 0x0671, 0x0671, 0xFFFF, 0x0671, 0x0671, 0xFFFF, 0x067B, 0x067B, 0xFFFF, 0x067B, 0x067B, 0xFFFF, 0x067B, 0x067B, 0xFFFF, 0x067B, 0x067B, 0xFFFF, 0x067E, 0x067E, 0xFFFF, 0x067E, 0x067E, 0xFFFF };
unsigned short unac_data590[] = { 0x067E, 0x067E, 0xFFFF, 0x067E, 0x067E, 0xFFFF, 0x0680, 0x0680, 0xFFFF, 0x0680, 0x0680, 0xFFFF, 0x0680, 0x0680, 0xFFFF, 0x0680, 0x0680, 0xFFFF, 0x067A, 0x067A, 0xFFFF, 0x067A, 0x067A, 0xFFFF };
unsigned short unac_data591[] = { 0x067A, 0x067A, 0xFFFF, 0x067A, 0x067A, 0xFFFF, 0x067F, 0x067F, 0xFFFF, 0x067F, 0x067F, 0xFFFF, 0x067F, 0x067F, 0xFFFF, 0x067F, 0x067F, 0xFFFF, 0x0679, 0x0679, 0xFFFF, 0x0679, 0x0679, 0xFFFF };
unsigned short unac_data592[] = { 0x0679, 0x0679, 0xFFFF, 0x0679, 0x0679, 0xFFFF, 0x06A4, 0x06A4, 0xFFFF, 0x06A4, 0x06A4, 0xFFFF, 0x06A4, 0x06A4, 0xFFFF, 0x06A4, 0x06A4, 0xFFFF, 0x06A6, 0x06A6, 0xFFFF, 0x06A6, 0x06A6, 0xFFFF };
unsigned short unac_data593[] = { 0x06A6, 0x06A6, 0xFFFF, 0x06A6, 0x06A6, 0xFFFF, 0x0684, 0x0684, 0xFFFF, 0x0684, 0x0684, 0xFFFF, 0x0684, 0x0684, 0xFFFF, 0x0684, 0x0684, 0xFFFF, 0x0683, 0x0683, 0xFFFF, 0x0683, 0x0683, 0xFFFF };
unsigned short unac_data594[] = { 0x0683, 0x0683, 0xFFFF, 0x0683, 0x0683, 0xFFFF, 0x0686, 0x0686, 0xFFFF, 0x0686, 0x0686, 0xFFFF, 0x0686, 0x0686, 0xFFFF, 0x0686, 0x0686, 0xFFFF, 0x0687, 0x0687, 0xFFFF, 0x0687, 0x0687, 0xFFFF };
unsigned short unac_data595[] = { 0x0687, 0x0687, 0xFFFF, 0x0687, 0x0687, 0xFFFF, 0x068D, 0x068D, 0xFFFF, 0x068D, 0x068D, 0xFFFF, 0x068C, 0x068C, 0xFFFF, 0x068C, 0x068C, 0xFFFF, 0x068E, 0x068E, 0xFFFF, 0x068E, 0x068E, 0xFFFF };
unsigned short unac_data596[] = { 0x0688, 0x0688, 0xFFFF, 0x0688, 0x0688, 0xFFFF, 0x0698, 0x0698, 0xFFFF, 0x0698, 0x0698, 0xFFFF, 0x0691, 0x0691, 0xFFFF, 0x0691, 0x0691, 0xFFFF, 0x06A9, 0x06A9, 0xFFFF, 0x06A9, 0x06A9, 0xFFFF };
unsigned short unac_data597[] = { 0x06A9, 0x06A9, 0xFFFF, 0x06A9, 0x06A9, 0xFFFF, 0x06AF, 0x06AF, 0xFFFF, 0x06AF, 0x06AF, 0xFFFF, 0x06AF, 0x06AF, 0xFFFF, 0x06AF, 0x06AF, 0xFFFF, 0x06B3, 0x06B3, 0xFFFF, 0x06B3, 0x06B3, 0xFFFF };
unsigned short unac_data598[] = { 0x06B3, 0x06B3, 0xFFFF, 0x06B3, 0x06B3, 0xFFFF, 0x06B1, 0x06B1, 0xFFFF, 0x06B1, 0x06B1, 0xFFFF, 0x06B1, 0x06B1, 0xFFFF, 0x06B1, 0x06B1, 0xFFFF, 0x06BA, 0x06BA, 0xFFFF, 0x06BA, 0x06BA, 0xFFFF };
unsigned short unac_data599[] = { 0x06BB, 0x06BB, 0xFFFF, 0x06BB, 0x06BB, 0xFFFF, 0x06BB, 0x06BB, 0xFFFF, 0x06BB, 0x06BB, 0xFFFF, 0x06D5, 0x06D5, 0xFFFF, 0x06D5, 0x06D5, 0xFFFF, 0x06C1, 0x06C1, 0xFFFF, 0x06C1, 0x06C1, 0xFFFF };
unsigned short unac_data600[] = { 0x06C1, 0x06C1, 0xFFFF, 0x06C1, 0x06C1, 0xFFFF, 0x06BE, 0x06BE, 0xFFFF, 0x06BE, 0x06BE, 0xFFFF, 0x06BE, 0x06BE, 0xFFFF, 0x06BE, 0x06BE, 0xFFFF, 0x06D2, 0x06D2, 0xFFFF, 0x06D2, 0x06D2, 0xFFFF };
unsigned short unac_data601[] = { 0x06D2, 0x06D2, 0xFFFF, 0x06D2, 0x06D2, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data602[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x06AD, 0x06AD, 0xFFFF, 0x06AD, 0x06AD, 0xFFFF, 0x06AD, 0x06AD, 0xFFFF, 0x06AD, 0x06AD, 0xFFFF, 0x06C7, 0x06C7, 0xFFFF };
unsigned short unac_data603[] = { 0x06C7, 0x06C7, 0xFFFF, 0x06C6, 0x06C6, 0xFFFF, 0x06C6, 0x06C6, 0xFFFF, 0x06C8, 0x06C8, 0xFFFF, 0x06C8, 0x06C8, 0xFFFF, 0x06C7, 0x0674, 0x06C7, 0x0674, 0xFFFF, 0x06CB, 0x06CB, 0xFFFF, 0x06CB, 0x06CB, 0xFFFF };
unsigned short unac_data604[] = { 0x06C5, 0x06C5, 0xFFFF, 0x06C5, 0x06C5, 0xFFFF, 0x06C9, 0x06C9, 0xFFFF, 0x06C9, 0x06C9, 0xFFFF, 0x06D0, 0x06D0, 0xFFFF, 0x06D0, 0x06D0, 0xFFFF, 0x06D0, 0x06D0, 0xFFFF, 0x06D0, 0x06D0, 0xFFFF };
unsigned short unac_data605[] = { 0x0649, 0x0649, 0xFFFF, 0x0649, 0x0649, 0xFFFF, 0x0627, 0x064A, 0x0627, 0x064A, 0xFFFF, 0x0627, 0x064A, 0x0627, 0x064A, 0xFFFF, 0x06D5, 0x064A, 0x06D5, 0x064A, 0xFFFF, 0x06D5, 0x064A, 0x06D5, 0x064A, 0xFFFF, 0x0648, 0x064A, 0x0648, 0x064A, 0xFFFF, 0x0648, 0x064A, 0x0648, 0x064A, 0xFFFF };
unsigned short unac_data606[] = { 0x06C7, 0x064A, 0x06C7, 0x064A, 0xFFFF, 0x06C7, 0x064A, 0x06C7, 0x064A, 0xFFFF, 0x06C6, 0x064A, 0x06C6, 0x064A, 0xFFFF, 0x06C6, 0x064A, 0x06C6, 0x064A, 0xFFFF, 0x06C8, 0x064A, 0x06C8, 0x064A, 0xFFFF, 0x06C8, 0x064A, 0x06C8, 0x064A, 0xFFFF, 0x06D0, 0x064A, 0x06D0, 0x064A, 0xFFFF, 0x06D0, 0x064A, 0x06D0, 0x064A, 0xFFFF };
unsigned short unac_data607[] = { 0x06D0, 0x064A, 0x06D0, 0x064A, 0xFFFF, 0x0649, 0x064A, 0x0649, 0x064A, 0xFFFF, 0x0649, 0x064A, 0x0649, 0x064A, 0xFFFF, 0x0649, 0x064A, 0x0649, 0x064A, 0xFFFF, 0x06CC, 0x06CC, 0xFFFF, 0x06CC, 0x06CC, 0xFFFF, 0x06CC, 0x06CC, 0xFFFF, 0x06CC, 0x06CC, 0xFFFF };
unsigned short unac_data608[] = { 0x062C, 0x064A, 0x062C, 0x064A, 0xFFFF, 0x062D, 0x064A, 0x062D, 0x064A, 0xFFFF, 0x0645, 0x064A, 0x0645, 0x064A, 0xFFFF, 0x0649, 0x064A, 0x0649, 0x064A, 0xFFFF, 0x064A, 0x064A, 0x064A, 0x064A, 0xFFFF, 0x0628, 0x062C, 0x0628, 0x062C, 0xFFFF, 0x0628, 0x062D, 0x0628, 0x062D, 0xFFFF, 0x0628, 0x062E, 0x0628, 0x062E, 0xFFFF };
unsigned short unac_data609[] = { 0x0628, 0x0645, 0x0628, 0x0645, 0xFFFF, 0x0628, 0x0649, 0x0628, 0x0649, 0xFFFF, 0x0628, 0x064A, 0x0628, 0x064A, 0xFFFF, 0x062A, 0x062C, 0x062A, 0x062C, 0xFFFF, 0x062A, 0x062D, 0x062A, 0x062D, 0xFFFF, 0x062A, 0x062E, 0x062A, 0x062E, 0xFFFF, 0x062A, 0x0645, 0x062A, 0x0645, 0xFFFF, 0x062A, 0x0649, 0x062A, 0x0649, 0xFFFF };
unsigned short unac_data610[] = { 0x062A, 0x064A, 0x062A, 0x064A, 0xFFFF, 0x062B, 0x062C, 0x062B, 0x062C, 0xFFFF, 0x062B, 0x0645, 0x062B, 0x0645, 0xFFFF, 0x062B, 0x0649, 0x062B, 0x0649, 0xFFFF, 0x062B, 0x064A, 0x062B, 0x064A, 0xFFFF, 0x062C, 0x062D, 0x062C, 0x062D, 0xFFFF, 0x062C, 0x0645, 0x062C, 0x0645, 0xFFFF, 0x062D, 0x062C, 0x062D, 0x062C, 0xFFFF };
unsigned short unac_data611[] = { 0x062D, 0x0645, 0x062D, 0x0645, 0xFFFF, 0x062E, 0x062C, 0x062E, 0x062C, 0xFFFF, 0x062E, 0x062D, 0x062E, 0x062D, 0xFFFF, 0x062E, 0x0645, 0x062E, 0x0645, 0xFFFF, 0x0633, 0x062C, 0x0633, 0x062C, 0xFFFF, 0x0633, 0x062D, 0x0633, 0x062D, 0xFFFF, 0x0633, 0x062E, 0x0633, 0x062E, 0xFFFF, 0x0633, 0x0645, 0x0633, 0x0645, 0xFFFF };
unsigned short unac_data612[] = { 0x0635, 0x062D, 0x0635, 0x062D, 0xFFFF, 0x0635, 0x0645, 0x0635, 0x0645, 0xFFFF, 0x0636, 0x062C, 0x0636, 0x062C, 0xFFFF, 0x0636, 0x062D, 0x0636, 0x062D, 0xFFFF, 0x0636, 0x062E, 0x0636, 0x062E, 0xFFFF, 0x0636, 0x0645, 0x0636, 0x0645, 0xFFFF, 0x0637, 0x062D, 0x0637, 0x062D, 0xFFFF, 0x0637, 0x0645, 0x0637, 0x0645, 0xFFFF };
unsigned short unac_data613[] = { 0x0638, 0x0645, 0x0638, 0x0645, 0xFFFF, 0x0639, 0x062C, 0x0639, 0x062C, 0xFFFF, 0x0639, 0x0645, 0x0639, 0x0645, 0xFFFF, 0x063A, 0x062C, 0x063A, 0x062C, 0xFFFF, 0x063A, 0x0645, 0x063A, 0x0645, 0xFFFF, 0x0641, 0x062C, 0x0641, 0x062C, 0xFFFF, 0x0641, 0x062D, 0x0641, 0x062D, 0xFFFF, 0x0641, 0x062E, 0x0641, 0x062E, 0xFFFF };
unsigned short unac_data614[] = { 0x0641, 0x0645, 0x0641, 0x0645, 0xFFFF, 0x0641, 0x0649, 0x0641, 0x0649, 0xFFFF, 0x0641, 0x064A, 0x0641, 0x064A, 0xFFFF, 0x0642, 0x062D, 0x0642, 0x062D, 0xFFFF, 0x0642, 0x0645, 0x0642, 0x0645, 0xFFFF, 0x0642, 0x0649, 0x0642, 0x0649, 0xFFFF, 0x0642, 0x064A, 0x0642, 0x064A, 0xFFFF, 0x0643, 0x0627, 0x0643, 0x0627, 0xFFFF };
unsigned short unac_data615[] = { 0x0643, 0x062C, 0x0643, 0x062C, 0xFFFF, 0x0643, 0x062D, 0x0643, 0x062D, 0xFFFF, 0x0643, 0x062E, 0x0643, 0x062E, 0xFFFF, 0x0643, 0x0644, 0x0643, 0x0644, 0xFFFF, 0x0643, 0x0645, 0x0643, 0x0645, 0xFFFF, 0x0643, 0x0649, 0x0643, 0x0649, 0xFFFF, 0x0643, 0x064A, 0x0643, 0x064A, 0xFFFF, 0x0644, 0x062C, 0x0644, 0x062C, 0xFFFF };
unsigned short unac_data616[] = { 0x0644, 0x062D, 0x0644, 0x062D, 0xFFFF, 0x0644, 0x062E, 0x0644, 0x062E, 0xFFFF, 0x0644, 0x0645, 0x0644, 0x0645, 0xFFFF, 0x0644, 0x0649, 0x0644, 0x0649, 0xFFFF, 0x0644, 0x064A, 0x0644, 0x064A, 0xFFFF, 0x0645, 0x062C, 0x0645, 0x062C, 0xFFFF, 0x0645, 0x062D, 0x0645, 0x062D, 0xFFFF, 0x0645, 0x062E, 0x0645, 0x062E, 0xFFFF };
unsigned short unac_data617[] = { 0x0645, 0x0645, 0x0645, 0x0645, 0xFFFF, 0x0645, 0x0649, 0x0645, 0x0649, 0xFFFF, 0x0645, 0x064A, 0x0645, 0x064A, 0xFFFF, 0x0646, 0x062C, 0x0646, 0x062C, 0xFFFF, 0x0646, 0x062D, 0x0646, 0x062D, 0xFFFF, 0x0646, 0x062E, 0x0646, 0x062E, 0xFFFF, 0x0646, 0x0645, 0x0646, 0x0645, 0xFFFF, 0x0646, 0x0649, 0x0646, 0x0649, 0xFFFF };
unsigned short unac_data618[] = { 0x0646, 0x064A, 0x0646, 0x064A, 0xFFFF, 0x0647, 0x062C, 0x0647, 0x062C, 0xFFFF, 0x0647, 0x0645, 0x0647, 0x0645, 0xFFFF, 0x0647, 0x0649, 0x0647, 0x0649, 0xFFFF, 0x0647, 0x064A, 0x0647, 0x064A, 0xFFFF, 0x064A, 0x062C, 0x064A, 0x062C, 0xFFFF, 0x064A, 0x062D, 0x064A, 0x062D, 0xFFFF, 0x064A, 0x062E, 0x064A, 0x062E, 0xFFFF };
unsigned short unac_data619[] = { 0x064A, 0x0645, 0x064A, 0x0645, 0xFFFF, 0x064A, 0x0649, 0x064A, 0x0649, 0xFFFF, 0x064A, 0x064A, 0x064A, 0x064A, 0xFFFF, 0x0630, 0x0630, 0xFFFF, 0x0631, 0x0631, 0xFFFF, 0x0649, 0x0649, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF };
unsigned short unac_data620[] = { 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0631, 0x064A, 0x0631, 0x064A, 0xFFFF, 0x0632, 0x064A, 0x0632, 0x064A, 0xFFFF, 0x0645, 0x064A, 0x0645, 0x064A, 0xFFFF, 0x0646, 0x064A, 0x0646, 0x064A, 0xFFFF };
unsigned short unac_data621[] = { 0x0649, 0x064A, 0x0649, 0x064A, 0xFFFF, 0x064A, 0x064A, 0x064A, 0x064A, 0xFFFF, 0x0628, 0x0631, 0x0628, 0x0631, 0xFFFF, 0x0628, 0x0632, 0x0628, 0x0632, 0xFFFF, 0x0628, 0x0645, 0x0628, 0x0645, 0xFFFF, 0x0628, 0x0646, 0x0628, 0x0646, 0xFFFF, 0x0628, 0x0649, 0x0628, 0x0649, 0xFFFF, 0x0628, 0x064A, 0x0628, 0x064A, 0xFFFF };
unsigned short unac_data622[] = { 0x062A, 0x0631, 0x062A, 0x0631, 0xFFFF, 0x062A, 0x0632, 0x062A, 0x0632, 0xFFFF, 0x062A, 0x0645, 0x062A, 0x0645, 0xFFFF, 0x062A, 0x0646, 0x062A, 0x0646, 0xFFFF, 0x062A, 0x0649, 0x062A, 0x0649, 0xFFFF, 0x062A, 0x064A, 0x062A, 0x064A, 0xFFFF, 0x062B, 0x0631, 0x062B, 0x0631, 0xFFFF, 0x062B, 0x0632, 0x062B, 0x0632, 0xFFFF };
unsigned short unac_data623[] = { 0x062B, 0x0645, 0x062B, 0x0645, 0xFFFF, 0x062B, 0x0646, 0x062B, 0x0646, 0xFFFF, 0x062B, 0x0649, 0x062B, 0x0649, 0xFFFF, 0x062B, 0x064A, 0x062B, 0x064A, 0xFFFF, 0x0641, 0x0649, 0x0641, 0x0649, 0xFFFF, 0x0641, 0x064A, 0x0641, 0x064A, 0xFFFF, 0x0642, 0x0649, 0x0642, 0x0649, 0xFFFF, 0x0642, 0x064A, 0x0642, 0x064A, 0xFFFF };
unsigned short unac_data624[] = { 0x0643, 0x0627, 0x0643, 0x0627, 0xFFFF, 0x0643, 0x0644, 0x0643, 0x0644, 0xFFFF, 0x0643, 0x0645, 0x0643, 0x0645, 0xFFFF, 0x0643, 0x0649, 0x0643, 0x0649, 0xFFFF, 0x0643, 0x064A, 0x0643, 0x064A, 0xFFFF, 0x0644, 0x0645, 0x0644, 0x0645, 0xFFFF, 0x0644, 0x0649, 0x0644, 0x0649, 0xFFFF, 0x0644, 0x064A, 0x0644, 0x064A, 0xFFFF };
unsigned short unac_data625[] = { 0x0645, 0x0627, 0x0645, 0x0627, 0xFFFF, 0x0645, 0x0645, 0x0645, 0x0645, 0xFFFF, 0x0646, 0x0631, 0x0646, 0x0631, 0xFFFF, 0x0646, 0x0632, 0x0646, 0x0632, 0xFFFF, 0x0646, 0x0645, 0x0646, 0x0645, 0xFFFF, 0x0646, 0x0646, 0x0646, 0x0646, 0xFFFF, 0x0646, 0x0649, 0x0646, 0x0649, 0xFFFF, 0x0646, 0x064A, 0x0646, 0x064A, 0xFFFF };
unsigned short unac_data626[] = { 0x0649, 0x0649, 0xFFFF, 0x064A, 0x0631, 0x064A, 0x0631, 0xFFFF, 0x064A, 0x0632, 0x064A, 0x0632, 0xFFFF, 0x064A, 0x0645, 0x064A, 0x0645, 0xFFFF, 0x064A, 0x0646, 0x064A, 0x0646, 0xFFFF, 0x064A, 0x0649, 0x064A, 0x0649, 0xFFFF, 0x064A, 0x064A, 0x064A, 0x064A, 0xFFFF, 0x062C, 0x064A, 0x062C, 0x064A, 0xFFFF };
unsigned short unac_data627[] = { 0x062D, 0x064A, 0x062D, 0x064A, 0xFFFF, 0x062E, 0x064A, 0x062E, 0x064A, 0xFFFF, 0x0645, 0x064A, 0x0645, 0x064A, 0xFFFF, 0x0647, 0x064A, 0x0647, 0x064A, 0xFFFF, 0x0628, 0x062C, 0x0628, 0x062C, 0xFFFF, 0x0628, 0x062D, 0x0628, 0x062D, 0xFFFF, 0x0628, 0x062E, 0x0628, 0x062E, 0xFFFF, 0x0628, 0x0645, 0x0628, 0x0645, 0xFFFF };
unsigned short unac_data628[] = { 0x0628, 0x0647, 0x0628, 0x0647, 0xFFFF, 0x062A, 0x062C, 0x062A, 0x062C, 0xFFFF, 0x062A, 0x062D, 0x062A, 0x062D, 0xFFFF, 0x062A, 0x062E, 0x062A, 0x062E, 0xFFFF, 0x062A, 0x0645, 0x062A, 0x0645, 0xFFFF, 0x062A, 0x0647, 0x062A, 0x0647, 0xFFFF, 0x062B, 0x0645, 0x062B, 0x0645, 0xFFFF, 0x062C, 0x062D, 0x062C, 0x062D, 0xFFFF };
unsigned short unac_data629[] = { 0x062C, 0x0645, 0x062C, 0x0645, 0xFFFF, 0x062D, 0x062C, 0x062D, 0x062C, 0xFFFF, 0x062D, 0x0645, 0x062D, 0x0645, 0xFFFF, 0x062E, 0x062C, 0x062E, 0x062C, 0xFFFF, 0x062E, 0x0645, 0x062E, 0x0645, 0xFFFF, 0x0633, 0x062C, 0x0633, 0x062C, 0xFFFF, 0x0633, 0x062D, 0x0633, 0x062D, 0xFFFF, 0x0633, 0x062E, 0x0633, 0x062E, 0xFFFF };
unsigned short unac_data630[] = { 0x0633, 0x0645, 0x0633, 0x0645, 0xFFFF, 0x0635, 0x062D, 0x0635, 0x062D, 0xFFFF, 0x0635, 0x062E, 0x0635, 0x062E, 0xFFFF, 0x0635, 0x0645, 0x0635, 0x0645, 0xFFFF, 0x0636, 0x062C, 0x0636, 0x062C, 0xFFFF, 0x0636, 0x062D, 0x0636, 0x062D, 0xFFFF, 0x0636, 0x062E, 0x0636, 0x062E, 0xFFFF, 0x0636, 0x0645, 0x0636, 0x0645, 0xFFFF };
unsigned short unac_data631[] = { 0x0637, 0x062D, 0x0637, 0x062D, 0xFFFF, 0x0638, 0x0645, 0x0638, 0x0645, 0xFFFF, 0x0639, 0x062C, 0x0639, 0x062C, 0xFFFF, 0x0639, 0x0645, 0x0639, 0x0645, 0xFFFF, 0x063A, 0x062C, 0x063A, 0x062C, 0xFFFF, 0x063A, 0x0645, 0x063A, 0x0645, 0xFFFF, 0x0641, 0x062C, 0x0641, 0x062C, 0xFFFF, 0x0641, 0x062D, 0x0641, 0x062D, 0xFFFF };
unsigned short unac_data632[] = { 0x0641, 0x062E, 0x0641, 0x062E, 0xFFFF, 0x0641, 0x0645, 0x0641, 0x0645, 0xFFFF, 0x0642, 0x062D, 0x0642, 0x062D, 0xFFFF, 0x0642, 0x0645, 0x0642, 0x0645, 0xFFFF, 0x0643, 0x062C, 0x0643, 0x062C, 0xFFFF, 0x0643, 0x062D, 0x0643, 0x062D, 0xFFFF, 0x0643, 0x062E, 0x0643, 0x062E, 0xFFFF, 0x0643, 0x0644, 0x0643, 0x0644, 0xFFFF };
unsigned short unac_data633[] = { 0x0643, 0x0645, 0x0643, 0x0645, 0xFFFF, 0x0644, 0x062C, 0x0644, 0x062C, 0xFFFF, 0x0644, 0x062D, 0x0644, 0x062D, 0xFFFF, 0x0644, 0x062E, 0x0644, 0x062E, 0xFFFF, 0x0644, 0x0645, 0x0644, 0x0645, 0xFFFF, 0x0644, 0x0647, 0x0644, 0x0647, 0xFFFF, 0x0645, 0x062C, 0x0645, 0x062C, 0xFFFF, 0x0645, 0x062D, 0x0645, 0x062D, 0xFFFF };
unsigned short unac_data634[] = { 0x0645, 0x062E, 0x0645, 0x062E, 0xFFFF, 0x0645, 0x0645, 0x0645, 0x0645, 0xFFFF, 0x0646, 0x062C, 0x0646, 0x062C, 0xFFFF, 0x0646, 0x062D, 0x0646, 0x062D, 0xFFFF, 0x0646, 0x062E, 0x0646, 0x062E, 0xFFFF, 0x0646, 0x0645, 0x0646, 0x0645, 0xFFFF, 0x0646, 0x0647, 0x0646, 0x0647, 0xFFFF, 0x0647, 0x062C, 0x0647, 0x062C, 0xFFFF };
unsigned short unac_data635[] = { 0x0647, 0x0645, 0x0647, 0x0645, 0xFFFF, 0x0647, 0x0647, 0xFFFF, 0x064A, 0x062C, 0x064A, 0x062C, 0xFFFF, 0x064A, 0x062D, 0x064A, 0x062D, 0xFFFF, 0x064A, 0x062E, 0x064A, 0x062E, 0xFFFF, 0x064A, 0x0645, 0x064A, 0x0645, 0xFFFF, 0x064A, 0x0647, 0x064A, 0x0647, 0xFFFF, 0x0645, 0x064A, 0x0645, 0x064A, 0xFFFF };
unsigned short unac_data636[] = { 0x0647, 0x064A, 0x0647, 0x064A, 0xFFFF, 0x0628, 0x0645, 0x0628, 0x0645, 0xFFFF, 0x0628, 0x0647, 0x0628, 0x0647, 0xFFFF, 0x062A, 0x0645, 0x062A, 0x0645, 0xFFFF, 0x062A, 0x0647, 0x062A, 0x0647, 0xFFFF, 0x062B, 0x0645, 0x062B, 0x0645, 0xFFFF, 0x062B, 0x0647, 0x062B, 0x0647, 0xFFFF, 0x0633, 0x0645, 0x0633, 0x0645, 0xFFFF };
unsigned short unac_data637[] = { 0x0633, 0x0647, 0x0633, 0x0647, 0xFFFF, 0x0634, 0x0645, 0x0634, 0x0645, 0xFFFF, 0x0634, 0x0647, 0x0634, 0x0647, 0xFFFF, 0x0643, 0x0644, 0x0643, 0x0644, 0xFFFF, 0x0643, 0x0645, 0x0643, 0x0645, 0xFFFF, 0x0644, 0x0645, 0x0644, 0x0645, 0xFFFF, 0x0646, 0x0645, 0x0646, 0x0645, 0xFFFF, 0x0646, 0x0647, 0x0646, 0x0647, 0xFFFF };
unsigned short unac_data638[] = { 0x064A, 0x0645, 0x064A, 0x0645, 0xFFFF, 0x064A, 0x0647, 0x064A, 0x0647, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0637, 0x0649, 0x0637, 0x0649, 0xFFFF, 0x0637, 0x064A, 0x0637, 0x064A, 0xFFFF, 0x0639, 0x0649, 0x0639, 0x0649, 0xFFFF };
unsigned short unac_data639[] = { 0x0639, 0x064A, 0x0639, 0x064A, 0xFFFF, 0x063A, 0x0649, 0x063A, 0x0649, 0xFFFF, 0x063A, 0x064A, 0x063A, 0x064A, 0xFFFF, 0x0633, 0x0649, 0x0633, 0x0649, 0xFFFF, 0x0633, 0x064A, 0x0633, 0x064A, 0xFFFF, 0x0634, 0x0649, 0x0634, 0x0649, 0xFFFF, 0x0634, 0x064A, 0x0634, 0x064A, 0xFFFF, 0x062D, 0x0649, 0x062D, 0x0649, 0xFFFF };
unsigned short unac_data640[] = { 0x062D, 0x064A, 0x062D, 0x064A, 0xFFFF, 0x062C, 0x0649, 0x062C, 0x0649, 0xFFFF, 0x062C, 0x064A, 0x062C, 0x064A, 0xFFFF, 0x062E, 0x0649, 0x062E, 0x0649, 0xFFFF, 0x062E, 0x064A, 0x062E, 0x064A, 0xFFFF, 0x0635, 0x0649, 0x0635, 0x0649, 0xFFFF, 0x0635, 0x064A, 0x0635, 0x064A, 0xFFFF, 0x0636, 0x0649, 0x0636, 0x0649, 0xFFFF };
unsigned short unac_data641[] = { 0x0636, 0x064A, 0x0636, 0x064A, 0xFFFF, 0x0634, 0x062C, 0x0634, 0x062C, 0xFFFF, 0x0634, 0x062D, 0x0634, 0x062D, 0xFFFF, 0x0634, 0x062E, 0x0634, 0x062E, 0xFFFF, 0x0634, 0x0645, 0x0634, 0x0645, 0xFFFF, 0x0634, 0x0631, 0x0634, 0x0631, 0xFFFF, 0x0633, 0x0631, 0x0633, 0x0631, 0xFFFF, 0x0635, 0x0631, 0x0635, 0x0631, 0xFFFF };
unsigned short unac_data642[] = { 0x0636, 0x0631, 0x0636, 0x0631, 0xFFFF, 0x0637, 0x0649, 0x0637, 0x0649, 0xFFFF, 0x0637, 0x064A, 0x0637, 0x064A, 0xFFFF, 0x0639, 0x0649, 0x0639, 0x0649, 0xFFFF, 0x0639, 0x064A, 0x0639, 0x064A, 0xFFFF, 0x063A, 0x0649, 0x063A, 0x0649, 0xFFFF, 0x063A, 0x064A, 0x063A, 0x064A, 0xFFFF, 0x0633, 0x0649, 0x0633, 0x0649, 0xFFFF };
unsigned short unac_data643[] = { 0x0633, 0x064A, 0x0633, 0x064A, 0xFFFF, 0x0634, 0x0649, 0x0634, 0x0649, 0xFFFF, 0x0634, 0x064A, 0x0634, 0x064A, 0xFFFF, 0x062D, 0x0649, 0x062D, 0x0649, 0xFFFF, 0x062D, 0x064A, 0x062D, 0x064A, 0xFFFF, 0x062C, 0x0649, 0x062C, 0x0649, 0xFFFF, 0x062C, 0x064A, 0x062C, 0x064A, 0xFFFF, 0x062E, 0x0649, 0x062E, 0x0649, 0xFFFF };
unsigned short unac_data644[] = { 0x062E, 0x064A, 0x062E, 0x064A, 0xFFFF, 0x0635, 0x0649, 0x0635, 0x0649, 0xFFFF, 0x0635, 0x064A, 0x0635, 0x064A, 0xFFFF, 0x0636, 0x0649, 0x0636, 0x0649, 0xFFFF, 0x0636, 0x064A, 0x0636, 0x064A, 0xFFFF, 0x0634, 0x062C, 0x0634, 0x062C, 0xFFFF, 0x0634, 0x062D, 0x0634, 0x062D, 0xFFFF, 0x0634, 0x062E, 0x0634, 0x062E, 0xFFFF };
unsigned short unac_data645[] = { 0x0634, 0x0645, 0x0634, 0x0645, 0xFFFF, 0x0634, 0x0631, 0x0634, 0x0631, 0xFFFF, 0x0633, 0x0631, 0x0633, 0x0631, 0xFFFF, 0x0635, 0x0631, 0x0635, 0x0631, 0xFFFF, 0x0636, 0x0631, 0x0636, 0x0631, 0xFFFF, 0x0634, 0x062C, 0x0634, 0x062C, 0xFFFF, 0x0634, 0x062D, 0x0634, 0x062D, 0xFFFF, 0x0634, 0x062E, 0x0634, 0x062E, 0xFFFF };
unsigned short unac_data646[] = { 0x0634, 0x0645, 0x0634, 0x0645, 0xFFFF, 0x0633, 0x0647, 0x0633, 0x0647, 0xFFFF, 0x0634, 0x0647, 0x0634, 0x0647, 0xFFFF, 0x0637, 0x0645, 0x0637, 0x0645, 0xFFFF, 0x0633, 0x062C, 0x0633, 0x062C, 0xFFFF, 0x0633, 0x062D, 0x0633, 0x062D, 0xFFFF, 0x0633, 0x062E, 0x0633, 0x062E, 0xFFFF, 0x0634, 0x062C, 0x0634, 0x062C, 0xFFFF };
unsigned short unac_data647[] = { 0x0634, 0x062D, 0x0634, 0x062D, 0xFFFF, 0x0634, 0x062E, 0x0634, 0x062E, 0xFFFF, 0x0637, 0x0645, 0x0637, 0x0645, 0xFFFF, 0x0638, 0x0645, 0x0638, 0x0645, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data648[] = { 0x062A, 0x062C, 0x0645, 0x062A, 0x062C, 0x0645, 0xFFFF, 0x062A, 0x062D, 0x062C, 0x062A, 0x062D, 0x062C, 0xFFFF, 0x062A, 0x062D, 0x062C, 0x062A, 0x062D, 0x062C, 0xFFFF, 0x062A, 0x062D, 0x0645, 0x062A, 0x062D, 0x0645, 0xFFFF, 0x062A, 0x062E, 0x0645, 0x062A, 0x062E, 0x0645, 0xFFFF, 0x062A, 0x0645, 0x062C, 0x062A, 0x0645, 0x062C, 0xFFFF, 0x062A, 0x0645, 0x062D, 0x062A, 0x0645, 0x062D, 0xFFFF, 0x062A, 0x0645, 0x062E, 0x062A, 0x0645, 0x062E, 0xFFFF };
unsigned short unac_data649[] = { 0x062C, 0x0645, 0x062D, 0x062C, 0x0645, 0x062D, 0xFFFF, 0x062C, 0x0645, 0x062D, 0x062C, 0x0645, 0x062D, 0xFFFF, 0x062D, 0x0645, 0x064A, 0x062D, 0x0645, 0x064A, 0xFFFF, 0x062D, 0x0645, 0x0649, 0x062D, 0x0645, 0x0649, 0xFFFF, 0x0633, 0x062D, 0x062C, 0x0633, 0x062D, 0x062C, 0xFFFF, 0x0633, 0x062C, 0x062D, 0x0633, 0x062C, 0x062D, 0xFFFF, 0x0633, 0x062C, 0x0649, 0x0633, 0x062C, 0x0649, 0xFFFF, 0x0633, 0x0645, 0x062D, 0x0633, 0x0645, 0x062D, 0xFFFF };
unsigned short unac_data650[] = { 0x0633, 0x0645, 0x062D, 0x0633, 0x0645, 0x062D, 0xFFFF, 0x0633, 0x0645, 0x062C, 0x0633, 0x0645, 0x062C, 0xFFFF, 0x0633, 0x0645, 0x0645, 0x0633, 0x0645, 0x0645, 0xFFFF, 0x0633, 0x0645, 0x0645, 0x0633, 0x0645, 0x0645, 0xFFFF, 0x0635, 0x062D, 0x062D, 0x0635, 0x062D, 0x062D, 0xFFFF, 0x0635, 0x062D, 0x062D, 0x0635, 0x062D, 0x062D, 0xFFFF, 0x0635, 0x0645, 0x0645, 0x0635, 0x0645, 0x0645, 0xFFFF, 0x0634, 0x062D, 0x0645, 0x0634, 0x062D, 0x0645, 0xFFFF };
unsigned short unac_data651[] = { 0x0634, 0x062D, 0x0645, 0x0634, 0x062D, 0x0645, 0xFFFF, 0x0634, 0x062C, 0x064A, 0x0634, 0x062C, 0x064A, 0xFFFF, 0x0634, 0x0645, 0x062E, 0x0634, 0x0645, 0x062E, 0xFFFF, 0x0634, 0x0645, 0x062E, 0x0634, 0x0645, 0x062E, 0xFFFF, 0x0634, 0x0645, 0x0645, 0x0634, 0x0645, 0x0645, 0xFFFF, 0x0634, 0x0645, 0x0645, 0x0634, 0x0645, 0x0645, 0xFFFF, 0x0636, 0x062D, 0x0649, 0x0636, 0x062D, 0x0649, 0xFFFF, 0x0636, 0x062E, 0x0645, 0x0636, 0x062E, 0x0645, 0xFFFF };
unsigned short unac_data652[] = { 0x0636, 0x062E, 0x0645, 0x0636, 0x062E, 0x0645, 0xFFFF, 0x0637, 0x0645, 0x062D, 0x0637, 0x0645, 0x062D, 0xFFFF, 0x0637, 0x0645, 0x062D, 0x0637, 0x0645, 0x062D, 0xFFFF, 0x0637, 0x0645, 0x0645, 0x0637, 0x0645, 0x0645, 0xFFFF, 0x0637, 0x0645, 0x064A, 0x0637, 0x0645, 0x064A, 0xFFFF, 0x0639, 0x062C, 0x0645, 0x0639, 0x062C, 0x0645, 0xFFFF, 0x0639, 0x0645, 0x0645, 0x0639, 0x0645, 0x0645, 0xFFFF, 0x0639, 0x0645, 0x0645, 0x0639, 0x0645, 0x0645, 0xFFFF };
unsigned short unac_data653[] = { 0x0639, 0x0645, 0x0649, 0x0639, 0x0645, 0x0649, 0xFFFF, 0x063A, 0x0645, 0x0645, 0x063A, 0x0645, 0x0645, 0xFFFF, 0x063A, 0x0645, 0x064A, 0x063A, 0x0645, 0x064A, 0xFFFF, 0x063A, 0x0645, 0x0649, 0x063A, 0x0645, 0x0649, 0xFFFF, 0x0641, 0x062E, 0x0645, 0x0641, 0x062E, 0x0645, 0xFFFF, 0x0641, 0x062E, 0x0645, 0x0641, 0x062E, 0x0645, 0xFFFF, 0x0642, 0x0645, 0x062D, 0x0642, 0x0645, 0x062D, 0xFFFF, 0x0642, 0x0645, 0x0645, 0x0642, 0x0645, 0x0645, 0xFFFF };
unsigned short unac_data654[] = { 0x0644, 0x062D, 0x0645, 0x0644, 0x062D, 0x0645, 0xFFFF, 0x0644, 0x062D, 0x064A, 0x0644, 0x062D, 0x064A, 0xFFFF, 0x0644, 0x062D, 0x0649, 0x0644, 0x062D, 0x0649, 0xFFFF, 0x0644, 0x062C, 0x062C, 0x0644, 0x062C, 0x062C, 0xFFFF, 0x0644, 0x062C, 0x062C, 0x0644, 0x062C, 0x062C, 0xFFFF, 0x0644, 0x062E, 0x0645, 0x0644, 0x062E, 0x0645, 0xFFFF, 0x0644, 0x062E, 0x0645, 0x0644, 0x062E, 0x0645, 0xFFFF, 0x0644, 0x0645, 0x062D, 0x0644, 0x0645, 0x062D, 0xFFFF };
unsigned short unac_data655[] = { 0x0644, 0x0645, 0x062D, 0x0644, 0x0645, 0x062D, 0xFFFF, 0x0645, 0x062D, 0x062C, 0x0645, 0x062D, 0x062C, 0xFFFF, 0x0645, 0x062D, 0x0645, 0x0645, 0x062D, 0x0645, 0xFFFF, 0x0645, 0x062D, 0x064A, 0x0645, 0x062D, 0x064A, 0xFFFF, 0x0645, 0x062C, 0x062D, 0x0645, 0x062C, 0x062D, 0xFFFF, 0x0645, 0x062C, 0x0645, 0x0645, 0x062C, 0x0645, 0xFFFF, 0x0645, 0x062E, 0x062C, 0x0645, 0x062E, 0x062C, 0xFFFF, 0x0645, 0x062E, 0x0645, 0x0645, 0x062E, 0x0645, 0xFFFF };
unsigned short unac_data656[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0645, 0x062C, 0x062E, 0x0645, 0x062C, 0x062E, 0xFFFF, 0x0647, 0x0645, 0x062C, 0x0647, 0x0645, 0x062C, 0xFFFF, 0x0647, 0x0645, 0x0645, 0x0647, 0x0645, 0x0645, 0xFFFF, 0x0646, 0x062D, 0x0645, 0x0646, 0x062D, 0x0645, 0xFFFF, 0x0646, 0x062D, 0x0649, 0x0646, 0x062D, 0x0649, 0xFFFF, 0x0646, 0x062C, 0x0645, 0x0646, 0x062C, 0x0645, 0xFFFF };
unsigned short unac_data657[] = { 0x0646, 0x062C, 0x0645, 0x0646, 0x062C, 0x0645, 0xFFFF, 0x0646, 0x062C, 0x0649, 0x0646, 0x062C, 0x0649, 0xFFFF, 0x0646, 0x0645, 0x064A, 0x0646, 0x0645, 0x064A, 0xFFFF, 0x0646, 0x0645, 0x0649, 0x0646, 0x0645, 0x0649, 0xFFFF, 0x064A, 0x0645, 0x0645, 0x064A, 0x0645, 0x0645, 0xFFFF, 0x064A, 0x0645, 0x0645, 0x064A, 0x0645, 0x0645, 0xFFFF, 0x0628, 0x062E, 0x064A, 0x0628, 0x062E, 0x064A, 0xFFFF, 0x062A, 0x062C, 0x064A, 0x062A, 0x062C, 0x064A, 0xFFFF };
unsigned short unac_data658[] = { 0x062A, 0x062C, 0x0649, 0x062A, 0x062C, 0x0649, 0xFFFF, 0x062A, 0x062E, 0x064A, 0x062A, 0x062E, 0x064A, 0xFFFF, 0x062A, 0x062E, 0x0649, 0x062A, 0x062E, 0x0649, 0xFFFF, 0x062A, 0x0645, 0x064A, 0x062A, 0x0645, 0x064A, 0xFFFF, 0x062A, 0x0645, 0x0649, 0x062A, 0x0645, 0x0649, 0xFFFF, 0x062C, 0x0645, 0x064A, 0x062C, 0x0645, 0x064A, 0xFFFF, 0x062C, 0x062D, 0x0649, 0x062C, 0x062D, 0x0649, 0xFFFF, 0x062C, 0x0645, 0x0649, 0x062C, 0x0645, 0x0649, 0xFFFF };
unsigned short unac_data659[] = { 0x0633, 0x062E, 0x0649, 0x0633, 0x062E, 0x0649, 0xFFFF, 0x0635, 0x062D, 0x064A, 0x0635, 0x062D, 0x064A, 0xFFFF, 0x0634, 0x062D, 0x064A, 0x0634, 0x062D, 0x064A, 0xFFFF, 0x0636, 0x062D, 0x064A, 0x0636, 0x062D, 0x064A, 0xFFFF, 0x0644, 0x062C, 0x064A, 0x0644, 0x062C, 0x064A, 0xFFFF, 0x0644, 0x0645, 0x064A, 0x0644, 0x0645, 0x064A, 0xFFFF, 0x064A, 0x062D, 0x064A, 0x064A, 0x062D, 0x064A, 0xFFFF, 0x064A, 0x062C, 0x064A, 0x064A, 0x062C, 0x064A, 0xFFFF };
unsigned short unac_data660[] = { 0x064A, 0x0645, 0x064A, 0x064A, 0x0645, 0x064A, 0xFFFF, 0x0645, 0x0645, 0x064A, 0x0645, 0x0645, 0x064A, 0xFFFF, 0x0642, 0x0645, 0x064A, 0x0642, 0x0645, 0x064A, 0xFFFF, 0x0646, 0x062D, 0x064A, 0x0646, 0x062D, 0x064A, 0xFFFF, 0x0642, 0x0645, 0x062D, 0x0642, 0x0645, 0x062D, 0xFFFF, 0x0644, 0x062D, 0x0645, 0x0644, 0x062D, 0x0645, 0xFFFF, 0x0639, 0x0645, 0x064A, 0x0639, 0x0645, 0x064A, 0xFFFF, 0x0643, 0x0645, 0x064A, 0x0643, 0x0645, 0x064A, 0xFFFF };
unsigned short unac_data661[] = { 0x0646, 0x062C, 0x062D, 0x0646, 0x062C, 0x062D, 0xFFFF, 0x0645, 0x062E, 0x064A, 0x0645, 0x062E, 0x064A, 0xFFFF, 0x0644, 0x062C, 0x0645, 0x0644, 0x062C, 0x0645, 0xFFFF, 0x0643, 0x0645, 0x0645, 0x0643, 0x0645, 0x0645, 0xFFFF, 0x0644, 0x062C, 0x0645, 0x0644, 0x062C, 0x0645, 0xFFFF, 0x0646, 0x062C, 0x062D, 0x0646, 0x062C, 0x062D, 0xFFFF, 0x062C, 0x062D, 0x064A, 0x062C, 0x062D, 0x064A, 0xFFFF, 0x062D, 0x062C, 0x064A, 0x062D, 0x062C, 0x064A, 0xFFFF };
unsigned short unac_data662[] = { 0x0645, 0x062C, 0x064A, 0x0645, 0x062C, 0x064A, 0xFFFF, 0x0641, 0x0645, 0x064A, 0x0641, 0x0645, 0x064A, 0xFFFF, 0x0628, 0x062D, 0x064A, 0x0628, 0x062D, 0x064A, 0xFFFF, 0x0643, 0x0645, 0x0645, 0x0643, 0x0645, 0x0645, 0xFFFF, 0x0639, 0x062C, 0x0645, 0x0639, 0x062C, 0x0645, 0xFFFF, 0x0635, 0x0645, 0x0645, 0x0635, 0x0645, 0x0645, 0xFFFF, 0x0633, 0x062E, 0x064A, 0x0633, 0x062E, 0x064A, 0xFFFF, 0x0646, 0x062C, 0x064A, 0x0646, 0x062C, 0x064A, 0xFFFF };
unsigned short unac_data663[] = { 0x0635, 0x0644, 0x06D2, 0x0635, 0x0644, 0x06D2, 0xFFFF, 0x0642, 0x0644, 0x06D2, 0x0642, 0x0644, 0x06D2, 0xFFFF, 0x0627, 0x0644, 0x0644, 0x0647, 0x0627, 0x0644, 0x0644, 0x0647, 0xFFFF, 0x0627, 0x0643, 0x0628, 0x0631, 0x0627, 0x0643, 0x0628, 0x0631, 0xFFFF, 0x0645, 0x062D, 0x0645, 0x062F, 0x0645, 0x062D, 0x0645, 0x062F, 0xFFFF, 0x0635, 0x0644, 0x0639, 0x0645, 0x0635, 0x0644, 0x0639, 0x0645, 0xFFFF, 0x0631, 0x0633, 0x0648, 0x0644, 0x0631, 0x0633, 0x0648, 0x0644, 0xFFFF, 0x0639, 0x0644, 0x064A, 0x0647, 0x0639, 0x0644, 0x064A, 0x0647, 0xFFFF };
unsigned short unac_data664[] = { 0x0648, 0x0633, 0x0644, 0x0645, 0x0648, 0x0633, 0x0644, 0x0645, 0xFFFF, 0x0635, 0x0644, 0x0649, 0x0635, 0x0644, 0x0649, 0xFFFF, 0x0635, 0x0644, 0x0649, 0x0020, 0x0627, 0x0644, 0x0644, 0x0647, 0x0020, 0x0639, 0x0644, 0x064A, 0x0647, 0x0020, 0x0648, 0x0633, 0x0644, 0x0645, 0x0635, 0x0644, 0x0649, 0x0020, 0x0627, 0x0644, 0x0644, 0x0647, 0x0020, 0x0639, 0x0644, 0x064A, 0x0647, 0x0020, 0x0648, 0x0633, 0x0644, 0x0645, 0xFFFF, 0x062C, 0x0644, 0x0020, 0x062C, 0x0644, 0x0627, 0x0644, 0x0647, 0x062C, 0x0644, 0x0020, 0x062C, 0x0644, 0x0627, 0x0644, 0x0647, 0xFFFF, 0x0631, 0x06CC, 0x0627, 0x0644, 0x0631, 0x06CC, 0x0627, 0x0644, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data665[] = { 0x002C, 0x002C, 0xFFFF, 0x3001, 0x3001, 0xFFFF, 0x3002, 0x3002, 0xFFFF, 0x003A, 0x003A, 0xFFFF, 0x003B, 0x003B, 0xFFFF, 0x0021, 0x0021, 0xFFFF, 0x003F, 0x003F, 0xFFFF, 0x3016, 0x3016, 0xFFFF };
unsigned short unac_data666[] = { 0x3017, 0x3017, 0xFFFF, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0x002E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data667[] = { 0x002E, 0x002E, 0x002E, 0x002E, 0xFFFF, 0x2014, 0x2014, 0xFFFF, 0x2013, 0x2013, 0xFFFF, 0x005F, 0x005F, 0xFFFF, 0x005F, 0x005F, 0xFFFF, 0x0028, 0x0028, 0xFFFF, 0x0029, 0x0029, 0xFFFF, 0x007B, 0x007B, 0xFFFF };
unsigned short unac_data668[] = { 0x007D, 0x007D, 0xFFFF, 0x3014, 0x3014, 0xFFFF, 0x3015, 0x3015, 0xFFFF, 0x3010, 0x3010, 0xFFFF, 0x3011, 0x3011, 0xFFFF, 0x300A, 0x300A, 0xFFFF, 0x300B, 0x300B, 0xFFFF, 0x3008, 0x3008, 0xFFFF };
unsigned short unac_data669[] = { 0x3009, 0x3009, 0xFFFF, 0x300C, 0x300C, 0xFFFF, 0x300D, 0x300D, 0xFFFF, 0x300E, 0x300E, 0xFFFF, 0x300F, 0x300F, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x005B, 0x005B, 0xFFFF };
unsigned short unac_data670[] = { 0x005D, 0x005D, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x005F, 0x005F, 0xFFFF, 0x005F, 0x005F, 0xFFFF, 0x005F, 0x005F, 0xFFFF };
unsigned short unac_data671[] = { 0x002C, 0x002C, 0xFFFF, 0x3001, 0x3001, 0xFFFF, 0x002E, 0x002E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x003B, 0x003B, 0xFFFF, 0x003A, 0x003A, 0xFFFF, 0x003F, 0x003F, 0xFFFF, 0x0021, 0x0021, 0xFFFF };
unsigned short unac_data672[] = { 0x2014, 0x2014, 0xFFFF, 0x0028, 0x0028, 0xFFFF, 0x0029, 0x0029, 0xFFFF, 0x007B, 0x007B, 0xFFFF, 0x007D, 0x007D, 0xFFFF, 0x3014, 0x3014, 0xFFFF, 0x3015, 0x3015, 0xFFFF, 0x0023, 0x0023, 0xFFFF };
unsigned short unac_data673[] = { 0x0026, 0x0026, 0xFFFF, 0x002A, 0x002A, 0xFFFF, 0x002B, 0x002B, 0xFFFF, 0x002D, 0x002D, 0xFFFF, 0x003C, 0x003C, 0xFFFF, 0x003E, 0x003E, 0xFFFF, 0x003D, 0x003D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data674[] = { 0x005C, 0x005C, 0xFFFF, 0x0024, 0x0024, 0xFFFF, 0x0025, 0x0025, 0xFFFF, 0x0040, 0x0040, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data675[] = { 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF };
unsigned short unac_data676[] = { 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x0640, 0x0640, 0xFFFF };
unsigned short unac_data677[] = { 0x0621, 0x0621, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0648, 0x0648, 0xFFFF, 0x0648, 0x0648, 0xFFFF, 0x0627, 0x0627, 0xFFFF };
unsigned short unac_data678[] = { 0x0627, 0x0627, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0627, 0x0627, 0xFFFF, 0x0628, 0x0628, 0xFFFF };
unsigned short unac_data679[] = { 0x0628, 0x0628, 0xFFFF, 0x0628, 0x0628, 0xFFFF, 0x0628, 0x0628, 0xFFFF, 0x0629, 0x0629, 0xFFFF, 0x0629, 0x0629, 0xFFFF, 0x062A, 0x062A, 0xFFFF, 0x062A, 0x062A, 0xFFFF, 0x062A, 0x062A, 0xFFFF };
unsigned short unac_data680[] = { 0x062A, 0x062A, 0xFFFF, 0x062B, 0x062B, 0xFFFF, 0x062B, 0x062B, 0xFFFF, 0x062B, 0x062B, 0xFFFF, 0x062B, 0x062B, 0xFFFF, 0x062C, 0x062C, 0xFFFF, 0x062C, 0x062C, 0xFFFF, 0x062C, 0x062C, 0xFFFF };
unsigned short unac_data681[] = { 0x062C, 0x062C, 0xFFFF, 0x062D, 0x062D, 0xFFFF, 0x062D, 0x062D, 0xFFFF, 0x062D, 0x062D, 0xFFFF, 0x062D, 0x062D, 0xFFFF, 0x062E, 0x062E, 0xFFFF, 0x062E, 0x062E, 0xFFFF, 0x062E, 0x062E, 0xFFFF };
unsigned short unac_data682[] = { 0x062E, 0x062E, 0xFFFF, 0x062F, 0x062F, 0xFFFF, 0x062F, 0x062F, 0xFFFF, 0x0630, 0x0630, 0xFFFF, 0x0630, 0x0630, 0xFFFF, 0x0631, 0x0631, 0xFFFF, 0x0631, 0x0631, 0xFFFF, 0x0632, 0x0632, 0xFFFF };
unsigned short unac_data683[] = { 0x0632, 0x0632, 0xFFFF, 0x0633, 0x0633, 0xFFFF, 0x0633, 0x0633, 0xFFFF, 0x0633, 0x0633, 0xFFFF, 0x0633, 0x0633, 0xFFFF, 0x0634, 0x0634, 0xFFFF, 0x0634, 0x0634, 0xFFFF, 0x0634, 0x0634, 0xFFFF };
unsigned short unac_data684[] = { 0x0634, 0x0634, 0xFFFF, 0x0635, 0x0635, 0xFFFF, 0x0635, 0x0635, 0xFFFF, 0x0635, 0x0635, 0xFFFF, 0x0635, 0x0635, 0xFFFF, 0x0636, 0x0636, 0xFFFF, 0x0636, 0x0636, 0xFFFF, 0x0636, 0x0636, 0xFFFF };
unsigned short unac_data685[] = { 0x0636, 0x0636, 0xFFFF, 0x0637, 0x0637, 0xFFFF, 0x0637, 0x0637, 0xFFFF, 0x0637, 0x0637, 0xFFFF, 0x0637, 0x0637, 0xFFFF, 0x0638, 0x0638, 0xFFFF, 0x0638, 0x0638, 0xFFFF, 0x0638, 0x0638, 0xFFFF };
unsigned short unac_data686[] = { 0x0638, 0x0638, 0xFFFF, 0x0639, 0x0639, 0xFFFF, 0x0639, 0x0639, 0xFFFF, 0x0639, 0x0639, 0xFFFF, 0x0639, 0x0639, 0xFFFF, 0x063A, 0x063A, 0xFFFF, 0x063A, 0x063A, 0xFFFF, 0x063A, 0x063A, 0xFFFF };
unsigned short unac_data687[] = { 0x063A, 0x063A, 0xFFFF, 0x0641, 0x0641, 0xFFFF, 0x0641, 0x0641, 0xFFFF, 0x0641, 0x0641, 0xFFFF, 0x0641, 0x0641, 0xFFFF, 0x0642, 0x0642, 0xFFFF, 0x0642, 0x0642, 0xFFFF, 0x0642, 0x0642, 0xFFFF };
unsigned short unac_data688[] = { 0x0642, 0x0642, 0xFFFF, 0x0643, 0x0643, 0xFFFF, 0x0643, 0x0643, 0xFFFF, 0x0643, 0x0643, 0xFFFF, 0x0643, 0x0643, 0xFFFF, 0x0644, 0x0644, 0xFFFF, 0x0644, 0x0644, 0xFFFF, 0x0644, 0x0644, 0xFFFF };
unsigned short unac_data689[] = { 0x0644, 0x0644, 0xFFFF, 0x0645, 0x0645, 0xFFFF, 0x0645, 0x0645, 0xFFFF, 0x0645, 0x0645, 0xFFFF, 0x0645, 0x0645, 0xFFFF, 0x0646, 0x0646, 0xFFFF, 0x0646, 0x0646, 0xFFFF, 0x0646, 0x0646, 0xFFFF };
unsigned short unac_data690[] = { 0x0646, 0x0646, 0xFFFF, 0x0647, 0x0647, 0xFFFF, 0x0647, 0x0647, 0xFFFF, 0x0647, 0x0647, 0xFFFF, 0x0647, 0x0647, 0xFFFF, 0x0648, 0x0648, 0xFFFF, 0x0648, 0x0648, 0xFFFF, 0x0649, 0x0649, 0xFFFF };
unsigned short unac_data691[] = { 0x0649, 0x0649, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x064A, 0x064A, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF };
unsigned short unac_data692[] = { 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0x0644, 0x0627, 0x0644, 0x0627, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data693[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0x0021, 0x0021, 0xFFFF, 0x0022, 0x0022, 0xFFFF, 0x0023, 0x0023, 0xFFFF, 0x0024, 0x0024, 0xFFFF, 0x0025, 0x0025, 0xFFFF, 0x0026, 0x0026, 0xFFFF, 0x0027, 0x0027, 0xFFFF };
unsigned short unac_data694[] = { 0x0028, 0x0028, 0xFFFF, 0x0029, 0x0029, 0xFFFF, 0x002A, 0x002A, 0xFFFF, 0x002B, 0x002B, 0xFFFF, 0x002C, 0x002C, 0xFFFF, 0x002D, 0x002D, 0xFFFF, 0x002E, 0x002E, 0xFFFF, 0x002F, 0x002F, 0xFFFF };
unsigned short unac_data695[] = { 0x0038, 0x0038, 0xFFFF, 0x0039, 0x0039, 0xFFFF, 0x003A, 0x003A, 0xFFFF, 0x003B, 0x003B, 0xFFFF, 0x003C, 0x003C, 0xFFFF, 0x003D, 0x003D, 0xFFFF, 0x003E, 0x003E, 0xFFFF, 0x003F, 0x003F, 0xFFFF };
unsigned short unac_data696[] = { 0x0040, 0x0040, 0xFFFF, 0x0041, 0x0061, 0xFF41, 0x0042, 0x0062, 0xFF42, 0x0043, 0x0063, 0xFF43, 0x0044, 0x0064, 0xFF44, 0x0045, 0x0065, 0xFF45, 0x0046, 0x0066, 0xFF46, 0x0047, 0x0067, 0xFF47 };
unsigned short unac_data697[] = { 0x0048, 0x0068, 0xFF48, 0x0049, 0x0069, 0xFF49, 0x004A, 0x006A, 0xFF4A, 0x004B, 0x006B, 0xFF4B, 0x004C, 0x006C, 0xFF4C, 0x004D, 0x006D, 0xFF4D, 0x004E, 0x006E, 0xFF4E, 0x004F, 0x006F, 0xFF4F };
unsigned short unac_data698[] = { 0x0050, 0x0070, 0xFF50, 0x0051, 0x0071, 0xFF51, 0x0052, 0x0072, 0xFF52, 0x0053, 0x0073, 0xFF53, 0x0054, 0x0074, 0xFF54, 0x0055, 0x0075, 0xFF55, 0x0056, 0x0076, 0xFF56, 0x0057, 0x0077, 0xFF57 };
unsigned short unac_data699[] = { 0x0058, 0x0078, 0xFF58, 0x0059, 0x0079, 0xFF59, 0x005A, 0x007A, 0xFF5A, 0x005B, 0x005B, 0xFFFF, 0x005C, 0x005C, 0xFFFF, 0x005D, 0x005D, 0xFFFF, 0x005E, 0x005E, 0xFFFF, 0x005F, 0x005F, 0xFFFF };
unsigned short unac_data700[] = { 0x0060, 0x0060, 0xFFFF, 0x0061, 0x0061, 0xFFFF, 0x0062, 0x0062, 0xFFFF, 0x0063, 0x0063, 0xFFFF, 0x0064, 0x0064, 0xFFFF, 0x0065, 0x0065, 0xFFFF, 0x0066, 0x0066, 0xFFFF, 0x0067, 0x0067, 0xFFFF };
unsigned short unac_data701[] = { 0x0068, 0x0068, 0xFFFF, 0x0069, 0x0069, 0xFFFF, 0x006A, 0x006A, 0xFFFF, 0x006B, 0x006B, 0xFFFF, 0x006C, 0x006C, 0xFFFF, 0x006D, 0x006D, 0xFFFF, 0x006E, 0x006E, 0xFFFF, 0x006F, 0x006F, 0xFFFF };
unsigned short unac_data702[] = { 0x0070, 0x0070, 0xFFFF, 0x0071, 0x0071, 0xFFFF, 0x0072, 0x0072, 0xFFFF, 0x0073, 0x0073, 0xFFFF, 0x0074, 0x0074, 0xFFFF, 0x0075, 0x0075, 0xFFFF, 0x0076, 0x0076, 0xFFFF, 0x0077, 0x0077, 0xFFFF };
unsigned short unac_data703[] = { 0x0078, 0x0078, 0xFFFF, 0x0079, 0x0079, 0xFFFF, 0x007A, 0x007A, 0xFFFF, 0x007B, 0x007B, 0xFFFF, 0x007C, 0x007C, 0xFFFF, 0x007D, 0x007D, 0xFFFF, 0x007E, 0x007E, 0xFFFF, 0x2985, 0x2985, 0xFFFF };
unsigned short unac_data704[] = { 0x2986, 0x2986, 0xFFFF, 0x3002, 0x3002, 0xFFFF, 0x300C, 0x300C, 0xFFFF, 0x300D, 0x300D, 0xFFFF, 0x3001, 0x3001, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data705[] = { 0x1160, 0x1160, 0xFFFF, 0x1100, 0x1100, 0xFFFF, 0x1101, 0x1101, 0xFFFF, 0x11AA, 0x11AA, 0xFFFF, 0x1102, 0x1102, 0xFFFF, 0x11AC, 0x11AC, 0xFFFF, 0x11AD, 0x11AD, 0xFFFF, 0x1103, 0x1103, 0xFFFF };
unsigned short unac_data706[] = { 0x110C, 0x110C, 0xFFFF, 0x110D, 0x110D, 0xFFFF, 0x110E, 0x110E, 0xFFFF, 0x110F, 0x110F, 0xFFFF, 0x1110, 0x1110, 0xFFFF, 0x1111, 0x1111, 0xFFFF, 0x1112, 0x1112, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data707[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1161, 0x1161, 0xFFFF, 0x1162, 0x1162, 0xFFFF, 0x1163, 0x1163, 0xFFFF, 0x1164, 0x1164, 0xFFFF, 0x1165, 0x1165, 0xFFFF, 0x1166, 0x1166, 0xFFFF };
unsigned short unac_data708[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1167, 0x1167, 0xFFFF, 0x1168, 0x1168, 0xFFFF, 0x1169, 0x1169, 0xFFFF, 0x116A, 0x116A, 0xFFFF, 0x116B, 0x116B, 0xFFFF, 0x116C, 0x116C, 0xFFFF };
unsigned short unac_data709[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x116D, 0x116D, 0xFFFF, 0x116E, 0x116E, 0xFFFF, 0x116F, 0x116F, 0xFFFF, 0x1170, 0x1170, 0xFFFF, 0x1171, 0x1171, 0xFFFF, 0x1172, 0x1172, 0xFFFF };
unsigned short unac_data710[] = { 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x1173, 0x1173, 0xFFFF, 0x1174, 0x1174, 0xFFFF, 0x1175, 0x1175, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data711[] = { 0x00A2, 0x00A2, 0xFFFF, 0x00A3, 0x00A3, 0xFFFF, 0x00AC, 0x00AC, 0xFFFF, 0x0020, 0x0020, 0xFFFF, 0x00A6, 0x00A6, 0xFFFF, 0x00A5, 0x00A5, 0xFFFF, 0x20A9, 0x20A9, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };
unsigned short unac_data712[] = { 0x2502, 0x2502, 0xFFFF, 0x2190, 0x2190, 0xFFFF, 0x2191, 0x2191, 0xFFFF, 0x2192, 0x2192, 0xFFFF, 0x2193, 0x2193, 0xFFFF, 0x25A0, 0x25A0, 0xFFFF, 0x25CB, 0x25CB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF };

unsigned short* unac_data_table[UNAC_BLOCK_COUNT] = {
unac_data0,
unac_data1,
unac_data2,
unac_data3,
unac_data4,
unac_data5,
unac_data6,
unac_data7,
unac_data8,
unac_data9,
unac_data10,
unac_data11,
unac_data12,
unac_data13,
unac_data14,
unac_data15,
unac_data16,
unac_data17,
unac_data18,
unac_data19,
unac_data20,
unac_data21,
unac_data22,
unac_data23,
unac_data24,
unac_data25,
unac_data26,
unac_data27,
unac_data28,
unac_data29,
unac_data30,
unac_data31,
unac_data32,
unac_data33,
unac_data34,
unac_data35,
unac_data36,
unac_data37,
unac_data38,
unac_data39,
unac_data40,
unac_data41,
unac_data42,
unac_data43,
unac_data44,
unac_data45,
unac_data46,
unac_data47,
unac_data48,
unac_data49,
unac_data50,
unac_data51,
unac_data52,
unac_data53,
unac_data54,
unac_data55,
unac_data56,
unac_data57,
unac_data58,
unac_data59,
unac_data60,
unac_data61,
unac_data62,
unac_data63,
unac_data64,
unac_data65,
unac_data66,
unac_data67,
unac_data68,
unac_data69,
unac_data70,
unac_data71,
unac_data72,
unac_data73,
unac_data74,
unac_data75,
unac_data76,
unac_data77,
unac_data78,
unac_data79,
unac_data80,
unac_data81,
unac_data82,
unac_data83,
unac_data84,
unac_data85,
unac_data86,
unac_data87,
unac_data88,
unac_data89,
unac_data90,
unac_data91,
unac_data92,
unac_data93,
unac_data94,
unac_data95,
unac_data96,
unac_data97,
unac_data98,
unac_data99,
unac_data100,
unac_data101,
unac_data102,
unac_data103,
unac_data104,
unac_data105,
unac_data106,
unac_data107,
unac_data108,
unac_data109,
unac_data110,
unac_data111,
unac_data112,
unac_data113,
unac_data114,
unac_data115,
unac_data116,
unac_data117,
unac_data118,
unac_data119,
unac_data120,
unac_data121,
unac_data122,
unac_data123,
unac_data124,
unac_data125,
unac_data126,
unac_data127,
unac_data128,
unac_data129,
unac_data130,
unac_data131,
unac_data132,
unac_data133,
unac_data134,
unac_data135,
unac_data136,
unac_data137,
unac_data138,
unac_data139,
unac_data140,
unac_data141,
unac_data142,
unac_data143,
unac_data144,
unac_data145,
unac_data146,
unac_data147,
unac_data148,
unac_data149,
unac_data150,
unac_data151,
unac_data152,
unac_data153,
unac_data154,
unac_data155,
unac_data156,
unac_data157,
unac_data158,
unac_data159,
unac_data160,
unac_data161,
unac_data162,
unac_data163,
unac_data164,
unac_data165,
unac_data166,
unac_data167,
unac_data168,
unac_data169,
unac_data170,
unac_data171,
unac_data172,
unac_data173,
unac_data174,
unac_data175,
unac_data176,
unac_data177,
unac_data178,
unac_data179,
unac_data180,
unac_data181,
unac_data182,
unac_data183,
unac_data184,
unac_data185,
unac_data186,
unac_data187,
unac_data188,
unac_data189,
unac_data190,
unac_data191,
unac_data192,
unac_data193,
unac_data194,
unac_data195,
unac_data196,
unac_data197,
unac_data198,
unac_data199,
unac_data200,
unac_data201,
unac_data202,
unac_data203,
unac_data204,
unac_data205,
unac_data206,
unac_data207,
unac_data208,
unac_data209,
unac_data210,
unac_data211,
unac_data212,
unac_data213,
unac_data214,
unac_data215,
unac_data216,
unac_data217,
unac_data218,
unac_data219,
unac_data220,
unac_data221,
unac_data222,
unac_data223,
unac_data224,
unac_data225,
unac_data226,
unac_data227,
unac_data228,
unac_data229,
unac_data230,
unac_data231,
unac_data232,
unac_data233,
unac_data234,
unac_data235,
unac_data236,
unac_data237,
unac_data238,
unac_data239,
unac_data240,
unac_data241,
unac_data242,
unac_data243,
unac_data244,
unac_data245,
unac_data246,
unac_data247,
unac_data248,
unac_data249,
unac_data250,
unac_data251,
unac_data252,
unac_data253,
unac_data254,
unac_data255,
unac_data256,
unac_data257,
unac_data258,
unac_data259,
unac_data260,
unac_data261,
unac_data262,
unac_data263,
unac_data264,
unac_data265,
unac_data266,
unac_data267,
unac_data268,
unac_data269,
unac_data270,
unac_data271,
unac_data272,
unac_data273,
unac_data274,
unac_data275,
unac_data276,
unac_data277,
unac_data278,
unac_data279,
unac_data280,
unac_data281,
unac_data282,
unac_data283,
unac_data284,
unac_data285,
unac_data286,
unac_data287,
unac_data288,
unac_data289,
unac_data290,
unac_data291,
unac_data292,
unac_data293,
unac_data294,
unac_data295,
unac_data296,
unac_data297,
unac_data298,
unac_data299,
unac_data300,
unac_data301,
unac_data302,
unac_data303,
unac_data304,
unac_data305,
unac_data306,
unac_data307,
unac_data308,
unac_data309,
unac_data310,
unac_data311,
unac_data312,
unac_data313,
unac_data314,
unac_data315,
unac_data316,
unac_data317,
unac_data318,
unac_data319,
unac_data320,
unac_data321,
unac_data322,
unac_data323,
unac_data324,
unac_data325,
unac_data326,
unac_data327,
unac_data328,
unac_data329,
unac_data330,
unac_data331,
unac_data332,
unac_data333,
unac_data334,
unac_data335,
unac_data336,
unac_data337,
unac_data338,
unac_data339,
unac_data340,
unac_data341,
unac_data342,
unac_data343,
unac_data344,
unac_data345,
unac_data346,
unac_data347,
unac_data348,
unac_data349,
unac_data350,
unac_data351,
unac_data352,
unac_data353,
unac_data354,
unac_data355,
unac_data356,
unac_data357,
unac_data358,
unac_data359,
unac_data360,
unac_data361,
unac_data362,
unac_data363,
unac_data364,
unac_data365,
unac_data366,
unac_data367,
unac_data368,
unac_data369,
unac_data370,
unac_data371,
unac_data372,
unac_data373,
unac_data374,
unac_data375,
unac_data376,
unac_data377,
unac_data378,
unac_data379,
unac_data380,
unac_data381,
unac_data382,
unac_data383,
unac_data384,
unac_data385,
unac_data386,
unac_data387,
unac_data388,
unac_data389,
unac_data390,
unac_data391,
unac_data392,
unac_data393,
unac_data394,
unac_data395,
unac_data396,
unac_data397,
unac_data398,
unac_data399,
unac_data400,
unac_data401,
unac_data402,
unac_data403,
unac_data404,
unac_data405,
unac_data406,
unac_data407,
unac_data408,
unac_data409,
unac_data410,
unac_data411,
unac_data412,
unac_data413,
unac_data414,
unac_data415,
unac_data416,
unac_data417,
unac_data418,
unac_data419,
unac_data420,
unac_data421,
unac_data422,
unac_data423,
unac_data424,
unac_data425,
unac_data426,
unac_data427,
unac_data428,
unac_data429,
unac_data430,
unac_data431,
unac_data432,
unac_data433,
unac_data434,
unac_data435,
unac_data436,
unac_data437,
unac_data438,
unac_data439,
unac_data440,
unac_data441,
unac_data442,
unac_data443,
unac_data444,
unac_data445,
unac_data446,
unac_data447,
unac_data448,
unac_data449,
unac_data450,
unac_data451,
unac_data452,
unac_data453,
unac_data454,
unac_data455,
unac_data456,
unac_data457,
unac_data458,
unac_data459,
unac_data460,
unac_data461,
unac_data462,
unac_data463,
unac_data464,
unac_data465,
unac_data466,
unac_data467,
unac_data468,
unac_data469,
unac_data470,
unac_data471,
unac_data472,
unac_data473,
unac_data474,
unac_data475,
unac_data476,
unac_data477,
unac_data478,
unac_data479,
unac_data480,
unac_data481,
unac_data482,
unac_data483,
unac_data484,
unac_data485,
unac_data486,
unac_data487,
unac_data488,
unac_data489,
unac_data490,
unac_data491,
unac_data492,
unac_data493,
unac_data494,
unac_data495,
unac_data496,
unac_data497,
unac_data498,
unac_data499,
unac_data500,
unac_data501,
unac_data502,
unac_data503,
unac_data504,
unac_data505,
unac_data506,
unac_data507,
unac_data508,
unac_data509,
unac_data510,
unac_data511,
unac_data512,
unac_data513,
unac_data514,
unac_data515,
unac_data516,
unac_data517,
unac_data518,
unac_data519,
unac_data520,
unac_data521,
unac_data522,
unac_data523,
unac_data524,
unac_data525,
unac_data526,
unac_data527,
unac_data528,
unac_data529,
unac_data530,
unac_data531,
unac_data532,
unac_data533,
unac_data534,
unac_data535,
unac_data536,
unac_data537,
unac_data538,
unac_data539,
unac_data540,
unac_data541,
unac_data542,
unac_data543,
unac_data544,
unac_data545,
unac_data546,
unac_data547,
unac_data548,
unac_data549,
unac_data550,
unac_data551,
unac_data552,
unac_data553,
unac_data554,
unac_data555,
unac_data556,
unac_data557,
unac_data558,
unac_data559,
unac_data560,
unac_data561,
unac_data562,
unac_data563,
unac_data564,
unac_data565,
unac_data566,
unac_data567,
unac_data568,
unac_data569,
unac_data570,
unac_data571,
unac_data572,
unac_data573,
unac_data574,
unac_data575,
unac_data576,
unac_data577,
unac_data578,
unac_data579,
unac_data580,
unac_data581,
unac_data582,
unac_data583,
unac_data584,
unac_data585,
unac_data586,
unac_data587,
unac_data588,
unac_data589,
unac_data590,
unac_data591,
unac_data592,
unac_data593,
unac_data594,
unac_data595,
unac_data596,
unac_data597,
unac_data598,
unac_data599,
unac_data600,
unac_data601,
unac_data602,
unac_data603,
unac_data604,
unac_data605,
unac_data606,
unac_data607,
unac_data608,
unac_data609,
unac_data610,
unac_data611,
unac_data612,
unac_data613,
unac_data614,
unac_data615,
unac_data616,
unac_data617,
unac_data618,
unac_data619,
unac_data620,
unac_data621,
unac_data622,
unac_data623,
unac_data624,
unac_data625,
unac_data626,
unac_data627,
unac_data628,
unac_data629,
unac_data630,
unac_data631,
unac_data632,
unac_data633,
unac_data634,
unac_data635,
unac_data636,
unac_data637,
unac_data638,
unac_data639,
unac_data640,
unac_data641,
unac_data642,
unac_data643,
unac_data644,
unac_data645,
unac_data646,
unac_data647,
unac_data648,
unac_data649,
unac_data650,
unac_data651,
unac_data652,
unac_data653,
unac_data654,
unac_data655,
unac_data656,
unac_data657,
unac_data658,
unac_data659,
unac_data660,
unac_data661,
unac_data662,
unac_data663,
unac_data664,
unac_data665,
unac_data666,
unac_data667,
unac_data668,
unac_data669,
unac_data670,
unac_data671,
unac_data672,
unac_data673,
unac_data674,
unac_data675,
unac_data676,
unac_data677,
unac_data678,
unac_data679,
unac_data680,
unac_data681,
unac_data682,
unac_data683,
unac_data684,
unac_data685,
unac_data686,
unac_data687,
unac_data688,
unac_data689,
unac_data690,
unac_data691,
unac_data692,
unac_data693,
unac_data694,
unac_data695,
unac_data696,
unac_data697,
unac_data698,
unac_data699,
unac_data700,
unac_data701,
unac_data702,
unac_data703,
unac_data704,
unac_data705,
unac_data706,
unac_data707,
unac_data708,
unac_data709,
unac_data710,
unac_data711,
unac_data712
};
/* Generated by builder. Do not modify. End tables */

/*
 * Debug level. See unac.h for a detailed discussion of the
 * values.
 */
static int debug_level = UNAC_DEBUG_LOW;

#ifdef UNAC_DEBUG_AVAILABLE

/*
 * Default debug function, printing on stderr.
 */
static void debug_doprint_default(const char* message, void* data)
{
  fprintf(stderr, "%s", message);
}

/*
 * Default doprint handler is debug_doprint.
 */
static unac_debug_print_t debug_doprint = debug_doprint_default;
/*
 * Default app data is null.
 */
static void* debug_appdata = (void*)0;

/*
 * Generate a debug message (printf-style arguments) and
 * send it to the doprint handler.
 */
#define DEBUG debug_print("%s:%d: ", __FILE__, __LINE__), debug_print
#define DEBUG_APPEND debug_print
static void debug_print(const char* message, ...)
{
#define UNAC_MAXIMUM_MESSAGE_SIZE 512
  /*
   * UNAC_MAXIMUM_MESSAGE_SIZE should be enough, but some vsnprintf
   * implementations are buggy, so be defensive about the result.
   */
  char unac_message_buffer[UNAC_MAXIMUM_MESSAGE_SIZE+1] = { '\0' };
  va_list args;
  va_start(args, message);
  if(vsnprintf(unac_message_buffer, UNAC_MAXIMUM_MESSAGE_SIZE, message, args) < 0) {
    char tmp[UNAC_MAXIMUM_MESSAGE_SIZE];
    sprintf(tmp, "[message larger than %d, truncated]", UNAC_MAXIMUM_MESSAGE_SIZE);
    debug_doprint(tmp, debug_appdata);
  }
  va_end(args);
  unac_message_buffer[UNAC_MAXIMUM_MESSAGE_SIZE] = '\0';

  debug_doprint(unac_message_buffer, debug_appdata);
}

void unac_debug_callback(int level, unac_debug_print_t function, void* data)
{
  debug_level = level;
  if(function)
    debug_doprint = function;
  debug_appdata = data;
}
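
/*
 * Illustrative sketch (not part of the library): how a caller could install
 * a custom debug sink with unac_debug_callback(). The log_to_file() helper
 * and the FILE* handle are assumptions made for this example only.
 */
#if 0
#include <stdio.h>

static void log_to_file(const char* message, void* data)
{
    /* 'data' is the opaque pointer passed to unac_debug_callback() */
    fputs(message, (FILE*)data);
}

static void example_enable_unac_logging(FILE* logfp)
{
    /* UNAC_DEBUG_HIGH traces every character transformation, so it is only
       practical on short inputs. Pass UNAC_DEBUG_NONE to turn tracing off. */
    unac_debug_callback(UNAC_DEBUG_HIGH, log_to_file, logfp);
}
#endif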

#else /* UNAC_DEBUG_AVAILABLE */
#define DEBUG 
#define DEBUG_APPEND
#endif /* UNAC_DEBUG_AVAILABLE */

/* 0 1 2 are the offsets from base in the position table, keep the value! */
#define UNAC_UNAC 0
#define UNAC_UNACFOLD 1
#define UNAC_FOLD 2

int unacmaybefold_string_utf16(const char* in, size_t in_length,
			       char** outp, size_t* out_lengthp, int what)
{
  char* out;
  size_t out_size;
  size_t out_length;
  size_t i;

  out_size = in_length > 0 ? in_length : 1024;

  out = *outp;
  out = (char*)realloc(out, out_size + 1);
  if(out == 0) {
      if(debug_level >= UNAC_DEBUG_LOW)
	  DEBUG("realloc %d bytes failed\n", out_size+1);
      /* *outp is still valid. Let the caller free it */
      return -1;
  }

  out_length = 0;

  for(i = 0; i < in_length; i += 2) {
    unsigned short c;
    unsigned short* p;
    size_t l;
    size_t k;
    c = (in[i] << 8) | (in[i + 1] & 0xff);
    /*
     * Lookup the tables for decomposition information
     */
#ifdef BUILDING_RECOLL
    // Exception unac/fold values set by user. There should be 3 arrays for
    // unac/fold/unac+fold. For now there is only one array, which used to
    // be set for unac+fold, and is mostly or only used to prevent diacritics
    // removal for some chars and languages where it should not be done.
    // In conformance with current usage, but incorrectly, we do the following
    // things for the special chars depending on the operation requested:
    //   - unaccenting: do nothing (copy original char)
    //   - unac+fold: use table
    //   - fold: use the unicode data.
    string trans;
    if (what != UNAC_FOLD && except_trans.size() != 0 && 
	is_except_char(c, trans)) {
	if (what == UNAC_UNAC) {
	    // Unaccent only. Do nothing
	    p = 0;
	    l = 0;
	} else {
	    // Has to be UNAC_UNACFOLD: use table
	    p = (unsigned short *)trans.c_str();
	    l = trans.size() / 2;
	}
    } else {
#endif /* BUILDING_RECOLL */
	unac_uf_char_utf16_(c, p, l, what)
#ifdef BUILDING_RECOLL
    }
#endif /* BUILDING_RECOLL */

    /*
     * Explain what's done in great detail
     */
    if(debug_level == UNAC_DEBUG_HIGH) {
      unsigned short index = unac_indexes[(c) >> UNAC_BLOCK_SHIFT];
      unsigned char position = (c) & UNAC_BLOCK_MASK;
      DEBUG("unac_data%d[%d] & unac_positions[%d][%d]: ", index, unac_positions[index][position], index, position+1);
      DEBUG_APPEND("0x%04x => ", (c));
      if(l == 0) {
	DEBUG_APPEND("untouched\n");
      } else {
	size_t i;
	for(i = 0; i < l; i++)
	  DEBUG_APPEND("0x%04x ", p[i]);
	DEBUG_APPEND("\n");
      }
    }

    /*
     * Make sure there is enough space to hold the decomposition
     * Note: a previous realloc may have succeeded, which means that *outp 
     * is not valid any more. We have to do the freeing and zero out *outp
     */
    if(out_length + ((l + 1) * 2) > out_size) {
      char *saved;
      out_size += ((l + 1) * 2) + 1024;
      saved = out;
      /* +1 for the terminating null written after the loop, as in the
         initial allocation */
      out = (char *)realloc(out, out_size + 1);
      if(out == 0) {
	if(debug_level >= UNAC_DEBUG_LOW)
	  DEBUG("realloc %d bytes failed\n", out_size);
        free(saved);
	*outp = 0;
	return -1;
      }
    }
    if(l > 0) {
	/* l == 1 && *p == 0 is the special case generated for
	   mark characters (which may be found if the input is
	   already in decomposed form). Output nothing. */
	if (l != 1 || *p != 0) {
	    /*
	     * If there is a decomposition, insert it in the output 
	     * string.
	     */
	    for(k = 0; k < l; k++) {
		out[out_length++] = (p[k] >> 8) & 0xff;
		out[out_length++] = (p[k] & 0xff);
	    }
	}
    } else {
      /*
       * If there is no decomposition leave it unchanged
       */
      out[out_length++] = in[i];
      out[out_length++] = in[i + 1];
    }
  }

  *outp = out;
  *out_lengthp = out_length;
  (*outp)[*out_lengthp] = '\0';

  return 0;
}
int unac_string_utf16(const char* in, size_t in_length,
		      char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string_utf16(in, in_length,
				      outp, out_lengthp, UNAC_UNAC);
}
int unacfold_string_utf16(const char* in, size_t in_length,
		      char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string_utf16(in, in_length,
				      outp, out_lengthp, UNAC_UNACFOLD);
}
int fold_string_utf16(const char* in, size_t in_length,
		      char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string_utf16(in, in_length,
				      outp, out_lengthp, UNAC_FOLD);
}
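
/*
 * Illustrative sketch (not part of the library): calling the UTF-16 level
 * entry points directly. The input must be UTF-16BE encoded, *out may start
 * as NULL (a buffer is then malloc'ed) and must be freed by the caller. The
 * sample character (U+00C9, LATIN CAPITAL LETTER E WITH ACUTE) is an
 * assumption for the example only.
 */
#if 0
#include <stdlib.h>

static int example_unaccent_utf16(void)
{
    const char in[] = { 0x00, (char)0xC9 };  /* "É" in UTF-16BE */
    char* out = 0;
    size_t out_length = 0;

    if (unac_string_utf16(in, sizeof(in), &out, &out_length) < 0)
        return -1;
    /* out should now hold 0x00 0x45 ("E"), with out_length == 2.
       unacfold_string_utf16() would yield "e", fold_string_utf16() "é". */
    free(out);
    return 0;
}
#endif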

static const char *utf16be = "UTF-16BE";
static iconv_t u8tou16_cd = (iconv_t)-1;
static iconv_t u16tou8_cd = (iconv_t)-1;
static std::mutex o_unac_mutex;

/*
 * Convert buffer <in> containing a string encoded in charset <from> into
 * a string in charset <to> and return it in buffer <outp>. <outp>
 * points to a malloced string large enough to hold the conversion result.
 * It is the responsibility of the caller to free this array.
 * The out string is always null terminated.
 */
static int convert(const char* from, const char* to,
		   const char* in, size_t in_length,
		   char** outp, size_t* out_lengthp)
{
  int ret = -1;
  iconv_t cd;
  char* out;
  size_t out_remain;
  size_t out_size;
  char* out_base;
  int from_utf16, from_utf8, to_utf16, to_utf8, u8tou16, u16tou8;
  const char space[] = { 0x00, 0x20 };

  std::unique_lock<std::mutex> lock(o_unac_mutex);

  if (!strcmp(utf16be, from)) {
      from_utf8 = 0;
      from_utf16 = 1;
  } else if (!strcasecmp("UTF-8", from)) {
      from_utf8 = 1;
      from_utf16 = 0;
  } else {
      from_utf8 = from_utf16 = 0;
  }
  if (!strcmp(utf16be, to)) {
      to_utf8 = 0;
      to_utf16 = 1;
  } else if (!strcasecmp("UTF-8", to)) {
      to_utf8 = 1;
      to_utf16 = 0;
  } else {
      to_utf8 = to_utf16 = 0;
  }
  u16tou8 = from_utf16 && to_utf8;
  u8tou16 = from_utf8 && to_utf16;

  out_size = in_length > 0 ? in_length : 1024;

  out = *outp;
  out = (char *)realloc(out, out_size + 1);
  if(out == 0) {
      /* *outp still valid, no freeing */
      if(debug_level >= UNAC_DEBUG_LOW)
	  DEBUG("realloc %d bytes failed\n", out_size+1);
      goto out;
  }

  out_remain = out_size;
  out_base = out;

  if (u8tou16) {
      if (u8tou16_cd == (iconv_t)-1) {
	  if((u8tou16_cd = iconv_open(to, from)) == (iconv_t)-1) {
	      goto out;
	  }
      } else {
	  iconv(u8tou16_cd, 0, 0, 0, 0);
      }
      cd = u8tou16_cd;
  } else if (u16tou8) {
      if (u16tou8_cd == (iconv_t)-1) {
	  if((u16tou8_cd = iconv_open(to, from)) == (iconv_t)-1) {
	      goto out;
	  }
      } else {
	  iconv(u16tou8_cd, 0, 0, 0, 0);
      }
      cd = u16tou8_cd;
  } else {
      if((cd = iconv_open(to, from)) == (iconv_t)-1) {
	  goto out;
      }
  }

  do {
    if(iconv(cd, (ICONV_CONST char **) &in, &in_length, &out, &out_remain) == (size_t)-1) {
      switch(errno) {
      case EILSEQ:
	/*
	 * If an illegal sequence is found in the context of unac_string
	 * it means the unaccented version of a character contains
	 * a sequence that cannot be mapped back to the original charset.
	 * For instance, the 1/4 character in ISO-8859-1 is decomposed
	 * into three characters including the FRACTION SLASH (2044) which
	 * has no equivalent in the ISO-8859-1 map. One can argue that
	 * the conversion tables should map it to the regular / character
	 * or that a specific table entry should be associated with it.
	 *
	 * To cope with this situation, convert() silently transforms all
	 * illegal sequences (EILSEQ) into a SPACE character 0x0020.
	 *
	 * In the general conversion case this behaviour is not desirable.
	 * However, it is not the responsibility of this program to cope
	 * with inconsistencies of the Unicode description, and a bug report
	 * should be submitted to Unicode so that they can fix the problem.
	 * 
	 */
	if(from_utf16) {
	  const char* tmp = space;
	  size_t tmp_length = 2;
	  if(iconv(cd, (ICONV_CONST char **) &tmp, &tmp_length, &out, &out_remain) == (size_t)-1) {
              if(errno == E2BIG) {
	      /* fall thru to the E2BIG case below */;
              } else {
                  goto out;
              }
	  } else {
	    /* The offending character was replaced by a SPACE, skip it. */
	    in += 2;
	    in_length -= 2;
	    /* And continue conversion. */
	    break;
	  }
	} else {
	  goto out;
	}
      case E2BIG:
	{
	  /*
	   * The output does not fit in the current out buffer, enlarge it.
	   */
	  size_t length = out - out_base;
	  out_size *= 2;
	  {
	      char *saved = out_base;
	      /* +1 for null */
	      out_base = (char *)realloc(out_base, out_size + 1);
	      if (out_base == 0) {
		  /* *outp potentially not valid any more. Free here,
		   * and zero out */
		  if(debug_level >= UNAC_DEBUG_LOW)
		      DEBUG("realloc %d bytes failed\n", out_size+1);
		  free(saved);
		  *outp = 0;
		  goto out;
	      }
	  }
	  out = out_base + length;
	  out_remain = out_size - length;
	}
	break;
      default:
	goto out;
	break;
      }
    }
  } while(in_length > 0);

  if (!u8tou16 && !u16tou8)
      iconv_close(cd);

  *outp = out_base;
  *out_lengthp = out - out_base;
  (*outp)[*out_lengthp] = '\0';

  ret = 0;
out:
  return ret;
}
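
/*
 * Illustrative sketch (not part of the library): the calling convention of
 * the static convert() helper above. *outp may be NULL (a buffer is then
 * malloc'ed) or a previously malloc'ed buffer (it may be realloc'ed); the
 * caller owns and frees the result. The sample bytes are an assumption for
 * the example only.
 */
#if 0
#include <stdlib.h>

static int example_convert(void)
{
    const char in[] = { 0x00, 0x41 };  /* "A" in UTF-16BE */
    char* out = 0;
    size_t out_length = 0;

    if (convert("UTF-16BE", "UTF-8", in, sizeof(in), &out, &out_length) < 0)
        return -1;
    /* out now holds "A", null terminated, with out_length == 1 */
    free(out);
    return 0;
}
#endif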

int unacmaybefold_string(const char* charset,
			 const char* in, size_t in_length,
			 char** outp, size_t* out_lengthp, int what)
{
    /*
     * When converting an empty string, skip everything, but still allocate
     * the output buffer if the pointer is NULL.
     */
    if (in_length <= 0) {
	if(!*outp) {
	    if ((*outp = (char*)malloc(32)) == 0)
		return -1;
	}
	(*outp)[0] = '\0';
	*out_lengthp = 0;
    } else {
	char* utf16 = 0;
	size_t utf16_length = 0;
	char* utf16_unaccented = 0;
	size_t utf16_unaccented_length = 0;
  
	if(convert(charset, utf16be, in, in_length, &utf16, &utf16_length) < 0) {
	    return -1;
	}

	if(unacmaybefold_string_utf16(utf16, utf16_length, &utf16_unaccented, 
				      &utf16_unaccented_length, what) < 0) {
	    /* On failure utf16_unaccented is either still valid or zeroed,
	       so free(3) is safe either way */
	    free(utf16);
	    free(utf16_unaccented);
	    return -1;
	}
	free(utf16);

	if(convert(utf16be, charset, utf16_unaccented, utf16_unaccented_length, 
		   outp, out_lengthp) < 0) {
	    free(utf16_unaccented);
	    return -1;
	}
	free(utf16_unaccented);
    }

    return 0;
}

int unac_string(const char* charset,
		const char* in, size_t in_length,
		char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string(charset, in, in_length,
				outp, out_lengthp, UNAC_UNAC);
}
int unacfold_string(const char* charset,
		    const char* in, size_t in_length,
		    char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string(charset, in, in_length,
				outp, out_lengthp, UNAC_UNACFOLD);
}
int fold_string(const char* charset,
		    const char* in, size_t in_length,
		    char** outp, size_t* out_lengthp)
{
    return unacmaybefold_string(charset, in, in_length,
				outp, out_lengthp, UNAC_FOLD);
}
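
/*
 * Illustrative sketch (not part of the library): the charset level entry
 * points. The charset name and sample text are assumptions for the example;
 * any charset supported by iconv(3) can be used.
 */
#if 0
#include <stdlib.h>
#include <string.h>

static int example_unaccent_utf8(void)
{
    const char* in = "\xC3\xA9t\xC3\xA9";  /* "été" in UTF-8 */
    char* out = 0;
    size_t out_length = 0;

    if (unac_string("UTF-8", in, strlen(in), &out, &out_length) < 0)
        return -1;
    /* out now holds "ete", null terminated. unacfold_string() would also
       case-fold, and fold_string() only case-folds. */
    free(out);
    return 0;
}
#endif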

const char* unac_version(void)
{
  return UNAC_VERSION;
}

#ifdef BUILDING_RECOLL
void unac_set_except_translations(const char *spectrans)
{
    except_trans.clear();
    if (!spectrans || !spectrans[0])
	return;

    // The translation tables out of Unicode are in machine byte order (we
    // just let the compiler read the values). 
    // For the translation part, we need to choose our encoding accordingly
    // (UTF-16BE or UTF-16LE depending on the processor).
    // On the contrary, the source char is always to be compared to
    // the input text, which is encoded in UTF-16BE ... What a mess.
    static const char *machinecoding = 0;
    // Static like machinecoding: the detection below runs only once, but
    // the result is needed on every call.
    static bool littleendian = true;
    if (machinecoding == 0) {
	const char*  charshort = "\001\002";
	short *ip = (short *)charshort;
	if (*ip == 0x0102) {
	    littleendian = false;
	    machinecoding = "UTF-16BE";
	} else {
	    littleendian = true;
	    machinecoding = "UTF-16LE";
	}
    }

    vector<string> vtrans;
    stringToStrings(spectrans, vtrans);

    for (vector<string>::iterator it = vtrans.begin();
	 it != vtrans.end(); it++) {

	/* Convert the whole thing to utf-16be/le according to endianness */
	char *out = 0;
	size_t outsize;
	if (convert("UTF-8", machinecoding,
		    it->c_str(), it->size(),
		    &out, &outsize) != 0 || outsize < 2)
	    continue;

	/* The source char must be utf-16be as this is what we convert the
	   input text to for internal processing */
	unsigned short ch;
	if (littleendian)
	    ch = (out[1] << 8) | (out[0] & 0xff);
	else
	    ch = (out[0] << 8) | (out[1] & 0xff);

	except_trans[ch] = string((const char *)(out + 2), outsize-2);
	free(out);
    }
}
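
/*
 * Illustrative sketch (not part of the library): typical use of the
 * exception mechanism. Each blank separated UTF-8 token starts with the
 * exception character, followed by its translation. The Swedish-style list
 * below (keep å/ä/ö instead of stripping the diacritics, fold the upper
 * case forms to lower case) is an assumption for illustration only, not
 * the recoll default configuration.
 */
#if 0
static void example_swedish_exceptions(void)
{
    /* "åå ää öö Åå Ää Öö" spelled out as UTF-8 byte escapes */
    unac_set_except_translations(
        "\xC3\xA5\xC3\xA5 \xC3\xA4\xC3\xA4 \xC3\xB6\xC3\xB6 "
        "\xC3\x85\xC3\xA5 \xC3\x84\xC3\xA4 \xC3\x96\xC3\xB6");
}
#endif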
#endif /* BUILDING_RECOLL */
recoll-1.26.3/unac/unac.h0000644000175000017500000010252513533651561012046 00000000000000/*
 * Copyright (C) 2000, 2001, 2002 Loic Dachary 
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/*
 * Provides functions to strip accents from a string in all the
 * charset supported by iconv(3).
 *
 * See the unac(3) manual page for more information.
 *
 */

#ifndef _unac_h
#define _unac_h

#ifdef __cplusplus
extern "C" {
#endif

/* Generated by builder. Do not modify. Start defines */
#define UNAC_BLOCK_SHIFT 3
#define UNAC_BLOCK_MASK ((1 << UNAC_BLOCK_SHIFT) - 1)
#define UNAC_BLOCK_SIZE (1 << UNAC_BLOCK_SHIFT)
#define UNAC_BLOCK_COUNT 713
#define UNAC_INDEXES_SIZE (0x10000 >> UNAC_BLOCK_SHIFT)
/* Generated by builder. Do not modify. End defines */

/*
 * Return the unaccented equivalent of the UTF-16 character <c>
 * in the pointer <p>. The length of the unsigned short array pointed
 * by <p> is returned in the <l> argument.
 * The C++ prototype of this macro would be:
 *
 * void unac_char(const unsigned short c, unsigned short*& p, int& l, int o)
 *
 * See unac(3) in IMPLEMENTATION NOTES for more information about the
 * tables (unac_data_table, unac_positions) layout.
 *
 * Each transformed char has 3 possible outputs: unaccented, unaccented and
 * folded, or just folded. These are kept at offset 0,1,2 in the position table
 */
#define unac_uf_char_utf16_(c,p,l,o) \
{ \
    unsigned short index = unac_indexes[(c) >> UNAC_BLOCK_SHIFT]; \
    unsigned char position = 3*((c) & UNAC_BLOCK_MASK) + (o); \
    (p) = &(unac_data_table[index][unac_positions[index][position]]); \
    (l) = unac_positions[index][position + 1] \
        - unac_positions[index][position]; \
    if((l) == 1 && *(p) == 0xFFFF) { \
        (p) = 0; \
        (l) = 0; \
    } \
}
#define unac_char_utf16(c,p,l) unac_uf_char_utf16_((c),(p),(l),0)
#define unacfold_char_utf16(c,p,l) unac_uf_char_utf16_((c),(p),(l),1)
#define fold_char_utf16(c,p,l) unac_uf_char_utf16_((c),(p),(l),2)

/*
 * Return the unaccented equivalent of the UTF-16 string of
 * length <in_length> in the pointer <in>. The length of the UTF-16
 * string returned in <out> is stored in <out_length>. If the pointer
 * *out is null, a new string is allocated using malloc(3). If the
 * pointer *out is not null, the available length must also be given
 * in the *out_length argument. The pointer passed to *out must have
 * been allocated by malloc(3) and may be reallocated by realloc(3) if
 * needs be. It is the responsibility of the caller to free the
 * pointer returned in *out. The return value is 0 on success and -1
 * on error, in which case the errno variable is set to the
 * corresponding error code.
 */
int unac_string_utf16(const char* in, size_t in_length,
                      char** out, size_t* out_length);
int unacfold_string_utf16(const char* in, size_t in_length,
                          char** out, size_t* out_length);
int fold_string_utf16(const char* in, size_t in_length,
                      char** out, size_t* out_length);

/*
 * The semantics of this function are strictly equal to the function
 * unac_string_utf16. The <charset> argument applies to the content of the
 * <in> string. It is converted to UTF-16 using iconv(3) before calling
 * the unac_string_utf16 function and the result is converted from UTF-16 to
 * the specified <charset> before returning it in the <out> pointer.
 * For efficiency purposes it is recommended that the caller uses
 * unac_string_utf16 and iconv(3) to save buffer allocation overhead.
 * The return value is 0 on success and -1 on error, in which case
 * the errno variable is set to the corresponding error code.
 */
int unac_string(const char* charset, const char* in, size_t in_length,
                char** out, size_t* out_length);
int unacfold_string(const char* charset, const char* in, size_t in_length,
                    char** out, size_t* out_length);
int fold_string(const char* charset, const char* in, size_t in_length,
                char** out, size_t* out_length);

#ifdef BUILDING_RECOLL
#include 
/**
 * Set exceptions for unaccenting, for characters which should not be
 * handled according to what the Unicode tables say. For example "a
 * with circle above" should not be stripped to a in Swedish, etc.
 *
 * @param spectrans defines the translations as a blank separated list of
 * UTF-8 strings. Inside each string, the first character is the exception,
 * the rest is the translation (which may be empty). You can use double
 * quotes for translations which should include white space. The double-quote
 * can't be an exception character, deal with it...
 */
void unac_set_except_translations(const char *spectrans);
#endif /* BUILDING_RECOLL */

/*
 * Return unac version number.
*/ const char* unac_version(void); #define UNAC_DEBUG_NONE 0x00 #define UNAC_DEBUG_LOW 0x01 #define UNAC_DEBUG_HIGH 0x02 #ifdef HAVE_VSNPRINTF #define UNAC_DEBUG_AVAILABLE 1 /* * Set the unac debug level. is one of: * UNAC_DEBUG_NONE for no debug messages at all * UNAC_DEBUG_LOW for minimal information * UNAC_DEBUG_HIGH for extremely verbose information, * only usable when translating a few short strings. * * unac_debug with anything but UNAC_DEBUG_NONE is not * thread safe. */ #define unac_debug(l) unac_debug_callback((l), 0, (void*)0); /* * Set the debug level and define a printing function callback. * The debug level is the same as in unac_debug. The * is in charge of dealing with the debug messages, * presumably to print them to the user. The is an opaque * pointer that is passed along to , should it * need to manage a persistent context. * * The prototype of allows two arguments. The first * is the debug message (const char*), the second is the opaque * pointer given as argument to unac_debug_callback. * * If is NULL, messages are printed on the standard * error output using fprintf(stderr...). * * unac_debug_callback with anything but UNAC_DEBUG_NONE is not * thread safe. * */ typedef void (*unac_debug_print_t)(const char* message, void* data); void unac_debug_callback(int level, unac_debug_print_t function, void* data); #endif /* HAVE_VSNPRINTF */ /* Generated by builder. Do not modify. Start declarations */ extern unsigned short unac_indexes[UNAC_INDEXES_SIZE]; extern unsigned char unac_positions[UNAC_BLOCK_COUNT][3*UNAC_BLOCK_SIZE + 1]; extern unsigned short* unac_data_table[UNAC_BLOCK_COUNT]; extern unsigned short unac_data0[]; extern unsigned short unac_data1[]; extern unsigned short unac_data2[]; extern unsigned short unac_data3[]; extern unsigned short unac_data4[]; extern unsigned short unac_data5[]; extern unsigned short unac_data6[]; extern unsigned short unac_data7[]; extern unsigned short unac_data8[]; extern unsigned short unac_data9[]; extern unsigned short unac_data10[]; extern unsigned short unac_data11[]; extern unsigned short unac_data12[]; extern unsigned short unac_data13[]; extern unsigned short unac_data14[]; extern unsigned short unac_data15[]; extern unsigned short unac_data16[]; extern unsigned short unac_data17[]; extern unsigned short unac_data18[]; extern unsigned short unac_data19[]; extern unsigned short unac_data20[]; extern unsigned short unac_data21[]; extern unsigned short unac_data22[]; extern unsigned short unac_data23[]; extern unsigned short unac_data24[]; extern unsigned short unac_data25[]; extern unsigned short unac_data26[]; extern unsigned short unac_data27[]; extern unsigned short unac_data28[]; extern unsigned short unac_data29[]; extern unsigned short unac_data30[]; extern unsigned short unac_data31[]; extern unsigned short unac_data32[]; extern unsigned short unac_data33[]; extern unsigned short unac_data34[]; extern unsigned short unac_data35[]; extern unsigned short unac_data36[]; extern unsigned short unac_data37[]; extern unsigned short unac_data38[]; extern unsigned short unac_data39[]; extern unsigned short unac_data40[]; extern unsigned short unac_data41[]; extern unsigned short unac_data42[]; extern unsigned short unac_data43[]; extern unsigned short unac_data44[]; extern unsigned short unac_data45[]; extern unsigned short unac_data46[]; extern unsigned short unac_data47[]; extern unsigned short unac_data48[]; extern unsigned short unac_data49[]; extern unsigned short unac_data50[]; extern unsigned short unac_data51[]; extern 
unsigned short unac_data52[]; extern unsigned short unac_data53[]; extern unsigned short unac_data54[]; extern unsigned short unac_data55[]; extern unsigned short unac_data56[]; extern unsigned short unac_data57[]; extern unsigned short unac_data58[]; extern unsigned short unac_data59[]; extern unsigned short unac_data60[]; extern unsigned short unac_data61[]; extern unsigned short unac_data62[]; extern unsigned short unac_data63[]; extern unsigned short unac_data64[]; extern unsigned short unac_data65[]; extern unsigned short unac_data66[]; extern unsigned short unac_data67[]; extern unsigned short unac_data68[]; extern unsigned short unac_data69[]; extern unsigned short unac_data70[]; extern unsigned short unac_data71[]; extern unsigned short unac_data72[]; extern unsigned short unac_data73[]; extern unsigned short unac_data74[]; extern unsigned short unac_data75[]; extern unsigned short unac_data76[]; extern unsigned short unac_data77[]; extern unsigned short unac_data78[]; extern unsigned short unac_data79[]; extern unsigned short unac_data80[]; extern unsigned short unac_data81[]; extern unsigned short unac_data82[]; extern unsigned short unac_data83[]; extern unsigned short unac_data84[]; extern unsigned short unac_data85[]; extern unsigned short unac_data86[]; extern unsigned short unac_data87[]; extern unsigned short unac_data88[]; extern unsigned short unac_data89[]; extern unsigned short unac_data90[]; extern unsigned short unac_data91[]; extern unsigned short unac_data92[]; extern unsigned short unac_data93[]; extern unsigned short unac_data94[]; extern unsigned short unac_data95[]; extern unsigned short unac_data96[]; extern unsigned short unac_data97[]; extern unsigned short unac_data98[]; extern unsigned short unac_data99[]; extern unsigned short unac_data100[]; extern unsigned short unac_data101[]; extern unsigned short unac_data102[]; extern unsigned short unac_data103[]; extern unsigned short unac_data104[]; extern unsigned short unac_data105[]; extern unsigned short unac_data106[]; extern unsigned short unac_data107[]; extern unsigned short unac_data108[]; extern unsigned short unac_data109[]; extern unsigned short unac_data110[]; extern unsigned short unac_data111[]; extern unsigned short unac_data112[]; extern unsigned short unac_data113[]; extern unsigned short unac_data114[]; extern unsigned short unac_data115[]; extern unsigned short unac_data116[]; extern unsigned short unac_data117[]; extern unsigned short unac_data118[]; extern unsigned short unac_data119[]; extern unsigned short unac_data120[]; extern unsigned short unac_data121[]; extern unsigned short unac_data122[]; extern unsigned short unac_data123[]; extern unsigned short unac_data124[]; extern unsigned short unac_data125[]; extern unsigned short unac_data126[]; extern unsigned short unac_data127[]; extern unsigned short unac_data128[]; extern unsigned short unac_data129[]; extern unsigned short unac_data130[]; extern unsigned short unac_data131[]; extern unsigned short unac_data132[]; extern unsigned short unac_data133[]; extern unsigned short unac_data134[]; extern unsigned short unac_data135[]; extern unsigned short unac_data136[]; extern unsigned short unac_data137[]; extern unsigned short unac_data138[]; extern unsigned short unac_data139[]; extern unsigned short unac_data140[]; extern unsigned short unac_data141[]; extern unsigned short unac_data142[]; extern unsigned short unac_data143[]; extern unsigned short unac_data144[]; extern unsigned short unac_data145[]; extern unsigned short unac_data146[]; 
extern unsigned short unac_data147[]; extern unsigned short unac_data148[]; extern unsigned short unac_data149[]; extern unsigned short unac_data150[]; extern unsigned short unac_data151[]; extern unsigned short unac_data152[]; extern unsigned short unac_data153[]; extern unsigned short unac_data154[]; extern unsigned short unac_data155[]; extern unsigned short unac_data156[]; extern unsigned short unac_data157[]; extern unsigned short unac_data158[]; extern unsigned short unac_data159[]; extern unsigned short unac_data160[]; extern unsigned short unac_data161[]; extern unsigned short unac_data162[]; extern unsigned short unac_data163[]; extern unsigned short unac_data164[]; extern unsigned short unac_data165[]; extern unsigned short unac_data166[]; extern unsigned short unac_data167[]; extern unsigned short unac_data168[]; extern unsigned short unac_data169[]; extern unsigned short unac_data170[]; extern unsigned short unac_data171[]; extern unsigned short unac_data172[]; extern unsigned short unac_data173[]; extern unsigned short unac_data174[]; extern unsigned short unac_data175[]; extern unsigned short unac_data176[]; extern unsigned short unac_data177[]; extern unsigned short unac_data178[]; extern unsigned short unac_data179[]; extern unsigned short unac_data180[]; extern unsigned short unac_data181[]; extern unsigned short unac_data182[]; extern unsigned short unac_data183[]; extern unsigned short unac_data184[]; extern unsigned short unac_data185[]; extern unsigned short unac_data186[]; extern unsigned short unac_data187[]; extern unsigned short unac_data188[]; extern unsigned short unac_data189[]; extern unsigned short unac_data190[]; extern unsigned short unac_data191[]; extern unsigned short unac_data192[]; extern unsigned short unac_data193[]; extern unsigned short unac_data194[]; extern unsigned short unac_data195[]; extern unsigned short unac_data196[]; extern unsigned short unac_data197[]; extern unsigned short unac_data198[]; extern unsigned short unac_data199[]; extern unsigned short unac_data200[]; extern unsigned short unac_data201[]; extern unsigned short unac_data202[]; extern unsigned short unac_data203[]; extern unsigned short unac_data204[]; extern unsigned short unac_data205[]; extern unsigned short unac_data206[]; extern unsigned short unac_data207[]; extern unsigned short unac_data208[]; extern unsigned short unac_data209[]; extern unsigned short unac_data210[]; extern unsigned short unac_data211[]; extern unsigned short unac_data212[]; extern unsigned short unac_data213[]; extern unsigned short unac_data214[]; extern unsigned short unac_data215[]; extern unsigned short unac_data216[]; extern unsigned short unac_data217[]; extern unsigned short unac_data218[]; extern unsigned short unac_data219[]; extern unsigned short unac_data220[]; extern unsigned short unac_data221[]; extern unsigned short unac_data222[]; extern unsigned short unac_data223[]; extern unsigned short unac_data224[]; extern unsigned short unac_data225[]; extern unsigned short unac_data226[]; extern unsigned short unac_data227[]; extern unsigned short unac_data228[]; extern unsigned short unac_data229[]; extern unsigned short unac_data230[]; extern unsigned short unac_data231[]; extern unsigned short unac_data232[]; extern unsigned short unac_data233[]; extern unsigned short unac_data234[]; extern unsigned short unac_data235[]; extern unsigned short unac_data236[]; extern unsigned short unac_data237[]; extern unsigned short unac_data238[]; extern unsigned short unac_data239[]; extern unsigned 
short unac_data240[]; extern unsigned short unac_data241[]; extern unsigned short unac_data242[]; extern unsigned short unac_data243[]; extern unsigned short unac_data244[]; extern unsigned short unac_data245[]; extern unsigned short unac_data246[]; extern unsigned short unac_data247[]; extern unsigned short unac_data248[]; extern unsigned short unac_data249[]; extern unsigned short unac_data250[]; extern unsigned short unac_data251[]; extern unsigned short unac_data252[]; extern unsigned short unac_data253[]; extern unsigned short unac_data254[]; extern unsigned short unac_data255[]; extern unsigned short unac_data256[]; extern unsigned short unac_data257[]; extern unsigned short unac_data258[]; extern unsigned short unac_data259[]; extern unsigned short unac_data260[]; extern unsigned short unac_data261[]; extern unsigned short unac_data262[]; extern unsigned short unac_data263[]; extern unsigned short unac_data264[]; extern unsigned short unac_data265[]; extern unsigned short unac_data266[]; extern unsigned short unac_data267[]; extern unsigned short unac_data268[]; extern unsigned short unac_data269[]; extern unsigned short unac_data270[]; extern unsigned short unac_data271[]; extern unsigned short unac_data272[]; extern unsigned short unac_data273[]; extern unsigned short unac_data274[]; extern unsigned short unac_data275[]; extern unsigned short unac_data276[]; extern unsigned short unac_data277[]; extern unsigned short unac_data278[]; extern unsigned short unac_data279[]; extern unsigned short unac_data280[]; extern unsigned short unac_data281[]; extern unsigned short unac_data282[]; extern unsigned short unac_data283[]; extern unsigned short unac_data284[]; extern unsigned short unac_data285[]; extern unsigned short unac_data286[]; extern unsigned short unac_data287[]; extern unsigned short unac_data288[]; extern unsigned short unac_data289[]; extern unsigned short unac_data290[]; extern unsigned short unac_data291[]; extern unsigned short unac_data292[]; extern unsigned short unac_data293[]; extern unsigned short unac_data294[]; extern unsigned short unac_data295[]; extern unsigned short unac_data296[]; extern unsigned short unac_data297[]; extern unsigned short unac_data298[]; extern unsigned short unac_data299[]; extern unsigned short unac_data300[]; extern unsigned short unac_data301[]; extern unsigned short unac_data302[]; extern unsigned short unac_data303[]; extern unsigned short unac_data304[]; extern unsigned short unac_data305[]; extern unsigned short unac_data306[]; extern unsigned short unac_data307[]; extern unsigned short unac_data308[]; extern unsigned short unac_data309[]; extern unsigned short unac_data310[]; extern unsigned short unac_data311[]; extern unsigned short unac_data312[]; extern unsigned short unac_data313[]; extern unsigned short unac_data314[]; extern unsigned short unac_data315[]; extern unsigned short unac_data316[]; extern unsigned short unac_data317[]; extern unsigned short unac_data318[]; extern unsigned short unac_data319[]; extern unsigned short unac_data320[]; extern unsigned short unac_data321[]; extern unsigned short unac_data322[]; extern unsigned short unac_data323[]; extern unsigned short unac_data324[]; extern unsigned short unac_data325[]; extern unsigned short unac_data326[]; extern unsigned short unac_data327[]; extern unsigned short unac_data328[]; extern unsigned short unac_data329[]; extern unsigned short unac_data330[]; extern unsigned short unac_data331[]; extern unsigned short unac_data332[]; extern unsigned short 
unac_data333[]; extern unsigned short unac_data334[]; extern unsigned short unac_data335[]; extern unsigned short unac_data336[]; extern unsigned short unac_data337[]; extern unsigned short unac_data338[]; extern unsigned short unac_data339[]; extern unsigned short unac_data340[]; extern unsigned short unac_data341[]; extern unsigned short unac_data342[]; extern unsigned short unac_data343[]; extern unsigned short unac_data344[]; extern unsigned short unac_data345[]; extern unsigned short unac_data346[]; extern unsigned short unac_data347[]; extern unsigned short unac_data348[]; extern unsigned short unac_data349[]; extern unsigned short unac_data350[]; extern unsigned short unac_data351[]; extern unsigned short unac_data352[]; extern unsigned short unac_data353[]; extern unsigned short unac_data354[]; extern unsigned short unac_data355[]; extern unsigned short unac_data356[]; extern unsigned short unac_data357[]; extern unsigned short unac_data358[]; extern unsigned short unac_data359[]; extern unsigned short unac_data360[]; extern unsigned short unac_data361[]; extern unsigned short unac_data362[]; extern unsigned short unac_data363[]; extern unsigned short unac_data364[]; extern unsigned short unac_data365[]; extern unsigned short unac_data366[]; extern unsigned short unac_data367[]; extern unsigned short unac_data368[]; extern unsigned short unac_data369[]; extern unsigned short unac_data370[]; extern unsigned short unac_data371[]; extern unsigned short unac_data372[]; extern unsigned short unac_data373[]; extern unsigned short unac_data374[]; extern unsigned short unac_data375[]; extern unsigned short unac_data376[]; extern unsigned short unac_data377[]; extern unsigned short unac_data378[]; extern unsigned short unac_data379[]; extern unsigned short unac_data380[]; extern unsigned short unac_data381[]; extern unsigned short unac_data382[]; extern unsigned short unac_data383[]; extern unsigned short unac_data384[]; extern unsigned short unac_data385[]; extern unsigned short unac_data386[]; extern unsigned short unac_data387[]; extern unsigned short unac_data388[]; extern unsigned short unac_data389[]; extern unsigned short unac_data390[]; extern unsigned short unac_data391[]; extern unsigned short unac_data392[]; extern unsigned short unac_data393[]; extern unsigned short unac_data394[]; extern unsigned short unac_data395[]; extern unsigned short unac_data396[]; extern unsigned short unac_data397[]; extern unsigned short unac_data398[]; extern unsigned short unac_data399[]; extern unsigned short unac_data400[]; extern unsigned short unac_data401[]; extern unsigned short unac_data402[]; extern unsigned short unac_data403[]; extern unsigned short unac_data404[]; extern unsigned short unac_data405[]; extern unsigned short unac_data406[]; extern unsigned short unac_data407[]; extern unsigned short unac_data408[]; extern unsigned short unac_data409[]; extern unsigned short unac_data410[]; extern unsigned short unac_data411[]; extern unsigned short unac_data412[]; extern unsigned short unac_data413[]; extern unsigned short unac_data414[]; extern unsigned short unac_data415[]; extern unsigned short unac_data416[]; extern unsigned short unac_data417[]; extern unsigned short unac_data418[]; extern unsigned short unac_data419[]; extern unsigned short unac_data420[]; extern unsigned short unac_data421[]; extern unsigned short unac_data422[]; extern unsigned short unac_data423[]; extern unsigned short unac_data424[]; extern unsigned short unac_data425[]; extern unsigned short unac_data426[]; 
extern unsigned short unac_data427[]; extern unsigned short unac_data428[]; extern unsigned short unac_data429[]; extern unsigned short unac_data430[]; extern unsigned short unac_data431[]; extern unsigned short unac_data432[]; extern unsigned short unac_data433[]; extern unsigned short unac_data434[]; extern unsigned short unac_data435[]; extern unsigned short unac_data436[]; extern unsigned short unac_data437[]; extern unsigned short unac_data438[]; extern unsigned short unac_data439[]; extern unsigned short unac_data440[]; extern unsigned short unac_data441[]; extern unsigned short unac_data442[]; extern unsigned short unac_data443[]; extern unsigned short unac_data444[]; extern unsigned short unac_data445[]; extern unsigned short unac_data446[]; extern unsigned short unac_data447[]; extern unsigned short unac_data448[]; extern unsigned short unac_data449[]; extern unsigned short unac_data450[]; extern unsigned short unac_data451[]; extern unsigned short unac_data452[]; extern unsigned short unac_data453[]; extern unsigned short unac_data454[]; extern unsigned short unac_data455[]; extern unsigned short unac_data456[]; extern unsigned short unac_data457[]; extern unsigned short unac_data458[]; extern unsigned short unac_data459[]; extern unsigned short unac_data460[]; extern unsigned short unac_data461[]; extern unsigned short unac_data462[]; extern unsigned short unac_data463[]; extern unsigned short unac_data464[]; extern unsigned short unac_data465[]; extern unsigned short unac_data466[]; extern unsigned short unac_data467[]; extern unsigned short unac_data468[]; extern unsigned short unac_data469[]; extern unsigned short unac_data470[]; extern unsigned short unac_data471[]; extern unsigned short unac_data472[]; extern unsigned short unac_data473[]; extern unsigned short unac_data474[]; extern unsigned short unac_data475[]; extern unsigned short unac_data476[]; extern unsigned short unac_data477[]; extern unsigned short unac_data478[]; extern unsigned short unac_data479[]; extern unsigned short unac_data480[]; extern unsigned short unac_data481[]; extern unsigned short unac_data482[]; extern unsigned short unac_data483[]; extern unsigned short unac_data484[]; extern unsigned short unac_data485[]; extern unsigned short unac_data486[]; extern unsigned short unac_data487[]; extern unsigned short unac_data488[]; extern unsigned short unac_data489[]; extern unsigned short unac_data490[]; extern unsigned short unac_data491[]; extern unsigned short unac_data492[]; extern unsigned short unac_data493[]; extern unsigned short unac_data494[]; extern unsigned short unac_data495[]; extern unsigned short unac_data496[]; extern unsigned short unac_data497[]; extern unsigned short unac_data498[]; extern unsigned short unac_data499[]; extern unsigned short unac_data500[]; extern unsigned short unac_data501[]; extern unsigned short unac_data502[]; extern unsigned short unac_data503[]; extern unsigned short unac_data504[]; extern unsigned short unac_data505[]; extern unsigned short unac_data506[]; extern unsigned short unac_data507[]; extern unsigned short unac_data508[]; extern unsigned short unac_data509[]; extern unsigned short unac_data510[]; extern unsigned short unac_data511[]; extern unsigned short unac_data512[]; extern unsigned short unac_data513[]; extern unsigned short unac_data514[]; extern unsigned short unac_data515[]; extern unsigned short unac_data516[]; extern unsigned short unac_data517[]; extern unsigned short unac_data518[]; extern unsigned short unac_data519[]; extern unsigned 
short unac_data520[]; extern unsigned short unac_data521[]; extern unsigned short unac_data522[]; extern unsigned short unac_data523[]; extern unsigned short unac_data524[]; extern unsigned short unac_data525[]; extern unsigned short unac_data526[]; extern unsigned short unac_data527[]; extern unsigned short unac_data528[]; extern unsigned short unac_data529[]; extern unsigned short unac_data530[]; extern unsigned short unac_data531[]; extern unsigned short unac_data532[]; extern unsigned short unac_data533[]; extern unsigned short unac_data534[]; extern unsigned short unac_data535[]; extern unsigned short unac_data536[]; extern unsigned short unac_data537[]; extern unsigned short unac_data538[]; extern unsigned short unac_data539[]; extern unsigned short unac_data540[]; extern unsigned short unac_data541[]; extern unsigned short unac_data542[]; extern unsigned short unac_data543[]; extern unsigned short unac_data544[]; extern unsigned short unac_data545[]; extern unsigned short unac_data546[]; extern unsigned short unac_data547[]; extern unsigned short unac_data548[]; extern unsigned short unac_data549[]; extern unsigned short unac_data550[]; extern unsigned short unac_data551[]; extern unsigned short unac_data552[]; extern unsigned short unac_data553[]; extern unsigned short unac_data554[]; extern unsigned short unac_data555[]; extern unsigned short unac_data556[]; extern unsigned short unac_data557[]; extern unsigned short unac_data558[]; extern unsigned short unac_data559[]; extern unsigned short unac_data560[]; extern unsigned short unac_data561[]; extern unsigned short unac_data562[]; extern unsigned short unac_data563[]; extern unsigned short unac_data564[]; extern unsigned short unac_data565[]; extern unsigned short unac_data566[]; extern unsigned short unac_data567[]; extern unsigned short unac_data568[]; extern unsigned short unac_data569[]; extern unsigned short unac_data570[]; extern unsigned short unac_data571[]; extern unsigned short unac_data572[]; extern unsigned short unac_data573[]; extern unsigned short unac_data574[]; extern unsigned short unac_data575[]; extern unsigned short unac_data576[]; extern unsigned short unac_data577[]; extern unsigned short unac_data578[]; extern unsigned short unac_data579[]; extern unsigned short unac_data580[]; extern unsigned short unac_data581[]; extern unsigned short unac_data582[]; extern unsigned short unac_data583[]; extern unsigned short unac_data584[]; extern unsigned short unac_data585[]; extern unsigned short unac_data586[]; extern unsigned short unac_data587[]; extern unsigned short unac_data588[]; extern unsigned short unac_data589[]; extern unsigned short unac_data590[]; extern unsigned short unac_data591[]; extern unsigned short unac_data592[]; extern unsigned short unac_data593[]; extern unsigned short unac_data594[]; extern unsigned short unac_data595[]; extern unsigned short unac_data596[]; extern unsigned short unac_data597[]; extern unsigned short unac_data598[]; extern unsigned short unac_data599[]; extern unsigned short unac_data600[]; extern unsigned short unac_data601[]; extern unsigned short unac_data602[]; extern unsigned short unac_data603[]; extern unsigned short unac_data604[]; extern unsigned short unac_data605[]; extern unsigned short unac_data606[]; extern unsigned short unac_data607[]; extern unsigned short unac_data608[]; extern unsigned short unac_data609[]; extern unsigned short unac_data610[]; extern unsigned short unac_data611[]; extern unsigned short unac_data612[]; extern unsigned short 
unac_data613[]; extern unsigned short unac_data614[]; extern unsigned short unac_data615[]; extern unsigned short unac_data616[]; extern unsigned short unac_data617[]; extern unsigned short unac_data618[]; extern unsigned short unac_data619[]; extern unsigned short unac_data620[]; extern unsigned short unac_data621[]; extern unsigned short unac_data622[]; extern unsigned short unac_data623[]; extern unsigned short unac_data624[]; extern unsigned short unac_data625[]; extern unsigned short unac_data626[]; extern unsigned short unac_data627[]; extern unsigned short unac_data628[]; extern unsigned short unac_data629[]; extern unsigned short unac_data630[]; extern unsigned short unac_data631[]; extern unsigned short unac_data632[]; extern unsigned short unac_data633[]; extern unsigned short unac_data634[]; extern unsigned short unac_data635[]; extern unsigned short unac_data636[]; extern unsigned short unac_data637[]; extern unsigned short unac_data638[]; extern unsigned short unac_data639[]; extern unsigned short unac_data640[]; extern unsigned short unac_data641[]; extern unsigned short unac_data642[]; extern unsigned short unac_data643[]; extern unsigned short unac_data644[]; extern unsigned short unac_data645[]; extern unsigned short unac_data646[]; extern unsigned short unac_data647[]; extern unsigned short unac_data648[]; extern unsigned short unac_data649[]; extern unsigned short unac_data650[]; extern unsigned short unac_data651[]; extern unsigned short unac_data652[]; extern unsigned short unac_data653[]; extern unsigned short unac_data654[]; extern unsigned short unac_data655[]; extern unsigned short unac_data656[]; extern unsigned short unac_data657[]; extern unsigned short unac_data658[]; extern unsigned short unac_data659[]; extern unsigned short unac_data660[]; extern unsigned short unac_data661[]; extern unsigned short unac_data662[]; extern unsigned short unac_data663[]; extern unsigned short unac_data664[]; extern unsigned short unac_data665[]; extern unsigned short unac_data666[]; extern unsigned short unac_data667[]; extern unsigned short unac_data668[]; extern unsigned short unac_data669[]; extern unsigned short unac_data670[]; extern unsigned short unac_data671[]; extern unsigned short unac_data672[]; extern unsigned short unac_data673[]; extern unsigned short unac_data674[]; extern unsigned short unac_data675[]; extern unsigned short unac_data676[]; extern unsigned short unac_data677[]; extern unsigned short unac_data678[]; extern unsigned short unac_data679[]; extern unsigned short unac_data680[]; extern unsigned short unac_data681[]; extern unsigned short unac_data682[]; extern unsigned short unac_data683[]; extern unsigned short unac_data684[]; extern unsigned short unac_data685[]; extern unsigned short unac_data686[]; extern unsigned short unac_data687[]; extern unsigned short unac_data688[]; extern unsigned short unac_data689[]; extern unsigned short unac_data690[]; extern unsigned short unac_data691[]; extern unsigned short unac_data692[]; extern unsigned short unac_data693[]; extern unsigned short unac_data694[]; extern unsigned short unac_data695[]; extern unsigned short unac_data696[]; extern unsigned short unac_data697[]; extern unsigned short unac_data698[]; extern unsigned short unac_data699[]; extern unsigned short unac_data700[]; extern unsigned short unac_data701[]; extern unsigned short unac_data702[]; extern unsigned short unac_data703[]; extern unsigned short unac_data704[]; extern unsigned short unac_data705[]; extern unsigned short unac_data706[]; 
extern unsigned short unac_data707[]; extern unsigned short unac_data708[]; extern unsigned short unac_data709[]; extern unsigned short unac_data710[]; extern unsigned short unac_data711[]; extern unsigned short unac_data712[]; /* Generated by builder. Do not modify. End declarations */ #ifdef __cplusplus } #endif #endif /* _unac_h */ recoll-1.26.3/bincimapmime/0000755000175000017500000000000013570165410012520 500000000000000recoll-1.26.3/bincimapmime/00README.recoll0000644000175000017500000000016313303776057014750 00000000000000Most of the code in this directory was taken from the Binc IMAP project (http://www.bincimap.org/), version 1.3.3 recoll-1.26.3/bincimapmime/COPYING0000644000175000017500000004460513533651561013512 00000000000000This software is released under the GPL. Find a full copy of the GNU General Public License below. In addition, as a special exception, Andreas Aardal Hanssen, author of Binc IMAP, gives permission to link the code of this program with the OpenSSL library (or with modified versions of OpenSSL that use the same license as OpenSSL, listed in the included COPYING.OpenSSL file), and distribute linked combinations including the two. You must obey the GNU General Public License in all respects for all of the code used other than OpenSSL. If you modify this file, you may extend this exception to your version of the file, but you are not obligated to do so. If you do not wish to do so, delete this exception statement from your version. --------------------------------------------------------------------- GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. 
Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. 
(Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. 
Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. recoll-1.26.3/bincimapmime/convert.h0000644000175000017500000002150713533651561014304 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * src/util/convert.h * * Description: * Declaration of miscellaneous convertion functions. * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* -------------------------------------------------------------------- */ #ifndef convert_h_included #define convert_h_included #include #include #include #include #include #include #include #include namespace Binc { //---------------------------------------------------------------------- inline std::string toString(int i_in) { char intbuf[16]; snprintf(intbuf, sizeof(intbuf), "%d", i_in); return std::string(intbuf); } //---------------------------------------------------------------------- inline std::string toString(unsigned int i_in) { char intbuf[16]; snprintf(intbuf, sizeof(intbuf), "%u", i_in); return std::string(intbuf); } //---------------------------------------------------------------------- inline std::string toString(unsigned long i_in) { char longbuf[40]; snprintf(longbuf, sizeof(longbuf), "%lu", i_in); return std::string(longbuf); } //---------------------------------------------------------------------- inline std::string toString(const char *i_in) { return std::string(i_in); } //---------------------------------------------------------------------- inline int atoi(const std::string &s_in) { return ::atoi(s_in.c_str()); } //---------------------------------------------------------------------- inline std::string toHex(const std::string &s) { const char hexchars[] = "0123456789abcdef"; std::string tmp; for (std::string::const_iterator i = s.begin(); i != s.end(); ++i) { unsigned char c = (unsigned char)*i; tmp += hexchars[((c & 0xf0) >> 4)]; tmp += hexchars[c & 0x0f]; } return tmp; } //---------------------------------------------------------------------- inline std::string fromHex(const std::string &s) { const char hexchars[] = "0123456789abcdef"; std::string tmp; for (std::string::const_iterator i = s.begin(); i != s.end() && i + 1 != s.end(); i += 2) { ptrdiff_t n; unsigned char c = *i; unsigned char d = *(i + 1); const char *t; if ((t = strchr(hexchars, c)) == 0) return "out of range"; n = (t - hexchars) << 4; if ((t = strchr(hexchars, d)) == 0) return "out of range"; n += (t - hexchars); if (n >= 0 && n <= 255) tmp += (char) n; else return "out of range"; } return tmp; } //---------------------------------------------------------------------- inline std::string toImapString(const std::string &s_in) { for (std::string::const_iterator i = s_in.begin(); i != s_in.end(); ++i) { unsigned char c = (unsigned char)*i; if (c <= 31 || c >= 127 || c == '\"' || c == '\\') return "{" + toString((unsigned long)s_in.length()) + "}\r\n" + s_in; } return "\"" + s_in + "\""; } //---------------------------------------------------------------------- inline void uppercase(std::string &input) { for (std::string::iterator i = input.begin(); i != input.end(); ++i) *i = toupper(*i); } //---------------------------------------------------------------------- inline void lowercase(std::string &input) { for (std::string::iterator i = input.begin(); i != input.end(); ++i) *i = tolower(*i); } //---------------------------------------------------------------------- inline void chomp(std::string &s_in, const std::string &chars = " \t\r\n") { std::string::size_type n = s_in.length(); while (n > 1 && chars.find(s_in[n - 1]) != std::string::npos) s_in.resize(n-- - 1); } //---------------------------------------------------------------------- inline void trim(std::string &s_in, const std::string &chars = " \t\r\n") { while (s_in != "" && chars.find(s_in[0]) != std::string::npos) s_in = s_in.substr(1); chomp(s_in, chars); } //---------------------------------------------------------------------- inline 
const std::string unfold(const std::string &a, bool removecomment = true) { std::string tmp; bool incomment = false; bool inquotes = false; for (std::string::const_iterator i = a.begin(); i != a.end(); ++i) { unsigned char c = (unsigned char)*i; if (!inquotes && removecomment) { if (c == '(') { incomment = true; tmp += " "; } else if (c == ')') { incomment = false; } else if (c != 0x0a && c != 0x0d) { tmp += *i; } } else if (c != 0x0a && c != 0x0d) { tmp += *i; } if (!incomment) { if (*i == '\"') inquotes = !inquotes; } } trim(tmp); return tmp; } //---------------------------------------------------------------------- inline void split(const std::string &s_in, const std::string &delim, std::vector &dest, bool skipempty = true) { std::string token; for (std::string::const_iterator i = s_in.begin(); i != s_in.end(); ++i) { if (delim.find(*i) != std::string::npos) { if (!skipempty || token != "") dest.push_back(token); token.clear(); } else token += *i; } if (token != "") dest.push_back(token); } //---------------------------------------------------------------------- inline void splitAddr(const std::string &s_in, std::vector &dest, bool skipempty = true) { static const std::string delim = ","; std::string token; bool inquote = false; for (std::string::const_iterator i = s_in.begin(); i != s_in.end(); ++i) { if (inquote && *i == '\"') inquote = false; else if (!inquote && *i == '\"') inquote = true; if (!inquote && delim.find(*i) != std::string::npos) { if (!skipempty || token != "") dest.push_back(token); token.clear(); } else token += *i; } if (token != "") dest.push_back(token); } //---------------------------------------------------------------------- inline std::string toCanonMailbox(const std::string &s_in) { if (s_in.find("..") != std::string::npos) return std::string(); if (s_in.length() >= 5) { std::string a = s_in.substr(0, 5); uppercase(a); return a == "INBOX" ? a + (s_in.length() > 5 ? s_in.substr(5) : std::string()) : s_in; } return s_in; } //------------------------------------------------------------------------ inline std::string toRegex(const std::string &s_in, char delimiter) { std::string regex = "^"; for (std::string::const_iterator i = s_in.begin(); i != s_in.end(); ++i) { if (*i == '.' || *i == '[' || *i == ']' || *i == '{' || *i == '}' || *i == '(' || *i == ')' || *i == '^' || *i == '$' || *i == '?' 
|| *i == '+' || *i == '\\') { regex += "\\"; regex += *i; } else if (*i == '*') regex += ".*?"; else if (*i == '%') { regex += "(\\"; regex += delimiter; regex += "){0,1}"; regex += "[^\\"; regex += delimiter; regex += "]*?"; } else regex += *i; } if (regex[regex.length() - 1] == '?') regex[regex.length() - 1] = '$'; else regex += "$"; return regex; } //------------------------------------------------------------------------ class BincStream { private: std::string nstr; public: //-- BincStream &operator << (std::ostream&(*)(std::ostream&)); BincStream &operator << (const std::string &t); BincStream &operator << (unsigned int t); BincStream &operator << (int t); BincStream &operator << (char t); //-- std::string popString(std::string::size_type size); //-- char popChar(void); void unpopChar(char c); void unpopStr(const std::string &s); //-- const std::string &str(void) const; //-- unsigned int getSize(void) const; //-- void clear(void); //-- BincStream(void); ~BincStream(void); }; } #endif recoll-1.26.3/bincimapmime/mime-inputsource.h0000644000175000017500000001274013533651561016130 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * src/mime-inputsource.h * * Description: * The base class of the MIME input source * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #ifndef mime_inputsource_h_included #define mime_inputsource_h_included #include "autoconfig.h" // Data source for MIME parser // Note about large files: we might want to change the unsigned int // used for offsets into an off_t for intellectual satisfaction, but // in the context of recoll, we could only get into trouble if a // *single message* exceeded 2GB, which seems rather unlikely. When // parsing a mailbox files, we read each message in memory and use the // stream input source (from a memory buffer, no file offsets). When // parsing a raw message file, it's only one message. 
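// Illustrative usage sketch (assumed example, not from the original Binc
// sources): the MimeDocument::parseFull() overloads defined in
// mime-parsefull.cc can be fed either a file descriptor (a single raw
// message file) or a stream over an in-memory buffer, which is how mailbox
// messages are handled as described in the note above. The file path below
// is hypothetical, and the whole block is guarded out so it is never built.
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sstream>
#include <string>
#include "mime.h"
static void exampleParse(const std::string& rawMessage)
{
    // One message already read into memory (the mailbox case): use a stream.
    std::stringstream input(rawMessage);
    Binc::MimeDocument memDoc;
    memDoc.parseFull(input);

    // A single raw message file: hand the parser the file descriptor.
    int fd = open("/tmp/example-message.eml", O_RDONLY);
    if (fd >= 0) {
        Binc::MimeDocument fileDoc;
        fileDoc.parseFull(fd);
        close(fd);   // the input source does not take ownership of fd
    }
}
#endif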
#include #include "safeunistd.h" #include namespace Binc { class MimeInputSource { public: // Note that we do NOT take ownership of fd, won't close it on delete inline MimeInputSource(int fd, unsigned int start = 0); virtual inline ~MimeInputSource(void); virtual inline ssize_t fillRaw(char *raw, size_t nbytes); virtual inline void reset(void); virtual inline bool fillInputBuffer(void); inline void seek(unsigned int offset); inline bool getChar(char *c); inline void ungetChar(void); inline int getFileDescriptor(void) const; inline unsigned int getOffset(void) const; private: int fd; char data[16384]; unsigned int offset; unsigned int tail; unsigned int head; unsigned int start; char lastChar; }; inline MimeInputSource::MimeInputSource(int fd, unsigned int start) { this->fd = fd; this->start = start; offset = 0; tail = 0; head = 0; lastChar = '\0'; memset(data, '\0', sizeof(data)); seek(start); } inline MimeInputSource::~MimeInputSource(void) { } inline ssize_t MimeInputSource::fillRaw(char *raw, size_t nbytes) { return read(fd, raw, nbytes); } inline bool MimeInputSource::fillInputBuffer(void) { char raw[4096]; ssize_t nbytes = fillRaw(raw, 4096); if (nbytes <= 0) { // FIXME: If ferror(crlffile) we should log this. return false; } for (ssize_t i = 0; i < nbytes; ++i) { const char c = raw[i]; if (c == '\r') { if (lastChar == '\r') { data[tail++ & (0x4000-1)] = '\r'; data[tail++ & (0x4000-1)] = '\n'; } } else if (c == '\n') { data[tail++ & (0x4000-1)] = '\r'; data[tail++ & (0x4000-1)] = '\n'; } else { if (lastChar == '\r') { data[tail++ & (0x4000-1)] = '\r'; data[tail++ & (0x4000-1)] = '\n'; } data[tail++ & (0x4000-1)] = c; } lastChar = c; } return true; } inline void MimeInputSource::reset(void) { offset = head = tail = 0; lastChar = '\0'; if (fd != -1) lseek(fd, 0, SEEK_SET); } inline void MimeInputSource::seek(unsigned int seekToOffset) { if (offset > seekToOffset) reset(); char c; int n = 0; while (seekToOffset > offset) { if (!getChar(&c)) break; ++n; } } inline bool MimeInputSource::getChar(char *c) { if (head == tail && !fillInputBuffer()) return false; *c = data[head++ & (0x4000-1)]; ++offset; return true; } inline void MimeInputSource::ungetChar() { --head; --offset; } inline int MimeInputSource::getFileDescriptor(void) const { return fd; } inline unsigned int MimeInputSource::getOffset(void) const { return offset; } /////////////////////////////////// class MimeInputSourceStream : public MimeInputSource { public: inline MimeInputSourceStream(istream& s, unsigned int start = 0); virtual inline ssize_t fillRaw(char *raw, size_t nb); virtual inline void reset(void); private: istream& s; }; inline MimeInputSourceStream::MimeInputSourceStream(istream& si, unsigned int start) : MimeInputSource(-1, start), s(si) { } inline ssize_t MimeInputSourceStream::fillRaw(char *raw, size_t nb) { // Why can't streams tell how many characters were actually read // when hitting eof ? 
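// (Workaround used below, for clarity: measure how many bytes remain by
// seeking to the end of the stream and back with tellg()/seekg(), clamp that
// count to the requested size, and only then call read(), so a short read
// near end-of-stream never leaves us guessing about the actual byte count.)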
std::streampos st = s.tellg(); s.seekg(0, ios::end); std::streampos lst = s.tellg(); s.seekg(st); size_t nbytes = size_t(lst - st); if (nbytes > nb) { nbytes = nb; } if (nbytes <= 0) { return (ssize_t)-1; } s.read(raw, nbytes); return static_cast(nbytes); } inline void MimeInputSourceStream::reset(void) { MimeInputSource::reset(); s.seekg(0); } } #endif recoll-1.26.3/bincimapmime/mime-printbody.cc0000644000175000017500000000324013533651561015713 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * mime-printbody.cc * * Description: * Implementation of main mime parser components * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #include "mime.h" #include "mime-utils.h" #include "mime-inputsource.h" #include using namespace ::std; void Binc::MimePart::getBody(string &s, unsigned int startoffset, unsigned int length) const { mimeSource->reset(); mimeSource->seek(bodystartoffsetcrlf + startoffset); s.reserve(length); if (startoffset + length > bodylength) length = bodylength - startoffset; char c = '\0'; for (unsigned int i = 0; i < length; ++i) { if (!mimeSource->getChar(&c)) break; s += (char)c; } } recoll-1.26.3/bincimapmime/mime.cc0000644000175000017500000001022213533651561013701 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * mime.cc * * Description: * Implementation of main mime parser components * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* -------------------------------------------------------------------- */ #include #include #include #include #include #include #include #include #include #ifndef NO_NAMESPACES using namespace ::std; #endif /* NO_NAMESPACES */ #include "mime.h" #include "convert.h" #include "mime-inputsource.h" //------------------------------------------------------------------------ Binc::MimeDocument::MimeDocument(void) { allIsParsed = false; headerIsParsed = false; doc_mimeSource = 0; } //------------------------------------------------------------------------ Binc::MimeDocument::~MimeDocument(void) { delete doc_mimeSource; doc_mimeSource = 0; } //------------------------------------------------------------------------ void Binc::MimeDocument::clear(void) { members.clear(); h.clear(); headerIsParsed = false; allIsParsed = false; delete doc_mimeSource; doc_mimeSource = 0; } //------------------------------------------------------------------------ void Binc::MimePart::clear(void) { members.clear(); h.clear(); mimeSource = 0; } //------------------------------------------------------------------------ Binc::MimePart::MimePart(void) { size = 0; messagerfc822 = false; multipart = false; nlines = 0; nbodylines = 0; mimeSource = 0; } //------------------------------------------------------------------------ Binc::MimePart::~MimePart(void) { } //------------------------------------------------------------------------ Binc::HeaderItem::HeaderItem(void) { } //------------------------------------------------------------------------ Binc::HeaderItem::HeaderItem(const string &key, const string &value) { this->key = key; this->value = value; } //------------------------------------------------------------------------ Binc::Header::Header(void) { } //------------------------------------------------------------------------ Binc::Header::~Header(void) { } //------------------------------------------------------------------------ bool Binc::Header::getFirstHeader(const string &key, HeaderItem &dest) const { string k = key; lowercase(k); for (vector::const_iterator i = content.begin(); i != content.end(); ++i) { string tmp = (*i).getKey(); lowercase(tmp); if (tmp == k) { dest = *i; return true; } } return false; } //------------------------------------------------------------------------ bool Binc::Header::getAllHeaders(const string &key, vector &dest) const { string k = key; lowercase(k); for (vector::const_iterator i = content.begin(); i != content.end(); ++i) { string tmp = (*i).getKey(); lowercase(tmp); if (tmp == k) dest.push_back(*i); } return (dest.size() != 0); } //------------------------------------------------------------------------ void Binc::Header::clear(void) { content.clear(); } //------------------------------------------------------------------------ void Binc::Header::add(const string &key, const string &value) { content.push_back(HeaderItem(key, value)); } recoll-1.26.3/bincimapmime/mime-parsefull.cc0000644000175000017500000004124113533651561015701 00000000000000 /* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * mime-parsefull.cc * * Description: * Implementation of main mime parser components * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at 
your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #include #include #include #include #include #include #include #include #include #ifndef NO_NAMESPACES using namespace ::std; #endif /* NO_NAMESPACES */ #include "mime.h" #include "mime-utils.h" #include "mime-inputsource.h" #include "convert.h" // #define MPF #ifdef MPF #define MPFDEB(X) fprintf X #else #define MPFDEB(X) #endif //------------------------------------------------------------------------ void Binc::MimeDocument::parseFull(int fd) { if (allIsParsed) return; allIsParsed = true; delete doc_mimeSource; doc_mimeSource = new MimeInputSource(fd); headerstartoffsetcrlf = 0; headerlength = 0; bodystartoffsetcrlf = 0; bodylength = 0; size = 0; messagerfc822 = false; multipart = false; int bsize = 0; string bound; doParseFull(doc_mimeSource, bound, bsize); // eat any trailing junk to get the correct size char c; while (doc_mimeSource->getChar(&c)); size = doc_mimeSource->getOffset(); } void Binc::MimeDocument::parseFull(istream& s) { if (allIsParsed) return; allIsParsed = true; delete doc_mimeSource; doc_mimeSource = new MimeInputSourceStream(s); headerstartoffsetcrlf = 0; headerlength = 0; bodystartoffsetcrlf = 0; bodylength = 0; size = 0; messagerfc822 = false; multipart = false; int bsize = 0; string bound; doParseFull(doc_mimeSource, bound, bsize); // eat any trailing junk to get the correct size char c; while (doc_mimeSource->getChar(&c)); size = doc_mimeSource->getOffset(); } //------------------------------------------------------------------------ bool Binc::MimePart::parseOneHeaderLine(Binc::Header *header, unsigned int *nlines) { using namespace ::Binc; char c; bool eof = false; char cqueue[4]; string name; string content; while (mimeSource->getChar(&c)) { // If we encounter a \r before we got to the first ':', then // rewind back to the start of the line and assume we're at the // start of the body. if (c == '\r') { for (int i = 0; i < (int) name.length() + 1; ++i) mimeSource->ungetChar(); return false; } // A colon marks the end of the header name if (c == ':') break; // Otherwise add to the header name name += c; } cqueue[0] = '\0'; cqueue[1] = '\0'; cqueue[2] = '\0'; cqueue[3] = '\0'; // Read until the end of the header. bool endOfHeaders = false; while (!endOfHeaders) { if (!mimeSource->getChar(&c)) { eof = true; break; } if (c == '\n') ++*nlines; for (int i = 0; i < 3; ++i) cqueue[i] = cqueue[i + 1]; cqueue[3] = c; if (strncmp(cqueue, "\r\n\r\n", 4) == 0) { endOfHeaders = true; break; } // If the last character was a newline, and the first now is not // whitespace, then rewind one character and store the current // key,value pair. 
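// Illustrative example of that rule (assumed input, not taken from the
// sources): the two physical lines
//   "Subject: quarterly report\r\n"
//   "\tsecond, folded line\r\n"
// accumulate into a single "Subject" header, because the character that
// follows the newline is a tab, so the line is treated as a continuation of
// the current value rather than the start of a new header key.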
if (cqueue[2] == '\n' && c != ' ' && c != '\t') { if (content.length() > 2) content.resize(content.length() - 2); trim(content); header->add(name, content); if (c != '\r') { mimeSource->ungetChar(); if (c == '\n') --*nlines; return true; } mimeSource->getChar(&c); return false; } content += c; } if (name != "") { if (content.length() > 2) content.resize(content.length() - 2); header->add(name, content); } return !(eof || endOfHeaders); } //------------------------------------------------------------------------ void Binc::MimePart::parseHeader(Binc::Header *header, unsigned int *nlines) { while (parseOneHeaderLine(header, nlines)) { } } //------------------------------------------------------------------------ void Binc::MimePart::analyzeHeader(Binc::Header *header, bool *multipart, bool *messagerfc822, string *subtype, string *boundary) { using namespace ::Binc; // Do simple parsing of headers to determine the // type of message (multipart,messagerfc822 etc) HeaderItem ctype; if (header->getFirstHeader("content-type", ctype)) { vector types; split(ctype.getValue(), ";", types); if (types.size() > 0) { // first element should describe content type string tmp = types[0]; trim(tmp); vector v; split(tmp, "/", v); string key, value; key = (v.size() > 0) ? v[0] : "text"; value = (v.size() > 1) ? v[1] : "plain"; lowercase(key); if (key == "multipart") { *multipart = true; lowercase(value); *subtype = value; } else if (key == "message") { lowercase(value); if (value == "rfc822") *messagerfc822 = true; } } for (vector::const_iterator i = types.begin(); i != types.end(); ++i) { string element = *i; trim(element); if (element.find("=") != string::npos) { string::size_type pos = element.find('='); string key = element.substr(0, pos); string value = element.substr(pos + 1); lowercase(key); trim(key); if (key == "boundary") { trim(value, " \""); *boundary = value; } } } } } void Binc::MimePart::parseMessageRFC822(vector *members, bool *foundendofpart, unsigned int *bodylength, unsigned int *nbodylines, const string &toboundary) { using namespace ::Binc; // message rfc822 means a completely enclosed mime document. we // call the parser recursively, and pass on the boundary string // that we got. when parse() finds this boundary, it returns 0. if // it finds the end boundary (boundary + "--"), it returns != 0. MimePart m; unsigned int bodystartoffsetcrlf = mimeSource->getOffset(); // parsefull returns the number of bytes that need to be removed // from the body because of the terminating boundary string. int bsize = 0; if (m.doParseFull(mimeSource, toboundary, bsize)) *foundendofpart = true; // make sure bodylength doesn't overflow *bodylength = mimeSource->getOffset(); if (*bodylength >= bodystartoffsetcrlf) { *bodylength -= bodystartoffsetcrlf; if (*bodylength >= (unsigned int) bsize) { *bodylength -= (unsigned int) bsize; } else { *bodylength = 0; } } else { *bodylength = 0; } *nbodylines += m.getNofLines(); members->push_back(m); } bool Binc::MimePart::skipUntilBoundary(const string &delimiter, unsigned int *nlines, bool *eof) { string::size_type endpos = delimiter.length(); char *delimiterqueue = 0; string::size_type delimiterpos = 0; const char *delimiterStr = delimiter.c_str(); if (delimiter != "") { delimiterqueue = new char[endpos]; memset(delimiterqueue, 0, endpos); } // first, skip to the first delimiter string. 
Anything between the // header and the first delimiter string is simply ignored (it's // usually a text message intended for non-mime clients) char c; bool foundBoundary = false; for (;;) { if (!mimeSource->getChar(&c)) { *eof = true; break; } if (c == '\n') ++*nlines; // if there is no delimiter, we just read until the end of the // file. if (!delimiterqueue) continue; delimiterqueue[delimiterpos++] = c; if (delimiterpos == endpos) delimiterpos = 0; if (compareStringToQueue(delimiterStr, delimiterqueue, delimiterpos, int(endpos))) { foundBoundary = true; break; } } delete [] delimiterqueue; delimiterqueue = 0; return foundBoundary; } // JFD: Things we do after finding a boundary (something like CRLF--somestring) // Need to see if this is a final one (with an additional -- at the end), // and need to check if it is immediately followed by another boundary // (in this case, we give up our final CRLF in its favour) inline void Binc::MimePart::postBoundaryProcessing(bool *eof, unsigned int *nlines, int *boundarysize, bool *foundendofpart) { // Read two more characters. This may be CRLF, it may be "--" and // it may be any other two characters. char a = '\0'; if (!mimeSource->getChar(&a)) *eof = true; if (a == '\n') ++*nlines; char b = '\0'; if (!mimeSource->getChar(&b)) *eof = true; if (b == '\n') ++*nlines; // If eof, we're done here if (*eof) return; // If we find two dashes after the boundary, then this is the end // of boundary marker, and we need to get 2 more chars if (a == '-' && b == '-') { *foundendofpart = true; *boundarysize += 2; if (!mimeSource->getChar(&a)) *eof = true; if (a == '\n') ++*nlines; if (!mimeSource->getChar(&b)) *eof = true; if (b == '\n') ++*nlines; } // If the boundary is followed by CRLF, we need to handle the // special case where another boundary line follows // immediately. In this case we consider the CRLF to be part of // the NEXT boundary. if (a == '\r' && b == '\n') { // Get 2 more if (!mimeSource->getChar(&a) || !mimeSource->getChar(&b)) { *eof = true; } else if (a == '-' && b == '-') { MPFDEB((stderr, "BINC: consecutive delimiters, giving up CRLF\n")); mimeSource->ungetChar(); mimeSource->ungetChar(); mimeSource->ungetChar(); mimeSource->ungetChar(); } else { // We unget the 2 chars, and keep our crlf (increasing our own size) MPFDEB((stderr, "BINC: keeping my CRLF\n")); mimeSource->ungetChar(); mimeSource->ungetChar(); *boundarysize += 2; } } else { // Boundary string not followed by CRLF, don't read more and let // others skip the rest. Note that this is allowed but quite uncommon mimeSource->ungetChar(); mimeSource->ungetChar(); } } void Binc::MimePart::parseMultipart(const string &boundary, const string &toboundary, bool *eof, unsigned int *nlines, int *boundarysize, bool *foundendofpart, unsigned int *bodylength, vector *members) { MPFDEB((stderr, "BINC: ParseMultipart: boundary [%s], toboundary[%s]\n", boundary.c_str(), toboundary.c_str())); using namespace ::Binc; unsigned int bodystartoffsetcrlf = mimeSource->getOffset(); // multipart parsing starts with skipping to the first // boundary. then we call parse() for all parts. the last parse() // command will return a code indicating that it found the last // boundary of this multipart. Note that the first boundary does // not have to start with CRLF. string delimiter = "--" + boundary; skipUntilBoundary(delimiter, nlines, eof); if (!eof) *boundarysize = int(delimiter.size()); postBoundaryProcessing(eof, nlines, boundarysize, foundendofpart); // read all mime parts. 
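// For reference, a sketch of the layout being consumed here (simplified,
// assumed example; "XYZ" is a made-up boundary token):
//
//   Content-Type: multipart/mixed; boundary="XYZ"
//
//   preamble text, ignored
//   --XYZ
//   headers and body of the first part
//   --XYZ
//   headers and body of the second part
//   --XYZ--
//   epilogue, ignored
//
// Each "--XYZ" delimiter starts a part handled by the loop below; the
// trailing "--XYZ--" form is the final boundary that terminates the
// multipart and makes doParseFull() return non-zero.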
if (!*foundendofpart && !*eof) { bool quit = false; do { MimePart m; // If parseFull returns != 0, then it encountered the multipart's // final boundary. int bsize = 0; if (m.doParseFull(mimeSource, boundary, bsize)) { quit = true; *boundarysize = bsize; } members->push_back(m); } while (!quit); } if (!*foundendofpart && !*eof) { // multipart parsing starts with skipping to the first // boundary. then we call parse() for all parts. the last parse() // command will return a code indicating that it found the last // boundary of this multipart. Note that the first boundary does // not have to start with CRLF. string delimiter = "\r\n--" + toboundary; skipUntilBoundary(delimiter, nlines, eof); if (!*eof) *boundarysize = int(delimiter.size()); postBoundaryProcessing(eof, nlines, boundarysize, foundendofpart); } // make sure bodylength doesn't overflow *bodylength = mimeSource->getOffset(); if (*bodylength >= bodystartoffsetcrlf) { *bodylength -= bodystartoffsetcrlf; if (*bodylength >= (unsigned int) *boundarysize) { *bodylength -= (unsigned int) *boundarysize; } else { *bodylength = 0; } } else { *bodylength = 0; } MPFDEB((stderr, "BINC: ParseMultipart return\n")); } void Binc::MimePart::parseSinglePart(const string &toboundary, int *boundarysize, unsigned int *nbodylines, unsigned int *nlines, bool *eof, bool *foundendofpart, unsigned int *bodylength) { MPFDEB((stderr, "BINC: parseSinglePart, boundary [%s]\n", toboundary.c_str())); using namespace ::Binc; unsigned int bodystartoffsetcrlf = mimeSource->getOffset(); // If toboundary is empty, then we read until the end of the // file. Otherwise we will read until we encounter toboundary. string _toboundary; if (toboundary != "") { _toboundary = "\r\n--"; _toboundary += toboundary; } // if (skipUntilBoundary(_toboundary, nlines, eof)) // *boundarysize = _toboundary.length(); char *boundaryqueue = 0; size_t endpos = _toboundary.length(); if (toboundary != "") { boundaryqueue = new char[endpos]; memset(boundaryqueue, 0, endpos); } *boundarysize = 0; const char *_toboundaryStr = _toboundary.c_str(); string line; bool toboundaryIsEmpty = (toboundary == ""); char c; string::size_type boundarypos = 0; while (mimeSource->getChar(&c)) { if (c == '\n') { ++*nbodylines; ++*nlines; } if (toboundaryIsEmpty) continue; // find boundary boundaryqueue[boundarypos++] = c; if (boundarypos == endpos) boundarypos = 0; if (compareStringToQueue(_toboundaryStr, boundaryqueue, boundarypos, int(endpos))) { *boundarysize = static_cast(_toboundary.length()); break; } } delete [] boundaryqueue; if (toboundary != "") { postBoundaryProcessing(eof, nlines, boundarysize, foundendofpart); } else { // Recoll: in the case of a multipart body with a null // boundary (probably illegal but wtf), eof was not set and // multipart went into a loop until bad alloc. 
*eof = true; } // make sure bodylength doesn't overflow *bodylength = mimeSource->getOffset(); if (*bodylength >= bodystartoffsetcrlf) { *bodylength -= bodystartoffsetcrlf; if (*bodylength >= (unsigned int) *boundarysize) { *bodylength -= (unsigned int) *boundarysize; } else { *bodylength = 0; } } else { *bodylength = 0; } MPFDEB((stderr, "BINC: parseSimple ret: bodylength %d, boundarysize %d\n", *bodylength, *boundarysize)); } //------------------------------------------------------------------------ int Binc::MimePart::doParseFull(MimeInputSource *ms, const string &toboundary, int &boundarysize) { MPFDEB((stderr, "BINC: doParsefull, toboundary[%s]\n", toboundary.c_str())); mimeSource = ms; headerstartoffsetcrlf = mimeSource->getOffset(); // Parse the header of this mime part. parseHeader(&h, &nlines); // Headerlength includes the seperating CRLF. Body starts after the // CRLF. headerlength = mimeSource->getOffset() - headerstartoffsetcrlf; bodystartoffsetcrlf = mimeSource->getOffset(); MPFDEB((stderr, "BINC: doParsefull, bodystartoffsetcrlf %d\n", bodystartoffsetcrlf)); bodylength = 0; // Determine the type of mime part by looking at fields in the // header. analyzeHeader(&h, &multipart, &messagerfc822, &subtype, &boundary); bool eof = false; bool foundendofpart = false; if (messagerfc822) { parseMessageRFC822(&members, &foundendofpart, &bodylength, &nbodylines, toboundary); } else if (multipart) { parseMultipart(boundary, toboundary, &eof, &nlines, &boundarysize, &foundendofpart, &bodylength, &members); } else { parseSinglePart(toboundary, &boundarysize, &nbodylines, &nlines, &eof, &foundendofpart, &bodylength); } MPFDEB((stderr, "BINC: doParsefull ret, toboundary[%s]\n", toboundary.c_str())); return (eof || foundendofpart) ? 1 : 0; } recoll-1.26.3/bincimapmime/AUTHORS0000644000175000017500000000444713303776057013532 00000000000000The following parties have participated in writing code or otherwise contributed to the Binc IMAP project: Author: Andreas Aardal Hanssen Several users have been very helpful with bug reports and suggestions, and the author is very grateful for their contributions. Some users have also gone to the extra effort of debugging the cause of a bug, or have found a way of implementing a feature, and have either provided a very good description of what is needed, or they have actually provided a patch that has been added to Binc IMAP. While adding extra value to the discussion around the discovery of a bug or the evaluation of a new feature, these contributors also take some load of the author's back, so they deserve extra thanks. In this list are also included people who have contributed with mirrors and translations of the web pages. Henry Baragar Jrgen Botz Charlie Brady Caskey Dickson Ketil Froyn Gary Gordon Marek Gutkowski Daniel James Zak Johnson Sergei Kolobov Rafal Kupka Eivind Kvedalen HIROSHIMA Naoki Greger Stolt Nilsen John Starks Peter Stuge Gerrit Pape Jeremy Rossi Dale Woolridge If you have contributed to the Binc IMAP project but are not listed here (this happens quite often), please send a mail to andreas-binc@bincimap.org and I'll add you to the list. recoll-1.26.3/bincimapmime/convert.cc0000644000175000017500000000672313533651561014445 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * convert.cc * * Description: * Implementation of miscellaneous convertion functions. 
* -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #include "convert.h" #include #ifndef NO_NAMESPACES using namespace ::std; using namespace Binc; #endif /* NO_NAMESPACES */ //------------------------------------------------------------------------ BincStream::BincStream(void) { } //------------------------------------------------------------------------ BincStream::~BincStream(void) { clear(); } //------------------------------------------------------------------------ string BincStream::popString(std::string::size_type size) { if (size > nstr.length()) size = nstr.length(); string tmp = nstr.substr(0, size); nstr = nstr.substr(size); return tmp; } //------------------------------------------------------------------------ char BincStream::popChar(void) { if (nstr.length() == 0) return '\0'; char c = nstr[0]; nstr = nstr.substr(1); return c; } //------------------------------------------------------------------------ void BincStream::unpopChar(char c) { nstr = c + nstr; } //------------------------------------------------------------------------ void BincStream::unpopStr(const string &s) { nstr = s + nstr; } //------------------------------------------------------------------------ const string &BincStream::str(void) const { return nstr; } //------------------------------------------------------------------------ void BincStream::clear(void) { nstr.clear(); } //------------------------------------------------------------------------ unsigned int BincStream::getSize(void) const { return (unsigned int) nstr.length(); } //------------------------------------------------------------------------ BincStream &BincStream::operator << (std::ostream&(*)(std::ostream&)) { nstr += "\r\n"; return *this; } //------------------------------------------------------------------------ BincStream &BincStream::operator << (const string &t) { nstr += t; return *this; } //------------------------------------------------------------------------ BincStream &BincStream::operator << (int t) { nstr += toString(t); return *this; } //------------------------------------------------------------------------ BincStream &BincStream::operator << (unsigned int t) { nstr += toString(t); return *this; } //------------------------------------------------------------------------ BincStream &BincStream::operator << (char t) { nstr += t; return *this; } recoll-1.26.3/bincimapmime/mime.h0000644000175000017500000001331513533651561013551 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * src/parsers/mime/mime.h * * Description: * Declaration of main mime parser components * 
-------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #ifndef mime_h_included #define mime_h_included #include #include #include #include namespace Binc { class MimeInputSource; //---------------------------------------------------------------------- class HeaderItem { private: mutable std::string key; mutable std::string value; public: inline const std::string &getKey(void) const { return key; } inline const std::string &getValue(void) const { return value; } //-- HeaderItem(void); HeaderItem(const std::string &key, const std::string &value); }; //---------------------------------------------------------------------- class Header { private: mutable std::vector content; public: bool getFirstHeader(const std::string &key, HeaderItem &dest) const; bool getAllHeaders(const std::string &key, std::vector &dest) const; void add(const std::string &name, const std::string &content); void clear(void); //-- Header(void); ~Header(void); }; //---------------------------------------------------------------------- class IODevice; class MimeDocument; class MimePart { protected: public: mutable bool multipart; mutable bool messagerfc822; mutable std::string subtype; mutable std::string boundary; mutable unsigned int headerstartoffsetcrlf; mutable unsigned int headerlength; mutable unsigned int bodystartoffsetcrlf; mutable unsigned int bodylength; mutable unsigned int nlines; mutable unsigned int nbodylines; mutable unsigned int size; public: enum FetchType { FetchBody, FetchHeader, FetchMime }; mutable Header h; mutable std::vector members; inline const std::string &getSubType(void) const { return subtype; } inline bool isMultipart(void) const { return multipart; } inline bool isMessageRFC822(void) const { return messagerfc822; } inline unsigned int getSize(void) const { return bodylength; } inline unsigned int getNofLines(void) const { return nlines; } inline unsigned int getNofBodyLines(void) const { return nbodylines; } inline unsigned int getBodyLength(void) const { return bodylength; } inline unsigned int getBodyStartOffset(void) const { return bodystartoffsetcrlf; } void printBody(Binc::IODevice &output, unsigned int startoffset, unsigned int length) const; void getBody(std::string& s, unsigned int startoffset, unsigned int length) const; virtual void clear(void); virtual int doParseOnlyHeader(MimeInputSource *ms, const std::string &toboundary); virtual int doParseFull(MimeInputSource *ms, const std::string &toboundary, int &boundarysize); MimePart(void); virtual ~MimePart(void); private: MimeInputSource *mimeSource; bool parseOneHeaderLine(Binc::Header *header, unsigned int *nlines); bool skipUntilBoundary(const std::string &delimiter, unsigned int *nlines, bool *eof); inline 
void postBoundaryProcessing(bool *eof, unsigned int *nlines, int *boundarysize, bool *foundendofpart); void parseMultipart(const std::string &boundary, const std::string &toboundary, bool *eof, unsigned int *nlines, int *boundarysize, bool *foundendofpart, unsigned int *bodylength, std::vector *members); void parseSinglePart(const std::string &toboundary, int *boundarysize, unsigned int *nbodylines, unsigned int *nlines, bool *eof, bool *foundendofpart, unsigned int *bodylength); void parseHeader(Binc::Header *header, unsigned int *nlines); void analyzeHeader(Binc::Header *header, bool *multipart, bool *messagerfc822, std::string *subtype, std::string *boundary); void parseMessageRFC822(std::vector *members, bool *foundendofpart, unsigned int *bodylength, unsigned int *nbodylines, const std::string &toboundary); }; //---------------------------------------------------------------------- class MimeDocument : public MimePart { public: MimeDocument(void); ~MimeDocument(void); void parseOnlyHeader(int fd); void parseFull(int fd); void parseOnlyHeader(std::istream& s); void parseFull(std::istream& s); void clear(void); bool isHeaderParsed(void) const { return headerIsParsed; } bool isAllParsed(void) const { return allIsParsed; } private: bool headerIsParsed; bool allIsParsed; MimeInputSource *doc_mimeSource; }; }; #endif recoll-1.26.3/bincimapmime/mime-utils.h0000644000175000017500000000310113533651561014677 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * mime.cc * * Description: * Implementation of main mime parser components * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
* -------------------------------------------------------------------- */ #ifndef mime_utils_h_included #define mime_utils_h_included #include #include #include #include #ifndef NO_NAMESPACES using namespace ::std; #endif /* NO_NAMESPACES */ inline bool compareStringToQueue(const char *s_in, char *bqueue, int pos, int size) { for (int i = 0; i < size; ++i) { if (s_in[i] != bqueue[pos]) return false; if (++pos == size) pos = 0; } return true; } #endif recoll-1.26.3/bincimapmime/mime-parseonlyheader.cc0000644000175000017500000001014213533651561017065 00000000000000/* -*- mode:c++;c-basic-offset:2 -*- */ /* -------------------------------------------------------------------- * Filename: * mime-parseonlyheader.cc * * Description: * Implementation of main mime parser components * -------------------------------------------------------------------- * Copyright 2002-2005 Andreas Aardal Hanssen * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * -------------------------------------------------------------------- */ #include "mime.h" #include "mime-utils.h" #include "mime-inputsource.h" #include "convert.h" #include #include #include #include #include #include #include #include #include #ifndef NO_NAMESPACES using namespace ::std; #endif /* NO_NAMESPACES */ //------------------------------------------------------------------------ void Binc::MimeDocument::parseOnlyHeader(int fd) { if (allIsParsed || headerIsParsed) return; headerIsParsed = true; delete doc_mimeSource; doc_mimeSource = new MimeInputSource(fd); headerstartoffsetcrlf = 0; headerlength = 0; bodystartoffsetcrlf = 0; bodylength = 0; messagerfc822 = false; multipart = false; nlines = 0; nbodylines = 0; doParseOnlyHeader(doc_mimeSource, ""); } void Binc::MimeDocument::parseOnlyHeader(istream& s) { if (allIsParsed || headerIsParsed) return; headerIsParsed = true; delete doc_mimeSource; doc_mimeSource = new MimeInputSourceStream(s); headerstartoffsetcrlf = 0; headerlength = 0; bodystartoffsetcrlf = 0; bodylength = 0; messagerfc822 = false; multipart = false; nlines = 0; nbodylines = 0; doParseOnlyHeader(doc_mimeSource, ""); } //------------------------------------------------------------------------ int Binc::MimePart::doParseOnlyHeader(MimeInputSource *ms, const string &toboundary) { mimeSource = ms; string name; string content; char cqueue[4]; memset(cqueue, 0, sizeof(cqueue)); headerstartoffsetcrlf = mimeSource->getOffset(); bool quit = false; char c = '\0'; while (!quit) { // read name while (1) { if (!mimeSource->getChar(&c)) { quit = true; break; } if (c == '\n') ++nlines; if (c == ':') break; if (c == '\n') { for (int i = int(name.length()) - 1; i >= 0; --i) mimeSource->ungetChar(); quit = true; name.clear(); break; } name += c; if (name.length() == 2 && name.substr(0, 2) == "\r\n") { name.clear(); quit = true; break; } } if (name.length() == 1 && name[0] == '\r') { name.clear(); break; } if 
(quit) break; while (!quit) { if (!mimeSource->getChar(&c)) { quit = true; break; } if (c == '\n') ++nlines; for (int i = 0; i < 3; ++i) cqueue[i] = cqueue[i + 1]; cqueue[3] = c; if (strncmp(cqueue, "\r\n\r\n", 4) == 0) { quit = true; break; } if (cqueue[2] == '\n') { // guess the mime rfc says what can not appear on the beginning // of a line. if (!isspace(cqueue[3])) { if (content.length() > 2) content.resize(content.length() - 2); trim(content); h.add(name, content); name = c; content.clear(); break; } } content += c; } } if (name != "") { if (content.length() > 2) content.resize(content.length() - 2); h.add(name, content); } headerlength = mimeSource->getOffset() - headerstartoffsetcrlf; return 1; } recoll-1.26.3/desktop/0000755000175000017500000000000013570165410011537 500000000000000recoll-1.26.3/desktop/recoll.appdata.xml0000644000175000017500000000173113303776057015105 00000000000000 recoll.desktop CC0-1.0 GPL-2.0+ Recoll

Find documents by specifying search terms

Recoll finds keywords inside document text as well as in file names.

  • It can search most document formats.
  • It can reach any storage place: files, archive members, email attachments, transparently handling decompression.
  • One click will open the document inside a native editor or display an even quicker text preview.
http://www.recoll.org/files/recoll-mainwin-h-1248x702.png http://www.recoll.org/ contact@recoll.org recoll-1.26.3/desktop/recoll_index_on_ac.sh0000755000175000017500000000404213303776057015635 00000000000000#!/bin/sh # This is a shell script that starts and stops the recollindex daemon # depending on whether or not the power supply is plugged in. It should be # called from the file ~/.config/autostart/recollindex.desktop. # # That is: make the script executable (chmod +x) and replace in # recollindex.desk the line: # Exec=recollindex -w 60 -m # With # Exec=/path/to/recoll_index_on_ac.sh # # # By: The Doctor (drwho at virtadpt dot net) # License: GPLv3 # # Modifications by J.F Dockes # - replaced "acpi" usage with "on_ac_power" which seems to be both # more common and more universal. # - Changed the default to be that we run recollindex if we can't determine # power status (ie: on_ac_power not installed or not working: we're most # probably not running on a laptop). INDEXER="recollindex -w 60 -m" ACPI=`which on_ac_power` # If the on_ac_power script isn't installed, warn, but run anyway. Maybe # this is not a laptop or not linux. if test "x$ACPI" = "x" ; then echo "on_ac_power utility not found. Starting recollindex anyway." fi while true; do # Determine whether or not the power supply is plugged in. if test "x$ACPI" != "x" ; then on_ac_power STATUS=$? else STATUS=0 fi # Get the PID of the indexing daemon. if test -f ~/.recoll/index.pid ; then PID=`cat ~/.recoll/index.pid` # Make sure that this is recollindex running. pid could have # been reallocated ps ax | egrep "^[ \t]*$PID " | grep -q recollindex || PID="" fi # echo "Recollindex pid is $PID" if test $STATUS -eq 1 ; then # The power supply is not plugged in. See if the indexing daemon is # running, and if it is, kill it. The indexing daemon will not be # started. if test x"$PID" != x; then kill $PID fi else # The power supply is plugged in or we just don't know. # See if the indexing daemon is running, and if it's not start it. if test -z "$PID" ; then $INDEXER fi fi # Go to sleep for a while. 
sleep 120 continue done recoll-1.26.3/desktop/recoll-searchgui.desktop0000644000175000017500000000057613533651561016310 00000000000000[Desktop Entry] Categories=Qt;Utility;Filesystem;Database; Comment=Find documents by specifying search terms Comment[ru]=Поиск документов по заданным условиям Exec=recoll GenericName=Local Text Search GenericName[ru]=Локальный текстовый поиск Icon=recoll Name=Recoll Terminal=false Type=Application Keywords=Search;Full Text; recoll-1.26.3/desktop/recoll.png0000644000175000017500000000071113303776057013455 00000000000000[binary PNG image data omitted] recoll-1.26.3/desktop/recoll.xcf0000644000175000017500000000736113303776057013451 00000000000000[binary GIMP XCF image data omitted] recoll-1.26.3/testmains/0000755000175000017500000000000013570165410012075 500000000000000recoll-1.26.3/testmains/Makefile.am0000644000175000017500000000271513566424763014064 00000000000000CXXFLAGS ?= @CXXFLAGS@ LIBXAPIAN=@LIBXAPIAN@ XAPIANCXXFLAGS=@XAPIANCXXFLAGS@ XSLT_CFLAGS=@XSLT_CFLAGS@ XSLT_LINKADD=@XSLT_LINKADD@ LIBICONV=@LIBICONV@ INCICONV=@INCICONV@ LIBFAM = @LIBFAM@ RCLLIBVERSION=@RCLLIBVERSION@ X_CFLAGS=@X_CFLAGS@ X_PRE_LIBS=@X_PRE_LIBS@ X_LIBS=@X_LIBS@ X_EXTRA_LIBS=@X_EXTRA_LIBS@ X_LIBX11=@X_LIBX11@ DEFS=@DEFS@ COMMONCPPFLAGS = -I. 
\ -I$(top_srcdir)/aspell \ -I$(top_srcdir)/bincimapmime \ -I$(top_srcdir)/common \ -I$(top_srcdir)/index \ -I$(top_srcdir)/internfile \ -I$(top_srcdir)/rcldb \ -I$(top_srcdir)/unac \ -I$(top_srcdir)/utils \ -I$(top_srcdir)/xaposix \ -DBUILDING_RECOLL AM_CPPFLAGS = -Wall -Wno-unused -std=c++11 \ $(COMMONCPPFLAGS) \ $(INCICONV) \ $(XAPIANCXXFLAGS) \ $(XSLT_CFLAGS) \ $(X_CFLAGS) \ -DRECOLL_DATADIR=\"${pkgdatadir}\" \ -DREADFILE_ENABLE_ZLIB -DREADFILE_ENABLE_MINIZ -DREADFILE_ENABLE_MD5 \ -D_GNU_SOURCE \ $(DEFS) noinst_PROGRAMS = textsplit utf8iter fstreewalk rclconfig hldata unac mbox textsplit_SOURCES = trtextsplit.cpp textsplit_LDADD = ../librecoll.la utf8iter_SOURCES = trutf8iter.cpp utf8iter_LDADD = ../librecoll.la fstreewalk_SOURCES = trfstreewalk.cpp fstreewalk_LDADD = ../librecoll.la rclconfig_SOURCES = trrclconfig.cpp rclconfig_LDADD = ../librecoll.la hldata_SOURCES = trhldata.cpp hldata_LDADD = ../librecoll.la unac_SOURCES = trunac.cpp unac_LDADD = ../librecoll.la mbox_SOURCES = trmbox.cpp mbox_LDADD = ../librecoll.la recoll-1.26.3/configure0000755000175000017500000247341213570165162011736 00000000000000#! /bin/sh # Guess values for system-dependent variables and create Makefiles. # Generated by GNU Autoconf 2.69 for Recoll 1.26.3. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. # # # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. 
# (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # Use a proper internal environment variable to ensure we don't fall # into an infinite loop, continuously re-executing ourselves. if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then _as_can_reexec=no; export _as_can_reexec; # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 as_fn_exit 255 fi # We don't want this to propagate to other subprocesses. { _as_can_reexec=; unset _as_can_reexec;} if test "x$CONFIG_SHELL" = x; then as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which # is contrary to our usage. Disable this feature. alias -g '\${1+\"\$@\"}'='\"\$@\"' setopt NO_GLOB_SUBST else case \`(set -o) 2>/dev/null\` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi " as_required="as_fn_return () { (exit \$1); } as_fn_success () { as_fn_return 0; } as_fn_failure () { as_fn_return 1; } as_fn_ret_success () { return 0; } as_fn_ret_failure () { return 1; } exitcode=0 as_fn_success || { exitcode=1; echo as_fn_success failed.; } as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : else exitcode=1; echo positional parameters were not saved. 
fi test x\$exitcode = x0 || exit 1 test -x / || exit 1" as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO PATH=/empty FPATH=/empty; export PATH FPATH test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 test \$(( 1 + 1 )) = 2 || exit 1" if (eval "$as_required") 2>/dev/null; then : as_have_required=yes else as_have_required=no fi if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR as_found=false for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. as_found=: case $as_dir in #( /*) for as_base in sh bash ksh sh5; do # Try only shells that exist, to save several forks. as_shell=$as_dir/$as_base if { test -f "$as_shell" || test -f "$as_shell.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : CONFIG_SHELL=$as_shell as_have_required=yes if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : break 2 fi fi done;; esac as_found=false done $as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : CONFIG_SHELL=$SHELL as_have_required=yes fi; } IFS=$as_save_IFS if test "x$CONFIG_SHELL" != x; then : export CONFIG_SHELL # We cannot yet assume a decent shell, so we have to provide a # neutralization value for shells without unset; and this also # works around shells that cannot unset nonexistent variables. # Preserve -v and -x to the replacement shell. BASH_ENV=/dev/null ENV=/dev/null (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV case $- in # (((( *v*x* | *x*v* ) as_opts=-vx ;; *v* ) as_opts=-v ;; *x* ) as_opts=-x ;; * ) as_opts= ;; esac exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"} # Admittedly, this is quite paranoid, since all the known shells bail # out after a failed `exec'. $as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2 exit 255 fi if test x$as_have_required = xno; then : $as_echo "$0: This script requires a shell more modern than all" $as_echo "$0: the shells that I found on your system." if test x${ZSH_VERSION+set} = xset ; then $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" $as_echo "$0: be upgraded to zsh 4.3.4 or later." else $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, $0: including any error possibly output before this $0: message. Then install a modern shell, or manually run $0: the script under such a shell if you do have one." fi exit 1 fi fi fi SHELL=${CONFIG_SHELL-/bin/sh} export SHELL # Unset more variables known to interfere with behavior of common tools. CLICOLOR_FORCE= GREP_OPTIONS= unset CLICOLOR_FORCE GREP_OPTIONS ## --------------------- ## ## M4sh Shell Functions. 
## ## --------------------- ## # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? -eq 1` } fi # as_fn_arith # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 
2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits as_lineno_1=$LINENO as_lineno_1a=$LINENO as_lineno_2=$LINENO as_lineno_2a=$LINENO eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) sed -n ' p /[$]LINENO/= ' <$as_myself | sed ' s/[$]LINENO.*/&-/ t lineno b :lineno N :loop s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ t loop s/-\n.*// ' >$as_me.lineno && chmod +x "$as_me.lineno" || { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } # If we had to re-execute with $CONFIG_SHELL, we're ensured to have # already done that, so ensure we don't try to do so again and fall # in an infinite loop. This has already happened in practice. _as_can_reexec=no; export _as_can_reexec # Don't try to exec as it changes $[0], causing all sort of problems # (the dirname of $[0] is not the place where we might find the # original and so on. Autoconf is especially sensitive to this). . "./$as_me.lineno" # Exit status is that of the last command. exit } ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" SHELL=${CONFIG_SHELL-/bin/sh} test -n "$DJDIR" || exec 7<&0 &1 # Name of the host. # hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` # # Initializations. # ac_default_prefix=/usr/local ac_clean_files= ac_config_libobj_dir=. LIBOBJS= cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= # Identity of this package. PACKAGE_NAME='Recoll' PACKAGE_TARNAME='recoll' PACKAGE_VERSION='1.26.3' PACKAGE_STRING='Recoll 1.26.3' PACKAGE_BUGREPORT='' PACKAGE_URL='' ac_unique_file="index/recollindex.cpp" # Factoring default headers for most tests. 
ac_includes_default="\ #include #ifdef HAVE_SYS_TYPES_H # include #endif #ifdef HAVE_SYS_STAT_H # include #endif #ifdef STDC_HEADERS # include # include #else # ifdef HAVE_STDLIB_H # include # endif #endif #ifdef HAVE_STRING_H # if !defined STDC_HEADERS && defined HAVE_MEMORY_H # include # endif # include #endif #ifdef HAVE_STRINGS_H # include #endif #ifdef HAVE_INTTYPES_H # include #endif #ifdef HAVE_STDINT_H # include #endif #ifdef HAVE_UNISTD_H # include #endif" ac_subst_vars='am__EXEEXT_FALSE am__EXEEXT_TRUE LTLIBOBJS LIBOBJS XSLT_LINKADD XSLT_CFLAGS RCLLIBVERSION RCLVERSION LIBQZEITGEIST QMAKE_DISABLE_ZEITGEIST QMAKE_ENABLE_ZEITGEIST QMAKE_DISABLE_WEBENGINE QMAKE_ENABLE_WEBENGINE QMAKE_DISABLE_WEBKIT QMAKE_ENABLE_WEBKIT XAPIANCXXFLAGS QTGUI QMAKE LIBFAM LIBXAPIANSTATICEXTRA LIBXAPIANDIR LIBXAPIAN INCICONV X_LIBX11 RECOLL_DATADIR X_EXTRA_LIBS X_LIBS X_PRE_LIBS X_CFLAGS XMKMF QMAKEPATH MAKECMDLINE_FALSE MAKECMDLINE_TRUE MAKEQT_FALSE MAKEQT_TRUE MAKEUSERDOC_FALSE MAKEUSERDOC_TRUE MAKEXADUMP_FALSE MAKEXADUMP_TRUE XSLT_CONFIG0 XAPIAN_CONFIG2 XAPIAN_CONFIG1 XAPIAN_CONFIG0 MAKEPYTHONCHM_FALSE MAKEPYTHONCHM_TRUE MAKEPYTHON_FALSE MAKEPYTHON_TRUE COND_TESTMAINS_FALSE COND_TESTMAINS_TRUE NOTHREADS_FALSE NOTHREADS_TRUE aspellProg fileProg YFLAGS YACC CXXCPP am__fastdepCXX_FALSE am__fastdepCXX_TRUE CXXDEPMODE ac_ct_CXX CXXFLAGS CXX LTLIBICONV LIBICONV CPP LT_SYS_LIBRARY_PATH OTOOL64 OTOOL LIPO NMEDIT DSYMUTIL MANIFEST_TOOL RANLIB ac_ct_AR AR DLLTOOL OBJDUMP LN_S NM ac_ct_DUMPBIN DUMPBIN LD FGREP EGREP GREP SED am__fastdepCC_FALSE am__fastdepCC_TRUE CCDEPMODE am__nodep AMDEPBACKSLASH AMDEP_FALSE AMDEP_TRUE am__quote am__include DEPDIR OBJEXT EXEEXT ac_ct_CC CPPFLAGS LDFLAGS CFLAGS CC host_os host_vendor host_cpu host build_os build_vendor build_cpu build LIBTOOL AM_BACKSLASH AM_DEFAULT_VERBOSITY AM_DEFAULT_V AM_V am__untar am__tar AMTAR am__leading_dot SET_MAKE AWK mkdir_p MKDIR_P INSTALL_STRIP_PROGRAM STRIP install_sh MAKEINFO AUTOHEADER AUTOMAKE AUTOCONF ACLOCAL VERSION PACKAGE CYGPATH_W am__isrc INSTALL_DATA INSTALL_SCRIPT INSTALL_PROGRAM target_alias host_alias build_alias LIBS ECHO_T ECHO_N ECHO_C DEFS mandir localedir libdir psdir pdfdir dvidir htmldir infodir docdir oldincludedir includedir runstatedir localstatedir sharedstatedir sysconfdir datadir datarootdir libexecdir sbindir bindir program_transform_name prefix exec_prefix PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION PACKAGE_TARNAME PACKAGE_NAME PATH_SEPARATOR SHELL' ac_subst_files='' ac_user_opts=' enable_option_checking enable_silent_rules enable_static enable_shared with_pic enable_fast_install with_aix_soname enable_dependency_tracking with_gnu_ld with_sysroot enable_libtool_lock enable_rpath with_libiconv_prefix enable_largefile enable_posix_spawn with_file_command with_aspell with_inotify with_fam enable_idxthreads enable_testmains enable_camelcase enable_python_module enable_python_chm enable_xadump enable_userdoc enable_qtgui enable_recollq enable_webkit enable_webengine with_qzeitgeist enable_x11mon with_x ' ac_precious_vars='build_alias host_alias target_alias CC CFLAGS LDFLAGS LIBS CPPFLAGS LT_SYS_LIBRARY_PATH CPP CXX CXXFLAGS CCC CXXCPP YACC YFLAGS XMKMF' # Initialize some variables set by options. ac_init_help= ac_init_version=false ac_unrecognized_opts= ac_unrecognized_sep= # The variables have the same names as the options, with # dashes changed to underlines. 
cache_file=/dev/null exec_prefix=NONE no_create= no_recursion= prefix=NONE program_prefix=NONE program_suffix=NONE program_transform_name=s,x,x, silent= site= srcdir= verbose= x_includes=NONE x_libraries=NONE # Installation directory options. # These are left unexpanded so users can "make install exec_prefix=/foo" # and all the variables that are supposed to be based on exec_prefix # by default will actually change. # Use braces instead of parens because sh, perl, etc. also accept them. # (The list follows the same order as the GNU Coding Standards.) bindir='${exec_prefix}/bin' sbindir='${exec_prefix}/sbin' libexecdir='${exec_prefix}/libexec' datarootdir='${prefix}/share' datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' infodir='${datarootdir}/info' htmldir='${docdir}' dvidir='${docdir}' pdfdir='${docdir}' psdir='${docdir}' libdir='${exec_prefix}/lib' localedir='${datarootdir}/locale' mandir='${datarootdir}/man' ac_prev= ac_dashdash= for ac_option do # If the previous option needs an argument, assign it. if test -n "$ac_prev"; then eval $ac_prev=\$ac_option ac_prev= continue fi case $ac_option in *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; *=) ac_optarg= ;; *) ac_optarg=yes ;; esac # Accept the important Cygnus configure options, so we can diagnose typos. case $ac_dashdash$ac_option in --) ac_dashdash=yes ;; -bindir | --bindir | --bindi | --bind | --bin | --bi) ac_prev=bindir ;; -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) bindir=$ac_optarg ;; -build | --build | --buil | --bui | --bu) ac_prev=build_alias ;; -build=* | --build=* | --buil=* | --bui=* | --bu=*) build_alias=$ac_optarg ;; -cache-file | --cache-file | --cache-fil | --cache-fi \ | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) ac_prev=cache_file ;; -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) cache_file=$ac_optarg ;; --config-cache | -C) cache_file=config.cache ;; -datadir | --datadir | --datadi | --datad) ac_prev=datadir ;; -datadir=* | --datadir=* | --datadi=* | --datad=*) datadir=$ac_optarg ;; -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ | --dataroo | --dataro | --datar) ac_prev=datarootdir ;; -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) datarootdir=$ac_optarg ;; -disable-* | --disable-*) ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? 
"invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=no ;; -docdir | --docdir | --docdi | --doc | --do) ac_prev=docdir ;; -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) docdir=$ac_optarg ;; -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) ac_prev=dvidir ;; -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) dvidir=$ac_optarg ;; -enable-* | --enable-*) ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid feature name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "enable_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval enable_$ac_useropt=\$ac_optarg ;; -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ | --exec | --exe | --ex) ac_prev=exec_prefix ;; -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ | --exec=* | --exe=* | --ex=*) exec_prefix=$ac_optarg ;; -gas | --gas | --ga | --g) # Obsolete; use --with-gas. with_gas=yes ;; -help | --help | --hel | --he | -h) ac_init_help=long ;; -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) ac_init_help=recursive ;; -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) ac_init_help=short ;; -host | --host | --hos | --ho) ac_prev=host_alias ;; -host=* | --host=* | --hos=* | --ho=*) host_alias=$ac_optarg ;; -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) ac_prev=htmldir ;; -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ | --ht=*) htmldir=$ac_optarg ;; -includedir | --includedir | --includedi | --included | --include \ | --includ | --inclu | --incl | --inc) ac_prev=includedir ;; -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ | --includ=* | --inclu=* | --incl=* | --inc=*) includedir=$ac_optarg ;; -infodir | --infodir | --infodi | --infod | --info | --inf) ac_prev=infodir ;; -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) infodir=$ac_optarg ;; -libdir | --libdir | --libdi | --libd) ac_prev=libdir ;; -libdir=* | --libdir=* | --libdi=* | --libd=*) libdir=$ac_optarg ;; -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ | --libexe | --libex | --libe) ac_prev=libexecdir ;; -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ | --libexe=* | --libex=* | --libe=*) libexecdir=$ac_optarg ;; -localedir | --localedir | --localedi | --localed | --locale) ac_prev=localedir ;; -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) localedir=$ac_optarg ;; -localstatedir | --localstatedir | --localstatedi | --localstated \ | --localstate | --localstat | --localsta | --localst | --locals) ac_prev=localstatedir ;; -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) localstatedir=$ac_optarg ;; -mandir | --mandir | --mandi | --mand | --man | --ma | --m) 
ac_prev=mandir ;; -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) mandir=$ac_optarg ;; -nfp | --nfp | --nf) # Obsolete; use --without-fp. with_fp=no ;; -no-create | --no-create | --no-creat | --no-crea | --no-cre \ | --no-cr | --no-c | -n) no_create=yes ;; -no-recursion | --no-recursion | --no-recursio | --no-recursi \ | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) no_recursion=yes ;; -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ | --oldin | --oldi | --old | --ol | --o) ac_prev=oldincludedir ;; -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) oldincludedir=$ac_optarg ;; -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) ac_prev=prefix ;; -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) prefix=$ac_optarg ;; -program-prefix | --program-prefix | --program-prefi | --program-pref \ | --program-pre | --program-pr | --program-p) ac_prev=program_prefix ;; -program-prefix=* | --program-prefix=* | --program-prefi=* \ | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) program_prefix=$ac_optarg ;; -program-suffix | --program-suffix | --program-suffi | --program-suff \ | --program-suf | --program-su | --program-s) ac_prev=program_suffix ;; -program-suffix=* | --program-suffix=* | --program-suffi=* \ | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) program_suffix=$ac_optarg ;; -program-transform-name | --program-transform-name \ | --program-transform-nam | --program-transform-na \ | --program-transform-n | --program-transform- \ | --program-transform | --program-transfor \ | --program-transfo | --program-transf \ | --program-trans | --program-tran \ | --progr-tra | --program-tr | --program-t) ac_prev=program_transform_name ;; -program-transform-name=* | --program-transform-name=* \ | --program-transform-nam=* | --program-transform-na=* \ | --program-transform-n=* | --program-transform-=* \ | --program-transform=* | --program-transfor=* \ | --program-transfo=* | --program-transf=* \ | --program-trans=* | --program-tran=* \ | --progr-tra=* | --program-tr=* | --program-t=*) program_transform_name=$ac_optarg ;; -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) ac_prev=pdfdir ;; -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) pdfdir=$ac_optarg ;; -psdir | --psdir | --psdi | --psd | --ps) ac_prev=psdir ;; -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) psdir=$ac_optarg ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) silent=yes ;; -runstatedir | --runstatedir | --runstatedi | --runstated \ | --runstate | --runstat | --runsta | --runst | --runs \ | --run | --ru | --r) ac_prev=runstatedir ;; -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ | --run=* | --ru=* | --r=*) runstatedir=$ac_optarg ;; -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ | --sbi=* | --sb=*) sbindir=$ac_optarg ;; -sharedstatedir | --sharedstatedir | --sharedstatedi \ | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ | --sharedst | --shareds | --shared | --share | --shar \ | --sha | --sh) 
ac_prev=sharedstatedir ;; -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ | --sha=* | --sh=*) sharedstatedir=$ac_optarg ;; -site | --site | --sit) ac_prev=site ;; -site=* | --site=* | --sit=*) site=$ac_optarg ;; -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) ac_prev=srcdir ;; -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) srcdir=$ac_optarg ;; -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ | --syscon | --sysco | --sysc | --sys | --sy) ac_prev=sysconfdir ;; -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) sysconfdir=$ac_optarg ;; -target | --target | --targe | --targ | --tar | --ta | --t) ac_prev=target_alias ;; -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) target_alias=$ac_optarg ;; -v | -verbose | --verbose | --verbos | --verbo | --verb) verbose=yes ;; -version | --version | --versio | --versi | --vers | -V) ac_init_version=: ;; -with-* | --with-*) ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=\$ac_optarg ;; -without-* | --without-*) ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && as_fn_error $? "invalid package name: $ac_useropt" ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in *" "with_$ac_useropt" "*) ;; *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" ac_unrecognized_sep=', ';; esac eval with_$ac_useropt=no ;; --x) # Obsolete; use --with-x. with_x=yes ;; -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ | --x-incl | --x-inc | --x-in | --x-i) ac_prev=x_includes ;; -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) x_includes=$ac_optarg ;; -x-libraries | --x-libraries | --x-librarie | --x-librari \ | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) ac_prev=x_libraries ;; -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; -*) as_fn_error $? "unrecognized option: \`$ac_option' Try \`$0 --help' for more information" ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. case $ac_envvar in #( '' | [0-9]* | *[!_$as_cr_alnum]* ) as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; esac eval $ac_envvar=\$ac_optarg export $ac_envvar ;; *) # FIXME: should be removed in autoconf 3.0. 
$as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" ;; esac done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` as_fn_error $? "missing argument to $ac_option" fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi # Check all directory arguments for consistency. for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. case $ac_val in */ ) ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` eval $ac_var=\$ac_val;; esac # Be sure to have absolute directory names. case $ac_val in [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" done # There might be people who depend on the old broken behavior: `$host' # used to hold the argument of --host etc. # FIXME: To remove some day. build=$build_alias host=$host_alias target=$target_alias # FIXME: To remove some day. if test "x$host_alias" != x; then if test "x$build_alias" = x; then cross_compiling=maybe elif test "x$build_alias" != "x$host_alias"; then cross_compiling=yes fi fi ac_tool_prefix= test -n "$host_alias" && ac_tool_prefix=$host_alias- test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || as_fn_error $? "working directory cannot be determined" test "X$ac_ls_di" = "X$ac_pwd_ls_di" || as_fn_error $? "pwd does not report name of working directory" # Find the source files, if location was not specified. if test -z "$srcdir"; then ac_srcdir_defaulted=yes # Try the directory containing this script, then the parent directory. ac_confdir=`$as_dirname -- "$as_myself" || $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_myself" : 'X\(//\)[^/]' \| \ X"$as_myself" : 'X\(//\)$' \| \ X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_myself" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` srcdir=$ac_confdir if test ! -r "$srcdir/$ac_unique_file"; then srcdir=.. fi else ac_srcdir_defaulted=no fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then srcdir=. fi # Remove unnecessary trailing slashes from srcdir. # Double slashes in file names in object file debugging info # mess up M-x gdb in Emacs. 
case $srcdir in */) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; esac for ac_var in $ac_precious_vars; do eval ac_env_${ac_var}_set=\${${ac_var}+set} eval ac_env_${ac_var}_value=\$${ac_var} eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} eval ac_cv_env_${ac_var}_value=\$${ac_var} done # # Report the --help message. # if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF \`configure' configures Recoll 1.26.3 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... To assign environment variables (e.g., CC, CFLAGS...), specify them as VAR=VALUE. See below for descriptions of some of the useful variables. Defaults for the options are specified in brackets. Configuration: -h, --help display this help and exit --help=short display options specific to this package --help=recursive display the short help of all the included packages -V, --version display version information and exit -q, --quiet, --silent do not print \`checking ...' messages --cache-file=FILE cache test results in FILE [disabled] -C, --config-cache alias for \`--cache-file=config.cache' -n, --no-create do not create output files --srcdir=DIR find the sources in DIR [configure dir or \`..'] Installation directories: --prefix=PREFIX install architecture-independent files in PREFIX [$ac_default_prefix] --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX [PREFIX] By default, \`make install' will install all the files in \`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify an installation prefix other than \`$ac_default_prefix' using \`--prefix', for instance \`--prefix=\$HOME'. For better control, use the options below. 
Fine tuning of the installation directories: --bindir=DIR user executables [EPREFIX/bin] --sbindir=DIR system admin executables [EPREFIX/sbin] --libexecdir=DIR program executables [EPREFIX/libexec] --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] --datarootdir=DIR read-only arch.-independent data root [PREFIX/share] --datadir=DIR read-only architecture-independent data [DATAROOTDIR] --infodir=DIR info documentation [DATAROOTDIR/info] --localedir=DIR locale-dependent data [DATAROOTDIR/locale] --mandir=DIR man documentation [DATAROOTDIR/man] --docdir=DIR documentation root [DATAROOTDIR/doc/recoll] --htmldir=DIR html documentation [DOCDIR] --dvidir=DIR dvi documentation [DOCDIR] --pdfdir=DIR pdf documentation [DOCDIR] --psdir=DIR ps documentation [DOCDIR] _ACEOF cat <<\_ACEOF Program names: --program-prefix=PREFIX prepend PREFIX to installed program names --program-suffix=SUFFIX append SUFFIX to installed program names --program-transform-name=PROGRAM run sed PROGRAM on installed program names X features: --x-includes=DIR X include files are in DIR --x-libraries=DIR X library files are in DIR System types: --build=BUILD configure for building on BUILD [guessed] --host=HOST cross-compile to build programs to run on HOST [BUILD] _ACEOF fi if test -n "$ac_init_help"; then case $ac_init_help in short | recursive ) echo "Configuration of Recoll 1.26.3:";; esac cat <<\_ACEOF Optional Features: --disable-option-checking ignore unrecognized --enable/--with options --disable-FEATURE do not include FEATURE (same as --enable-FEATURE=no) --enable-FEATURE[=ARG] include FEATURE [ARG=yes] --enable-silent-rules less verbose build output (undo: "make V=1") --disable-silent-rules verbose build output (undo: "make V=0") --enable-static[=PKGS] build static libraries [default=no] --enable-shared[=PKGS] build shared libraries [default=yes] --enable-fast-install[=PKGS] optimize for fast installation [default=yes] --enable-dependency-tracking do not reject slow dependency extractors --disable-dependency-tracking speeds up one-time build --disable-libtool-lock avoid locking (might break parallel builds) --disable-rpath do not hardcode runtime library paths --disable-largefile omit support for large files --enable-posix_spawn Enable the use of posix_spawn(). --enable-idxthreads Enable multithread indexing. --disable-idxthreads Disable multithread indexing. --enable-testmains Enable building small test drivers. These are not unit tests. --enable-camelcase Enable splitting camelCase words. This is not enabled by default as this makes phrase matches more difficult: you need to use matching case in the phrase query to get a match. Ie querying for "MySQL manual" and "my sql manual" are the same, but not the same as "mysql manual" (in phrases only and you could raise the phrase slack to get a match). --disable-python-module Do not build the Python module. --disable-python-chm Do not build the libchm Python wrapper. --enable-xadump Enable building the xadump low level Xapian access program. --disable-userdoc Disable building the user manual. (Avoids the need for docbook xml/xsl files and TeX tools. 
--disable-qtgui Disable the QT-based graphical user interface. --enable-recollq Enable building the recollq command line query tool (recoll -t without need for Qt). This is done by default if --disable-qtgui is set but this option enables forcing it. --disable-webkit Disable use of qt-webkit (only meaningful if qtgui is enabled). --enable-webengine Enable use of qt-webengine (only meaningful if qtgui is enabled), in place or qt-webkit. --disable-x11mon Disable recollindex support for X11 session monitoring. Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) --with-pic[=PKGS] try to use only PIC/non-PIC objects [default=use both] --with-aix-soname=aix|svr4|both shared library versioning (aka "SONAME") variant to provide on AIX, [default=aix]. --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-sysroot[=DIR] Search for dependent libraries within DIR (or the compiler's sysroot if not specified). --with-gnu-ld assume the C compiler uses GNU ld [default=no] --with-libiconv-prefix[=DIR] search for libiconv in DIR/include and DIR/lib --without-libiconv-prefix don't search for libiconv in includedir and libdir --with-file-command Specify version of 'file' command (ie: --with-file-command=/usr/local/bin/file) --without-aspell Disable use of aspell spelling package to provide term expansion to other spellings --with-inotify Use inotify for almost real time indexing of modified files (the default is yes on Linux). --with-fam Use File Alteration Monitor for almost real time indexing of modified files. Give the fam/gamin library as argument (ie: /usr/lib/libfam.so) if configure does not find the right one. --with-qzeitgeist Enable the use of the qzeitgeist library to send zeitgeist events. --with-x use the X Window System Some influential environment variables: CC C compiler command CFLAGS C compiler flags LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if you have headers in a nonstandard directory LT_SYS_LIBRARY_PATH User-defined run-time library search path. CPP C preprocessor CXX C++ compiler command CXXFLAGS C++ compiler flags CXXCPP C++ preprocessor YACC The `Yet Another Compiler Compiler' implementation to use. Defaults to the first program found out of: `bison -y', `byacc', `yacc'. YFLAGS The list of arguments that will be passed by default to $YACC. This script will default YFLAGS to the empty string to avoid a default value of `-d' given by some make applications. XMKMF Path to xmkmf, Makefile generator for X Window System Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. Report bugs to the package provider. _ACEOF ac_status=$? fi if test "$ac_init_help" = "recursive"; then # If there are subdirs, report their specific --help. for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue test -d "$ac_dir" || { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || continue ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. 
ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix cd "$ac_dir" || { ac_status=$?; continue; } # Check for guested configure. if test -f "$ac_srcdir/configure.gnu"; then echo && $SHELL "$ac_srcdir/configure.gnu" --help=recursive elif test -f "$ac_srcdir/configure"; then echo && $SHELL "$ac_srcdir/configure" --help=recursive else $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 fi || ac_status=$? cd "$ac_pwd" || { ac_status=$?; break; } done fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF Recoll configure 1.26.3 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi ## ------------------------ ## ## Autoconf initialization. ## ## ------------------------ ## # ac_fn_c_try_compile LINENO # -------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_compile # ac_fn_c_try_link LINENO # ----------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_c_werror_flag" || test ! 
-s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_link # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists and can be compiled using the include files in # INCLUDES, setting the cache variable VAR accordingly. ac_fn_c_check_header_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_compile # ac_fn_c_try_cpp LINENO # ---------------------- # Try to preprocess conftest.$ac_ext, and return whether this succeeded. ac_fn_c_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_cpp # ac_fn_c_try_run LINENO # ---------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. Assumes # that executables *can* be run. ac_fn_c_try_run () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then : ac_retval=0 else $as_echo "$as_me: program exited with status $ac_status" >&5 $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=$ac_status fi rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_c_try_run # ac_fn_c_check_func LINENO FUNC VAR # ---------------------------------- # Tests whether FUNC exists, setting the cache variable VAR accordingly ac_fn_c_check_func () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Define $2 to an innocuous variant, in case declares $2. For example, HP-UX 11i declares gettimeofday. */ #define $2 innocuous_$2 /* System header to define __stub macros and hopefully few prototypes, which can conflict with char $2 (); below. Prefer to if __STDC__ is defined, since exists even on freestanding compilers. */ #ifdef __STDC__ # include #else # include #endif #undef $2 /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char $2 (); /* The GNU C library defines this for functions which it implements to always fail with ENOSYS. Some functions are actually named something starting with __ and the normal name is an alias. */ #if defined __stub_$2 || defined __stub___$2 choke me #endif int main () { return $2 (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : eval "$3=yes" else eval "$3=no" fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_func # ac_fn_cxx_try_compile LINENO # ---------------------------- # Try to compile conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_compile () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest.$ac_objext; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_compile # ac_fn_cxx_try_cpp LINENO # ------------------------ # Try to preprocess conftest.$ac_ext, and return whether this succeeded. 
ac_fn_cxx_try_cpp () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if { { ac_try="$ac_cpp conftest.$ac_ext" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } > conftest.i && { test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" || test ! -s conftest.err }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_cpp # ac_fn_cxx_try_link LINENO # ------------------------- # Try to link conftest.$ac_ext, and return whether this succeeded. ac_fn_cxx_try_link () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack rm -f conftest.$ac_objext conftest$ac_exeext if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>conftest.err ac_status=$? if test -s conftest.err; then grep -v '^ *+' conftest.err >conftest.er1 cat conftest.er1 >&5 mv -f conftest.er1 conftest.err fi $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && { test -z "$ac_cxx_werror_flag" || test ! -s conftest.err } && test -s conftest$ac_exeext && { test "$cross_compiling" = yes || test -x conftest$ac_exeext }; then : ac_retval=0 else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 ac_retval=1 fi # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would # interfere with the next link command; also delete a directory that is # left behind by Apple's compiler. We do this before executing the actions. rm -rf conftest.dSYM conftest_ipa8_conftest.oo eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno as_fn_set_status $ac_retval } # ac_fn_cxx_try_link # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_c_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_c_check_header_mongrel # ac_fn_cxx_check_header_mongrel LINENO HEADER VAR INCLUDES # --------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using # the include files in INCLUDES and setting the cache variable VAR # accordingly. ac_fn_cxx_check_header_mongrel () { as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack if eval \${$3+:} false; then : { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } else # Is the header compilable? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 $as_echo_n "checking $2 usability... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $4 #include <$2> _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_header_compiler=yes else ac_header_compiler=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 $as_echo "$ac_header_compiler" >&6; } # Is the header present? { $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 $as_echo_n "checking $2 presence... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include <$2> _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : ac_header_preproc=yes else ac_header_preproc=no fi rm -f conftest.err conftest.i conftest.$ac_ext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 $as_echo "$ac_header_preproc" >&6; } # So? What about this header? case $ac_header_compiler:$ac_header_preproc:$ac_cxx_preproc_warn_flag in #(( yes:no: ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 $as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; no:yes:* ) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 $as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 $as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 $as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 $as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 $as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 $as_echo_n "checking for $2... " >&6; } if eval \${$3+:} false; then : $as_echo_n "(cached) " >&6 else eval "$3=\$ac_header_compiler" fi eval ac_res=\$$3 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } fi eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno } # ac_fn_cxx_check_header_mongrel cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. It was created by Recoll $as_me 1.26.3, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ _ACEOF exec 5>>config.log { cat <<_ASUNAME ## --------- ## ## Platform. 
## ## --------- ## hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` uname -m = `(uname -m) 2>/dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` /bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` /usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` /bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` /bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` _ASUNAME as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. $as_echo "PATH: $as_dir" done IFS=$as_save_IFS } >&5 cat >&5 <<_ACEOF ## ----------- ## ## Core tests. ## ## ----------- ## _ACEOF # Keep a trace of the command line. # Strip out --no-create and --no-recursion so they do not pile up. # Strip out --silent because we don't want to record it for future runs. # Also quote any args containing shell meta-characters. # Make two passes to allow for proper duplicate-argument suppression. ac_configure_args= ac_configure_args0= ac_configure_args1= ac_must_keep_next=false for ac_pass in 1 2 do for ac_arg do case $ac_arg in -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil) continue ;; *\'*) ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; 2) as_fn_append ac_configure_args1 " '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else case $ac_arg in *=* | --config-cache | -C | -disable-* | --disable-* \ | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ | -with-* | --with-* | -without-* | --without-* | --x) case "$ac_configure_args0 " in "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; esac ;; -* ) ac_must_keep_next=true ;; esac fi as_fn_append ac_configure_args " '$ac_arg'" ;; esac done done { ac_configure_args0=; unset ac_configure_args0;} { ac_configure_args1=; unset ac_configure_args1;} # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there # would cause problems or look ugly. # WARNING: Use '\'' to represent an apostrophe within the trap. # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. trap 'exit_status=$? # Save into config.log some information that might help in debugging. { echo $as_echo "## ---------------- ## ## Cache variables. 
## ## ---------------- ##" echo # The following way of writing the cache mishandles newlines in values, ( for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( *${as_nl}ac_space=\ *) sed -n \ "s/'\''/'\''\\\\'\'''\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" ;; #( *) sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) echo $as_echo "## ----------------- ## ## Output variables. ## ## ----------------- ##" echo for ac_var in $ac_subst_vars do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo if test -n "$ac_subst_files"; then $as_echo "## ------------------- ## ## File substitutions. ## ## ------------------- ##" echo for ac_var in $ac_subst_files do eval ac_val=\$$ac_var case $ac_val in *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; esac $as_echo "$ac_var='\''$ac_val'\''" done | sort echo fi if test -s confdefs.h; then $as_echo "## ----------- ## ## confdefs.h. ## ## ----------- ##" echo cat confdefs.h echo fi test "$ac_signal" != 0 && $as_echo "$as_me: caught signal $ac_signal" $as_echo "$as_me: exit $exit_status" } >&5 rm -f core *.core core.conftest.* && rm -f -r conftest* confdefs* conf$$* $ac_clean_files && exit $exit_status ' 0 for ac_signal in 1 2 13 15; do trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h $as_echo "/* confdefs.h */" > confdefs.h # Predefined preprocessor variables. cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF cat >>confdefs.h <<_ACEOF #define PACKAGE_URL "$PACKAGE_URL" _ACEOF # Let the site file select an alternate cache file if it wants to. # Prefer an explicitly selected file to automatically selected ones. ac_site_file1=NONE ac_site_file2=NONE if test -n "$CONFIG_SITE"; then # We do not want a PATH search for config.site. case $CONFIG_SITE in #(( -*) ac_site_file1=./$CONFIG_SITE;; */*) ac_site_file1=$CONFIG_SITE;; *) ac_site_file1=./$CONFIG_SITE;; esac elif test "x$prefix" != xNONE; then ac_site_file1=$prefix/share/config.site ac_site_file2=$prefix/etc/config.site else ac_site_file1=$ac_default_prefix/share/config.site ac_site_file2=$ac_default_prefix/etc/config.site fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . 
"$ac_site_file" \ || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "failed to load site script $ac_site_file See \`config.log' for more details" "$LINENO" 5; } fi done if test -r "$cache_file"; then # Some versions of bash will fail to source /dev/null (special files # actually), so we avoid doing that. DJGPP emulates it as a regular file. if test /dev/null != "$cache_file" && test -f "$cache_file"; then { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; *) . "./$cache_file";; esac fi else { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi # Check that the precious variables saved in the cache have kept the same # value. ac_cache_corrupted=false for ac_var in $ac_precious_vars; do eval ac_old_set=\$ac_cv_env_${ac_var}_set eval ac_new_set=\$ac_env_${ac_var}_set eval ac_old_val=\$ac_cv_env_${ac_var}_value eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; *) if test "x$ac_old_val" != "x$ac_new_val"; then # differences in whitespace do not lead to failure. ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac # Pass precious variables to config.status. if test "$ac_new_set" = set; then case $ac_new_val in *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; *) ac_arg=$ac_var=$ac_new_val ;; esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. *) as_fn_append ac_configure_args " '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 fi ## -------------------- ## ## Main body of script. 
## ## -------------------- ## ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_config_headers="$ac_config_headers common/autoconfig.h" am__api_version='1.15' ac_aux_dir= for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do if test -f "$ac_dir/install-sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install-sh -c" break elif test -f "$ac_dir/install.sh"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/install.sh -c" break elif test -f "$ac_dir/shtool"; then ac_aux_dir=$ac_dir ac_install_sh="$ac_aux_dir/shtool install -c" break fi done if test -z "$ac_aux_dir"; then as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5 fi # These three variables are undocumented and unsupported, # and are intended to be withdrawn in a future Autoconf release. # They can cause serious problems if a builder's source tree is in a directory # whose full name contains unusual characters. ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. # Find a good install program. We prefer a C program (faster), # so one script is as good as another. But avoid the broken or # incompatible versions: # SysV /etc/install, /usr/sbin/install # SunOS /usr/etc/install # IRIX /sbin/install # AIX /bin/install # AmigaOS /C/install, which installs bootblocks on floppy discs # AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag # AFS /usr/afsws/bin/install, which mishandles nonexistent args # SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then if ${ac_cv_path_install+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. # Account for people who put trailing slashes in PATH elements. case $as_dir/ in #(( ./ | .// | /[cC]/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. # Don't use installbsd from OSF since it installs stuff as root # by default. for ac_prog in ginstall scoinst install; do for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then if test $ac_prog = install && grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # AIX install. It has an incompatible calling convention. : elif test $ac_prog = install && grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then # program-specific install script used by HP pwplus--don't use. 
: else rm -rf conftest.one conftest.two conftest.dir echo one > conftest.one echo two > conftest.two mkdir conftest.dir if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && test -s conftest.one && test -s conftest.two && test -s conftest.dir/conftest.one && test -s conftest.dir/conftest.two then ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" break 3 fi fi fi done done ;; esac done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir fi if test "${ac_cv_path_install+set}" = set; then INSTALL=$ac_cv_path_install else # As a last resort, use the slow shell script. Don't cache a # value for INSTALL within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. INSTALL=$ac_install_sh fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. # It thinks the first close brace ends the variable substitution. test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 $as_echo_n "checking whether build environment is sane... " >&6; } # Reject unsafe characters in $srcdir or the absolute working directory # name. Accept space and tab only in the latter. am_lf=' ' case `pwd` in *[\\\"\#\$\&\'\`$am_lf]*) as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; esac case $srcdir in *[\\\"\#\$\&\'\`$am_lf\ \ ]*) as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;; esac # Do 'set' in a subshell so we don't clobber the current shell's # arguments. Must try -L first in case configure is actually a # symlink; some systems play weird games with the mod time of symlinks # (eg FreeBSD returns the mod time of the symlink's containing # directory). if ( am_has_slept=no for am_try in 1 2; do echo "timestamp, slept: $am_has_slept" > conftest.file set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` if test "$*" = "X"; then # -L didn't work. set X `ls -t "$srcdir/configure" conftest.file` fi if test "$*" != "X $srcdir/configure conftest.file" \ && test "$*" != "X conftest.file $srcdir/configure"; then # If neither matched, then we have a broken ls. This can happen # if, for instance, CONFIG_SHELL is bash and it inherits a # broken ls alias from the environment. This has actually # happened. Such a system could not be considered "sane". as_fn_error $? "ls -t appears to fail. Make sure there is not a broken alias in your environment" "$LINENO" 5 fi if test "$2" = conftest.file || test $am_try -eq 2; then break fi # Just in case. sleep 1 am_has_slept=yes done test "$2" = conftest.file ) then # Ok. : else as_fn_error $? "newly created file is older than distributed files! Check your system clock" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } # If we didn't sleep, we still need to ensure time stamps of config.status and # generated files are strictly newer. am_sleep_pid= if grep 'slept: no' conftest.file >/dev/null 2>&1; then ( sleep 1 ) & am_sleep_pid=$! fi rm -f conftest.file test "$program_prefix" != NONE && program_transform_name="s&^&$program_prefix&;$program_transform_name" # Use a double $ so make ignores it. 
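# Illustrative sketch only, not part of the generated configure script: the
# effect of the program-name transform being assembled here.  With a
# hypothetical --program-prefix=g and --program-suffix=-1.26 the resulting
# sed script (once make has undone the doubled $) behaves like this:
#
#   program_transform_name='s&^&g&;s&$&-1.26&'
#   echo recoll | sed "$program_transform_name"    # prints "grecoll-1.26"
#
# so "make install" would install the executables under the transformed names.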
test "$program_suffix" != NONE && program_transform_name="s&\$&$program_suffix&;$program_transform_name" # Double any \ or $. # By default was `s,x,x', remove it if useless. ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` # Expand $ac_aux_dir to an absolute path. am_aux_dir=`cd "$ac_aux_dir" && pwd` if test x"${MISSING+set}" != xset; then case $am_aux_dir in *\ * | *\ *) MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; *) MISSING="\${SHELL} $am_aux_dir/missing" ;; esac fi # Use eval to expand $SHELL if eval "$MISSING --is-lightweight"; then am_missing_run="$MISSING " else am_missing_run= { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5 $as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;} fi if test x"${install_sh+set}" != xset; then case $am_aux_dir in *\ * | *\ *) install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; *) install_sh="\${SHELL} $am_aux_dir/install-sh" esac fi # Installed binaries are usually stripped using 'strip' when the user # run "make install-strip". However 'strip' might not be the right # tool to use in cross-compilation environments, therefore Automake # will honor the 'STRIP' environment variable to overrule this program. if test "$cross_compiling" != no; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi fi INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 $as_echo_n "checking for a thread-safe mkdir -p... " >&6; } if test -z "$MKDIR_P"; then if ${ac_cv_path_mkdir+:} false; then : $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in mkdir gmkdir; do for ac_exec_ext in '' $ac_executable_extensions; do as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( 'mkdir (GNU coreutils) '* | \ 'mkdir (coreutils) '* | \ 'mkdir (fileutils) '4.1*) ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext break 3;; esac done done done IFS=$as_save_IFS fi test -d ./--version && rmdir ./--version if test "${ac_cv_path_mkdir+set}" = set; then MKDIR_P="$ac_cv_path_mkdir -p" else # As a last resort, use the slow shell script. Don't cache a # value for MKDIR_P within a source directory, because that will # break other packages using the cache if that directory is # removed, or if the value is a relative name. MKDIR_P="$ac_install_sh -d" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 $as_echo "$MKDIR_P" >&6; } for ac_prog in gawk mawk nawk awk do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AWK+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AWK"; then ac_cv_prog_AWK="$AWK" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AWK="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AWK=$ac_cv_prog_AWK if test -n "$AWK"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 $as_echo "$AWK" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AWK" && break done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 $as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... 
" >&6; } set x ${MAKE-make} ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : $as_echo_n "(cached) " >&6 else cat >conftest.make <<\_ACEOF SHELL = /bin/sh all: @echo '@@@%%%=$(MAKE)=@@@%%%' _ACEOF # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. case `${MAKE-make} -f conftest.make 2>/dev/null` in *@@@%%%=?*=@@@%%%*) eval ac_cv_prog_make_${ac_make}_set=yes;; *) eval ac_cv_prog_make_${ac_make}_set=no;; esac rm -f conftest.make fi if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } SET_MAKE= else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } SET_MAKE="MAKE=${MAKE-make}" fi rm -rf .tst 2>/dev/null mkdir .tst 2>/dev/null if test -d .tst; then am__leading_dot=. else am__leading_dot=_ fi rmdir .tst 2>/dev/null # Check whether --enable-silent-rules was given. if test "${enable_silent_rules+set}" = set; then : enableval=$enable_silent_rules; fi case $enable_silent_rules in # ((( yes) AM_DEFAULT_VERBOSITY=0;; no) AM_DEFAULT_VERBOSITY=1;; *) AM_DEFAULT_VERBOSITY=1;; esac am_make=${MAKE-make} { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5 $as_echo_n "checking whether $am_make supports nested variables... " >&6; } if ${am_cv_make_support_nested_variables+:} false; then : $as_echo_n "(cached) " >&6 else if $as_echo 'TRUE=$(BAR$(V)) BAR0=false BAR1=true V=1 am__doit: @$(TRUE) .PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then am_cv_make_support_nested_variables=yes else am_cv_make_support_nested_variables=no fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5 $as_echo "$am_cv_make_support_nested_variables" >&6; } if test $am_cv_make_support_nested_variables = yes; then AM_V='$(V)' AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)' else AM_V=$AM_DEFAULT_VERBOSITY AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY fi AM_BACKSLASH='\' if test "`cd $srcdir && pwd`" != "`pwd`"; then # Use -I$(srcdir) only when $(srcdir) != ., so that make's output # is not polluted with repeated "-I." am__isrc=' -I$(srcdir)' # test to see if srcdir already configured if test -f $srcdir/config.status; then as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 fi fi # test whether we have cygpath if test -z "$CYGPATH_W"; then if (cygpath --version) >/dev/null 2>/dev/null; then CYGPATH_W='cygpath -w' else CYGPATH_W=echo fi fi # Define the identity of the package. PACKAGE='recoll' VERSION='1.26.3' # Some tools Automake needs. ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} # For better backward compatibility. To be removed once Automake 1.9.x # dies out for good. For more background, see: # # mkdir_p='$(MKDIR_P)' # We need awk for the "check" target (and possibly the TAP driver). The # system "awk" is bad on some platforms. # Always define AMTAR for backward compatibility. Yes, it's still used # in the wild :-( We should find a proper way to deprecate it ... AMTAR='$${TAR-tar}' # We'll loop over all known methods to create a tar archive until one works. 
_am_tools='gnutar pax cpio none' am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' # POSIX will say in a future version that running "rm -f" with no argument # is OK; and we want to be able to make that assumption in our Makefile # recipes. So use an aggressive probe to check that the usage we want is # actually supported "in the wild" to an acceptable degree. # See automake bug#10828. # To make any issue more visible, cause the running configure to be aborted # by default if the 'rm' program in use doesn't match our expectations; the # user can still override this though. if rm -f && rm -fr && rm -rf; then : OK; else cat >&2 <<'END' Oops! Your 'rm' program seems unable to run without file operands specified on the command line, even when the '-f' option is present. This is contrary to the behaviour of most rm programs out there, and not conforming with the upcoming POSIX standard: Please tell bug-automake@gnu.org about your system, including the value of your $PATH and any error possibly output before this message. This can help us improve future automake versions. END if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then echo 'Configuration will proceed anyway, since you have set the' >&2 echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2 echo >&2 else cat >&2 <<'END' Aborting the configuration process, to ensure you take notice of the issue. You can download and install GNU coreutils to get an 'rm' implementation that behaves properly: . If you want to complete the configuration process using your problematic 'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM to "yes", and re-run configure. END as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5 fi fi # Check whether --enable-static was given. if test "${enable_static+set}" = set; then : enableval=$enable_static; p=${PACKAGE-default} case $enableval in yes) enable_static=yes ;; no) enable_static=no ;; *) enable_static=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_static=yes fi done IFS=$lt_save_ifs ;; esac else enable_static=no fi case `pwd` in *\ * | *\ *) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 $as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; esac macro_version='2.4.6' macro_revision='2.4.6' ltmain=$ac_aux_dir/ltmain.sh # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 $as_echo_n "checking build system type... " >&6; } if ${ac_cv_build+:} false; then : $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; *) as_fn_error $? 
"invalid value of canonical build" "$LINENO" 5;; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' set x $ac_cv_build shift build_cpu=$1 build_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: build_os=$* IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } if ${ac_cv_host+:} false; then : $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' set x $ac_cv_host shift host_cpu=$1 host_vendor=$2 shift; shift # Remember, the first character of IFS is used to create $*, # except with old shells: host_os=$* IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac # Backslashify metacharacters that are still active within # double-quoted strings. sed_quote_subst='s/\(["`$\\]\)/\\\1/g' # Same as above, but do not quote variable references. double_quote_subst='s/\(["`\\]\)/\\\1/g' # Sed substitution to delay expansion of an escaped shell variable in a # double_quote_subst'ed string. delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' # Sed substitution to delay expansion of an escaped single quote. delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' # Sed substitution to avoid accidental globbing in evaled expressions no_glob_subst='s/\*/\\\*/g' ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 $as_echo_n "checking how to print strings... " >&6; } # Test print first, because it will be a builtin if present. if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='print -r --' elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then ECHO='printf %s\n' else # Use this function as a fallback that always works. func_fallback_echo () { eval 'cat <<_LTECHO_EOF $1 _LTECHO_EOF' } ECHO='func_fallback_echo' fi # func_echo_all arg... # Invoke $ECHO with all args, space-separated. func_echo_all () { $ECHO "" } case $ECHO in printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 $as_echo "printf" >&6; } ;; print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 $as_echo "print -r" >&6; } ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 $as_echo "cat" >&6; } ;; esac DEPDIR="${am__leading_dot}deps" ac_config_commands="$ac_config_commands depfiles" am_make=${MAKE-make} cat > confinc << 'END' am__doit: @echo this is the am__doit target .PHONY: am__doit END # If we don't find an include directive, just comment out the code. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 $as_echo_n "checking for style of include used by $am_make... " >&6; } am__include="#" am__quote= _am_result=none # First try GNU make style include. echo "include confinc" > confmf # Ignore all kinds of additional output from 'make'. 
case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=include am__quote= _am_result=GNU ;; esac # Now try BSD make style include. if test "$am__include" = "#"; then echo '.include "confinc"' > confmf case `$am_make -s -f confmf 2> /dev/null` in #( *the\ am__doit\ target*) am__include=.include am__quote="\"" _am_result=BSD ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 $as_echo "$_am_result" >&6; } rm -f confinc confmf # Check whether --enable-dependency-tracking was given. if test "${enable_dependency_tracking+set}" = set; then : enableval=$enable_dependency_tracking; fi if test "x$enable_dependency_tracking" != xno; then am_depcomp="$ac_aux_dir/depcomp" AMDEPBACKSLASH='\' am__nodep='_no' fi if test "x$enable_dependency_tracking" != xno; then AMDEP_TRUE= AMDEP_FALSE='#' else AMDEP_TRUE='#' AMDEP_FALSE= fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="gcc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi else CC="$ac_cv_prog_CC" fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="${ac_tool_prefix}cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else ac_prog_rejected=no as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then # We found a bogon in the path, so make sure we never use it. set dummy $ac_cv_prog_CC shift if test $# != 0; then # We chose a different compiler from the bogus one. # However, it has the same basename, so the bogon will be chosen # first if we set CC to just the basename; use the full file name. shift ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" fi fi fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$CC"; then if test -n "$ac_tool_prefix"; then for ac_prog in cl.exe do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CC"; then ac_cv_prog_CC="$CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 $as_echo "$CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CC" && break done fi if test -z "$CC"; then ac_ct_CC=$CC for ac_prog in cl.exe do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CC" && break done if test "x$ac_ct_CC" = x; then CC="" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CC=$ac_ct_CC fi fi fi test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "no acceptable C compiler found in \$PATH See \`config.log' for more details" "$LINENO" 5; } # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 $as_echo_n "checking whether the C compiler works... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" ac_rmfiles= for ac_file in $ac_files do case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; * ) ac_rmfiles="$ac_rmfiles $ac_file";; esac done rm -f $ac_rmfiles if { { ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link_default") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, # so that the user can short-circuit this test for compilers unknown to # Autoconf. for ac_file in $ac_files '' do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; [ab].out ) # We found the default executable, but exeext='' is most # certainly right. break;; *.* ) if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi # We set ac_cv_exeext here because the later test for it is not # safe: cross compilers may not add the suffix if given an `-o' # argument, so we may need to know it at that point already. # Even if this section looks crufty: it has the advantage of # actually working. break;; * ) break;; esac done test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi if test -z "$ac_file"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error 77 "C compiler cannot create executables See \`config.log' for more details" "$LINENO" 5; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 $as_echo_n "checking for C compiler default output file name... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 $as_echo "$ac_file" >&6; } ac_exeext=$ac_cv_exeext rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } if { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? 
$as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then : # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with # `rm'. for ac_file in conftest.exe conftest conftest.*; do test -f "$ac_file" || continue case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` break;; * ) break;; esac done else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of executables: cannot compile and link See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest conftest$ac_cv_exeext { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { FILE *f = fopen ("conftest.out", "w"); return ferror (f) || fclose (f) != 0; ; return 0; } _ACEOF ac_clean_files="$ac_clean_files conftest.out" # Check that the compiler produces executables we can run. If not, either # the compiler is broken, or we cross compile. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 $as_echo_n "checking whether we are cross compiling... " >&6; } if test "$cross_compiling" != yes; then { { ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_link") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if { ac_try='./conftest$ac_cv_exeext' { { case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_try") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; }; then cross_compiling=no else if test "$cross_compiling" = maybe; then cross_compiling=yes else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot run C compiled programs. If you meant to cross compile, use \`--host'. See \`config.log' for more details" "$LINENO" 5; } fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 $as_echo "$cross_compiling" >&6; } rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out ac_clean_files=$ac_clean_files_save { $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } if ${ac_cv_objext+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF rm -f conftest.o conftest.obj if { { ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compile") 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; }; then : for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` break;; esac done else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "cannot compute suffix of object files: cannot compile See \`config.log' for more details" "$LINENO" 5; } fi rm -f conftest.$ac_cv_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } if ${ac_cv_c_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes else GCC= fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } if ${ac_cv_prog_cc_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes else CFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : else ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS elif test $ac_cv_prog_cc_g = yes; then if test "$GCC" = yes; then CFLAGS="-g -O2" else CFLAGS="-g" fi else if test "$GCC" = yes; then CFLAGS="-O2" else CFLAGS= fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... " >&6; } if ${ac_cv_prog_cc_c89+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include struct stat; /* Most of the following tests are stolen from RCS 5.7's src/conf.sh. 
*/ struct buf { int x; }; FILE * (*rcsopen) (struct buf *, struct stat *, int); static char *e (p, i) char **p; int i; { return p[i]; } static char *f (char * (*g) (char **, int), char **p, ...) { char *s; va_list v; va_start (v,p); s = g (p, va_arg (v,int)); va_end (v); return s; } /* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has function prototypes and stuff, but not '\xHH' hex character constants. These don't provoke an error unfortunately, instead are silently treated as 'x'. The following induces an error, until -std is added to get proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an array size at least. It's necessary to write '\x00'==0 to get something that's true only with -std. */ int osf4_cc_array ['\x00' == 0 ? 1 : -1]; /* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters inside strings and character constants. */ #define FOO(x) 'x' int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1]; int test (int i, double x); struct s1 {int (*f) (int a);}; struct s2 {int (*f) (double a);}; int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); int argc; char **argv; int main () { return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; ; return 0; } _ACEOF for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_prog_cc_c89=$ac_arg fi rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done rm -f conftest.$ac_ext CC=$ac_save_CC fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac if test "x$ac_cv_prog_cc_c89" != xno; then : fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5 $as_echo_n "checking whether $CC understands -c and -o together... " >&6; } if ${am_cv_prog_cc_c_o+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF # Make sure it works both with $CC and with simple cc. # Following AC_PROG_CC_C_O, we do the test twice because some # compilers refuse to overwrite an existing .o file with -o, # though they will create one. am_cv_prog_cc_c_o=yes for am_i in 1 2; do if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5 ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5 ac_status=$? echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 (exit $ac_status); } \ && test -f conftest2.$ac_objext; then : OK else am_cv_prog_cc_c_o=no break fi done rm -f core conftest* unset am_i fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5 $as_echo "$am_cv_prog_cc_c_o" >&6; } if test "$am_cv_prog_cc_c_o" != yes; then # Losing compiler, so override with the script. # FIXME: It is wrong to rewrite CC. # But if we don't then we get into trouble of one sort or another. # A longer-term fix would be to have automake use am__CC in this case, # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)" CC="$am_aux_dir/compile $CC" fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CC" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CC_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CC_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. 
if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CC_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CC_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 $as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then am__fastdepCC_TRUE= am__fastdepCC_FALSE='#' else am__fastdepCC_TRUE='#' am__fastdepCC_FALSE= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 $as_echo_n "checking for a sed that does not truncate output... " >&6; } if ${ac_cv_path_SED+:} false; then : $as_echo_n "(cached) " >&6 else ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ for ac_i in 1 2 3 4 5 6 7; do ac_script="$ac_script$as_nl$ac_script" done echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed { ac_script=; unset ac_script;} if test -z "$SED"; then ac_path_SED_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in sed gsed; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_SED" || continue # Check for GNU ac_path_SED and select it if it is found. 
# Check for GNU $ac_path_SED case `"$ac_path_SED" --version 2>&1` in *GNU*) ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo '' >> "conftest.nl" "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_SED_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_SED="$ac_path_SED" ac_path_SED_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_SED_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_SED"; then as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 fi else ac_cv_path_SED=$SED fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 $as_echo "$ac_cv_path_SED" >&6; } SED="$ac_cv_path_SED" rm -f conftest.sed test -z "$SED" && SED=sed Xsed="$SED -e 1s/^X//" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } if ${ac_cv_path_GREP+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then ac_path_GREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_GREP" || continue # Check for GNU ac_path_GREP and select it if it is found. # Check for GNU $ac_path_GREP case `"$ac_path_GREP" --version 2>&1` in *GNU*) ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_GREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_GREP=$GREP fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 $as_echo_n "checking for egrep... 
" >&6; } if ${ac_cv_path_EGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 then ac_cv_path_EGREP="$GREP -E" else if test -z "$EGREP"; then ac_path_EGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_EGREP" || continue # Check for GNU ac_path_EGREP and select it if it is found. # Check for GNU $ac_path_EGREP case `"$ac_path_EGREP" --version 2>&1` in *GNU*) ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_EGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_EGREP=$EGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" { $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 $as_echo_n "checking for fgrep... " >&6; } if ${ac_cv_path_FGREP+:} false; then : $as_echo_n "(cached) " >&6 else if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 then ac_cv_path_FGREP="$GREP -F" else if test -z "$FGREP"; then ac_path_FGREP_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in fgrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_FGREP" || continue # Check for GNU ac_path_FGREP and select it if it is found. 
# Check for GNU $ac_path_FGREP case `"$ac_path_FGREP" --version 2>&1` in *GNU*) ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; *) ac_count=0 $as_echo_n 0123456789 >"conftest.in" while : do cat "conftest.in" "conftest.in" >"conftest.tmp" mv "conftest.tmp" "conftest.in" cp "conftest.in" "conftest.nl" $as_echo 'FGREP' >> "conftest.nl" "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break as_fn_arith $ac_count + 1 && ac_count=$as_val if test $ac_count -gt ${ac_path_FGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_max=$ac_count fi # 10*(2^10) chars as input seems more than enough test $ac_count -gt 10 && break done rm -f conftest.in conftest.tmp conftest.nl conftest.out;; esac $ac_path_FGREP_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_FGREP"; then as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 fi else ac_cv_path_FGREP=$FGREP fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 $as_echo "$ac_cv_path_FGREP" >&6; } FGREP="$ac_cv_path_FGREP" test -z "$GREP" && GREP=grep # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... 
" >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 $as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } if ${lt_cv_path_NM+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NM"; then # Let the user override the test. lt_cv_path_NM=$NM else lt_nm_to_check=${ac_tool_prefix}nm if test -n "$ac_tool_prefix" && test "$build" = "$host"; then lt_nm_to_check="$lt_nm_to_check nm" fi for lt_tmp_nm in $lt_nm_to_check; do lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. tmp_nm=$ac_dir/$lt_tmp_nm if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then # Check to see if the nm accepts a BSD-compat flag. # Adding the 'sed 1q' prevents false positives on HP-UX, which says: # nm: unknown option "B" ignored # Tru64's nm complains that /dev/null is an invalid object file # MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty case $build_os in mingw*) lt_bad_file=conftest.nm/nofile ;; *) lt_bad_file=/dev/null ;; esac case `"$tmp_nm" -B $lt_bad_file 2>&1 | sed '1q'` in *$lt_bad_file* | *'Invalid file or object type'*) lt_cv_path_NM="$tmp_nm -B" break 2 ;; *) case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in */dev/null*) lt_cv_path_NM="$tmp_nm -p" break 2 ;; *) lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but continue # so that we can try to find one that supports BSD flags ;; esac ;; esac fi done IFS=$lt_save_ifs done : ${lt_cv_path_NM=no} fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 $as_echo "$lt_cv_path_NM" >&6; } if test no != "$lt_cv_path_NM"; then NM=$lt_cv_path_NM else # Didn't find any BSD compatible name lister, look for dumpbin. if test -n "$DUMPBIN"; then : # Let the user override the test. else if test -n "$ac_tool_prefix"; then for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DUMPBIN"; then ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DUMPBIN=$ac_cv_prog_DUMPBIN if test -n "$DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 $as_echo "$DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$DUMPBIN" && break done fi if test -z "$DUMPBIN"; then ac_ct_DUMPBIN=$DUMPBIN for ac_prog in dumpbin "link -dump" do # Extract the first word of "$ac_prog", so it can be a program name with args. 
set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DUMPBIN"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN if test -n "$ac_ct_DUMPBIN"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 $as_echo "$ac_ct_DUMPBIN" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_DUMPBIN" && break done if test "x$ac_ct_DUMPBIN" = x; then DUMPBIN=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DUMPBIN=$ac_ct_DUMPBIN fi fi case `$DUMPBIN -symbols -headers /dev/null 2>&1 | sed '1q'` in *COFF*) DUMPBIN="$DUMPBIN -symbols -headers" ;; *) DUMPBIN=: ;; esac fi if test : != "$DUMPBIN"; then NM=$DUMPBIN fi fi test -z "$NM" && NM=nm { $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 $as_echo_n "checking the name lister ($NM) interface... " >&6; } if ${lt_cv_nm_interface+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_nm_interface="BSD nm" echo "int some_variable = 0;" > conftest.$ac_ext (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) (eval "$ac_compile" 2>conftest.err) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) cat conftest.err >&5 (eval echo "\"\$as_me:$LINENO: output\"" >&5) cat conftest.out >&5 if $GREP 'External.*some_variable' conftest.out > /dev/null; then lt_cv_nm_interface="MS dumpbin" fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 $as_echo "$lt_cv_nm_interface" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 $as_echo_n "checking whether ln -s works... " >&6; } LN_S=$as_ln_s if test "$LN_S" = "ln -s"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 $as_echo "no, using $LN_S" >&6; } fi # find the maximum length of command line arguments { $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 $as_echo_n "checking the maximum length of command line arguments... " >&6; } if ${lt_cv_sys_max_cmd_len+:} false; then : $as_echo_n "(cached) " >&6 else i=0 teststring=ABCD case $build_os in msdosdjgpp*) # On DJGPP, this test can blow up pretty badly due to problems in libc # (any single argument exceeding 2000 bytes causes a buffer overrun # during glob expansion). Even if it were fixed, the result of this # check would be larger than it should be. 
lt_cv_sys_max_cmd_len=12288; # 12K is about right ;; gnu*) # Under GNU Hurd, this test is not required because there is # no limit to the length of command line arguments. # Libtool will interpret -1 as no limit whatsoever lt_cv_sys_max_cmd_len=-1; ;; cygwin* | mingw* | cegcc*) # On Win9x/ME, this test blows up -- it succeeds, but takes # about 5 minutes as the teststring grows exponentially. # Worse, since 9x/ME are not pre-emptively multitasking, # you end up with a "frozen" computer, even though with patience # the test eventually succeeds (with a max line length of 256k). # Instead, let's just punt: use the minimum linelength reported by # all of the supported platforms: 8192 (on NT/2K/XP). lt_cv_sys_max_cmd_len=8192; ;; mint*) # On MiNT this can take a long time and run out of memory. lt_cv_sys_max_cmd_len=8192; ;; amigaos*) # On AmigaOS with pdksh, this test takes hours, literally. # So we just punt and use a minimum line length of 8192. lt_cv_sys_max_cmd_len=8192; ;; bitrig* | darwin* | dragonfly* | freebsd* | netbsd* | openbsd*) # This has been around since 386BSD, at least. Likely further. if test -x /sbin/sysctl; then lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` elif test -x /usr/sbin/sysctl; then lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` else lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs fi # And add a safety zone lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` ;; interix*) # We know the value 262144 and hardcode it with a safety zone (like BSD) lt_cv_sys_max_cmd_len=196608 ;; os2*) # The test takes a long time on OS/2. lt_cv_sys_max_cmd_len=8192 ;; osf*) # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not # nice to cause kernel panics so lets avoid the loop below. # First set a reasonable default. lt_cv_sys_max_cmd_len=16384 # if test -x /sbin/sysconfig; then case `/sbin/sysconfig -q proc exec_disable_arg_limit` in *1*) lt_cv_sys_max_cmd_len=-1 ;; esac fi ;; sco3.2v5*) lt_cv_sys_max_cmd_len=102400 ;; sysv5* | sco5v6* | sysv4.2uw2*) kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` if test -n "$kargmax"; then lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` else lt_cv_sys_max_cmd_len=32768 fi ;; *) lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` if test -n "$lt_cv_sys_max_cmd_len" && \ test undefined != "$lt_cv_sys_max_cmd_len"; then lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` else # Make teststring a little bigger before we do anything with it. # a 1K string should be a reasonable start. for i in 1 2 3 4 5 6 7 8; do teststring=$teststring$teststring done SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} # If test is not a shell built-in, we'll probably end up computing a # maximum length that is only half of the actual maximum length, but # we can't tell. while { test X`env echo "$teststring$teststring" 2>/dev/null` \ = "X$teststring$teststring"; } >/dev/null 2>&1 && test 17 != "$i" # 1/2 MB should be enough do i=`expr $i + 1` teststring=$teststring$teststring done # Only check the string length outside the loop. lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` teststring= # Add a significant safety factor because C++ compilers can tack on # massive amounts of additional arguments before passing them to the # linker. It appears as though 1/2 is a usable value. 
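# (At this point teststring has grown from 4 bytes to at most about 1/2 MB;
# the halved result below becomes max_cmd_len, which libtool later consults
# when deciding whether an overlong link or archive command line must be
# split into several smaller invocations.)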
lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` fi ;; esac fi if test -n "$lt_cv_sys_max_cmd_len"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 $as_echo "$lt_cv_sys_max_cmd_len" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 $as_echo "none" >&6; } fi max_cmd_len=$lt_cv_sys_max_cmd_len : ${CP="cp -f"} : ${MV="mv -f"} : ${RM="rm -f"} if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then lt_unset=unset else lt_unset=false fi # test EBCDIC or ASCII case `echo X|tr X '\101'` in A) # ASCII based system # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr lt_SP2NL='tr \040 \012' lt_NL2SP='tr \015\012 \040\040' ;; *) # EBCDIC based system lt_SP2NL='tr \100 \n' lt_NL2SP='tr \r\n \100\100' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 $as_echo_n "checking how to convert $build file names to $host format... " >&6; } if ${lt_cv_to_host_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 ;; esac ;; *-*-cygwin* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin ;; *-*-cygwin* ) lt_cv_to_host_file_cmd=func_convert_file_noop ;; * ) # otherwise, assume *nix lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin ;; esac ;; * ) # unhandled hosts (and "normal" native builds) lt_cv_to_host_file_cmd=func_convert_file_noop ;; esac fi to_host_file_cmd=$lt_cv_to_host_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 $as_echo "$lt_cv_to_host_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 $as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } if ${lt_cv_to_tool_file_cmd+:} false; then : $as_echo_n "(cached) " >&6 else #assume ordinary cross tools, or native build. lt_cv_to_tool_file_cmd=func_convert_file_noop case $host in *-*-mingw* ) case $build in *-*-mingw* ) # actually msys lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 ;; esac ;; esac fi to_tool_file_cmd=$lt_cv_to_tool_file_cmd { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 $as_echo "$lt_cv_to_tool_file_cmd" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 $as_echo_n "checking for $LD option to reload object files... " >&6; } if ${lt_cv_ld_reload_flag+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_reload_flag='-r' fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 $as_echo "$lt_cv_ld_reload_flag" >&6; } reload_flag=$lt_cv_ld_reload_flag case $reload_flag in "" | " "*) ;; *) reload_flag=" $reload_flag" ;; esac reload_cmds='$LD$reload_flag -o $output$reload_objs' case $host_os in cygwin* | mingw* | pw32* | cegcc*) if test yes != "$GCC"; then reload_cmds=false fi ;; darwin*) if test yes = "$GCC"; then reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs' else reload_cmds='$LD$reload_flag -o $output$reload_objs' fi ;; esac if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. 
set dummy ${ac_tool_prefix}objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OBJDUMP"; then ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OBJDUMP=$ac_cv_prog_OBJDUMP if test -n "$OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 $as_echo "$OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OBJDUMP"; then ac_ct_OBJDUMP=$OBJDUMP # Extract the first word of "objdump", so it can be a program name with args. set dummy objdump; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OBJDUMP"; then ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OBJDUMP="objdump" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP if test -n "$ac_ct_OBJDUMP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 $as_echo "$ac_ct_OBJDUMP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OBJDUMP" = x; then OBJDUMP="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OBJDUMP=$ac_ct_OBJDUMP fi else OBJDUMP="$ac_cv_prog_OBJDUMP" fi test -z "$OBJDUMP" && OBJDUMP=objdump { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 $as_echo_n "checking how to recognize dependent libraries... " >&6; } if ${lt_cv_deplibs_check_method+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_file_magic_cmd='$MAGIC_CMD' lt_cv_file_magic_test_file= lt_cv_deplibs_check_method='unknown' # Need to set the preceding variable on all platforms that support # interlibrary dependencies. # 'none' -- dependencies not supported. # 'unknown' -- same as none, but documents that we really don't know. # 'pass_all' -- all dependencies passed with no checks. # 'test_compile' -- check by making test program. # 'file_magic [[regex]]' -- check by looking for files in library path # that responds to the $file_magic_cmd with a given extended regex. # If you have 'file' or equivalent on your system and you're not sure # whether 'pass_all' will *always* work, you probably want this one. 
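# (The method chosen below is recorded in the generated libtool script and
# consulted again at link time, when libtool decides whether a dependency
# library can safely be used for inter-library linking.)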
case $host_os in aix[4-9]*) lt_cv_deplibs_check_method=pass_all ;; beos*) lt_cv_deplibs_check_method=pass_all ;; bsdi[45]*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' lt_cv_file_magic_cmd='/usr/bin/file -L' lt_cv_file_magic_test_file=/shlib/libc.so ;; cygwin*) # func_win32_libid is a shell function defined in ltmain.sh lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' ;; mingw* | pw32*) # Base MSYS/MinGW do not provide the 'file' command needed by # func_win32_libid shell function, so use a weaker test based on 'objdump', # unless we find 'file', for example because we are cross-compiling. if ( file / ) >/dev/null 2>&1; then lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' lt_cv_file_magic_cmd='func_win32_libid' else # Keep this pattern in sync with the one in func_win32_libid. lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' lt_cv_file_magic_cmd='$OBJDUMP -f' fi ;; cegcc*) # use the weaker test based on 'objdump'. See mingw*. lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' lt_cv_file_magic_cmd='$OBJDUMP -f' ;; darwin* | rhapsody*) lt_cv_deplibs_check_method=pass_all ;; freebsd* | dragonfly*) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then case $host_cpu in i*86 ) # Not sure whether the presence of OpenBSD here was a mistake. # Let's accept both of them until this is cleared up. lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` ;; esac else lt_cv_deplibs_check_method=pass_all fi ;; haiku*) lt_cv_deplibs_check_method=pass_all ;; hpux10.20* | hpux11*) lt_cv_file_magic_cmd=/usr/bin/file case $host_cpu in ia64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so ;; hppa*64*) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl ;; *) lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' lt_cv_file_magic_test_file=/usr/lib/libc.sl ;; esac ;; interix[3-9]*) # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' ;; irix5* | irix6* | nonstopux*) case $LD in *-32|*"-32 ") libmagic=32-bit;; *-n32|*"-n32 ") libmagic=N32;; *-64|*"-64 ") libmagic=64-bit;; *) libmagic=never-match;; esac lt_cv_deplibs_check_method=pass_all ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) lt_cv_deplibs_check_method=pass_all ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' fi ;; newos6*) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' lt_cv_file_magic_cmd=/usr/bin/file lt_cv_file_magic_test_file=/usr/lib/libnls.so ;; *nto* | *qnx*) lt_cv_deplibs_check_method=pass_all ;; openbsd* | bitrig*) if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' else lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' fi ;; osf3* | osf4* | osf5*) lt_cv_deplibs_check_method=pass_all ;; rdos*) lt_cv_deplibs_check_method=pass_all ;; solaris*) lt_cv_deplibs_check_method=pass_all ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) lt_cv_deplibs_check_method=pass_all ;; sysv4 | sysv4.3*) case $host_vendor in motorola) lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` ;; ncr) lt_cv_deplibs_check_method=pass_all ;; sequent) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' ;; sni) lt_cv_file_magic_cmd='/bin/file' lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" lt_cv_file_magic_test_file=/lib/libc.so ;; siemens) lt_cv_deplibs_check_method=pass_all ;; pc) lt_cv_deplibs_check_method=pass_all ;; esac ;; tpf*) lt_cv_deplibs_check_method=pass_all ;; os2*) lt_cv_deplibs_check_method=pass_all ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 $as_echo "$lt_cv_deplibs_check_method" >&6; } file_magic_glob= want_nocaseglob=no if test "$build" = "$host"; then case $host_os in mingw* | pw32*) if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then want_nocaseglob=yes else file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` fi ;; esac fi file_magic_cmd=$lt_cv_file_magic_cmd deplibs_check_method=$lt_cv_deplibs_check_method test -z "$deplibs_check_method" && deplibs_check_method=unknown if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. set dummy ${ac_tool_prefix}dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DLLTOOL"; then ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DLLTOOL=$ac_cv_prog_DLLTOOL if test -n "$DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 $as_echo "$DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DLLTOOL"; then ac_ct_DLLTOOL=$DLLTOOL # Extract the first word of "dlltool", so it can be a program name with args. set dummy dlltool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DLLTOOL"; then ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DLLTOOL="dlltool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL if test -n "$ac_ct_DLLTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 $as_echo "$ac_ct_DLLTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DLLTOOL" = x; then DLLTOOL="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DLLTOOL=$ac_ct_DLLTOOL fi else DLLTOOL="$ac_cv_prog_DLLTOOL" fi test -z "$DLLTOOL" && DLLTOOL=dlltool { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 $as_echo_n "checking how to associate runtime and link libraries... " >&6; } if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_sharedlib_from_linklib_cmd='unknown' case $host_os in cygwin* | mingw* | pw32* | cegcc*) # two different shell functions defined in ltmain.sh; # decide which one to use based on capabilities of $DLLTOOL case `$DLLTOOL --help 2>&1` in *--identify-strict*) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib ;; *) lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback ;; esac ;; *) # fallback: assume linklib IS sharedlib lt_cv_sharedlib_from_linklib_cmd=$ECHO ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 $as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO if test -n "$ac_tool_prefix"; then for ac_prog in ar do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$AR"; then ac_cv_prog_AR="$AR" # Let the user override the test. 
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_AR="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi AR=$ac_cv_prog_AR if test -n "$AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 $as_echo "$AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$AR" && break done fi if test -z "$AR"; then ac_ct_AR=$AR for ac_prog in ar do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_AR+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_AR"; then ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_AR="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_AR=$ac_cv_prog_ac_ct_AR if test -n "$ac_ct_AR"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 $as_echo "$ac_ct_AR" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_AR" && break done if test "x$ac_ct_AR" = x; then AR="false" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac AR=$ac_ct_AR fi fi : ${AR=ar} : ${AR_FLAGS=cru} { $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 $as_echo_n "checking for archiver @FILE support... " >&6; } if ${lt_cv_ar_at_file+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ar_at_file=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : echo conftest.$ac_objext > conftest.lst lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } if test 0 -eq "$ac_status"; then # Ensure the archiver fails upon bogus file names. rm -f conftest.$ac_objext libconftest.a { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 (eval $lt_ar_try) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 test $ac_status = 0; } if test 0 -ne "$ac_status"; then lt_cv_ar_at_file=@ fi fi rm -f conftest.* libconftest.a fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 $as_echo "$lt_cv_ar_at_file" >&6; } if test no = "$lt_cv_ar_at_file"; then archiver_list_spec= else archiver_list_spec=$lt_cv_ar_at_file fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. set dummy ${ac_tool_prefix}strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$STRIP"; then ac_cv_prog_STRIP="$STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_STRIP="${ac_tool_prefix}strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi STRIP=$ac_cv_prog_STRIP if test -n "$STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 $as_echo "$STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_STRIP"; then ac_ct_STRIP=$STRIP # Extract the first word of "strip", so it can be a program name with args. set dummy strip; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_STRIP+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_STRIP"; then ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_STRIP="strip" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP if test -n "$ac_ct_STRIP"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 $as_echo "$ac_ct_STRIP" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_STRIP" = x; then STRIP=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac STRIP=$ac_ct_STRIP fi else STRIP="$ac_cv_prog_STRIP" fi test -z "$STRIP" && STRIP=: if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. set dummy ${ac_tool_prefix}ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$RANLIB"; then ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi RANLIB=$ac_cv_prog_RANLIB if test -n "$RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 $as_echo "$RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_RANLIB"; then ac_ct_RANLIB=$RANLIB # Extract the first word of "ranlib", so it can be a program name with args. set dummy ranlib; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_RANLIB"; then ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_RANLIB="ranlib" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB if test -n "$ac_ct_RANLIB"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 $as_echo "$ac_ct_RANLIB" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_RANLIB" = x; then RANLIB=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac RANLIB=$ac_ct_RANLIB fi else RANLIB="$ac_cv_prog_RANLIB" fi test -z "$RANLIB" && RANLIB=: # Determine commands to create old-style static archives. old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' old_postinstall_cmds='chmod 644 $oldlib' old_postuninstall_cmds= if test -n "$RANLIB"; then case $host_os in bitrig* | openbsd*) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" ;; *) old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" ;; esac old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" fi case $host_os in darwin*) lock_old_archive_extraction=yes ;; *) lock_old_archive_extraction=no ;; esac # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Check for command to grab the raw symbol name followed by C symbol from nm. { $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 $as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } if ${lt_cv_sys_global_symbol_pipe+:} false; then : $as_echo_n "(cached) " >&6 else # These are sane defaults that work on at least a few old systems. # [They come from Ultrix. What could be older than Ultrix?!! ;)] # Character class describing NM global symbol codes. symcode='[BCDEGRST]' # Regexp to match symbols that can be accessed directly from C. sympat='\([_A-Za-z][_A-Za-z0-9]*\)' # Define system-specific variables. 
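# (The pipe constructed here is what lets libtool turn the platform's
# nm/dumpbin listing into C symbol names; the conftest code further down
# uses it to generate a preloaded-symbols table,
# lt__PROGRAM__LTX_preloaded_symbols, of the kind used for dlpreopening.)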
case $host_os in aix*) symcode='[BCDT]' ;; cygwin* | mingw* | pw32* | cegcc*) symcode='[ABCDGISTW]' ;; hpux*) if test ia64 = "$host_cpu"; then symcode='[ABCDEGRST]' fi ;; irix* | nonstopux*) symcode='[BCDEGRST]' ;; osf*) symcode='[BCDEGQRST]' ;; solaris*) symcode='[BDRT]' ;; sco3.2v5*) symcode='[DT]' ;; sysv4.2uw2*) symcode='[DT]' ;; sysv5* | sco5v6* | unixware* | OpenUNIX*) symcode='[ABDT]' ;; sysv4) symcode='[DFNSTU]' ;; esac # If we're using GNU nm, then use its standard symbol codes. case `$NM -V 2>&1` in *GNU* | *'with BFD'*) symcode='[ABCDGIRSTW]' ;; esac if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Gets list of data symbols to import. lt_cv_sys_global_symbol_to_import="sed -n -e 's/^I .* \(.*\)$/\1/p'" # Adjust the below global symbol transforms to fixup imported variables. lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'" lt_c_name_hook=" -e 's/^I .* \(.*\)$/ {\"\1\", (void *) 0},/p'" lt_c_name_lib_hook="\ -e 's/^I .* \(lib.*\)$/ {\"\1\", (void *) 0},/p'\ -e 's/^I .* \(.*\)$/ {\"lib\1\", (void *) 0},/p'" else # Disable hooks by default. lt_cv_sys_global_symbol_to_import= lt_cdecl_hook= lt_c_name_hook= lt_c_name_lib_hook= fi # Transform an extracted symbol line into a proper C declaration. # Some systems (esp. on ia64) link data and code symbols differently, # so use this general approach. lt_cv_sys_global_symbol_to_cdecl="sed -n"\ $lt_cdecl_hook\ " -e 's/^T .* \(.*\)$/extern int \1();/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'" # Transform an extracted symbol line into symbol name and symbol address lt_cv_sys_global_symbol_to_c_name_address="sed -n"\ $lt_c_name_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/p'" # Transform an extracted symbol line into symbol name with lib prefix and # symbol address. lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n"\ $lt_c_name_lib_hook\ " -e 's/^: \(.*\) .*$/ {\"\1\", (void *) 0},/p'"\ " -e 's/^$symcode$symcode* .* \(lib.*\)$/ {\"\1\", (void *) \&\1},/p'"\ " -e 's/^$symcode$symcode* .* \(.*\)$/ {\"lib\1\", (void *) \&\1},/p'" # Handle CRLF in mingw tool chain opt_cr= case $build_os in mingw*) opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp ;; esac # Try without a prefix underscore, then with it. for ac_symprfx in "" "_"; do # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. symxfrm="\\1 $ac_symprfx\\2 \\2" # Write the raw and C identifiers. if test "$lt_cv_nm_interface" = "MS dumpbin"; then # Fake it for dumpbin and say T for any non-static function, # D for any global variable and I for any imported variable. # Also find C++ and __fastcall symbols from MSVC++, # which start with @ or ?. 
lt_cv_sys_global_symbol_pipe="$AWK '"\ " {last_section=section; section=\$ 3};"\ " /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ " /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ " /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\ " /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\ " /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\ " \$ 0!~/External *\|/{next};"\ " / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ " {if(hide[section]) next};"\ " {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\ " {split(\$ 0,a,/\||\r/); split(a[2],s)};"\ " s[1]~/^[@?]/{print f,s[1],s[1]; next};"\ " s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\ " ' prfx=^$ac_symprfx" else lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" fi lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" # Check to see that the pipe works correctly. pipe_works=no rm -f conftest* cat > conftest.$ac_ext <<_LT_EOF #ifdef __cplusplus extern "C" { #endif char nm_test_var; void nm_test_func(void); void nm_test_func(void){} #ifdef __cplusplus } #endif int main(){nm_test_var='a';nm_test_func();return(0);} _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Now try to grab the symbols. nlist=conftest.nm if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "$nlist"; then # Try sorting and uniquifying the output. if sort "$nlist" | uniq > "$nlist"T; then mv -f "$nlist"T "$nlist" else rm -f "$nlist"T fi # Make sure that we snagged all the symbols we need. if $GREP ' nm_test_var$' "$nlist" >/dev/null; then if $GREP ' nm_test_func$' "$nlist" >/dev/null; then cat <<_LT_EOF > conftest.$ac_ext /* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ #if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE /* DATA imports from DLLs on WIN32 can't be const, because runtime relocations are performed -- see ld's documentation on pseudo-relocs. */ # define LT_DLSYM_CONST #elif defined __osf__ /* This system does not cope well with relocations in const data. */ # define LT_DLSYM_CONST #else # define LT_DLSYM_CONST const #endif #ifdef __cplusplus extern "C" { #endif _LT_EOF # Now generate the symbol file. eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' cat <<_LT_EOF >> conftest.$ac_ext /* The mapping between symbol names and symbols. */ LT_DLSYM_CONST struct { const char *name; void *address; } lt__PROGRAM__LTX_preloaded_symbols[] = { { "@PROGRAM@", (void *) 0 }, _LT_EOF $SED "s/^$symcode$symcode* .* \(.*\)$/ {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext cat <<\_LT_EOF >> conftest.$ac_ext {0, (void *) 0} }; /* This works around a problem in FreeBSD linker */ #ifdef FREEBSD_WORKAROUND static const void *lt_preloaded_setup() { return lt__PROGRAM__LTX_preloaded_symbols; } #endif #ifdef __cplusplus } #endif _LT_EOF # Now try linking the two files. 
mv conftest.$ac_objext conftstm.$ac_objext lt_globsym_save_LIBS=$LIBS lt_globsym_save_CFLAGS=$CFLAGS LIBS=conftstm.$ac_objext CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s conftest$ac_exeext; then pipe_works=yes fi LIBS=$lt_globsym_save_LIBS CFLAGS=$lt_globsym_save_CFLAGS else echo "cannot find nm_test_func in $nlist" >&5 fi else echo "cannot find nm_test_var in $nlist" >&5 fi else echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 fi else echo "$progname: failed program was:" >&5 cat conftest.$ac_ext >&5 fi rm -rf conftest* conftst* # Do not use the global_symbol_pipe unless it works. if test yes = "$pipe_works"; then break else lt_cv_sys_global_symbol_pipe= fi done fi if test -z "$lt_cv_sys_global_symbol_pipe"; then lt_cv_sys_global_symbol_to_cdecl= fi if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 $as_echo "failed" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 $as_echo "ok" >&6; } fi # Response file support. if test "$lt_cv_nm_interface" = "MS dumpbin"; then nm_file_list_spec='@' elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then nm_file_list_spec='@' fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 $as_echo_n "checking for sysroot... " >&6; } # Check whether --with-sysroot was given. if test "${with_sysroot+set}" = set; then : withval=$with_sysroot; else with_sysroot=no fi lt_sysroot= case $with_sysroot in #( yes) if test yes = "$GCC"; then lt_sysroot=`$CC --print-sysroot 2>/dev/null` fi ;; #( /*) lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` ;; #( no|'') ;; #( *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5 $as_echo "$with_sysroot" >&6; } as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 $as_echo "${lt_sysroot:-no}" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5 $as_echo_n "checking for a working dd... " >&6; } if ${ac_cv_path_lt_DD+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i : ${lt_DD:=$DD} if test -z "$lt_DD"; then ac_path_lt_DD_found=false # Loop through the user's path and test for each of PROGNAME-LIST as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_prog in dd; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_lt_DD="$as_dir/$ac_prog$ac_exec_ext" as_fn_executable_p "$ac_path_lt_DD" || continue if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=: fi $ac_path_lt_DD_found && break 3 done done done IFS=$as_save_IFS if test -z "$ac_cv_path_lt_DD"; then : fi else ac_cv_path_lt_DD=$lt_DD fi rm -f conftest.i conftest2.i conftest.out fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5 $as_echo "$ac_cv_path_lt_DD" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5 $as_echo_n "checking how to truncate binary pipes... 
" >&6; } if ${lt_cv_truncate_bin+:} false; then : $as_echo_n "(cached) " >&6 else printf 0123456789abcdef0123456789abcdef >conftest.i cat conftest.i conftest.i >conftest2.i lt_cv_truncate_bin= if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then cmp -s conftest.i conftest.out \ && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1" fi rm -f conftest.i conftest2.i conftest.out test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5 $as_echo "$lt_cv_truncate_bin" >&6; } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # Check whether --enable-libtool-lock was given. if test "${enable_libtool_lock+set}" = set; then : enableval=$enable_libtool_lock; fi test no = "$enable_libtool_lock" || enable_libtool_lock=yes # Some flags need to be propagated to the compiler or linker for good # libtool support. case $host in ia64-*-hpux*) # Find out what ABI is being produced by ac_compile, and set mode # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.$ac_objext` in *ELF-32*) HPUX_IA64_MODE=32 ;; *ELF-64*) HPUX_IA64_MODE=64 ;; esac fi rm -rf conftest* ;; *-*-irix6*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then if test yes = "$lt_cv_prog_gnu_ld"; then case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -melf32bsmip" ;; *N32*) LD="${LD-ld} -melf32bmipn32" ;; *64-bit*) LD="${LD-ld} -melf64bmip" ;; esac else case `/usr/bin/file conftest.$ac_objext` in *32-bit*) LD="${LD-ld} -32" ;; *N32*) LD="${LD-ld} -n32" ;; *64-bit*) LD="${LD-ld} -64" ;; esac fi fi rm -rf conftest* ;; mips64*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo '#line '$LINENO' "configure"' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then emul=elf case `/usr/bin/file conftest.$ac_objext` in *32-bit*) emul="${emul}32" ;; *64-bit*) emul="${emul}64" ;; esac case `/usr/bin/file conftest.$ac_objext` in *MSB*) emul="${emul}btsmip" ;; *LSB*) emul="${emul}ltsmip" ;; esac case `/usr/bin/file conftest.$ac_objext` in *N32*) emul="${emul}n32" ;; esac LD="${LD-ld} -m $emul" fi rm -rf conftest* ;; x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \ s390*-*linux*|s390*-*tpf*|sparc*-*linux*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. 
Note that the listed cases only cover the # situations where additional linker options are needed (such as when # doing 32-bit compilation for a host where ld defaults to 64-bit, or # vice versa); the common cases where no linker options are needed do # not appear in the list. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *32-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_i386_fbsd" ;; x86_64-*linux*) case `/usr/bin/file conftest.o` in *x86-64*) LD="${LD-ld} -m elf32_x86_64" ;; *) LD="${LD-ld} -m elf_i386" ;; esac ;; powerpc64le-*linux*) LD="${LD-ld} -m elf32lppclinux" ;; powerpc64-*linux*) LD="${LD-ld} -m elf32ppclinux" ;; s390x-*linux*) LD="${LD-ld} -m elf_s390" ;; sparc64-*linux*) LD="${LD-ld} -m elf32_sparc" ;; esac ;; *64-bit*) case $host in x86_64-*kfreebsd*-gnu) LD="${LD-ld} -m elf_x86_64_fbsd" ;; x86_64-*linux*) LD="${LD-ld} -m elf_x86_64" ;; powerpcle-*linux*) LD="${LD-ld} -m elf64lppc" ;; powerpc-*linux*) LD="${LD-ld} -m elf64ppc" ;; s390*-*linux*|s390*-*tpf*) LD="${LD-ld} -m elf64_s390" ;; sparc*-*linux*) LD="${LD-ld} -m elf64_sparc" ;; esac ;; esac fi rm -rf conftest* ;; *-*-sco3.2v5*) # On SCO OpenServer 5, we need -belf to get full-featured binaries. SAVE_CFLAGS=$CFLAGS CFLAGS="$CFLAGS -belf" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 $as_echo_n "checking whether the C compiler needs -belf... " >&6; } if ${lt_cv_cc_needs_belf+:} false; then : $as_echo_n "(cached) " >&6 else ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_cc_needs_belf=yes else lt_cv_cc_needs_belf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 $as_echo "$lt_cv_cc_needs_belf" >&6; } if test yes != "$lt_cv_cc_needs_belf"; then # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf CFLAGS=$SAVE_CFLAGS fi ;; *-*solaris*) # Find out what ABI is being produced by ac_compile, and set linker # options accordingly. echo 'int i;' > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then case `/usr/bin/file conftest.o` in *64-bit*) case $lt_cv_prog_gnu_ld in yes*) case $host in i?86-*-solaris*|x86_64-*-solaris*) LD="${LD-ld} -m elf_x86_64" ;; sparc*-*-solaris*) LD="${LD-ld} -m elf64_sparc" ;; esac # GNU ld 2.21 introduced _sol2 emulations. Use them if available. 
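# Illustrative sketch (hypothetical x86_64 GNU/Linux host with GNU file(1)):
# every ABI check in this block follows the same pattern -- compile a
# trivial object, then let file(1) report its word size and byte order:
#     echo 'int i;' > conftest.c
#     $CC $CFLAGS -c conftest.c
#     /usr/bin/file conftest.o
#         conftest.o: ELF 64-bit LSB relocatable, x86-64, ...
# Matching "*32-bit*" / "*64-bit*" (plus MSB/LSB or N32 where relevant) is
# what selects the "-m <emulation>" linker flags above.  The test just below
# additionally asks whether this ld understands the newer _sol2 emulation
# names on Solaris.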
if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then LD=${LD-ld}_sol2 fi ;; *) if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then LD="${LD-ld} -64" fi ;; esac ;; esac fi rm -rf conftest* ;; esac need_locks=$enable_libtool_lock if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. set dummy ${ac_tool_prefix}mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$MANIFEST_TOOL"; then ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL if test -n "$MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 $as_echo "$MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_MANIFEST_TOOL"; then ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL # Extract the first word of "mt", so it can be a program name with args. set dummy mt; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_MANIFEST_TOOL"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL if test -n "$ac_ct_MANIFEST_TOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 $as_echo "$ac_ct_MANIFEST_TOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_MANIFEST_TOOL" = x; then MANIFEST_TOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL fi else MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" fi test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 $as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } if ${lt_cv_path_mainfest_tool+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_path_mainfest_tool=no echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 $MANIFEST_TOOL '-?' 
2>conftest.err > conftest.out cat conftest.err >&5 if $GREP 'Manifest Tool' conftest.out > /dev/null; then lt_cv_path_mainfest_tool=yes fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 $as_echo "$lt_cv_path_mainfest_tool" >&6; } if test yes != "$lt_cv_path_mainfest_tool"; then MANIFEST_TOOL=: fi case $host_os in rhapsody* | darwin*) if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$DSYMUTIL"; then ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi DSYMUTIL=$ac_cv_prog_DSYMUTIL if test -n "$DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 $as_echo "$DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_DSYMUTIL"; then ac_ct_DSYMUTIL=$DSYMUTIL # Extract the first word of "dsymutil", so it can be a program name with args. set dummy dsymutil; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_DSYMUTIL"; then ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL if test -n "$ac_ct_DSYMUTIL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 $as_echo "$ac_ct_DSYMUTIL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_DSYMUTIL" = x; then DSYMUTIL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac DSYMUTIL=$ac_ct_DSYMUTIL fi else DSYMUTIL="$ac_cv_prog_DSYMUTIL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. set dummy ${ac_tool_prefix}nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$NMEDIT"; then ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. 
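# Note on the pattern shared by the mt, dsymutil, nmedit, lipo and otool
# checks in this area: the ${ac_tool_prefix}-prefixed tool is preferred, the
# bare name is accepted only with a cross-tool warning, and "Let the user
# override the test" means a preset environment variable short-circuits the
# PATH search.  Illustrative invocation (hypothetical paths):
#     NMEDIT=/usr/bin/nmedit DSYMUTIL=/usr/bin/dsymutil ./configure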
else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi NMEDIT=$ac_cv_prog_NMEDIT if test -n "$NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 $as_echo "$NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_NMEDIT"; then ac_ct_NMEDIT=$NMEDIT # Extract the first word of "nmedit", so it can be a program name with args. set dummy nmedit; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_NMEDIT"; then ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_NMEDIT="nmedit" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT if test -n "$ac_ct_NMEDIT"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 $as_echo "$ac_ct_NMEDIT" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_NMEDIT" = x; then NMEDIT=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac NMEDIT=$ac_ct_NMEDIT fi else NMEDIT="$ac_cv_prog_NMEDIT" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. set dummy ${ac_tool_prefix}lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$LIPO"; then ac_cv_prog_LIPO="$LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_LIPO="${ac_tool_prefix}lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi LIPO=$ac_cv_prog_LIPO if test -n "$LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 $as_echo "$LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_LIPO"; then ac_ct_LIPO=$LIPO # Extract the first word of "lipo", so it can be a program name with args. set dummy lipo; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_prog_ac_ct_LIPO+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_LIPO"; then ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_LIPO="lipo" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO if test -n "$ac_ct_LIPO"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 $as_echo "$ac_ct_LIPO" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_LIPO" = x; then LIPO=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac LIPO=$ac_ct_LIPO fi else LIPO="$ac_cv_prog_LIPO" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. set dummy ${ac_tool_prefix}otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL"; then ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL="${ac_tool_prefix}otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL=$ac_cv_prog_OTOOL if test -n "$OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 $as_echo "$OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL"; then ac_ct_OTOOL=$OTOOL # Extract the first word of "otool", so it can be a program name with args. set dummy otool; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL"; then ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL="otool" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL if test -n "$ac_ct_OTOOL"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 $as_echo "$ac_ct_OTOOL" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL" = x; then OTOOL=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL=$ac_ct_OTOOL fi else OTOOL="$ac_cv_prog_OTOOL" fi if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. set dummy ${ac_tool_prefix}otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$OTOOL64"; then ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi OTOOL64=$ac_cv_prog_OTOOL64 if test -n "$OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 $as_echo "$OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi fi if test -z "$ac_cv_prog_OTOOL64"; then ac_ct_OTOOL64=$OTOOL64 # Extract the first word of "otool64", so it can be a program name with args. set dummy otool64; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_OTOOL64"; then ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_OTOOL64="otool64" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 if test -n "$ac_ct_OTOOL64"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 $as_echo "$ac_ct_OTOOL64" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test "x$ac_ct_OTOOL64" = x; then OTOOL64=":" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac OTOOL64=$ac_ct_OTOOL64 fi else OTOOL64="$ac_cv_prog_OTOOL64" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 $as_echo_n "checking for -single_module linker flag... " >&6; } if ${lt_cv_apple_cc_single_mod+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_apple_cc_single_mod=no if test -z "$LT_MULTI_MODULE"; then # By default we will add the -single_module flag. You can override # by either setting the environment variable LT_MULTI_MODULE # non-empty at configure time, or by adding -multi_module to the # link flags. rm -rf libconftest.dylib* echo "int foo(void){return 1;}" > conftest.c echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c" >&5 $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ -dynamiclib -Wl,-single_module conftest.c 2>conftest.err _lt_result=$? # If there is a non-empty error log, and "single_module" # appears in it, assume the flag caused a linker warning if test -s conftest.err && $GREP single_module conftest.err; then cat conftest.err >&5 # Otherwise, if the output was created with a 0 exit code from # the compiler, it worked. elif test -f libconftest.dylib && test 0 = "$_lt_result"; then lt_cv_apple_cc_single_mod=yes else cat conftest.err >&5 fi rm -rf libconftest.dylib* rm -f conftest.* fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 $as_echo "$lt_cv_apple_cc_single_mod" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 $as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } if ${lt_cv_ld_exported_symbols_list+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_exported_symbols_list=no save_LDFLAGS=$LDFLAGS echo "_main" > conftest.sym LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_ld_exported_symbols_list=yes else lt_cv_ld_exported_symbols_list=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 $as_echo "$lt_cv_ld_exported_symbols_list" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 $as_echo_n "checking for -force_load linker flag... 
" >&6; } if ${lt_cv_ld_force_load+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_ld_force_load=no cat > conftest.c << _LT_EOF int forced_loaded() { return 2;} _LT_EOF echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 echo "$AR cru libconftest.a conftest.o" >&5 $AR cru libconftest.a conftest.o 2>&5 echo "$RANLIB libconftest.a" >&5 $RANLIB libconftest.a 2>&5 cat > conftest.c << _LT_EOF int main() { return 0;} _LT_EOF echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err _lt_result=$? if test -s conftest.err && $GREP force_load conftest.err; then cat conftest.err >&5 elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then lt_cv_ld_force_load=yes else cat conftest.err >&5 fi rm -f conftest.err libconftest.a conftest conftest.c rm -rf conftest.dSYM fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 $as_echo "$lt_cv_ld_force_load" >&6; } case $host_os in rhapsody* | darwin1.[012]) _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;; darwin1.*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; darwin*) # darwin 5.x on # if running on 10.5 or later, the deployment target defaults # to the OS version, if on x86, and 10.4, the deployment # target defaults to 10.4. Don't you love it? case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in 10.0,*86*-darwin8*|10.0,*-darwin[91]*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; 10.[012][,.]*) _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;; 10.*) _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;; esac ;; esac if test yes = "$lt_cv_apple_cc_single_mod"; then _lt_dar_single_mod='$single_module' fi if test yes = "$lt_cv_ld_exported_symbols_list"; then _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym' else _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib' fi if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then _lt_dsymutil='~$DSYMUTIL $lib || :' else _lt_dsymutil= fi ;; esac # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 $as_echo_n "checking how to run the C preprocessor... 
" >&6; } # On Suns, sometimes $CPP names a directory. if test -n "$CPP" && test -d "$CPP"; then CPP= fi if test -z "$CPP"; then if ${ac_cv_prog_CPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CPP needs to be expanded for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" do ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CPP=$CPP fi CPP=$ac_cv_prog_CPP else ac_cv_prog_CPP=$CPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C preprocessor \"$CPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... " >&6; } if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include #include #include int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_header_stdc=yes else ac_cv_header_stdc=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "memchr" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "free" >/dev/null 2>&1; then : else ac_cv_header_stdc=no fi rm -f conftest* fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. if test "$cross_compiling" = yes; then : : else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include #if ((' ' & 0x0FF) == 0x020) # define ISLOWER(c) ('a' <= (c) && (c) <= 'z') # define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) #else # define ISLOWER(c) \ (('a' <= (c) && (c) <= 'i') \ || ('j' <= (c) && (c) <= 'r') \ || ('s' <= (c) && (c) <= 'z')) # define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif #define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { int i; for (i = 0; i < 256; i++) if (XOR (islower (i), ISLOWER (i)) || toupper (i) != TOUPPER (i)) return 2; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : else ac_cv_header_stdc=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then $as_echo "#define STDC_HEADERS 1" >>confdefs.h fi # On IRIX 5.3, sys/types and inttypes.h are conflicting. for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ inttypes.h stdint.h unistd.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_header in dlfcn.h do : ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default " if test "x$ac_cv_header_dlfcn_h" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_DLFCN_H 1 _ACEOF fi done # Set options enable_dlopen=no enable_win32_dll=no # Check whether --enable-shared was given. if test "${enable_shared+set}" = set; then : enableval=$enable_shared; p=${PACKAGE-default} case $enableval in yes) enable_shared=yes ;; no) enable_shared=no ;; *) enable_shared=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_shared=yes fi done IFS=$lt_save_ifs ;; esac else enable_shared=yes fi # Check whether --with-pic was given. if test "${with_pic+set}" = set; then : withval=$with_pic; lt_p=${PACKAGE-default} case $withval in yes|no) pic_mode=$withval ;; *) pic_mode=default # Look at the argument we got. We use all the common list separators. 
lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for lt_pkg in $withval; do IFS=$lt_save_ifs if test "X$lt_pkg" = "X$lt_p"; then pic_mode=yes fi done IFS=$lt_save_ifs ;; esac else pic_mode=default fi # Check whether --enable-fast-install was given. if test "${enable_fast_install+set}" = set; then : enableval=$enable_fast_install; p=${PACKAGE-default} case $enableval in yes) enable_fast_install=yes ;; no) enable_fast_install=no ;; *) enable_fast_install=no # Look at the argument we got. We use all the common list separators. lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR, for pkg in $enableval; do IFS=$lt_save_ifs if test "X$pkg" = "X$p"; then enable_fast_install=yes fi done IFS=$lt_save_ifs ;; esac else enable_fast_install=yes fi shared_archive_member_spec= case $host,$enable_shared in power*-*-aix[5-9]*,yes) { $as_echo "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5 $as_echo_n "checking which variant of shared library versioning to provide... " >&6; } # Check whether --with-aix-soname was given. if test "${with_aix_soname+set}" = set; then : withval=$with_aix_soname; case $withval in aix|svr4|both) ;; *) as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5 ;; esac lt_cv_with_aix_soname=$with_aix_soname else if ${lt_cv_with_aix_soname+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_with_aix_soname=aix fi with_aix_soname=$lt_cv_with_aix_soname fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5 $as_echo "$with_aix_soname" >&6; } if test aix != "$with_aix_soname"; then # For the AIX way of multilib, we name the shared archive member # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o', # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File. # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag, # the AIX toolchain works better with OBJECT_MODE set (default 32). if test 64 = "${OBJECT_MODE-32}"; then shared_archive_member_spec=shr_64 else shared_archive_member_spec=shr fi fi ;; *) with_aix_soname=aix ;; esac # This can be used to rebuild libtool when needed LIBTOOL_DEPS=$ltmain # Always use our own libtool. LIBTOOL='$(SHELL) $(top_builddir)/libtool' test -z "$LN_S" && LN_S="ln -s" if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 $as_echo_n "checking for objdir... " >&6; } if ${lt_cv_objdir+:} false; then : $as_echo_n "(cached) " >&6 else rm -f .libs 2>/dev/null mkdir .libs 2>/dev/null if test -d .libs; then lt_cv_objdir=.libs else # MS-DOS does not allow filenames that begin with a dot. lt_cv_objdir=_libs fi rmdir .libs 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 $as_echo "$lt_cv_objdir" >&6; } objdir=$lt_cv_objdir cat >>confdefs.h <<_ACEOF #define LT_OBJDIR "$lt_cv_objdir/" _ACEOF case $host_os in aix3*) # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi ;; esac # Global variables: ofile=libtool can_build_shared=yes # All known linkers require a '.a' archive for static linking (except MSVC, # which needs '.lib'). 
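# Usage sketch (illustrative, hypothetical invocations): the options parsed
# above are the standard libtool configure knobs, for example
#     ./configure --disable-shared          # build static libraries only
#     ./configure --with-pic                # prefer PIC objects throughout
#     ./configure --enable-fast-install=no
# and, on AIX only, --with-aix-soname=aix|svr4|both.  Per-object build
# products go into the $objdir directory probed above (".libs", or "_libs"
# where a leading dot is not allowed).  The libext assignment just below
# records the '.a' suffix mentioned in the preceding comment.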
libext=a with_gnu_ld=$lt_cv_prog_gnu_ld old_CC=$CC old_CFLAGS=$CFLAGS # Set sane defaults for various variables test -z "$CC" && CC=cc test -z "$LTCC" && LTCC=$CC test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS test -z "$LD" && LD=ld test -z "$ac_objext" && ac_objext=o func_cc_basename $compiler cc_basename=$func_cc_basename_result # Only perform the check for file, if the check method requires it test -z "$MAGIC_CMD" && MAGIC_CMD=file case $deplibs_check_method in file_magic*) if test "$file_magic_cmd" = '$MAGIC_CMD'; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 $as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/${ac_tool_prefix}file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi if test -z "$lt_cv_path_MAGIC_CMD"; then if test -n "$ac_tool_prefix"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 $as_echo_n "checking for file... " >&6; } if ${lt_cv_path_MAGIC_CMD+:} false; then : $as_echo_n "(cached) " >&6 else case $MAGIC_CMD in [\\/*] | ?:[\\/]*) lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path. ;; *) lt_save_MAGIC_CMD=$MAGIC_CMD lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" for ac_dir in $ac_dummy; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/file"; then lt_cv_path_MAGIC_CMD=$ac_dir/"file" if test -n "$file_magic_test_file"; then case $deplibs_check_method in "file_magic "*) file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` MAGIC_CMD=$lt_cv_path_MAGIC_CMD if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | $EGREP "$file_magic_regex" > /dev/null; then : else cat <<_LT_EOF 1>&2 *** Warning: the command libtool uses to detect shared libraries, *** $file_magic_cmd, produces output that libtool cannot recognize. *** The result is that libtool may fail to recognize shared libraries *** as such. 
This will affect the creation of libtool libraries that *** depend on shared libraries, but programs linked with such libtool *** libraries will work regardless of this problem. Nevertheless, you *** may want to report the problem to your system manager and/or to *** bug-libtool@gnu.org _LT_EOF fi ;; esac fi break fi done IFS=$lt_save_ifs MAGIC_CMD=$lt_save_MAGIC_CMD ;; esac fi MAGIC_CMD=$lt_cv_path_MAGIC_CMD if test -n "$MAGIC_CMD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 $as_echo "$MAGIC_CMD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi else MAGIC_CMD=: fi fi fi ;; esac # Use C for the default configuration in the libtool script lt_save_CC=$CC ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # Source file extension for C test sources. ac_ext=c # Object file extension for compiled C test sources. objext=o objext=$objext # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(){return(0);}' # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # Save the default compiler, since it gets overwritten when the other # tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. compiler_DEFAULT=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... if test -n "$compiler"; then lt_prog_compiler_no_builtin_flag= if test yes = "$GCC"; then case $cc_basename in nvcc*) lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; *) lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 $as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; } if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_rtti_exceptions=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-fno-rtti -fno-exceptions" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. 
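# Illustrative sketch of what the sed program below does (assuming the usual
# ac_compile value set earlier): the probe option is spliced into the cached
# compile command right after the last *FLAGS word, so
#     $CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext
# becomes
#     $CC -c $CFLAGS $CPPFLAGS -fno-rtti -fno-exceptions conftest.$ac_ext
# and the flag is recorded as supported only if that compile produces an
# object file with no diagnostics beyond the boilerplate captured earlier.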
lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_rtti_exceptions=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 $as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" else : fi fi lt_prog_compiler_wl= lt_prog_compiler_pic= lt_prog_compiler_static= if test yes = "$GCC"; then lt_prog_compiler_wl='-Wl,' lt_prog_compiler_static='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' fi lt_prog_compiler_pic='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static= ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) # +Z the default ;; *) lt_prog_compiler_pic='-fPIC' ;; esac ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; msdosdjgpp*) # Just because we use GCC doesn't mean we suddenly get shared libraries # on systems that don't support them. lt_prog_compiler_can_build_shared=no enable_shared=no ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. 
lt_prog_compiler_pic='-fPIC -shared' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic=-Kconform_pic fi ;; *) lt_prog_compiler_pic='-fPIC' ;; esac case $cc_basename in nvcc*) # Cuda Compiler Driver 2.2 lt_prog_compiler_wl='-Xlinker ' if test -n "$lt_prog_compiler_pic"; then lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" fi ;; esac else # PORTME Check for flag to pass linker flags through the system compiler. case $host_os in aix*) lt_prog_compiler_wl='-Wl,' if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static='-Bstatic' else lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' fi ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic='-fno-common' case $cc_basename in nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; esac ;; mingw* | cygwin* | pw32* | os2* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). lt_prog_compiler_pic='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static='$wl-static' ;; esac ;; hpux9* | hpux10* | hpux11*) lt_prog_compiler_wl='-Wl,' # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but # not for PA HP-UX. case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic='+Z' ;; esac # Is there a better lt_prog_compiler_static that works with the bundled CC? lt_prog_compiler_static='$wl-a ${wl}archive' ;; irix5* | irix6* | nonstopux*) lt_prog_compiler_wl='-Wl,' # PIC (with -KPIC) is the default. lt_prog_compiler_static='-non_shared' ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in # old Intel for x86_64, which still supported -KPIC. ecc*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-static' ;; # icc used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. icc* | ifort*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; # Lahey Fortran 8.1. lf95*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='--shared' lt_prog_compiler_static='--static' ;; nagfor*) # NAG Fortran compiler lt_prog_compiler_wl='-Wl,-Wl,,' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; tcc*) # Fabrice Bellard et al's Tiny C Compiler lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group compilers (*not* the Pentium gcc compiler, # which looks to be a dead project) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; ccc*) lt_prog_compiler_wl='-Wl,' # All Alpha code is PIC. 
lt_prog_compiler_static='-non_shared' ;; xl* | bgxl* | bgf* | mpixl*) # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-qpic' lt_prog_compiler_static='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) # Sun Fortran 8.3 passes all unrecognized flags to the linker lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='' ;; *Sun\ F* | *Sun*Fortran*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Qoption ld ' ;; *Sun\ C*) # Sun C 5.9 lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' lt_prog_compiler_wl='-Wl,' ;; *Intel*\ [CF]*Compiler*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fPIC' lt_prog_compiler_static='-static' ;; *Portland\ Group*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-fpic' lt_prog_compiler_static='-Bstatic' ;; esac ;; esac ;; newsos6) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; *nto* | *qnx*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic='-fPIC -shared' ;; osf3* | osf4* | osf5*) lt_prog_compiler_wl='-Wl,' # All OSF/1 code is PIC. lt_prog_compiler_static='-non_shared' ;; rdos*) lt_prog_compiler_static='-non_shared' ;; solaris*) lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' case $cc_basename in f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) lt_prog_compiler_wl='-Qoption ld ';; *) lt_prog_compiler_wl='-Wl,';; esac ;; sunos4*) lt_prog_compiler_wl='-Qoption ld ' lt_prog_compiler_pic='-PIC' lt_prog_compiler_static='-Bstatic' ;; sysv4 | sysv4.2uw2* | sysv4.3*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic='-Kconform_pic' lt_prog_compiler_static='-Bstatic' fi ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_pic='-KPIC' lt_prog_compiler_static='-Bstatic' ;; unicos*) lt_prog_compiler_wl='-Wl,' lt_prog_compiler_can_build_shared=no ;; uts4*) lt_prog_compiler_pic='-pic' lt_prog_compiler_static='-Bstatic' ;; *) lt_prog_compiler_can_build_shared=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic= ;; *) lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic=$lt_prog_compiler_pic fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 $as_echo "$lt_cv_prog_compiler_pic" >&6; } lt_prog_compiler_pic=$lt_cv_prog_compiler_pic # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } if ${lt_cv_prog_compiler_pic_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 $as_echo "$lt_cv_prog_compiler_pic_works" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works"; then case $lt_prog_compiler_pic in "" | " "*) ;; *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; esac else lt_prog_compiler_pic= lt_prog_compiler_can_build_shared=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works=yes fi else lt_cv_prog_compiler_static_works=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 $as_echo "$lt_cv_prog_compiler_static_works" >&6; } if test yes = "$lt_cv_prog_compiler_static_works"; then : else lt_prog_compiler_static= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. 
$RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 $as_echo "$lt_cv_prog_compiler_c_o" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } runpath_var= allow_undefined_flag= always_export_symbols=no archive_cmds= archive_expsym_cmds= compiler_needs_object=no enable_shared_with_static_runtimes=no export_dynamic_flag_spec= export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' hardcode_automatic=no hardcode_direct=no hardcode_direct_absolute=no hardcode_libdir_flag_spec= hardcode_libdir_separator= hardcode_minus_L=no hardcode_shlibpath_var=unsupported inherit_rpath=no link_all_deplibs=unknown module_cmds= module_expsym_cmds= old_archive_from_new_cmds= old_archive_from_expsyms_cmds= thread_safe_flag_spec= whole_archive_flag_spec= # include_expsyms should be a list of space-separated symbols to be *always* # included in the symbol list include_expsyms= # exclude_expsyms can be an extended regexp of symbols to exclude # it will be wrapped by ' (' and ')$', so one must not match beginning or # end of line. Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc', # as well as any symbol that contains 'd'. exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out # platforms (ab)use it in PIC code, but their linkers get confused if # the symbol is explicitly referenced. Since portable code cannot # rely on this symbol name, it's probably fine to never include it in # preloaded symbol tables. # Exclude shared library initialization/finalization symbols. extract_expsyms_cmds= case $host_os in cygwin* | mingw* | pw32* | cegcc*) # FIXME: the MSVC++ port hasn't been tested in a loooong time # When not using gcc, we currently assume that we are using # Microsoft Visual C++. if test yes != "$GCC"; then with_gnu_ld=no fi ;; interix*) # we just hope/assume this is gcc and not c89 (= MSVC++) with_gnu_ld=yes ;; openbsd* | bitrig*) with_gnu_ld=no ;; linux* | k*bsd*-gnu | gnu*) link_all_deplibs=no ;; esac ld_shlibs=yes # On some targets, GNU ld is compatible enough with the native linker # that we're better off using the native interface for both. lt_use_gnu_ld_interface=no if test yes = "$with_gnu_ld"; then case $host_os in aix*) # The AIX port of GNU ld has always aspired to compatibility # with the native linker. 
However, as the warning in the GNU ld # block says, versions before 2.19.5* couldn't really create working # shared libraries, regardless of the interface used. case `$LD -v 2>&1` in *\ \(GNU\ Binutils\)\ 2.19.5*) ;; *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; *\ \(GNU\ Binutils\)\ [3-9]*) ;; *) lt_use_gnu_ld_interface=yes ;; esac ;; *) lt_use_gnu_ld_interface=yes ;; esac fi if test yes = "$lt_use_gnu_ld_interface"; then # If archive_cmds runs LD, not CC, wlarc should be empty wlarc='$wl' # Set some defaults for GNU ld with shared library support. These # are reset later if shared libraries are not supported. Putting them # here allows them to be overridden if necessary. runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' export_dynamic_flag_spec='$wl--export-dynamic' # ancient GNU ld didn't support --whole-archive et. al. if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec= fi supports_anon_versioning=no case `$LD -v | $SED -e 's/(^)\+)\s\+//' 2>&1` in *GNU\ gold*) supports_anon_versioning=yes ;; *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... *\ 2.11.*) ;; # other 2.11 versions *) supports_anon_versioning=yes ;; esac # See if GNU ld supports shared libraries. case $host_os in aix[3-9]*) # On AIX/PPC, the GNU linker is very broken if test ia64 != "$host_cpu"; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: the GNU linker, at least up to release 2.19, is reported *** to be unable to reliably create shared libraries on AIX. *** Therefore, libtool is disabling shared libraries support. If you *** really care for shared libraries, you may want to install binutils *** 2.20 or above, or modify your PATH so that a non-GNU linker is found. *** You will then need to restart the configuration process. _LT_EOF fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs=no fi ;; cygwin* | mingw* | pw32* | cegcc*) # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, # as there is no search path for DLLs. 
hardcode_libdir_flag_spec='-L$libdir' export_dynamic_flag_spec='$wl--export-all-symbols' allow_undefined_flag=unsupported always_export_symbols=no enable_shared_with_static_runtimes=yes export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs=no fi ;; haiku*) archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs=yes ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; interix[3-9]*) hardcode_direct=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) tmp_diet=no if test linux-dietlibc = "$host_os"; then case $cc_basename in diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) esac fi if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ && test no = "$tmp_diet" then tmp_addflag=' $pic_flag' tmp_sharedflag='-shared' case $cc_basename,$host_cpu in pgcc*) # Portland Group C compiler whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag' ;; pgf77* | pgf90* | pgf95* | pgfortran*) # Portland Group f77 and f90 compilers whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' tmp_addflag=' $pic_flag -Mnomain' ;; ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 tmp_addflag=' -i_dynamic' ;; efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 tmp_addflag=' -i_dynamic -nofor_main' ;; ifc* | ifort*) # Intel Fortran compiler tmp_addflag=' -nofor_main' ;; lf95*) # Lahey Fortran 8.1 whole_archive_flag_spec= tmp_sharedflag='--shared' ;; nagfor*) # NAGFOR 5.3 tmp_sharedflag='-Wl,-shared' ;; xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) tmp_sharedflag='-qmkshrobj' tmp_addflag= ;; nvcc*) # Cuda Compiler Driver 2.2 whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes ;; esac case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C 5.9 whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object=yes tmp_sharedflag='-G' ;; *Sun\ F*) # Sun Fortran 8.3 tmp_sharedflag='-G' ;; esac archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi case $cc_basename in tcc*) export_dynamic_flag_spec='-rdynamic' ;; xlf* | bgf* | bgxlf* | mpixlf*) # IBM XL Fortran 10.1 on PPC cannot create shared libs itself whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' if test yes = "$supports_anon_versioning"; then 
archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' fi ;; esac else ld_shlibs=no fi ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' wlarc= else archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' fi ;; solaris*) if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: The releases 2.8.* of the GNU linker cannot reliably *** create shared libraries on Solaris systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.9.1 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) case `$LD -v 2>&1` in *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) ld_shlibs=no cat <<_LT_EOF 1>&2 *** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot *** reliably create shared libraries on SCO systems. Therefore, libtool *** is disabling shared libraries support. We urge you to upgrade GNU *** binutils to release 2.16.91.0.3 or newer. Another option is to modify *** your PATH or compiler configuration so that the native linker is *** used, and then restart. _LT_EOF ;; *) # For security reasons, it is highly recommended that you always # use absolute paths for naming shared libraries, and exclude the # DT_RUNPATH tag from executables and libraries. But doing so # requires that you compile everything twice, which is a pain. 
if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac ;; sunos4*) archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' wlarc= hardcode_direct=yes hardcode_shlibpath_var=no ;; *) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' else ld_shlibs=no fi ;; esac if test no = "$ld_shlibs"; then runpath_var= hardcode_libdir_flag_spec= export_dynamic_flag_spec= whole_archive_flag_spec= fi else # PORTME fill in a description of your system's linker (not GNU ld) case $host_os in aix3*) allow_undefined_flag=unsupported always_export_symbols=yes archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' # Note: this linker hardcodes the directories in LIBPATH if there # are no directories specified by -L. hardcode_minus_L=yes if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then # Neither direct hardcoding nor static linking is supported with a # broken collect2. hardcode_direct=unsupported fi ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. 
# For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then aix_use_runtimelinking=yes break fi done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds='' hardcode_direct=yes hardcode_direct_absolute=yes hardcode_libdir_separator=':' link_all_deplibs=yes file_list_spec='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # traditional, no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct=no hardcode_direct_absolute=no ;; esac if test yes = "$GCC"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L=yes hardcode_libdir_flag_spec='-L$libdir' hardcode_libdir_separator= fi ;; esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag="$shared_flag "'$wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to export. 
always_export_symbols=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. allow_undefined_flag='-berok' # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag="-z nodefs" archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath_+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath_"; then lt_cv_aix_libpath_=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath_ fi hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag=' $wl-bernotok' allow_undefined_flag=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. 
whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec='$convenience' fi archive_cmds_need_lc=yes archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared libraries. archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds="$archive_expsym_cmds"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d' fi fi ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds='' ;; m68k) archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes ;; esac ;; bsdi[45]*) export_dynamic_flag_spec=-rdynamic ;; cygwin* | mingw* | pw32* | cegcc*) # When not using gcc, we currently assume that we are using # Microsoft Visual C++. # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. case $cc_basename in cl*) # Native MSVC hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported always_export_symbols=yes file_list_spec='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. 
archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, )='true' enable_shared_with_static_runtimes=yes exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' # Don't use ranlib old_postinstall_cmds='chmod 644 $oldlib' postlink_cmds='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # Assume MSVC wrapper hardcode_libdir_flag_spec=' ' allow_undefined_flag=unsupported # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' # The linker will automatically build a .lib file if we build a DLL. old_archive_from_new_cmds='true' # FIXME: Should let the user specify the lib program. 
old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' enable_shared_with_static_runtimes=yes ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc=no hardcode_direct=no hardcode_automatic=yes hardcode_shlibpath_var=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec='' fi link_all_deplibs=yes allow_undefined_flag=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" else ld_shlibs=no fi ;; dgux*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor # support. Future versions do this automatically, but an explicit c++rt0.o # does not break anything, and helps significantly (at the cost of a little # extra space). freebsd2.2*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; # Unfortunately, older versions of FreeBSD 2 do not have this feature. freebsd2.*) archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; # FreeBSD 3 and greater uses gcc -shared to do shared libraries. freebsd* | dragonfly*) archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; hpux9*) if test yes = "$GCC"; then archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' fi hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. 
hardcode_minus_L=yes export_dynamic_flag_spec='$wl-E' ;; hpux10*) if test yes,no = "$GCC,$with_gnu_ld"; then archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes fi ;; hpux11*) if test yes,no = "$GCC,$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' ;; esac else case $host_cpu in hppa*64*) archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' ;; ia64*) archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' ;; *) # Older versions of the 11.00 compiler do not understand -b yet # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 $as_echo_n "checking if $CC understands -b... " >&6; } if ${lt_cv_prog_compiler__b+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler__b=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -b" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler__b=yes fi else lt_cv_prog_compiler__b=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 $as_echo "$lt_cv_prog_compiler__b" >&6; } if test yes = "$lt_cv_prog_compiler__b"; then archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' fi ;; esac fi if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec='$wl+b $wl$libdir' hardcode_libdir_separator=: case $host_cpu in hppa*64*|ia64*) hardcode_direct=no hardcode_shlibpath_var=no ;; *) hardcode_direct=yes hardcode_direct_absolute=yes export_dynamic_flag_spec='$wl-E' # hardcode_minus_L: Not really in the search PATH, # but as the default location of the library. hardcode_minus_L=yes ;; esac fi ;; irix5* | irix6* | nonstopux*) if test yes = "$GCC"; then archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' # Try to use the -exported_symbol ld option, if it does not # work, assume that -exports_file does not work either and # implicitly export all symbols. 
# This should be the same for all languages, so no per-tag cache variable. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 $as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } if ${lt_cv_irix_exported_symbol+:} false; then : $as_echo_n "(cached) " >&6 else save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int foo (void) { return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : lt_cv_irix_exported_symbol=yes else lt_cv_irix_exported_symbol=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 $as_echo "$lt_cv_irix_exported_symbol" >&6; } if test yes = "$lt_cv_irix_exported_symbol"; then archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib' fi link_all_deplibs=no else archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: inherit_rpath=yes link_all_deplibs=yes ;; linux*) case $cc_basename in tcc*) # Fabrice Bellard et al's Tiny C Compiler ld_shlibs=yes archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; netbsd* | netbsdelf*-gnu) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out else archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF fi hardcode_libdir_flag_spec='-R$libdir' hardcode_direct=yes hardcode_shlibpath_var=no ;; newsos6) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: hardcode_shlibpath_var=no ;; *nto* | *qnx*) ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct=yes hardcode_shlibpath_var=no hardcode_direct_absolute=yes if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols' hardcode_libdir_flag_spec='$wl-rpath,$libdir' export_dynamic_flag_spec='$wl-E' else archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' hardcode_libdir_flag_spec='$wl-rpath,$libdir' fi else ld_shlibs=no fi ;; os2*) hardcode_libdir_flag_spec='-L$libdir' hardcode_minus_L=yes allow_undefined_flag=unsupported shrext_cmds=.dll archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ 
$ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes=yes ;; osf3*) if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' fi archive_cmds_need_lc='no' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' hardcode_libdir_separator=: ;; osf4* | osf5*) # as osf3* with the addition of -msym flag if test yes = "$GCC"; then allow_undefined_flag=' $wl-expect_unresolved $wl\*' archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec='$wl-rpath $wl$libdir' else allow_undefined_flag=' -expect_unresolved \*' archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp' # Both c and cxx compiler support -rpath directly hardcode_libdir_flag_spec='-rpath $libdir' fi archive_cmds_need_lc='no' hardcode_libdir_separator=: ;; solaris*) no_undefined_flag=' -z defs' if test yes = "$GCC"; then wlarc='$wl' archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' else case `$CC -V 2>&1` in *"Compilers 5.0"*) wlarc='' 
archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' ;; *) wlarc='$wl' archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' ;; esac fi hardcode_libdir_flag_spec='-R$libdir' hardcode_shlibpath_var=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. GCC discards it without '$wl', # but is careful enough not to reorder. # Supported since Solaris 2.6 (maybe 2.5.1?) if test yes = "$GCC"; then whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' else whole_archive_flag_spec='-z allextract$convenience -z defaultextract' fi ;; esac link_all_deplibs=yes ;; sunos4*) if test sequent = "$host_vendor"; then # Use $CC to link under sequent, because it throws in some extra .o # files that make .init and .fini sections work. archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' fi hardcode_libdir_flag_spec='-L$libdir' hardcode_direct=yes hardcode_minus_L=yes hardcode_shlibpath_var=no ;; sysv4) case $host_vendor in sni) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=yes # is this really true??? ;; siemens) ## LD is ld it makes a PLAMLIB ## CC just makes a GrossModule. archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' reload_cmds='$CC -r -o $output$reload_objs' hardcode_direct=no ;; motorola) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_direct=no #Motorola manual says yes, but my tests say they lie ;; esac runpath_var='LD_RUN_PATH' hardcode_shlibpath_var=no ;; sysv4.3*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no export_dynamic_flag_spec='-Bexport' ;; sysv4*MP*) if test -d /usr/nec; then archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_shlibpath_var=no runpath_var=LD_RUN_PATH hardcode_runpath_var=yes ld_shlibs=yes fi ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag='$wl-z,text' archive_cmds_need_lc=no hardcode_shlibpath_var=no runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. 
If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag='$wl-z,text' allow_undefined_flag='$wl-z,nodefs' archive_cmds_need_lc=no hardcode_shlibpath_var=no hardcode_libdir_flag_spec='$wl-R,$libdir' hardcode_libdir_separator=':' link_all_deplibs=yes export_dynamic_flag_spec='$wl-Bexport' runpath_var='LD_RUN_PATH' if test yes = "$GCC"; then archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' else archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' fi ;; uts4*) archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' hardcode_libdir_flag_spec='-L$libdir' hardcode_shlibpath_var=no ;; *) ld_shlibs=no ;; esac if test sni = "$host_vendor"; then case $host in sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) export_dynamic_flag_spec='$wl-Blargedynsym' ;; esac fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 $as_echo "$ld_shlibs" >&6; } test no = "$ld_shlibs" && can_build_shared=no with_gnu_ld=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc" in x|xyes) # Assume -lc should be added archive_cmds_need_lc=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl pic_flag=$lt_prog_compiler_pic compiler_flags=-v linker_flags=-v verstring= output_objdir=. libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag allow_undefined_flag= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc=no else lt_cv_archive_cmds_need_lc=yes fi allow_undefined_flag=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 $as_echo "$lt_cv_archive_cmds_need_lc" >&6; } archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... 
" >&6; } if test yes = "$GCC"; then case $host_os in darwin*) lt_awk_arg='/^libraries:/,/LR/' ;; *) lt_awk_arg='/^libraries:/' ;; esac case $host_os in mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;; *) lt_sed_strip_eq='s|=/|/|g' ;; esac lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` case $lt_search_path_spec in *\;*) # if the path contains ";" then we assume it to be the separator # otherwise default to the standard path separator (i.e. ":") - it is # assumed that no part of a normal pathname contains ";" but that should # okay in the real world where ";" in dirpaths is itself problematic. lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` ;; *) lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` ;; esac # Ok, now we have the path, separated by spaces, we can step through it # and add multilib dir if necessary... lt_tmp_lt_search_path_spec= lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` # ...but if some path component already ends with the multilib dir we assume # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer). case "$lt_multi_os_dir; $lt_search_path_spec " in "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*) lt_multi_os_dir= ;; esac for lt_sys_path in $lt_search_path_spec; do if test -d "$lt_sys_path$lt_multi_os_dir"; then lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir" elif test -n "$lt_multi_os_dir"; then test -d "$lt_sys_path" && \ lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" fi done lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' BEGIN {RS = " "; FS = "/|\n";} { lt_foo = ""; lt_count = 0; for (lt_i = NF; lt_i > 0; lt_i--) { if ($lt_i != "" && $lt_i != ".") { if ($lt_i == "..") { lt_count++; } else { if (lt_count == 0) { lt_foo = "/" $lt_i lt_foo; } else { lt_count--; } } } } if (lt_foo != "") { lt_freq[lt_foo]++; } if (lt_freq[lt_foo] == 1) { print lt_foo; } }'` # AWK program above erroneously prepends '/' to C:/dos/paths # for these hosts. case $host_os in mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ $SED 's|/\([A-Za-z]:\)|\1|g'` ;; esac sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` else sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" fi library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. 
soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. # To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. 
shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. 
$dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . 
and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec='-L$libdir' ;; # This must be glibc/ELF. 
linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Ideally, we could use ldconfig to report *all* directores which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. 
dynamic_linker='GNU/Linux ld.so' ;; netbsdelf*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='NetBSD ld.elf_so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. 
version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action= if test -n "$hardcode_libdir_flag_spec" || test -n "$runpath_var" || test yes = "$hardcode_automatic"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" && test no != "$hardcode_minus_L"; then # Linking always hardcodes the temporary library directory. hardcode_action=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. hardcode_action=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 $as_echo "$hardcode_action" >&6; } if test relink = "$hardcode_action" || test yes = "$inherit_rpath"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi if test yes != "$enable_dlopen"; then enable_dlopen=unknown enable_dlopen_self=unknown enable_dlopen_self_static=unknown else lt_cv_dlopen=no lt_cv_dlopen_libs= case $host_os in beos*) lt_cv_dlopen=load_add_on lt_cv_dlopen_libs= lt_cv_dlopen_self=yes ;; mingw* | pw32* | cegcc*) lt_cv_dlopen=LoadLibrary lt_cv_dlopen_libs= ;; cygwin*) lt_cv_dlopen=dlopen lt_cv_dlopen_libs= ;; darwin*) # if libdl is installed we need to link against it { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... 
" >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else lt_cv_dlopen=dyld lt_cv_dlopen_libs= lt_cv_dlopen_self=yes fi ;; tpf*) # Don't try to run any link tests for TPF. We know it's impossible # because TPF is a cross-compiler, and we know how we open DSOs. lt_cv_dlopen=dlopen lt_cv_dlopen_libs= lt_cv_dlopen_self=no ;; *) ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" if test "x$ac_cv_func_shl_load" = xyes; then : lt_cv_dlopen=shl_load else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 $as_echo_n "checking for shl_load in -ldld... " >&6; } if ${ac_cv_lib_dld_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shl_load (); int main () { return shl_load (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_shl_load=yes else ac_cv_lib_dld_shl_load=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 $as_echo "$ac_cv_lib_dld_shl_load" >&6; } if test "x$ac_cv_lib_dld_shl_load" = xyes; then : lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld else ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" if test "x$ac_cv_func_dlopen" = xyes; then : lt_cv_dlopen=dlopen else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 $as_echo_n "checking for dlopen in -ldl... " >&6; } if ${ac_cv_lib_dl_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dl_dlopen=yes else ac_cv_lib_dl_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 $as_echo "$ac_cv_lib_dl_dlopen" >&6; } if test "x$ac_cv_lib_dl_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 $as_echo_n "checking for dlopen in -lsvld... " >&6; } if ${ac_cv_lib_svld_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsvld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_svld_dlopen=yes else ac_cv_lib_svld_dlopen=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 $as_echo "$ac_cv_lib_svld_dlopen" >&6; } if test "x$ac_cv_lib_svld_dlopen" = xyes; then : lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 $as_echo_n "checking for dld_link in -ldld... " >&6; } if ${ac_cv_lib_dld_dld_link+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldld $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dld_link (); int main () { return dld_link (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dld_dld_link=yes else ac_cv_lib_dld_dld_link=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5 $as_echo "$ac_cv_lib_dld_dld_link" >&6; } if test "x$ac_cv_lib_dld_dld_link" = xyes; then : lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld fi fi fi fi fi fi ;; esac if test no = "$lt_cv_dlopen"; then enable_dlopen=no else enable_dlopen=yes fi case $lt_cv_dlopen in dlopen) save_CPPFLAGS=$CPPFLAGS test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" save_LDFLAGS=$LDFLAGS wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" save_LIBS=$LIBS LIBS="$lt_cv_dlopen_libs $LIBS" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5 $as_echo_n "checking whether a program can dlopen itself... 
&6; } if ${lt_cv_dlopen_self">
" >&6; } if ${lt_cv_dlopen_self+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include <dlfcn.h> #endif #include <stdio.h> #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform. */ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;; esac else : # compilation failed lt_cv_dlopen_self=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5 $as_echo "$lt_cv_dlopen_self" >&6; } if test yes = "$lt_cv_dlopen_self"; then wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5 $as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; } if ${lt_cv_dlopen_self_static+:} false; then : $as_echo_n "(cached) " >&6 else if test yes = "$cross_compiling"; then : lt_cv_dlopen_self_static=cross else lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 lt_status=$lt_dlunknown cat > conftest.$ac_ext <<_LT_EOF #line $LINENO "configure" #include "confdefs.h" #if HAVE_DLFCN_H #include <dlfcn.h> #endif #include <stdio.h> #ifdef RTLD_GLOBAL # define LT_DLGLOBAL RTLD_GLOBAL #else # ifdef DL_GLOBAL # define LT_DLGLOBAL DL_GLOBAL # else # define LT_DLGLOBAL 0 # endif #endif /* We may have to define LT_DLLAZY_OR_NOW in the command line if we find out it does not work in some platform.
*/ #ifndef LT_DLLAZY_OR_NOW # ifdef RTLD_LAZY # define LT_DLLAZY_OR_NOW RTLD_LAZY # else # ifdef DL_LAZY # define LT_DLLAZY_OR_NOW DL_LAZY # else # ifdef RTLD_NOW # define LT_DLLAZY_OR_NOW RTLD_NOW # else # ifdef DL_NOW # define LT_DLLAZY_OR_NOW DL_NOW # else # define LT_DLLAZY_OR_NOW 0 # endif # endif # endif # endif #endif /* When -fvisibility=hidden is used, assume the code has been annotated correspondingly for the symbols needed. */ #if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) int fnord () __attribute__((visibility("default"))); #endif int fnord () { return 42; } int main () { void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); int status = $lt_dlunknown; if (self) { if (dlsym (self,"fnord")) status = $lt_dlno_uscore; else { if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; else puts (dlerror ()); } /* dlclose (self); */ } else puts (dlerror ()); return status; } _LT_EOF if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 (eval $ac_link) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then (./conftest; exit; ) >&5 2>/dev/null lt_status=$? case x$lt_status in x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; esac else : # compilation failed lt_cv_dlopen_self_static=no fi fi rm -fr conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 $as_echo "$lt_cv_dlopen_self_static" >&6; } fi CPPFLAGS=$save_CPPFLAGS LDFLAGS=$save_LDFLAGS LIBS=$save_LIBS ;; esac case $lt_cv_dlopen_self in yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; *) enable_dlopen_self=unknown ;; esac case $lt_cv_dlopen_self_static in yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; *) enable_dlopen_self_static=unknown ;; esac fi striplib= old_striplib= { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 $as_echo_n "checking whether stripping libraries is possible... " >&6; } if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" test -z "$striplib" && striplib="$STRIP --strip-unneeded" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else # FIXME - insert some real tests, host_os isn't really good enough case $host_os in darwin*) if test -n "$STRIP"; then striplib="$STRIP -x" old_striplib="$STRIP -S" { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } ;; esac fi # Report what library types will actually be built { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 $as_echo_n "checking if libtool supports shared libraries... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 $as_echo "$can_build_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 $as_echo_n "checking whether to build shared libraries... " >&6; } test no = "$can_build_shared" && enable_shared=no # On AIX, shared libraries and static libraries use the same namespace, and # are all built from PIC. 
case $host_os in aix3*) test yes = "$enable_shared" && enable_static=no if test -n "$RANLIB"; then archive_cmds="$archive_cmds~\$RANLIB \$lib" postinstall_cmds='$RANLIB $lib' fi ;; aix[4-9]*) if test ia64 != "$host_cpu"; then case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in yes,aix,yes) ;; # shared object as lib.so file only yes,svr4,*) ;; # shared object as lib.so archive member only yes,*) enable_static=no ;; # shared object in lib.a archive as well esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 $as_echo "$enable_shared" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 $as_echo_n "checking whether to build static libraries... " >&6; } # Make sure either enable_shared or enable_static is yes. test yes = "$enable_shared" || enable_static=yes { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 $as_echo "$enable_static" >&6; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu CC=$lt_save_CC ac_config_commands="$ac_config_commands libtool" # Only expand once: # iconv.m4 serial 11 (gettext-0.18.1) if test "X$prefix" = "XNONE"; then acl_final_prefix="$ac_default_prefix" else acl_final_prefix="$prefix" fi if test "X$exec_prefix" = "XNONE"; then acl_final_exec_prefix='${prefix}' else acl_final_exec_prefix="$exec_prefix" fi acl_save_prefix="$prefix" prefix="$acl_final_prefix" eval acl_final_exec_prefix=\"$acl_final_exec_prefix\" prefix="$acl_save_prefix" # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes else with_gnu_ld=no fi # Prepare PATH_SEPARATOR. # The user is always right. if test "${PATH_SEPARATOR+set}" != set; then # Determine PATH_SEPARATOR by trying to find /bin/sh in a PATH which # contains only /bin. Note that ksh looks also at the FPATH variable, # so we have to set that as well for the test. PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 \ || PATH_SEPARATOR=';' } fi ac_prog=ld if test "$GCC" = yes; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`echo "$ac_prog"| sed 's%\\\\%/%g'` while echo "$ac_prog" | grep "$re_direlt" > /dev/null 2>&1; do ac_prog=`echo $ac_prog| sed "s%$re_direlt%/%"` done test -z "$LD" && LD="$ac_prog" ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test "$with_gnu_ld" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... 
&6; } fi if ${acl_cv_path_LD">
" >&6; } fi if ${acl_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then acl_save_ifs="$IFS"; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS="$acl_save_ifs" test -z "$ac_dir" && ac_dir=. if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then acl_cv_path_LD="$ac_dir/$ac_prog" # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$acl_cv_path_LD" -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) test "$with_gnu_ld" != no && break ;; *) test "$with_gnu_ld" != yes && break ;; esac fi done IFS="$acl_save_ifs" else acl_cv_path_LD="$LD" # Let the user override the test with a path. fi fi LD="$acl_cv_path_LD" if test -n "$LD"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${acl_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 </dev/null` in *GNU* | *'with BFD'*) acl_cv_prog_gnu_ld=yes ;; *) acl_cv_prog_gnu_ld=no ;; esac fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_prog_gnu_ld" >&5 $as_echo "$acl_cv_prog_gnu_ld" >&6; } with_gnu_ld=$acl_cv_prog_gnu_ld { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shared library run path origin" >&5 $as_echo_n "checking for shared library run path origin... " >&6; } if ${acl_cv_rpath+:} false; then : $as_echo_n "(cached) " >&6 else CC="$CC" GCC="$GCC" LDFLAGS="$LDFLAGS" LD="$LD" with_gnu_ld="$with_gnu_ld" \ ${CONFIG_SHELL-/bin/sh} "$ac_aux_dir/config.rpath" "$host" > conftest.sh . ./conftest.sh rm -f ./conftest.sh acl_cv_rpath=done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acl_cv_rpath" >&5 $as_echo "$acl_cv_rpath" >&6; } wl="$acl_cv_wl" acl_libext="$acl_cv_libext" acl_shlibext="$acl_cv_shlibext" acl_libname_spec="$acl_cv_libname_spec" acl_library_names_spec="$acl_cv_library_names_spec" acl_hardcode_libdir_flag_spec="$acl_cv_hardcode_libdir_flag_spec" acl_hardcode_libdir_separator="$acl_cv_hardcode_libdir_separator" acl_hardcode_direct="$acl_cv_hardcode_direct" acl_hardcode_minus_L="$acl_cv_hardcode_minus_L" # Check whether --enable-rpath was given. if test "${enable_rpath+set}" = set; then : enableval=$enable_rpath; : else enable_rpath=yes fi acl_libdirstem=lib acl_libdirstem2= case "$host_os" in solaris*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for 64-bit host" >&5 $as_echo_n "checking for 64-bit host... " >&6; } if ${gl_cv_solaris_64bit+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef _LP64 sixtyfour bits #endif _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | $EGREP "sixtyfour bits" >/dev/null 2>&1; then : gl_cv_solaris_64bit=yes else gl_cv_solaris_64bit=no fi rm -f conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $gl_cv_solaris_64bit" >&5 $as_echo "$gl_cv_solaris_64bit" >&6; } if test $gl_cv_solaris_64bit = yes; then acl_libdirstem=lib/64 case "$host_cpu" in sparc*) acl_libdirstem2=lib/sparcv9 ;; i*86 | x86_64) acl_libdirstem2=lib/amd64 ;; esac fi ;; *) searchpath=`(LC_ALL=C $CC -print-search-dirs) 2>/dev/null | sed -n -e 's,^libraries: ,,p' | sed -e 's,^=,,'` if test -n "$searchpath"; then acl_save_IFS="${IFS= }"; IFS=":" for searchdir in $searchpath; do if test -d "$searchdir"; then case "$searchdir" in */lib64/ | */lib64 ) acl_libdirstem=lib64 ;; */../ | */.. ) # Better ignore directories of this form. They are misleading.
;; *) searchdir=`cd "$searchdir" && pwd` case "$searchdir" in */lib64 ) acl_libdirstem=lib64 ;; esac ;; esac fi done IFS="$acl_save_IFS" fi ;; esac test -n "$acl_libdirstem2" || acl_libdirstem2="$acl_libdirstem" use_additional=yes acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" # Check whether --with-libiconv-prefix was given. if test "${with_libiconv_prefix+set}" = set; then : withval=$with_libiconv_prefix; if test "X$withval" = "Xno"; then use_additional=no else if test "X$withval" = "X"; then acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval additional_includedir=\"$includedir\" eval additional_libdir=\"$libdir\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" else additional_includedir="$withval/include" additional_libdir="$withval/$acl_libdirstem" if test "$acl_libdirstem2" != "$acl_libdirstem" \ && ! test -d "$withval/$acl_libdirstem"; then additional_libdir="$withval/$acl_libdirstem2" fi fi fi fi LIBICONV= LTLIBICONV= INCICONV= LIBICONV_PREFIX= HAVE_LIBICONV= rpathdirs= ltrpathdirs= names_already_handled= names_next_round='iconv ' while test -n "$names_next_round"; do names_this_round="$names_next_round" names_next_round= for name in $names_this_round; do already_handled= for n in $names_already_handled; do if test "$n" = "$name"; then already_handled=yes break fi done if test -z "$already_handled"; then names_already_handled="$names_already_handled $name" uppername=`echo "$name" | sed -e 'y|abcdefghijklmnopqrstuvwxyz./+-|ABCDEFGHIJKLMNOPQRSTUVWXYZ____|'` eval value=\"\$HAVE_LIB$uppername\" if test -n "$value"; then if test "$value" = yes; then eval value=\"\$LIB$uppername\" test -z "$value" || LIBICONV="${LIBICONV}${LIBICONV:+ }$value" eval value=\"\$LTLIB$uppername\" test -z "$value" || LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$value" else : fi else found_dir= found_la= found_so= found_a= eval libname=\"$acl_libname_spec\" # typically: libname=lib$name if test -n "$acl_shlibext"; then shrext=".$acl_shlibext" # typically: shrext=.so else shrext= fi if test $use_additional = yes; then dir="$additional_libdir" if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' 
-n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi fi if test "X$found_dir" = "X"; then for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" case "$x" in -L*) dir=`echo "X$x" | sed -e 's/^X-L//'` if test -n "$acl_shlibext"; then if test -f "$dir/$libname$shrext"; then found_dir="$dir" found_so="$dir/$libname$shrext" else if test "$acl_library_names_spec" = '$libname$shrext$versuffix'; then ver=`(cd "$dir" && \ for f in "$libname$shrext".*; do echo "$f"; done \ | sed -e "s,^$libname$shrext\\\\.,," \ | sort -t '.' -n -r -k1,1 -k2,2 -k3,3 -k4,4 -k5,5 \ | sed 1q ) 2>/dev/null` if test -n "$ver" && test -f "$dir/$libname$shrext.$ver"; then found_dir="$dir" found_so="$dir/$libname$shrext.$ver" fi else eval library_names=\"$acl_library_names_spec\" for f in $library_names; do if test -f "$dir/$f"; then found_dir="$dir" found_so="$dir/$f" break fi done fi fi fi if test "X$found_dir" = "X"; then if test -f "$dir/$libname.$acl_libext"; then found_dir="$dir" found_a="$dir/$libname.$acl_libext" fi fi if test "X$found_dir" != "X"; then if test -f "$dir/$libname.la"; then found_la="$dir/$libname.la" fi fi ;; esac if test "X$found_dir" != "X"; then break fi done fi if test "X$found_dir" != "X"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$found_dir -l$name" if test "X$found_so" != "X"; then if test "$enable_rpath" = no \ || test "X$found_dir" = "X/usr/$acl_libdirstem" \ || test "X$found_dir" = "X/usr/$acl_libdirstem2"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else haveit= for x in $ltrpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $found_dir" fi if test "$acl_hardcode_direct" = yes; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else if test -n "$acl_hardcode_libdir_flag_spec" && test "$acl_hardcode_minus_L" = no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" haveit= for x in $rpathdirs; do if test "X$x" = "X$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $found_dir" fi else haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$found_dir"; then haveit=yes break fi done if test -z "$haveit"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir" fi if test "$acl_hardcode_minus_L" != no; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_so" else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" fi fi fi fi else if test "X$found_a" != "X"; then LIBICONV="${LIBICONV}${LIBICONV:+ }$found_a" else LIBICONV="${LIBICONV}${LIBICONV:+ }-L$found_dir -l$name" fi fi additional_includedir= case "$found_dir" in */$acl_libdirstem | */$acl_libdirstem/) basedir=`echo 
"X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem/"'*$,,'` if test "$name" = 'iconv'; then LIBICONV_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; */$acl_libdirstem2 | */$acl_libdirstem2/) basedir=`echo "X$found_dir" | sed -e 's,^X,,' -e "s,/$acl_libdirstem2/"'*$,,'` if test "$name" = 'iconv'; then LIBICONV_PREFIX="$basedir" fi additional_includedir="$basedir/include" ;; esac if test "X$additional_includedir" != "X"; then if test "X$additional_includedir" != "X/usr/include"; then haveit= if test "X$additional_includedir" = "X/usr/local/include"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then for x in $CPPFLAGS $INCICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-I$additional_includedir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_includedir"; then INCICONV="${INCICONV}${INCICONV:+ }-I$additional_includedir" fi fi fi fi fi if test -n "$found_la"; then save_libdir="$libdir" case "$found_la" in */* | *\\*) . "$found_la" ;; *) . "./$found_la" ;; esac libdir="$save_libdir" for dep in $dependency_libs; do case "$dep" in -L*) additional_libdir=`echo "X$dep" | sed -e 's/^X-L//'` if test "X$additional_libdir" != "X/usr/$acl_libdirstem" \ && test "X$additional_libdir" != "X/usr/$acl_libdirstem2"; then haveit= if test "X$additional_libdir" = "X/usr/local/$acl_libdirstem" \ || test "X$additional_libdir" = "X/usr/local/$acl_libdirstem2"; then if test -n "$GCC"; then case $host_os in linux* | gnu* | k*bsd*-gnu) haveit=yes;; esac fi fi if test -z "$haveit"; then haveit= for x in $LDFLAGS $LIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LIBICONV="${LIBICONV}${LIBICONV:+ }-L$additional_libdir" fi fi haveit= for x in $LDFLAGS $LTLIBICONV; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X-L$additional_libdir"; then haveit=yes break fi done if test -z "$haveit"; then if test -d "$additional_libdir"; then LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-L$additional_libdir" fi fi fi fi ;; -R*) dir=`echo "X$dep" | sed -e 's/^X-R//'` if test "$enable_rpath" != no; then haveit= for x in $rpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then rpathdirs="$rpathdirs $dir" fi haveit= for x in $ltrpathdirs; do if test "X$x" = "X$dir"; then haveit=yes break fi done if test -z "$haveit"; then ltrpathdirs="$ltrpathdirs $dir" fi fi ;; -l*) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's/^X-l//'` ;; *.la) names_next_round="$names_next_round "`echo "X$dep" | sed -e 's,^X.*/,,' -e 's,^lib,,' -e 's,\.la$,,'` ;; *) LIBICONV="${LIBICONV}${LIBICONV:+ }$dep" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }$dep" ;; esac done fi else LIBICONV="${LIBICONV}${LIBICONV:+ }-l$name" LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-l$name" fi fi fi done done if test "X$rpathdirs" != "X"; then if test -n 
"$acl_hardcode_libdir_separator"; then alldirs= for found_dir in $rpathdirs; do alldirs="${alldirs}${alldirs:+$acl_hardcode_libdir_separator}$found_dir" done acl_save_libdir="$libdir" libdir="$alldirs" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" else for found_dir in $rpathdirs; do acl_save_libdir="$libdir" libdir="$found_dir" eval flag=\"$acl_hardcode_libdir_flag_spec\" libdir="$acl_save_libdir" LIBICONV="${LIBICONV}${LIBICONV:+ }$flag" done fi fi if test "X$ltrpathdirs" != "X"; then for found_dir in $ltrpathdirs; do LTLIBICONV="${LTLIBICONV}${LTLIBICONV:+ }-R$found_dir" done fi am_save_CPPFLAGS="$CPPFLAGS" for element in $INCICONV; do haveit= for x in $CPPFLAGS; do acl_save_prefix="$prefix" prefix="$acl_final_prefix" acl_save_exec_prefix="$exec_prefix" exec_prefix="$acl_final_exec_prefix" eval x=\"$x\" exec_prefix="$acl_save_exec_prefix" prefix="$acl_save_prefix" if test "X$x" = "X$element"; then haveit=yes break fi done if test -z "$haveit"; then CPPFLAGS="${CPPFLAGS}${CPPFLAGS:+ }$element" fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv" >&5 $as_echo_n "checking for iconv... " >&6; } if ${am_cv_func_iconv+:} false; then : $as_echo_n "(cached) " >&6 else am_cv_func_iconv="no, consider installing GNU libiconv" am_cv_lib_iconv=no cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$am_cv_func_iconv" != yes; then am_save_LIBS="$LIBS" LIBS="$LIBS $LIBICONV" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { iconv_t cd = iconv_open("",""); iconv(cd,NULL,NULL,NULL,NULL); iconv_close(cd); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : am_cv_lib_iconv=yes am_cv_func_iconv=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$am_save_LIBS" fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv" >&5 $as_echo "$am_cv_func_iconv" >&6; } if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for working iconv" >&5 $as_echo_n "checking for working iconv... " >&6; } if ${am_cv_func_iconv_works+:} false; then : $as_echo_n "(cached) " >&6 else am_save_LIBS="$LIBS" if test $am_cv_lib_iconv = yes; then LIBS="$LIBS $LIBICONV" fi if test "$cross_compiling" = yes; then : case "$host_os" in aix* | hpux*) am_cv_func_iconv_works="guessing no" ;; *) am_cv_func_iconv_works="guessing yes" ;; esac else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { /* Test against AIX 5.1 bug: Failures are not distinguishable from successful returns. */ { iconv_t cd_utf8_to_88591 = iconv_open ("ISO8859-1", "UTF-8"); if (cd_utf8_to_88591 != (iconv_t)(-1)) { static const char input[] = "\342\202\254"; /* EURO SIGN */ char buf[10]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_utf8_to_88591, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) return 1; } } /* Test against Solaris 10 bug: Failures are not distinguishable from successful returns. 
*/ { iconv_t cd_ascii_to_88591 = iconv_open ("ISO8859-1", "646"); if (cd_ascii_to_88591 != (iconv_t)(-1)) { static const char input[] = "\263"; char buf[10]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_ascii_to_88591, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if (res == 0) return 1; } } #if 0 /* This bug could be worked around by the caller. */ /* Test against HP-UX 11.11 bug: Positive return value instead of 0. */ { iconv_t cd_88591_to_utf8 = iconv_open ("utf8", "iso88591"); if (cd_88591_to_utf8 != (iconv_t)(-1)) { static const char input[] = "\304rger mit b\366sen B\374bchen ohne Augenma\337"; char buf[50]; const char *inptr = input; size_t inbytesleft = strlen (input); char *outptr = buf; size_t outbytesleft = sizeof (buf); size_t res = iconv (cd_88591_to_utf8, (char **) &inptr, &inbytesleft, &outptr, &outbytesleft); if ((int)res > 0) return 1; } } #endif /* Test against HP-UX 11.11 bug: No converter from EUC-JP to UTF-8 is provided. */ if (/* Try standardized names. */ iconv_open ("UTF-8", "EUC-JP") == (iconv_t)(-1) /* Try IRIX, OSF/1 names. */ && iconv_open ("UTF-8", "eucJP") == (iconv_t)(-1) /* Try AIX names. */ && iconv_open ("UTF-8", "IBM-eucJP") == (iconv_t)(-1) /* Try HP-UX names. */ && iconv_open ("utf8", "eucJP") == (iconv_t)(-1)) return 1; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : am_cv_func_iconv_works=yes else am_cv_func_iconv_works=no fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi LIBS="$am_save_LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_func_iconv_works" >&5 $as_echo "$am_cv_func_iconv_works" >&6; } case "$am_cv_func_iconv_works" in *no) am_func_iconv=no am_cv_lib_iconv=no ;; *) am_func_iconv=yes ;; esac else am_func_iconv=no am_cv_lib_iconv=no fi if test "$am_func_iconv" = yes; then $as_echo "#define HAVE_ICONV 1" >>confdefs.h fi if test "$am_cv_lib_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to link with libiconv" >&5 $as_echo_n "checking how to link with libiconv... " >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIBICONV" >&5 $as_echo "$LIBICONV" >&6; } else CPPFLAGS="$am_save_CPPFLAGS" LIBICONV= LTLIBICONV= fi if test "$am_cv_func_iconv" = yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for iconv declaration" >&5 $as_echo_n "checking for iconv declaration... " >&6; } if ${am_cv_proto_iconv+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include #include extern #ifdef __cplusplus "C" #endif #if defined(__STDC__) || defined(__cplusplus) size_t iconv (iconv_t cd, char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft); #else size_t iconv(); #endif int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : am_cv_proto_iconv_arg1="" else am_cv_proto_iconv_arg1="const" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext am_cv_proto_iconv="extern size_t iconv (iconv_t cd, $am_cv_proto_iconv_arg1 char * *inbuf, size_t *inbytesleft, char * *outbuf, size_t *outbytesleft);" fi am_cv_proto_iconv=`echo "$am_cv_proto_iconv" | tr -s ' ' | sed -e 's/( /(/'` { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_proto_iconv" >&5 $as_echo " $am_cv_proto_iconv" >&6; } cat >>confdefs.h <<_ACEOF #define ICONV_CONST $am_cv_proto_iconv_arg1 _ACEOF fi INCICONV=$CPPFLAGS LIBICONV=$LTLIBICONV ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu if test -z "$CXX"; then if test -n "$CCC"; then CXX=$CCC else if test -n "$ac_tool_prefix"; then for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$CXX"; then ac_cv_prog_CXX="$CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi CXX=$ac_cv_prog_CXX if test -n "$CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 $as_echo "$CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$CXX" && break done fi if test -z "$CXX"; then ac_ct_CXX=$CXX for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_ac_ct_CXX+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CXX"; then ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_ac_ct_CXX="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi ac_ct_CXX=$ac_cv_prog_ac_ct_CXX if test -n "$ac_ct_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 $as_echo "$ac_ct_CXX" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$ac_ct_CXX" && break done if test "x$ac_ct_CXX" = x; then CXX="g++" else case $cross_compiling:$ac_tool_warned in yes:) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac CXX=$ac_ct_CXX fi fi fi fi # Provide some information about the compiler. $as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 set X $ac_compile ac_compiler=$2 for ac_option in --version -v -V -qversion; do { { ac_try="$ac_compiler $ac_option >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" $as_echo "$ac_try_echo"; } >&5 (eval "$ac_compiler $ac_option >&5") 2>conftest.err ac_status=$? if test -s conftest.err; then sed '10a\ ... rest of stderr output deleted ... 10q' conftest.err >conftest.er1 cat conftest.er1 >&5 fi rm -f conftest.er1 conftest.err $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } done { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 $as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } if ${ac_cv_cxx_compiler_gnu+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { #ifndef __GNUC__ choke me #endif ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_compiler_gnu=yes else ac_compiler_gnu=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_cxx_compiler_gnu=$ac_compiler_gnu fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 $as_echo "$ac_cv_cxx_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GXX=yes else GXX= fi ac_test_CXXFLAGS=${CXXFLAGS+set} ac_save_CXXFLAGS=$CXXFLAGS { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 $as_echo_n "checking whether $CXX accepts -g... " >&6; } if ${ac_cv_prog_cxx_g+:} false; then : $as_echo_n "(cached) " >&6 else ac_save_cxx_werror_flag=$ac_cxx_werror_flag ac_cxx_werror_flag=yes ac_cv_prog_cxx_g=no CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes else CXXFLAGS="" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : else ac_cxx_werror_flag=$ac_save_cxx_werror_flag CXXFLAGS="-g" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : ac_cv_prog_cxx_g=yes fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cxx_werror_flag=$ac_save_cxx_werror_flag fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 $as_echo "$ac_cv_prog_cxx_g" >&6; } if test "$ac_test_CXXFLAGS" = set; then CXXFLAGS=$ac_save_CXXFLAGS elif test $ac_cv_prog_cxx_g = yes; then if test "$GXX" = yes; then CXXFLAGS="-g -O2" else CXXFLAGS="-g" fi else if test "$GXX" = yes; then CXXFLAGS="-O2" else CXXFLAGS= fi fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu depcc="$CXX" am_compiler_list= { $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 $as_echo_n "checking dependency style of $depcc... " >&6; } if ${am_cv_CXX_dependencies_compiler_type+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then # We make a subdir and do the tests there. Otherwise we can end up # making bogus files that we don't know about and never remove. For # instance it was reported that on HP-UX the gcc test will end up # making a dummy file named 'D' -- because '-MD' means "put the output # in D". rm -rf conftest.dir mkdir conftest.dir # Copy depcomp to subdir because otherwise we won't find it if we're # using a relative directory. cp "$am_depcomp" conftest.dir cd conftest.dir # We will build objects and dependencies in a subdirectory because # it helps to detect inapplicable dependency modes. For instance # both Tru64's cc and ICC support -MD to output dependencies as a # side effect of compilation, but ICC will put the dependencies in # the current directory while Tru64 will put them in the object # directory. mkdir sub am_cv_CXX_dependencies_compiler_type=none if test "$am_compiler_list" = ""; then am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` fi am__universal=false case " $depcc " in #( *\ -arch\ *\ -arch\ *) am__universal=true ;; esac for depmode in $am_compiler_list; do # Setup a source with many dependencies, because some compilers # like to wrap large dependency lists on column 80 (with \), and # we should not choose a depcomp mode which is confused by this. # # We need to recreate these files for each test, as the compiler may # overwrite some of them when testing with obscure command lines. # This happens at least with the AIX C compiler. : > sub/conftest.c for i in 1 2 3 4 5 6; do echo '#include "conftst'$i'.h"' >> sub/conftest.c # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with # Solaris 10 /bin/sh. echo '/* dummy */' > sub/conftst$i.h done echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf # We check with '-c' and '-o' for the sake of the "dashmstdout" # mode. It turns out that the SunPro C++ compiler does not properly # handle '-M -o', and we need to detect this. Also, some Intel # versions had trouble with output in subdirs. am__obj=sub/conftest.${OBJEXT-o} am__minus_obj="-o $am__obj" case $depmode in gcc) # This depmode causes a compiler race in universal mode. test "$am__universal" = false || continue ;; nosideeffect) # After this tag, mechanisms are not by side-effect, so they'll # only be used when explicitly requested. 
if test "x$enable_dependency_tracking" = xyes; then continue else break fi ;; msvc7 | msvc7msys | msvisualcpp | msvcmsys) # This compiler won't grok '-c -o', but also, the minuso test has # not run yet. These depmodes are late enough in the game, and # so weak that their functioning should not be impacted. am__obj=conftest.${OBJEXT-o} am__minus_obj= ;; none) break ;; esac if depmode=$depmode \ source=sub/conftest.c object=$am__obj \ depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ >/dev/null 2>conftest.err && grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && grep $am__obj sub/conftest.Po > /dev/null 2>&1 && ${MAKE-make} -s -f confmf > /dev/null 2>&1; then # icc doesn't choke on unknown options, it will just issue warnings # or remarks (even with -Werror). So we grep stderr for any message # that says an option was ignored or not supported. # When given -MP, icc 7.0 and 7.1 complain thusly: # icc: Command line warning: ignoring option '-M'; no argument required # The diagnosis changed in icc 8.0: # icc: Command line remark: option '-MP' not supported if (grep 'ignoring option' conftest.err || grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else am_cv_CXX_dependencies_compiler_type=$depmode break fi fi done cd .. rm -rf conftest.dir else am_cv_CXX_dependencies_compiler_type=none fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 $as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type if test "x$enable_dependency_tracking" != xno \ && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then am__fastdepCXX_TRUE= am__fastdepCXX_FALSE='#' else am__fastdepCXX_TRUE='#' am__fastdepCXX_FALSE= fi func_stripname_cnf () { case $2 in .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;; *) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;; esac } # func_stripname_cnf if test -n "$CXX" && ( test no != "$CXX" && ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) || (test g++ != "$CXX"))); then ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 $as_echo_n "checking how to run the C++ preprocessor... " >&6; } if test -z "$CXXCPP"; then if ${ac_cv_prog_CXXCPP+:} false; then : $as_echo_n "(cached) " >&6 else # Double quotes because CXXCPP needs to be expanded for CXXCPP in "$CXX -E" "/lib/cpp" do ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. 
cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : break fi done ac_cv_prog_CXXCPP=$CXXCPP fi CXXCPP=$ac_cv_prog_CXXCPP else ac_cv_prog_CXXCPP=$CXXCPP fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 $as_echo "$CXXCPP" >&6; } ac_preproc_ok=false for ac_cxx_preproc_warn_flag in '' yes do # Use a header file that comes with gcc, so configuring glibc # with a fresh cross-compiler works. # Prefer to if __STDC__ is defined, since # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #ifdef __STDC__ # include #else # include #endif Syntax error _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : else # Broken: fails on valid input. continue fi rm -f conftest.err conftest.i conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_cxx_try_cpp "$LINENO"; then : # Broken: success on invalid input. continue else # Passes both tests. ac_preproc_ok=: break fi rm -f conftest.err conftest.i conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.i conftest.err conftest.$ac_ext if $ac_preproc_ok; then : else { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check See \`config.log' for more details" "$LINENO" 5; } fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu else _lt_caught_CXX_error=yes fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu archive_cmds_need_lc_CXX=no allow_undefined_flag_CXX= always_export_symbols_CXX=no archive_expsym_cmds_CXX= compiler_needs_object_CXX=no export_dynamic_flag_spec_CXX= hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no hardcode_libdir_flag_spec_CXX= hardcode_libdir_separator_CXX= hardcode_minus_L_CXX=no hardcode_shlibpath_var_CXX=unsupported hardcode_automatic_CXX=no inherit_rpath_CXX=no module_cmds_CXX= module_expsym_cmds_CXX= link_all_deplibs_CXX=unknown old_archive_cmds_CXX=$old_archive_cmds reload_flag_CXX=$reload_flag reload_cmds_CXX=$reload_cmds no_undefined_flag_CXX= whole_archive_flag_spec_CXX= enable_shared_with_static_runtimes_CXX=no # Source file extension for C++ test sources. ac_ext=cpp # Object file extension for compiled C++ test sources. objext=o objext_CXX=$objext # No sense in running all these tests if we already determined that # the CXX compiler isn't working. Some variables (like enable_shared) # are currently assumed to apply to all compilers on this platform, # and will be corrupted by setting them based on a non-working compiler. 
if test yes != "$_lt_caught_CXX_error"; then # Code to be used in simple compile tests lt_simple_compile_test_code="int some_variable = 0;" # Code to be used in simple link tests lt_simple_link_test_code='int main(int, char *[]) { return(0); }' # ltmain only uses $CC for tagged configurations so make sure $CC is set. # If no C compiler was specified, use CC. LTCC=${LTCC-"$CC"} # If no C compiler flags were specified, use CFLAGS. LTCFLAGS=${LTCFLAGS-"$CFLAGS"} # Allow CC to be a program name with arguments. compiler=$CC # save warnings/boilerplate of simple test code ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" >conftest.$ac_ext eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_compiler_boilerplate=`cat conftest.err` $RM conftest* ac_outfile=conftest.$ac_objext echo "$lt_simple_link_test_code" >conftest.$ac_ext eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err _lt_linker_boilerplate=`cat conftest.err` $RM -r conftest* # Allow CC to be a program name with arguments. lt_save_CC=$CC lt_save_CFLAGS=$CFLAGS lt_save_LD=$LD lt_save_GCC=$GCC GCC=$GXX lt_save_with_gnu_ld=$with_gnu_ld lt_save_path_LD=$lt_cv_path_LD if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx else $as_unset lt_cv_prog_gnu_ld fi if test -n "${lt_cv_path_LDCXX+set}"; then lt_cv_path_LD=$lt_cv_path_LDCXX else $as_unset lt_cv_path_LD fi test -z "${LDCXX+set}" || LD=$LDCXX CC=${CXX-"c++"} CFLAGS=$CXXFLAGS compiler=$CC compiler_CXX=$CC func_cc_basename $compiler cc_basename=$func_cc_basename_result if test -n "$compiler"; then # We don't want -fno-exception when compiling C++ code, so set the # no_builtin_flag separately if test yes = "$GXX"; then lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin' else lt_prog_compiler_no_builtin_flag_CXX= fi if test yes = "$GXX"; then # Set up default GNU C++ configuration # Check whether --with-gnu-ld was given. if test "${with_gnu_ld+set}" = set; then : withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes else with_gnu_ld=no fi ac_prog=ld if test yes = "$GCC"; then # Check if gcc -print-prog-name=ld gives a path. { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 $as_echo_n "checking for ld used by $CC... " >&6; } case $host in *-*-mingw*) # gcc leaves a trailing carriage return, which upsets mingw ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; *) ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; esac case $ac_prog in # Accept absolute paths. [\\/]* | ?:[\\/]*) re_direlt='/[^/][^/]*/\.\./' # Canonicalize the pathname of ld ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` done test -z "$LD" && LD=$ac_prog ;; "") # If it fails, then pretend we aren't using GCC. ac_prog=ld ;; *) # If it is relative, then search for the first ld in PATH. with_gnu_ld=unknown ;; esac elif test yes = "$with_gnu_ld"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 $as_echo_n "checking for GNU ld... " >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 $as_echo_n "checking for non-GNU ld... " >&6; } fi if ${lt_cv_path_LD+:} false; then : $as_echo_n "(cached) " >&6 else if test -z "$LD"; then lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR for ac_dir in $PATH; do IFS=$lt_save_ifs test -z "$ac_dir" && ac_dir=. 
if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then lt_cv_path_LD=$ac_dir/$ac_prog # Check to see if the program is GNU ld. I'd rather use --version, # but apparently some variants of GNU ld only accept -v. # Break only if it was the GNU/non-GNU ld that we prefer. case `"$lt_cv_path_LD" -v 2>&1 &5 $as_echo "$LD" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 { $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 $as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } if ${lt_cv_prog_gnu_ld+:} false; then : $as_echo_n "(cached) " >&6 else # I'd rather use --version here, but apparently some GNU lds only accept -v. case `$LD -v 2>&1 &5 $as_echo "$lt_cv_prog_gnu_ld" >&6; } with_gnu_ld=$lt_cv_prog_gnu_ld # Check if GNU C++ uses GNU ld as the underlying linker, since the # archiving commands below assume that GNU ld is being used. if test yes = "$with_gnu_ld"; then archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # If archive_cmds runs LD, not CC, wlarc should be empty # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to # investigate it a little bit more. (MM) wlarc='$wl' # ancient GNU ld didn't support --whole-archive et. al. if eval "`$CC -print-prog-name=ld` --help 2>&1" | $GREP 'no-whole-archive' > /dev/null; then whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' else whole_archive_flag_spec_CXX= fi else with_gnu_ld=no wlarc= # A generic and very simple default shared library creation # command for GNU C++ for the case where it uses the native # linker, instead of GNU ld. If possible, this setting should # overridden to take advantage of the native linker features on # the platform it is being used on. archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' fi # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else GXX=no with_gnu_ld=no wlarc= fi # PORTME: fill in a description of your system's C++ link characteristics { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } ld_shlibs_CXX=yes case $host_os in aix3*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aix[4-9]*) if test ia64 = "$host_cpu"; then # On IA64, the linker does run time linking by default, so we don't # have to do anything special. aix_use_runtimelinking=no exp_sym_flag='-Bexport' no_entry_flag= else aix_use_runtimelinking=no # Test if we are trying to use run time linking or normal # AIX style linking. If -brtl is somewhere in LDFLAGS, we # have runtime linking enabled, and use it for executables. 
# For shared libraries, we enable/disable runtime linking # depending on the kind of the shared library created - # when "with_aix_soname,aix_use_runtimelinking" is: # "aix,no" lib.a(lib.so.V) shared, rtl:no, for executables # "aix,yes" lib.so shared, rtl:yes, for executables # lib.a static archive # "both,no" lib.so.V(shr.o) shared, rtl:yes # lib.a(lib.so.V) shared, rtl:no, for executables # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a(lib.so.V) shared, rtl:no # "svr4,*" lib.so.V(shr.o) shared, rtl:yes, for executables # lib.a static archive case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) for ld_flag in $LDFLAGS; do case $ld_flag in *-brtl*) aix_use_runtimelinking=yes break ;; esac done if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then # With aix-soname=svr4, we create the lib.so.V shared archives only, # so we don't have lib.a shared libs to link our executables. # We have to force runtime linking in this case. aix_use_runtimelinking=yes LDFLAGS="$LDFLAGS -Wl,-brtl" fi ;; esac exp_sym_flag='-bexport' no_entry_flag='-bnoentry' fi # When large executables or shared objects are built, AIX ld can # have problems creating the table of contents. If linking a library # or program results in "error TOC overflow" add -mminimal-toc to # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. archive_cmds_CXX='' hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes file_list_spec_CXX='$wl-f,' case $with_aix_soname,$aix_use_runtimelinking in aix,*) ;; # no import file svr4,* | *,yes) # use import file # The Import File defines what to hardcode. hardcode_direct_CXX=no hardcode_direct_absolute_CXX=no ;; esac if test yes = "$GXX"; then case $host_os in aix4.[012]|aix4.[012].*) # We only want to do this on AIX 4.2 and lower, the check # below for broken collect2 doesn't work under 4.3+ collect2name=`$CC -print-prog-name=collect2` if test -f "$collect2name" && strings "$collect2name" | $GREP resolve_lib_name >/dev/null then # We have reworked collect2 : else # We have old collect2 hardcode_direct_CXX=unsupported # It fails to find uninstalled libraries when the uninstalled # path is not listed in the libpath. Setting hardcode_minus_L # to unsupported forces relinking hardcode_minus_L_CXX=yes hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_libdir_separator_CXX= fi esac shared_flag='-shared' if test yes = "$aix_use_runtimelinking"; then shared_flag=$shared_flag' $wl-G' fi # Need to ensure runtime linking is disabled for the traditional # shared library, or the linker may eventually find shared libraries # /with/ Import File - we do not want to mix them. shared_flag_aix='-shared' shared_flag_svr4='-shared $wl-G' else # not using gcc if test ia64 = "$host_cpu"; then # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release # chokes on -Wl,-G. The following line is correct: shared_flag='-G' else if test yes = "$aix_use_runtimelinking"; then shared_flag='$wl-G' else shared_flag='$wl-bM:SRE' fi shared_flag_aix='$wl-bM:SRE' shared_flag_svr4='$wl-G' fi fi export_dynamic_flag_spec_CXX='$wl-bexpall' # It seems that -bexpall does not export symbols beginning with # underscore (_), so it is better to generate a list of symbols to # export. 
always_export_symbols_CXX=yes if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then # Warning - without using the other runtime loading flags (-brtl), # -berok will link without error, but may produce a broken library. # The "-G" linker flag allows undefined symbols. no_undefined_flag_CXX='-bernotok' # Determine the default libpath from the value encoded in an empty # executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag else if test ia64 = "$host_cpu"; then hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib' allow_undefined_flag_CXX="-z nodefs" archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols" else # Determine the default libpath from the value encoded in an # empty executable. if test set = "${lt_cv_aix_libpath+set}"; then aix_libpath=$lt_cv_aix_libpath else if ${lt_cv_aix_libpath__CXX+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : lt_aix_libpath_sed=' /Import File Strings/,/^$/ { /^0/ { s/^0 *\([^ ]*\) *$/\1/ p } }' lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` # Check for a 64-bit object if we didn't find anything. if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test -z "$lt_cv_aix_libpath__CXX"; then lt_cv_aix_libpath__CXX=/usr/lib:/lib fi fi aix_libpath=$lt_cv_aix_libpath__CXX fi hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath" # Warning - without using the other run time loading flags, # -berok will link without error, but may produce a broken library. no_undefined_flag_CXX=' $wl-bernotok' allow_undefined_flag_CXX=' $wl-berok' if test yes = "$with_gnu_ld"; then # We only use this code for GNU lds that support --whole-archive. 
whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' else # Exported symbols can be pulled into shared objects from archives whole_archive_flag_spec_CXX='$convenience' fi archive_cmds_need_lc_CXX=yes archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d' # -brtl affects multiple linker settings, -berok does not and is overridden later compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`' if test svr4 != "$with_aix_soname"; then # This is similar to how AIX traditionally builds its shared # libraries. Need -bnortl late, we may have -brtl in LDFLAGS. archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname' fi if test aix != "$with_aix_soname"; then archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp' else # used by -dlpreopen to get the symbols archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV $output_objdir/$realname.d/$soname $output_objdir' fi archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d' fi fi ;; beos*) if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then allow_undefined_flag_CXX=unsupported # Joseph Beckenbach says some releases of gcc # support --undefined. This deserves some investigation. FIXME archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' else ld_shlibs_CXX=no fi ;; chorus*) case $cc_basename in *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; cygwin* | mingw* | pw32* | cegcc*) case $GXX,$cc_basename in ,cl* | no,cl*) # Native MSVC # hardcode_libdir_flag_spec is actually meaningless, as there is # no search path for DLLs. hardcode_libdir_flag_spec_CXX=' ' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=yes file_list_spec_CXX='@' # Tell ltmain to make .lib files, not .a files. libext=lib # Tell ltmain to make .dll files, not .so files. shrext_cmds=.dll # FIXME: Setting linknames here is a bad hack. 
archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames=' archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp "$export_symbols" "$output_objdir/$soname.def"; echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp"; else $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp; fi~ $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ linknames=' # The linker will not automatically build a static lib if we build a DLL. # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' enable_shared_with_static_runtimes_CXX=yes # Don't use ranlib old_postinstall_cmds_CXX='chmod 644 $oldlib' postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ lt_tool_outputfile="@TOOL_OUTPUT@"~ case $lt_outputfile in *.exe|*.EXE) ;; *) lt_outputfile=$lt_outputfile.exe lt_tool_outputfile=$lt_tool_outputfile.exe ;; esac~ func_to_tool_file "$lt_outputfile"~ if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; $RM "$lt_outputfile.manifest"; fi' ;; *) # g++ # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, # as there is no search path for DLLs. hardcode_libdir_flag_spec_CXX='-L$libdir' export_dynamic_flag_spec_CXX='$wl--export-all-symbols' allow_undefined_flag_CXX=unsupported always_export_symbols_CXX=no enable_shared_with_static_runtimes_CXX=yes if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' # If the export-symbols file already is a .def file, use it as # is; otherwise, prepend EXPORTS... 
archive_expsym_cmds_CXX='if test DEF = "`$SED -n -e '\''s/^[ ]*//'\'' -e '\''/^\(;.*\)*$/d'\'' -e '\''s/^\(EXPORTS\|LIBRARY\)\([ ].*\)*$/DEF/p'\'' -e q $export_symbols`" ; then cp $export_symbols $output_objdir/$soname.def; else echo EXPORTS > $output_objdir/$soname.def; cat $export_symbols >> $output_objdir/$soname.def; fi~ $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' else ld_shlibs_CXX=no fi ;; esac ;; darwin* | rhapsody*) archive_cmds_need_lc_CXX=no hardcode_direct_CXX=no hardcode_automatic_CXX=yes hardcode_shlibpath_var_CXX=unsupported if test yes = "$lt_cv_ld_force_load"; then whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' else whole_archive_flag_spec_CXX='' fi link_all_deplibs_CXX=yes allow_undefined_flag_CXX=$_lt_dar_allow_undefined case $cc_basename in ifort*|nagfor*) _lt_dar_can_shared=yes ;; *) _lt_dar_can_shared=$GCC ;; esac if test yes = "$_lt_dar_can_shared"; then output_verbose_link_cmd=func_echo_all archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil" module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil" module_expsym_cmds_CXX="sed -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil" if test yes != "$lt_cv_apple_cc_single_mod"; then archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil" archive_expsym_cmds_CXX="sed 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil" fi else ld_shlibs_CXX=no fi ;; os2*) hardcode_libdir_flag_spec_CXX='-L$libdir' hardcode_minus_L_CXX=yes allow_undefined_flag_CXX=unsupported shrext_cmds=.dll archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~ $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~ $ECHO "DATA MULTIPLE NONSHARED" >> 
$output_objdir/$libname.def~ $ECHO EXPORTS >> $output_objdir/$libname.def~ prefix_cmds="$SED"~ if test EXPORTS = "`$SED 1q $export_symbols`"; then prefix_cmds="$prefix_cmds -e 1d"; fi~ prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~ cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~ $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~ emximp -o $lib $output_objdir/$libname.def' old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def' enable_shared_with_static_runtimes_CXX=yes ;; dgux*) case $cc_basename in ec++*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; ghcx*) # Green Hills C++ Compiler # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; freebsd2.*) # C++ shared libraries reported to be fairly broken before # switch to ELF ld_shlibs_CXX=no ;; freebsd-elf*) archive_cmds_need_lc_CXX=no ;; freebsd* | dragonfly*) # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF # conventions ld_shlibs_CXX=yes ;; haiku*) archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' link_all_deplibs_CXX=yes ;; hpux9*) hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: export_dynamic_flag_spec_CXX='$wl-E' hardcode_direct_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; hpux10*|hpux11*) if test no = "$with_gnu_ld"; then hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir' hardcode_libdir_separator_CXX=: case $host_cpu in hppa*64*|ia64*) ;; *) export_dynamic_flag_spec_CXX='$wl-E' ;; esac fi case $host_cpu in hppa*64*|ia64*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no ;; *) hardcode_direct_CXX=yes hardcode_direct_absolute_CXX=yes hardcode_minus_L_CXX=yes # Not in the search PATH, # but as the default # location of the library. 
;; esac case $cc_basename in CC*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; aCC*) case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then case $host_cpu in hppa*64*) archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; ia64*) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' ;; esac fi else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; interix[3-9]*) hardcode_direct_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl-E' # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. # Instead, shared libraries are loaded at an image base (0x10000000 by # default) and relocated if they conflict, which is a slow very memory # consuming and fragmenting process. To avoid this, we pick a random, # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link # time. Moving up from 0x10000000 also allows more sbrk(2) space. archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' archive_expsym_cmds_CXX='sed "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' ;; irix5* | irix6*) case $cc_basename in CC*) # SGI C++ archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' # Archives containing C++ object files must be created using # "CC -ar", where "CC" is the IRIX C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' ;; *) if test yes = "$GXX"; then if test no = "$with_gnu_ld"; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' else archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib' fi fi link_all_deplibs_CXX=yes ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: inherit_rpath_CXX=yes ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' # Archives containing C++ object files must be created using # "CC -Bstatic", where "CC" is the KAI C++ compiler. old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; icpc* | ecpc* ) # Intel C++ with_gnu_ld=yes # version 8.0 and above of icpc choke on multiply defined symbols # if we add $predep_objects and $postdep_objects, however 7.1 and # earlier do not add the objects themselves. 
case `$CC -V 2>&1` in *"Version 7."*) archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 8.0 or newer tmp_idyn= case $host_cpu in ia64*) tmp_idyn=' -i_dynamic';; esac archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac archive_cmds_need_lc_CXX=no hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive' ;; pgCC* | pgcpp*) # Portland Group C++ compiler case `$CC -V` in *pgCC\ [1-5].* | *pgcpp\ [1-5].*) prelink_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' old_archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ $RANLIB $oldlib' archive_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='tpldir=Template.dir~ rm -rf $tpldir~ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; *) # Version 6 and above use weak symbols archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' ;; cxx*) # Compaq C++ archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib' archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib $wl-retain-symbols-file $wl$export_symbols' runpath_var=LD_RUN_PATH hardcode_libdir_flag_spec_CXX='-rpath $libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
# # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' ;; xl* | mpixl* | bgxl*) # IBM XL 8.0 on PPC, with GNU ld hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' export_dynamic_flag_spec_CXX='$wl--export-dynamic' archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib' if test yes = "$supports_anon_versioning"; then archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ echo "local: *; };" >> $output_objdir/$libname.ver~ $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib' fi ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols' hardcode_libdir_flag_spec_CXX='-R$libdir' whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive' compiler_needs_object_CXX=yes # Not sure whether something based on # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 # would be better. output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. 
old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; esac ;; esac ;; lynxos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; m88k*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; mvs*) case $cc_basename in cxx*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; netbsd*) if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' wlarc= hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no fi # Workaround some broken pre-1.5 toolchains output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' ;; *nto* | *qnx*) ld_shlibs_CXX=yes ;; openbsd* | bitrig*) if test -f /usr/libexec/ld.so; then hardcode_direct_CXX=yes hardcode_shlibpath_var_CXX=no hardcode_direct_absolute_CXX=yes archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib' export_dynamic_flag_spec_CXX='$wl-E' whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive' fi output_verbose_link_cmd=func_echo_all else ld_shlibs_CXX=no fi ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) # Kuck and Associates, Inc. (KAI) C++ Compiler # KCC will only create a shared library if the output file # ends with ".so" (or ".sl" for HP-UX), so rename the library # to its proper name (with version) after linking. archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir' hardcode_libdir_separator_CXX=: # Archives containing C++ object files must be created using # the KAI C++ compiler. 
case $host in osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; esac ;; RCC*) # Rational C++ 2.4.1 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; cxx*) case $host in osf3*) allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' ;; *) allow_undefined_flag_CXX=' -expect_unresolved \*' archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib' archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ echo "-hidden">> $lib.exp~ $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~ $RM $lib.exp' hardcode_libdir_flag_spec_CXX='-rpath $libdir' ;; esac hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. # # There doesn't appear to be a way to prevent this compiler from # explicitly linking system object files so we need to strip them # from the output so that they don't get included in the library # dependencies. output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' ;; *) if test yes,no = "$GXX,$with_gnu_ld"; then allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*' case $host in osf3*) archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; *) archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib' ;; esac hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir' hardcode_libdir_separator_CXX=: # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. 
output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # FIXME: insert proper C++ library support ld_shlibs_CXX=no fi ;; esac ;; psos*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; lcc*) # Lucid # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ archive_cmds_need_lc_CXX=yes no_undefined_flag_CXX=' -zdefs' archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' hardcode_libdir_flag_spec_CXX='-R$libdir' hardcode_shlibpath_var_CXX=no case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) # The compiler driver will combine and reorder linker options, # but understands '-z linker_flag'. # Supported since Solaris 2.6 (maybe 2.5.1?) whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' ;; esac link_all_deplibs_CXX=yes output_verbose_link_cmd='func_echo_all' # Archives containing C++ object files must be created using # "CC -xar", where "CC" is the Sun C++ compiler. This is # necessary to make sure instantiated templates are included # in the archive. old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' ;; gcx*) # Green Hills C++ Compiler archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' # The C++ compiler must be used to create the archive. old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' ;; *) # GNU C++ compiler with Solaris linker if test yes,no = "$GXX,$with_gnu_ld"; then no_undefined_flag_CXX=' $wl-z ${wl}defs' if $CC --version | $GREP -v '^2\.7' > /dev/null; then archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' else # g++ 2.7 appears to require '-G' NOT '-shared' on this # platform. 
archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib' archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' # Commands to make compiler produce verbose output that lists # what "hidden" libraries, object files and flags are used when # linking a shared library. output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' fi hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir' case $host_os in solaris2.[0-5] | solaris2.[0-5].*) ;; *) whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract' ;; esac fi ;; esac ;; sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) no_undefined_flag_CXX='$wl-z,text' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; sysv5* | sco3.2v5* | sco5v6*) # Note: We CANNOT use -z defs as we might desire, because we do not # link with -lc, and that would cause any symbols used from libc to # always be unresolved, which means just about no library would # ever link correctly. If we're not using GNU ld we use -z text # though, which does catch some bad symbols but isn't as heavy-handed # as -z defs. no_undefined_flag_CXX='$wl-z,text' allow_undefined_flag_CXX='$wl-z,nodefs' archive_cmds_need_lc_CXX=no hardcode_shlibpath_var_CXX=no hardcode_libdir_flag_spec_CXX='$wl-R,$libdir' hardcode_libdir_separator_CXX=':' link_all_deplibs_CXX=yes export_dynamic_flag_spec_CXX='$wl-Bexport' runpath_var='LD_RUN_PATH' case $cc_basename in CC*) archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ '"$old_archive_cmds_CXX" reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ '"$reload_cmds_CXX" ;; *) archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac ;; vxworks*) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; *) # FIXME: insert proper C++ library support ld_shlibs_CXX=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no GCC_CXX=$GXX LD_CXX=$LD ## CAVEAT EMPTOR: ## There is no encapsulation within the following macros, do not change ## the running order or otherwise move them around unless you know exactly ## what you are doing... 
# Dependencies to place before and after the object being linked: predep_objects_CXX= postdep_objects_CXX= predeps_CXX= postdeps_CXX= compiler_lib_search_path_CXX= cat > conftest.$ac_ext <<_LT_EOF class Foo { public: Foo (void) { a = 0; } private: int a; }; _LT_EOF _lt_libdeps_save_CFLAGS=$CFLAGS case "$CC $CFLAGS " in #( *\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; *\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; *\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; esac if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; }; then # Parse the compiler output and extract the necessary # objects, libraries and library flags. # Sentinel used to keep track of whether or not we are before # the conftest object file. pre_test_object_deps_done=no for p in `eval "$output_verbose_link_cmd"`; do case $prev$p in -L* | -R* | -l*) # Some compilers place space between "-{L,R}" and the path. # Remove the space. if test x-L = "$p" || test x-R = "$p"; then prev=$p continue fi # Expand the sysroot to ease extracting the directories later. if test -z "$prev"; then case $p in -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;; -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;; -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;; esac fi case $p in =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;; esac if test no = "$pre_test_object_deps_done"; then case $prev in -L | -R) # Internal compiler library paths should come after those # provided the user. The postdeps already come after the # user supplied libs so there is no need to process them. if test -z "$compiler_lib_search_path_CXX"; then compiler_lib_search_path_CXX=$prev$p else compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p" fi ;; # The "-l" case would never come before the object being # linked, so don't bother handling this case. esac else if test -z "$postdeps_CXX"; then postdeps_CXX=$prev$p else postdeps_CXX="${postdeps_CXX} $prev$p" fi fi prev= ;; *.lto.$objext) ;; # Ignore GCC LTO objects *.$objext) # This assumes that the test object file only shows up # once in the compiler output. if test "$p" = "conftest.$objext"; then pre_test_object_deps_done=yes continue fi if test no = "$pre_test_object_deps_done"; then if test -z "$predep_objects_CXX"; then predep_objects_CXX=$p else predep_objects_CXX="$predep_objects_CXX $p" fi else if test -z "$postdep_objects_CXX"; then postdep_objects_CXX=$p else postdep_objects_CXX="$postdep_objects_CXX $p" fi fi ;; *) ;; # Ignore the rest. esac done # Clean up. rm -f a.out a.exe else echo "libtool.m4: error: problem compiling CXX test program" fi $RM -f confest.$objext CFLAGS=$_lt_libdeps_save_CFLAGS # PORTME: override above test on systems where it is broken case $host_os in interix[3-9]*) # Interix 3.5 installs completely hosed .la files for C++, so rather than # hack all around it, let's just trust "g++" to DTRT. predep_objects_CXX= postdep_objects_CXX= postdeps_CXX= ;; esac case " $postdeps_CXX " in *" -lc "*) archive_cmds_need_lc_CXX=no ;; esac compiler_lib_search_dirs_CXX= if test -n "${compiler_lib_search_path_CXX}"; then compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! 
!g' -e 's!^ !!'` fi lt_prog_compiler_wl_CXX= lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX= # C++ specific cases for pic, static, wl, etc. if test yes = "$GXX"; then lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-static' case $host_os in aix*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' fi lt_prog_compiler_pic_CXX='-fPIC' ;; amigaos*) case $host_cpu in powerpc) # see comment about AmigaOS4 .so support lt_prog_compiler_pic_CXX='-fPIC' ;; m68k) # FIXME: we need at least 68020 code to build shared libraries, but # adding the '-m68020' flag to GCC prevents building anything better, # like '-m68040'. lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' ;; esac ;; beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) # PIC is the default for these OSes. ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). # Although the cygwin gcc ignores -fPIC, still need this for old-style # (--disable-auto-import) libraries lt_prog_compiler_pic_CXX='-DDLL_EXPORT' case $host_os in os2*) lt_prog_compiler_static_CXX='$wl-static' ;; esac ;; darwin* | rhapsody*) # PIC is the default on this platform # Common symbols not allowed in MH_DYLIB files lt_prog_compiler_pic_CXX='-fno-common' ;; *djgpp*) # DJGPP does not support shared libraries at all lt_prog_compiler_pic_CXX= ;; haiku*) # PIC is the default for Haiku. # The "-static" flag exists, but is broken. lt_prog_compiler_static_CXX= ;; interix[3-9]*) # Interix 3.x gcc -fpic/-fPIC options generate broken code. # Instead, we relocate shared libraries at runtime. ;; sysv4*MP*) if test -d /usr/nec; then lt_prog_compiler_pic_CXX=-Kconform_pic fi ;; hpux*) # PIC is the default for 64-bit PA HP-UX, but not for 32-bit # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag # sets the default TLS model and affects inlining. case $host_cpu in hppa*64*) ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; *) lt_prog_compiler_pic_CXX='-fPIC' ;; esac else case $host_os in aix[4-9]*) # All AIX code is PIC. if test ia64 = "$host_cpu"; then # AIX 5 now supports IA64 processor lt_prog_compiler_static_CXX='-Bstatic' else lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' fi ;; chorus*) case $cc_basename in cxch68*) # Green Hills C++ Compiler # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" ;; esac ;; mingw* | cygwin* | os2* | pw32* | cegcc*) # This hack is so that the source file can tell whether it is being # built for inclusion in a dll (and should export symbols for example). 
lt_prog_compiler_pic_CXX='-DDLL_EXPORT' ;; dgux*) case $cc_basename in ec++*) lt_prog_compiler_pic_CXX='-KPIC' ;; ghcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; freebsd* | dragonfly*) # FreeBSD uses GNU C++ ;; hpux9* | hpux10* | hpux11*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' if test ia64 != "$host_cpu"; then lt_prog_compiler_pic_CXX='+Z' fi ;; aCC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='$wl-a ${wl}archive' case $host_cpu in hppa*64*|ia64*) # +Z the default ;; *) lt_prog_compiler_pic_CXX='+Z' ;; esac ;; *) ;; esac ;; interix*) # This is c89, which is MS Visual C++ (no shared libs) # Anyone wants to do a port? ;; irix5* | irix6* | nonstopux*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_static_CXX='-non_shared' # CC pic flag -KPIC is the default. ;; *) ;; esac ;; linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) case $cc_basename in KCC*) # KAI C++ Compiler lt_prog_compiler_wl_CXX='--backend -Wl,' lt_prog_compiler_pic_CXX='-fPIC' ;; ecpc* ) # old Intel C++ for x86_64, which still supported -KPIC. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-static' ;; icpc* ) # Intel C++, used to be incompatible with GCC. # ICC 10 doesn't accept -KPIC any more. lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fPIC' lt_prog_compiler_static_CXX='-static' ;; pgCC* | pgcpp*) # Portland Group C++ compiler lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-fpic' lt_prog_compiler_static_CXX='-Bstatic' ;; cxx*) # Compaq C++ # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) # IBM XL 8.0, 9.0 on PPC and BlueGene lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-qpic' lt_prog_compiler_static_CXX='-qstaticlink' ;; *) case `$CC -V 2>&1 | sed 5q` in *Sun\ C*) # Sun C++ 5.9 lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; esac ;; esac ;; lynxos*) ;; m88k*) ;; mvs*) case $cc_basename in cxx*) lt_prog_compiler_pic_CXX='-W c,exportall' ;; *) ;; esac ;; netbsd* | netbsdelf*-gnu) ;; *qnx* | *nto*) # QNX uses GNU C++, but need to define -shared option too, otherwise # it will coredump. lt_prog_compiler_pic_CXX='-fPIC -shared' ;; osf3* | osf4* | osf5*) case $cc_basename in KCC*) lt_prog_compiler_wl_CXX='--backend -Wl,' ;; RCC*) # Rational C++ 2.4.1 lt_prog_compiler_pic_CXX='-pic' ;; cxx*) # Digital/Compaq C++ lt_prog_compiler_wl_CXX='-Wl,' # Make sure the PIC flag is empty. It appears that all Alpha # Linux and Compaq Tru64 Unix objects are PIC. 
lt_prog_compiler_pic_CXX= lt_prog_compiler_static_CXX='-non_shared' ;; *) ;; esac ;; psos*) ;; solaris*) case $cc_basename in CC* | sunCC*) # Sun C++ 4.2, 5.x and Centerline C++ lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' lt_prog_compiler_wl_CXX='-Qoption ld ' ;; gcx*) # Green Hills C++ Compiler lt_prog_compiler_pic_CXX='-PIC' ;; *) ;; esac ;; sunos4*) case $cc_basename in CC*) # Sun C++ 4.x lt_prog_compiler_pic_CXX='-pic' lt_prog_compiler_static_CXX='-Bstatic' ;; lcc*) # Lucid lt_prog_compiler_pic_CXX='-pic' ;; *) ;; esac ;; sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) case $cc_basename in CC*) lt_prog_compiler_wl_CXX='-Wl,' lt_prog_compiler_pic_CXX='-KPIC' lt_prog_compiler_static_CXX='-Bstatic' ;; esac ;; tandem*) case $cc_basename in NCC*) # NonStop-UX NCC 3.20 lt_prog_compiler_pic_CXX='-KPIC' ;; *) ;; esac ;; vxworks*) ;; *) lt_prog_compiler_can_build_shared_CXX=no ;; esac fi case $host_os in # For platforms that do not support PIC, -DPIC is meaningless: *djgpp*) lt_prog_compiler_pic_CXX= ;; *) lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 $as_echo_n "checking for $compiler option to produce PIC... " >&6; } if ${lt_cv_prog_compiler_pic_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX # # Check to make sure the PIC flag actually works. # if test -n "$lt_prog_compiler_pic_CXX"; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 $as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_pic_works_CXX=no ac_outfile=conftest.$ac_objext echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" ## exclude from sc_useless_quotes_in_assignment # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. # The option is referenced via a variable to avoid confusing sed. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>conftest.err) ac_status=$? cat conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s "$ac_outfile"; then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings other than the usual output. $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_pic_works_CXX=yes fi fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then case $lt_prog_compiler_pic_CXX in "" | " "*) ;; *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; esac else lt_prog_compiler_pic_CXX= lt_prog_compiler_can_build_shared_CXX=no fi fi # # Check to make sure the static flag actually works. # wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 $as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_static_works_CXX=no save_LDFLAGS=$LDFLAGS LDFLAGS="$LDFLAGS $lt_tmp_static_flag" echo "$lt_simple_link_test_code" > conftest.$ac_ext if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then # The linker can only warn and ignore the option if not recognized # So say no if there are warnings if test -s conftest.err; then # Append any errors to the config.log. cat conftest.err 1>&5 $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 if diff conftest.exp conftest.er2 >/dev/null; then lt_cv_prog_compiler_static_works_CXX=yes fi else lt_cv_prog_compiler_static_works_CXX=yes fi fi $RM -r conftest* LDFLAGS=$save_LDFLAGS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 $as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then : else lt_prog_compiler_static_CXX= fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 
2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 $as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_prog_compiler_c_o_CXX=no $RM -r conftest 2>/dev/null mkdir conftest cd conftest mkdir out echo "$lt_simple_compile_test_code" > conftest.$ac_ext lt_compiler_flag="-o out/conftest2.$ac_objext" # Insert the option either (1) after the last *FLAGS variable, or # (2) before a word containing "conftest.", or (3) at the end. # Note that $ac_compile itself does not contain backslashes and begins # with a dollar sign (not a hyphen), so the echo should work correctly. lt_compile=`echo "$ac_compile" | $SED \ -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ -e 's:$: $lt_compiler_flag:'` (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) (eval "$lt_compile" 2>out/conftest.err) ac_status=$? cat out/conftest.err >&5 echo "$as_me:$LINENO: \$? = $ac_status" >&5 if (exit $ac_status) && test -s out/conftest2.$ac_objext then # The compiler can only warn and ignore the option if not recognized # So say no if there are warnings $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then lt_cv_prog_compiler_c_o_CXX=yes fi fi chmod u+w . 2>&5 $RM conftest* # SGI C++ compiler will create directory out/ii_files/ for # template instantiation test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files $RM out/* && rmdir out cd .. $RM -r conftest $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 $as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } hard_links=nottested if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then # do not overwrite the value of need_locks provided by the user { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 $as_echo_n "checking if we can lock with hard links... " >&6; } hard_links=yes $RM conftest* ln conftest.a conftest.b 2>/dev/null && hard_links=no touch conftest.a ln conftest.a conftest.b 2>&5 || hard_links=no ln conftest.a conftest.b 2>/dev/null && hard_links=no { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 $as_echo "$hard_links" >&6; } if test no = "$hard_links"; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5 $as_echo "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;} need_locks=warn fi else need_locks=no fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 $as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... 
" >&6; } export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' case $host_os in aix[4-9]*) # If we're using GNU nm, then we don't want the "-C" option. # -C means demangle to GNU nm, but means don't demangle to AIX nm. # Without the "-l" option, or with the "-B" option, AIX nm treats # weak defined symbols like other global defined symbols, whereas # GNU nm marks them as "W". # While the 'weak' keyword is ignored in the Export File, we need # it in the Import File for the 'aix-soname' feature, so we have # to replace the "-B" option with "-P" for AIX nm. if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols' else export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols' fi ;; pw32*) export_symbols_cmds_CXX=$ltdll_cmds ;; cygwin* | mingw* | cegcc*) case $cc_basename in cl*) exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' ;; esac ;; linux* | k*bsd*-gnu | gnu*) link_all_deplibs_CXX=no ;; *) export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 $as_echo "$ld_shlibs_CXX" >&6; } test no = "$ld_shlibs_CXX" && can_build_shared=no with_gnu_ld_CXX=$with_gnu_ld # # Do we need to explicitly link libc? # case "x$archive_cmds_need_lc_CXX" in x|xyes) # Assume -lc should be added archive_cmds_need_lc_CXX=yes if test yes,yes = "$GCC,$enable_shared"; then case $archive_cmds_CXX in *'~'*) # FIXME: we may have to deal with multi-command sequences. ;; '$CC '*) # Test whether the compiler implicitly links with -lc since on some # systems, -lgcc has to come before -lc. If gcc already passes -lc # to ld, don't add -lc before -lgcc. { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 $as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : $as_echo_n "(cached) " >&6 else $RM conftest* echo "$lt_simple_compile_test_code" > conftest.$ac_ext if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 (eval $ac_compile) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } 2>conftest.err; then soname=conftest lib=conftest libobjs=conftest.$ac_objext deplibs= wl=$lt_prog_compiler_wl_CXX pic_flag=$lt_prog_compiler_pic_CXX compiler_flags=-v linker_flags=-v verstring= output_objdir=. 
libname=conftest lt_save_allow_undefined_flag=$allow_undefined_flag_CXX allow_undefined_flag_CXX= if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 ac_status=$? $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 test $ac_status = 0; } then lt_cv_archive_cmds_need_lc_CXX=no else lt_cv_archive_cmds_need_lc_CXX=yes fi allow_undefined_flag_CXX=$lt_save_allow_undefined_flag else cat conftest.err 1>&5 fi $RM conftest* fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 $as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX ;; esac fi ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 $as_echo_n "checking dynamic linker characteristics... " >&6; } library_names_spec= libname_spec='lib$name' soname_spec= shrext_cmds=.so postinstall_cmds= postuninstall_cmds= finish_cmds= finish_eval= shlibpath_var= shlibpath_overrides_runpath=unknown version_type=none dynamic_linker="$host_os ld.so" sys_lib_dlsearch_path_spec="/lib /usr/lib" need_lib_prefix=unknown hardcode_into_libs=no # when you set need_version to no, make sure it does not cause -set_version # flags to be left without arguments need_version=unknown case $host_os in aix3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname.a' shlibpath_var=LIBPATH # AIX 3 has no versioning support, so we append a major version to the name. soname_spec='$libname$release$shared_ext$major' ;; aix[4-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no hardcode_into_libs=yes if test ia64 = "$host_cpu"; then # AIX 5 supports IA64 library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH else # With GCC up to 2.95.x, collect2 would create an import file # for dependence libraries. The import file would start with # the line '#! .'. This would cause the generated library to # depend on '.', always an invalid library. This was fixed in # development snapshots of GCC prior to 3.0. case $host_os in aix4 | aix4.[01] | aix4.[01].*) if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' echo ' yes ' echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then : else can_build_shared=no fi ;; esac # Using Import Files as archive members, it is possible to support # filename-based versioning of shared library archives on AIX. While # this would work for both with and without runtime linking, it will # prevent static linking of such archives. So we do filename-based # shared library versioning with .so extension only, which is used # when both runtime linking and shared linking is enabled. # Unfortunately, runtime linking may impact performance, so we do # not want this to be the default eventually. Also, we use the # versioned .so libs for executables only if there is the -brtl # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only. 
# To allow for filename-based versioning support, we need to create # libNAME.so.V as an archive file, containing: # *) an Import File, referring to the versioned filename of the # archive as well as the shared archive member, telling the # bitwidth (32 or 64) of that shared object, and providing the # list of exported symbols of that shared object, eventually # decorated with the 'weak' keyword # *) the shared object with the F_LOADONLY flag set, to really avoid # it being seen by the linker. # At run time we better use the real file rather than another symlink, # but for link time we create the symlink libNAME.so -> libNAME.so.V case $with_aix_soname,$aix_use_runtimelinking in # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct # soname into executable. Probably we can add versioning support to # collect2, so additional links can be useful in future. aix,yes) # traditional libtool dynamic_linker='AIX unversionable lib.so' # If using run time linking (on AIX 4.2 or later) use lib.so # instead of lib.a to let people know that these are not # typical AIX shared libraries. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; aix,no) # traditional AIX only dynamic_linker='AIX lib.a(lib.so.V)' # We preserve .a as extension for shared libraries through AIX4.2 # and later when we are not doing run time linking. library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' ;; svr4,*) # full svr4 only dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,yes) # both, prefer svr4 dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)" library_names_spec='$libname$release$shared_ext$major $libname$shared_ext' # unpreferred sharedlib libNAME.a needs extra handling postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"' postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"' # We do not specify a path in Import Files, so LIBPATH fires. shlibpath_overrides_runpath=yes ;; *,no) # both, prefer aix dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)" library_names_spec='$libname$release.a $libname.a' soname_spec='$libname$release$shared_ext$major' # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)' postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"' ;; esac shlibpath_var=LIBPATH fi ;; amigaos*) case $host_cpu in powerpc) # Since July 2007 AmigaOS4 officially supports .so libraries. 
# When compiling the executable, add -use-dynld -Lsobjs: to the compileline. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' ;; m68k) library_names_spec='$libname.ixlibrary $libname.a' # Create ${libname}_ixlibrary.a entries in /sys/libs. finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' ;; esac ;; beos*) library_names_spec='$libname$shared_ext' dynamic_linker="$host_os ld.so" shlibpath_var=LIBRARY_PATH ;; bsdi[45]*) version_type=linux # correct to gnu/linux during the next big refactor need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" # the default ld.so.conf also contains /usr/contrib/lib and # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow # libtool to hard-code these into programs ;; cygwin* | mingw* | pw32* | cegcc*) version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no case $GCC,$cc_basename in yes,*) # gcc library_names_spec='$libname.dll.a' # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes case $host_os in cygwin*) # Cygwin DLLs use 'cyg' prefix rather than 'lib' soname_spec='`echo $libname | sed -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; mingw* | cegcc*) # MinGW DLLs use traditional 'lib' prefix soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; pw32*) # pw32 DLLs use 'pw' prefix rather than 'lib' library_names_spec='`echo $libname | sed -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' ;; esac dynamic_linker='Win32 ld.exe' ;; *,cl*) # Native MSVC libname_spec='$name' soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext' library_names_spec='$libname.dll.lib' case $build_os in mingw*) sys_lib_search_path_spec= lt_save_ifs=$IFS IFS=';' for lt_path in $LIB do IFS=$lt_save_ifs # Let DOS variable expansion print the short 8.3 style file name. lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" done IFS=$lt_save_ifs # Convert to MSYS style. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` ;; cygwin*) # Convert to unix form, then to dos form, then back to unix form # but this time dos style (no spaces!) so that the unix form looks # like /cygdrive/c/PROGRA~1:/cygdr... 
sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` ;; *) sys_lib_search_path_spec=$LIB if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then # It is most probably a Windows format PATH. sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` else sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` fi # FIXME: find the short name or the path components, as spaces are # common. (e.g. "Program Files" -> "PROGRA~1") ;; esac # DLL is installed to $(libdir)/../bin by postinstall_cmds postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' shlibpath_overrides_runpath=yes dynamic_linker='Win32 link.exe' ;; *) # Assume MSVC wrapper library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib' dynamic_linker='Win32 ld.exe' ;; esac # FIXME: first we should search . and the directory the executable is in shlibpath_var=PATH ;; darwin* | rhapsody*) dynamic_linker="$host_os dyld" version_type=darwin need_lib_prefix=no need_version=no library_names_spec='$libname$release$major$shared_ext $libname$shared_ext' soname_spec='$libname$release$major$shared_ext' shlibpath_overrides_runpath=yes shlibpath_var=DYLD_LIBRARY_PATH shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' ;; dgux*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; freebsd* | dragonfly*) # DragonFly does not have aout. When/if they implement a new # versioning mechanism, adjust this. 
if test -x /usr/bin/objformat; then objformat=`/usr/bin/objformat` else case $host_os in freebsd[23].*) objformat=aout ;; *) objformat=elf ;; esac fi version_type=freebsd-$objformat case $version_type in freebsd-elf*) library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' need_version=no need_lib_prefix=no ;; freebsd-*) library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' need_version=yes ;; esac shlibpath_var=LD_LIBRARY_PATH case $host_os in freebsd2.*) shlibpath_overrides_runpath=yes ;; freebsd3.[01]* | freebsdelf3.[01]*) shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; *) # from 4.6 on, and DragonFly shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; esac ;; haiku*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no dynamic_linker="$host_os runtime_loader" library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LIBRARY_PATH shlibpath_overrides_runpath=no sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' hardcode_into_libs=yes ;; hpux9* | hpux10* | hpux11*) # Give a soname corresponding to the major version so that dld.sl refuses to # link against other versions. version_type=sunos need_lib_prefix=no need_version=no case $host_cpu in ia64*) shrext_cmds='.so' hardcode_into_libs=yes dynamic_linker="$host_os dld.so" shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' if test 32 = "$HPUX_IA64_MODE"; then sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" sys_lib_dlsearch_path_spec=/usr/lib/hpux32 else sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" sys_lib_dlsearch_path_spec=/usr/lib/hpux64 fi ;; hppa*64*) shrext_cmds='.sl' hardcode_into_libs=yes dynamic_linker="$host_os dld.sl" shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; *) shrext_cmds='.sl' dynamic_linker="$host_os dld.sl" shlibpath_var=SHLIB_PATH shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' ;; esac # HP-UX runs *really* slowly unless shared libraries are mode 555, ... 
postinstall_cmds='chmod 555 $lib' # or fails outright, so override atomically: install_override_mode=555 ;; interix[3-9]*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; irix5* | irix6* | nonstopux*) case $host_os in nonstopux*) version_type=nonstopux ;; *) if test yes = "$lt_cv_prog_gnu_ld"; then version_type=linux # correct to gnu/linux during the next big refactor else version_type=irix fi ;; esac need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext' case $host_os in irix5* | nonstopux*) libsuff= shlibsuff= ;; *) case $LD in # libtool.m4 will add one of these switches to LD *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") libsuff= shlibsuff= libmagic=32-bit;; *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") libsuff=32 shlibsuff=N32 libmagic=N32;; *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") libsuff=64 shlibsuff=64 libmagic=64-bit;; *) libsuff= shlibsuff= libmagic=never-match;; esac ;; esac shlibpath_var=LD_LIBRARY${shlibsuff}_PATH shlibpath_overrides_runpath=no sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff" sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff" hardcode_into_libs=yes ;; # No shared lib support for Linux oldld, aout, or coff. linux*oldld* | linux*aout* | linux*coff*) dynamic_linker=no ;; linux*android*) version_type=none # Android doesn't support versioned libraries. need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext' soname_spec='$libname$release$shared_ext' finish_cmds= shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes dynamic_linker='Android linker' # Don't embed -rpath directories since the linker doesn't support them. hardcode_libdir_flag_spec_CXX='-L$libdir' ;; # This must be glibc/ELF. linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no # Some binutils ld are patched to set DT_RUNPATH if ${lt_cv_shlibpath_overrides_runpath+:} false; then : $as_echo_n "(cached) " >&6 else lt_cv_shlibpath_overrides_runpath=no save_LDFLAGS=$LDFLAGS save_libdir=$libdir eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : lt_cv_shlibpath_overrides_runpath=yes fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LDFLAGS=$save_LDFLAGS libdir=$save_libdir fi shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath # This implies no fast_install, which is unacceptable. # Some rework will be needed to allow for fast_install # before this can be enabled. hardcode_into_libs=yes # Ideally, we could use ldconfig to report *all* directores which are # searched for libraries, however this is still not possible. Aside from not # being certain /sbin/ldconfig is available, command # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64, # even though it is searched at run-time. Try to do the best guess by # appending ld.so.conf contents (and includes) to the search path. if test -f /etc/ld.so.conf; then lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" fi # We used to test for /lib/ld.so.1 and disable shared libraries on # powerpc, because MkLinux only supported shared libraries with the # GNU dynamic linker. Since this was broken with cross compilers, # most powerpc-linux boxes support dynamic linking these days and # people can always --disable-shared, the test was removed, and we # assume the GNU/Linux dynamic linker is in use. dynamic_linker='GNU/Linux ld.so' ;; netbsdelf*-gnu) version_type=linux need_lib_prefix=no need_version=no library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' soname_spec='${libname}${release}${shared_ext}$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='NetBSD ld.elf_so' ;; netbsd*) version_type=sunos need_lib_prefix=no need_version=no if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' dynamic_linker='NetBSD (a.out) ld.so' else library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' dynamic_linker='NetBSD ld.elf_so' fi shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes ;; newsos6) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; *nto* | *qnx*) version_type=qnx need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes dynamic_linker='ldqnx.so' ;; openbsd* | bitrig*) version_type=sunos sys_lib_dlsearch_path_spec=/usr/lib need_lib_prefix=no if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then need_version=no else need_version=yes fi 
library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes ;; os2*) libname_spec='$name' version_type=windows shrext_cmds=.dll need_version=no need_lib_prefix=no # OS/2 can only load a DLL with a base name of 8 characters or less. soname_spec='`test -n "$os2dllname" && libname="$os2dllname"; v=$($ECHO $release$versuffix | tr -d .-); n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _); $ECHO $n$v`$shared_ext' library_names_spec='${libname}_dll.$libext' dynamic_linker='OS/2 ld.exe' shlibpath_var=BEGINLIBPATH sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec postinstall_cmds='base_file=`basename \$file`~ dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~ dldir=$destdir/`dirname \$dlpath`~ test -d \$dldir || mkdir -p \$dldir~ $install_prog $dir/$dlname \$dldir/$dlname~ chmod a+x \$dldir/$dlname~ if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; fi' postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~ dlpath=$dir/\$dldll~ $RM \$dlpath' ;; osf3* | osf4* | osf5*) version_type=osf need_lib_prefix=no need_version=no soname_spec='$libname$release$shared_ext$major' library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec ;; rdos*) dynamic_linker=no ;; solaris*) version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes # ldd complains unless libraries are executable postinstall_cmds='chmod +x $lib' ;; sunos4*) version_type=sunos library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix' finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes if test yes = "$with_gnu_ld"; then need_lib_prefix=no fi need_version=yes ;; sysv4 | sysv4.3*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH case $host_vendor in sni) shlibpath_overrides_runpath=no need_lib_prefix=no runpath_var=LD_RUN_PATH ;; siemens) need_lib_prefix=no ;; motorola) need_lib_prefix=no need_version=no shlibpath_overrides_runpath=no sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' ;; esac ;; sysv4*MP*) if test -d /usr/nec; then version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext' soname_spec='$libname$shared_ext.$major' shlibpath_var=LD_LIBRARY_PATH fi ;; sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) version_type=sco need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext' 
soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=yes hardcode_into_libs=yes if test yes = "$with_gnu_ld"; then sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib' else sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' case $host_os in sco3.2v5*) sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" ;; esac fi sys_lib_dlsearch_path_spec='/usr/lib' ;; tpf*) # TPF is a cross-target only. Preferred cross-host = GNU/Linux. version_type=linux # correct to gnu/linux during the next big refactor need_lib_prefix=no need_version=no library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' shlibpath_var=LD_LIBRARY_PATH shlibpath_overrides_runpath=no hardcode_into_libs=yes ;; uts4*) version_type=linux # correct to gnu/linux during the next big refactor library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext' soname_spec='$libname$release$shared_ext$major' shlibpath_var=LD_LIBRARY_PATH ;; *) dynamic_linker=no ;; esac { $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 $as_echo "$dynamic_linker" >&6; } test no = "$dynamic_linker" && can_build_shared=no variables_saved_for_relink="PATH $shlibpath_var $runpath_var" if test yes = "$GCC"; then variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" fi if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec fi if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec fi # remember unaugmented sys_lib_dlsearch_path content for libtool script decls... configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec # ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH" # to be used as default LT_SYS_LIBRARY_PATH value in generated libtool configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 $as_echo_n "checking how to hardcode library paths into programs... " >&6; } hardcode_action_CXX= if test -n "$hardcode_libdir_flag_spec_CXX" || test -n "$runpath_var_CXX" || test yes = "$hardcode_automatic_CXX"; then # We can hardcode non-existent directories. if test no != "$hardcode_direct_CXX" && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library # when we should be linking with a yet-to-be-installed one ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" && test no != "$hardcode_minus_L_CXX"; then # Linking always hardcodes the temporary library directory. hardcode_action_CXX=relink else # We can link without hardcoding, and we can hardcode nonexisting dirs. hardcode_action_CXX=immediate fi else # We cannot hardcode anything, or else we can only hardcode existing # directories. 
hardcode_action_CXX=unsupported fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 $as_echo "$hardcode_action_CXX" >&6; } if test relink = "$hardcode_action_CXX" || test yes = "$inherit_rpath_CXX"; then # Fast installation is not supported enable_fast_install=no elif test yes = "$shlibpath_overrides_runpath" || test no = "$enable_shared"; then # Fast installation is not necessary enable_fast_install=needless fi fi # test -n "$compiler" CC=$lt_save_CC CFLAGS=$lt_save_CFLAGS LDCXX=$LD LD=$lt_save_LD GCC=$lt_save_GCC with_gnu_ld=$lt_save_with_gnu_ld lt_cv_path_LDCXX=$lt_cv_path_LD lt_cv_path_LD=$lt_save_path_LD lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld fi # test yes != "$_lt_caught_CXX_error" ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu # AC_PROG_CXX used to set CXX to C when no compiler was found, but now it's # g++. So actually try to build a program to verify the compiler. if test C$CXX = C ; then as_fn_error $? "C++ compiler needed. Please install one (ie: gnu g++)" "$LINENO" 5 fi ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_cxx_try_link "$LINENO"; then : rcl_link_ok=yes else rcl_link_ok=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext if test "$rcl_link_ok" = "no" ; then as_fn_error $? "No working C++ compiler was found" "$LINENO" 5 fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu for ac_prog in 'bison -y' byacc do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_prog_YACC+:} false; then : $as_echo_n "(cached) " >&6 else if test -n "$YACC"; then ac_cv_prog_YACC="$YACC" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_prog_YACC="$ac_prog" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS fi fi YACC=$ac_cv_prog_YACC if test -n "$YACC"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $YACC" >&5 $as_echo "$YACC" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi test -n "$YACC" && break done test -n "$YACC" || YACC="yacc" { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 $as_echo_n "checking whether byte ordering is bigendian... " >&6; } if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_c_bigendian=unknown # See if we're dealing with a universal compiler. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #ifndef __APPLE_CC__ not a universal capable compiler #endif typedef int dummy; _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # Check for potential -arch flags. It is not universal unless # there are at least two -arch flags with different values. ac_arch= ac_prev= for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do if test -n "$ac_prev"; then case $ac_word in i?86 | x86_64 | ppc | ppc64) if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then ac_arch=$ac_word else ac_cv_c_bigendian=universal break fi ;; esac ac_prev= elif test "x$ac_word" = "x-arch"; then ac_prev=arch fi done fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_c_bigendian = unknown; then # See if sys/param.h defines the BYTE_ORDER macro. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ && LITTLE_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include #include int main () { #if BYTE_ORDER != BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) bogus endian macros #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : # It does; now see whether it defined to _BIG_ENDIAN or not. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { #ifndef _BIG_ENDIAN not big endian #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_c_bigendian=yes else ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi if test $ac_cv_c_bigendian = unknown; then # Compile a test program. if test "$cross_compiling" = yes; then : # Try to guess by grepping values from an object file. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; int use_ascii (int i) { return ascii_mm[i] + ascii_ii[i]; } short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; int use_ebcdic (int i) { return ebcdic_mm[i] + ebcdic_ii[i]; } extern int foo; int main () { return use_ascii (foo) == use_ebcdic (foo); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then ac_cv_c_bigendian=yes fi if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then if test "$ac_cv_c_bigendian" = unknown; then ac_cv_c_bigendian=no else # finding both strings is unlikely to happen, but who knows? ac_cv_c_bigendian=unknown fi fi fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ $ac_includes_default int main () { /* Are we little or big endian? From Harbison&Steele. */ union { long int l; char c[sizeof (long int)]; } u; u.l = 1; return u.c[sizeof (long int) - 1] == 1; ; return 0; } _ACEOF if ac_fn_c_try_run "$LINENO"; then : ac_cv_c_bigendian=no else ac_cv_c_bigendian=yes fi rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ conftest.$ac_objext conftest.beam conftest.$ac_ext fi fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 $as_echo "$ac_cv_c_bigendian" >&6; } case $ac_cv_c_bigendian in #( yes) $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h ;; #( no) ;; #( universal) $as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h ;; #( *) as_fn_error $? "unknown endianness presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; esac # Check whether --enable-largefile was given. if test "${enable_largefile+set}" = set; then : enableval=$enable_largefile; fi if test "$enable_largefile" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 $as_echo_n "checking for special C compiler options needed for large files... " >&6; } if ${ac_cv_sys_largefile_CC+:} false; then : $as_echo_n "(cached) " >&6 else ac_cv_sys_largefile_CC=no if test "$GCC" != yes; then ac_save_CC=$CC while :; do # IRIX 6.2 and later do not support large files by default, # so use the C compiler's -n32 option if that helps. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : break fi rm -f core conftest.err conftest.$ac_objext CC="$CC -n32" if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_largefile_CC=' -n32'; break fi rm -f core conftest.err conftest.$ac_objext break done CC=$ac_save_CC rm -f conftest.$ac_ext fi fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 $as_echo "$ac_cv_sys_largefile_CC" >&6; } if test "$ac_cv_sys_largefile_CC" != no; then CC=$CC$ac_cv_sys_largefile_CC fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 $as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } if ${ac_cv_sys_file_offset_bits+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _FILE_OFFSET_BITS 64 #include /* Check that off_t can represent 2**63 - 1 correctly. 
We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_file_offset_bits=64; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_file_offset_bits=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 $as_echo "$ac_cv_sys_file_offset_bits" >&6; } case $ac_cv_sys_file_offset_bits in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF ;; esac rm -rf conftest* if test $ac_cv_sys_file_offset_bits = unknown; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 $as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=no; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #define _LARGE_FILES 1 #include /* Check that off_t can represent 2**63 - 1 correctly. We can't simply define LARGE_OFF_T to be 9223372036854775807, since some C++ compilers masquerading as C compilers incorrectly reject 9223372036854775807. */ #define LARGE_OFF_T ((((off_t) 1 << 31) << 31) - 1 + (((off_t) 1 << 31) << 31)) int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 && LARGE_OFF_T % 2147483647 == 1) ? 
1 : -1]; int main () { ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : ac_cv_sys_large_files=1; break fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_sys_large_files=unknown break done fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 $as_echo "$ac_cv_sys_large_files" >&6; } case $ac_cv_sys_large_files in #( no | unknown) ;; *) cat >>confdefs.h <<_ACEOF #define _LARGE_FILES $ac_cv_sys_large_files _ACEOF ;; esac rm -rf conftest* fi fi # OpenBSD needs sys/param.h for mount.h to compile for ac_header in sys/param.h, spawn.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done for ac_func in posix_spawn setrlimit kqueue vsnprintf do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF fi done if test "x$ac_cv_func_posix_spawn" = xyes; then : # Check whether --enable-posix_spawn was given. if test "${enable_posix_spawn+set}" = set; then : enableval=$enable_posix_spawn; posixSpawnEnabled=$enableval else posixSpawnEnabled=no fi fi if test X$posixSpawnEnabled = Xyes ; then $as_echo "#define USE_POSIX_SPAWN 1" >>confdefs.h fi # Check for where to find unordered_map etc. ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu ac_fn_cxx_check_header_mongrel "$LINENO" "tr1/unordered_map" "ac_cv_header_tr1_unordered_map" "$ac_includes_default" if test "x$ac_cv_header_tr1_unordered_map" = xyes; then : $as_echo "#define HAVE_TR1_UNORDERED /**/" >>confdefs.h fi ac_fn_cxx_check_header_mongrel "$LINENO" "unordered_map" "ac_cv_header_unordered_map" "$ac_includes_default" if test "x$ac_cv_header_unordered_map" = xyes; then : $as_echo "#define HAVE_CXX0X_UNORDERED /**/" >>confdefs.h fi cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { std::shared_ptr ptr; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : rcl_shared_ptr_std="1" else rcl_shared_ptr_std="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include int main () { std::tr1::shared_ptr ptr; ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : rcl_shared_ptr_tr1="1" else rcl_shared_ptr_tr1="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test X$rcl_shared_ptr_std = X1; then $as_echo "#define HAVE_SHARED_PTR_STD /**/" >>confdefs.h elif test X$rcl_shared_ptr_tr1 = X1; then $as_echo "#define HAVE_SHARED_PTR_TR1 /**/" >>confdefs.h fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu for ac_header in sys/mount.h sys/statfs.h sys/statvfs.h sys/vfs.h do : as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "#ifdef HAVE_SYS_PARAM_H # include #endif " if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF fi done # Use specific 'file' command ? (Useful on solaris to specify # /usr/local/bin/file instead of the system's which doesn't understand '-i' # Check whether --with-file-command was given. if test "${with_file_command+set}" = set; then : withval=$with_file_command; withFileCommand=$withval else withFileCommand=file fi case $withFileCommand in file) # Extract the first word of "file", so it can be a program name with args. set dummy file; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_fileProg+:} false; then : $as_echo_n "(cached) " >&6 else case $fileProg in [\\/]* | ?:[\\/]*) ac_cv_path_fileProg="$fileProg" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_fileProg="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi fileProg=$ac_cv_path_fileProg if test -n "$fileProg"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $fileProg" >&5 $as_echo "$fileProg" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) fileProg=$withFileCommand;; esac if test ! -x "$fileProg"; then as_fn_error $? "$fileProg does not exist or is not executable" "$LINENO" 5 fi cat >>confdefs.h <<_ACEOF #define FILE_PROG "$fileProg" _ACEOF # Can't use Solaris standard 'file' command, it doesn't support -i $as_echo "#define USE_SYSTEM_FILE_COMMAND 1" >>confdefs.h # Use aspell to provide spelling expansions ? # The default is yes. If we do find an aspell installation, we use it. Else # we do compile the aspell module using an internal copy of aspell.h # Only --with-aspell=no will completely disable aspell support # Check whether --with-aspell was given. if test "${with_aspell+set}" = set; then : withval=$with_aspell; withAspell=$withval else withAspell=yes fi case $withAspell in no);; yes) # Extract the first word of "aspell", so it can be a program name with args. set dummy aspell; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_aspellProg+:} false; then : $as_echo_n "(cached) " >&6 else case $aspellProg in [\\/]* | ?:[\\/]*) ac_cv_path_aspellProg="$aspellProg" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_aspellProg="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS ;; esac fi aspellProg=$ac_cv_path_aspellProg if test -n "$aspellProg"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $aspellProg" >&5 $as_echo "$aspellProg" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; *) # The argument should be the path to the aspell program aspellProg=$withAspell ;; esac if test X$withAspell != Xno ; then $as_echo "#define RCL_USE_ASPELL 1" >>confdefs.h if test X$aspellProg != X ; then aspellBase=`dirname $aspellProg` aspellBase=`dirname $aspellBase` cat >>confdefs.h <<_ACEOF #define ASPELL_PROG "$aspellProg" _ACEOF if test -f $aspellBase/include/aspell.h ; then cat >>confdefs.h <<_ACEOF #define ASPELL_INCLUDE "$aspellBase/include/aspell.h" _ACEOF else { $as_echo "$as_me:${as_lineno-$LINENO}: aspell support enabled but aspell package not found. Compiling with internal aspell interface file" >&5 $as_echo "$as_me: aspell support enabled but aspell package not found. Compiling with internal aspell interface file" >&6;} $as_echo "#define ASPELL_INCLUDE \"aspell-local.h\"" >>confdefs.h fi else # aspell support enabled but no aspell install yet { $as_echo "$as_me:${as_lineno-$LINENO}: aspell support enabled but aspell package not found. Compiling with internal aspell interface file" >&5 $as_echo "$as_me: aspell support enabled but aspell package not found. Compiling with internal aspell interface file" >&6;} $as_echo "#define ASPELL_INCLUDE \"aspell-local.h\"" >>confdefs.h fi fi if test -f /usr/include/sys/inotify.h -o -f /usr/include/linux/inotify.h; then inot_default=yes else inot_default=no fi # Real time monitoring with inotify # Check whether --with-inotify was given. if test "${with_inotify+set}" = set; then : withval=$with_inotify; withInotify=$withval else withInotify=$inot_default fi if test X$withInotify != Xno ; then { $as_echo "$as_me:${as_lineno-$LINENO}: enabled support for inotify monitoring" >&5 $as_echo "$as_me: enabled support for inotify monitoring" >&6;} $as_echo "#define RCL_MONITOR 1" >>confdefs.h $as_echo "#define RCL_USE_INOTIFY 1" >>confdefs.h else { $as_echo "$as_me:${as_lineno-$LINENO}: inotify not found, inotify monitoring disabled" >&5 $as_echo "$as_me: inotify not found, inotify monitoring disabled" >&6;} fi # Real time monitoring with FAM # Check whether --with-fam was given. if test "${with_fam+set}" = set; then : withval=$with_fam; withFam=$withval else withFam=yes fi if test X$withFam != Xno -a X$withInotify != Xno ; then { $as_echo "$as_me:${as_lineno-$LINENO}: FAM support enabled but inotify support also enabled. Disabling FAM support and using inotify" >&5 $as_echo "$as_me: FAM support enabled but inotify support also enabled. 
Disabling FAM support and using inotify" >&6;} withFam=no fi famLib="" case $withFam in no);; yes) for dir in /usr/local/lib ${libdir};do if test -f $dir/libfam.so ; then famLib=$dir/libfam.so;break;fi done if test X$famLib = X ; then { $as_echo "$as_me:${as_lineno-$LINENO}: FAM library not found, disabling FAM and real time indexing support" >&5 $as_echo "$as_me: FAM library not found, disabling FAM and real time indexing support" >&6;} withFam=no fi ;; *) # The argument should be the path to the fam library famLib=$withFam ;; esac if test X$withFam != Xno ; then $as_echo "#define RCL_MONITOR 1" >>confdefs.h $as_echo "#define RCL_USE_FAM 1" >>confdefs.h if test X$famLib != X ; then famLibDir=`dirname $famLib` famBase=`dirname $famLibDir` famBLib=`basename $famLib .so | sed -e s/lib//` if test ! -f $famBase/include/fam.h ; then as_fn_error $? "fam.h not found in $famBase/include. Specify --with-fam=no to disable fam support" "$LINENO" 5 fi LIBFAM="-L$famLibDir -l$famBLib" { $as_echo "$as_me:${as_lineno-$LINENO}: fam library directive: $LIBFAM" >&5 $as_echo "$as_me: fam library directive: $LIBFAM" >&6;} cat >>confdefs.h <<_ACEOF #define FAM_INCLUDE "$famBase/include/fam.h" _ACEOF else as_fn_error $? "fam library not found" "$LINENO" 5 fi fi # Enable use of threads in the indexing pipeline. # Disabled by default on OS X as this actually hurts performance. # Also disabled on Windows (which does not use configure, see autoconfig-win.h) case ${host_os} in darwin*) # Check whether --enable-idxthreads was given. if test "${enable_idxthreads+set}" = set; then : enableval=$enable_idxthreads; idxthreadsEnabled=$enableval else idxthreadsEnabled=no fi ;; *) # Check whether --enable-idxthreads was given. if test "${enable_idxthreads+set}" = set; then : enableval=$enable_idxthreads; idxthreadsEnabled=$enableval else idxthreadsEnabled=yes fi ;; esac if test X$idxthreadsEnabled = Xno; then NOTHREADS_TRUE= NOTHREADS_FALSE='#' else NOTHREADS_TRUE='#' NOTHREADS_FALSE= fi if test X$idxthreadsEnabled = Xyes ; then $as_echo "#define IDX_THREADS 1" >>confdefs.h fi # Check whether --enable-testmains was given. if test "${enable_testmains+set}" = set; then : enableval=$enable_testmains; buildtestmains=$enableval else buildtestmains=no fi if test "$buildtestmains" = yes; then COND_TESTMAINS_TRUE= COND_TESTMAINS_FALSE='#' else COND_TESTMAINS_TRUE='#' COND_TESTMAINS_FALSE= fi # Enable CamelCase word splitting. This is optional because it causes # problems with phrases: with camelcase enabled, "MySQL manual" # will be matched by "MySQL manual" and "my sql manual" but not # "mysql manual" (which would need increased slack as manual is now at pos # 2 instead of 1 # Check whether --enable-camelcase was given. if test "${enable_camelcase+set}" = set; then : enableval=$enable_camelcase; camelcaseEnabled=$enableval else camelcaseEnabled=no fi if test X$camelcaseEnabled = Xyes ; then $as_echo "#define RCL_SPLIT_CAMELCASE 1" >>confdefs.h fi # Disable building the python module. # Check whether --enable-python-module was given. if test "${enable_python_module+set}" = set; then : enableval=$enable_python_module; pythonEnabled=$enableval else pythonEnabled=yes fi if test X$pythonEnabled = Xyes; then MAKEPYTHON_TRUE= MAKEPYTHON_FALSE='#' else MAKEPYTHON_TRUE='#' MAKEPYTHON_FALSE= fi # Disable building the libchm python wrapper # Check whether --enable-python-chm was given. 
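# --- Editor's note (not generated by autoconf; illustrative only) ---
# The feature switches handled in this region map directly onto configure
# command-line options. A minimal sketch of a typical invocation using the
# flags defined above (the chosen values are examples, not recommendations):
#
#   ./configure --with-inotify --without-fam \
#               --enable-idxthreads --disable-python-module
#
# When the flags are omitted, the defaults computed above apply: inotify is
# auto-detected from the system headers, FAM is turned off whenever inotify
# support is active, and indexing threads are enabled except on Darwin.
# ---------------------------------------------------------------------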
if test "${enable_python_chm+set}" = set; then : enableval=$enable_python_chm; pythonChmEnabled=$enableval else pythonChmEnabled=yes fi if test X$pythonChmEnabled = Xyes; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for chm_resolve_object in -lchm" >&5 $as_echo_n "checking for chm_resolve_object in -lchm... " >&6; } if ${ac_cv_lib_chm_chm_resolve_object+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lchm $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char chm_resolve_object (); int main () { return chm_resolve_object (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_chm_chm_resolve_object=yes else ac_cv_lib_chm_chm_resolve_object=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_chm_chm_resolve_object" >&5 $as_echo "$ac_cv_lib_chm_chm_resolve_object" >&6; } if test "x$ac_cv_lib_chm_chm_resolve_object" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBCHM 1 _ACEOF LIBS="-lchm $LIBS" else as_fn_error $? "--enable-python-chm is set but libchm is not found" "$LINENO" 5 fi fi if test X$pythonChmEnabled = Xyes; then MAKEPYTHONCHM_TRUE= MAKEPYTHONCHM_FALSE='#' else MAKEPYTHONCHM_TRUE='#' MAKEPYTHONCHM_FALSE= fi for ac_func in mkdtemp do : ac_fn_c_check_func "$LINENO" "mkdtemp" "ac_cv_func_mkdtemp" if test "x$ac_cv_func_mkdtemp" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_MKDTEMP 1 _ACEOF fi done { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_create in -lpthread" >&5 $as_echo_n "checking for pthread_create in -lpthread... " >&6; } if ${ac_cv_lib_pthread_pthread_create+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lpthread $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char pthread_create (); int main () { return pthread_create (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_pthread_pthread_create=yes else ac_cv_lib_pthread_pthread_create=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pthread_pthread_create" >&5 $as_echo "$ac_cv_lib_pthread_pthread_create" >&6; } if test "x$ac_cv_lib_pthread_pthread_create" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBPTHREAD 1 _ACEOF LIBS="-lpthread $LIBS" fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 $as_echo_n "checking for library containing dlopen... " >&6; } if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char dlopen (); int main () { return dlopen (); ; return 0; } _ACEOF for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else ac_res=-l$ac_lib LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext if ${ac_cv_search_dlopen+:} false; then : break fi done if ${ac_cv_search_dlopen+:} false; then : else ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 $as_echo "$ac_cv_search_dlopen" >&6; } ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi if test X$ac_cv_search_function != Xno ; then $as_echo "#define HAVE_DLOPEN 1" >>confdefs.h fi { $as_echo "$as_me:${as_lineno-$LINENO}: checking for zlibVersion in -lz" >&5 $as_echo_n "checking for zlibVersion in -lz... " >&6; } if ${ac_cv_lib_z_zlibVersion+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char zlibVersion (); int main () { return zlibVersion (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_z_zlibVersion=yes else ac_cv_lib_z_zlibVersion=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_zlibVersion" >&5 $as_echo "$ac_cv_lib_z_zlibVersion" >&6; } if test "x$ac_cv_lib_z_zlibVersion" = xyes; then : cat >>confdefs.h <<_ACEOF #define HAVE_LIBZ 1 _ACEOF LIBS="-lz $LIBS" fi ############# Putenv { $as_echo "$as_me:${as_lineno-$LINENO}: checking for type of string parameter to putenv" >&5 $as_echo_n "checking for type of string parameter to putenv... " >&6; } ac_ext=cpp ac_cpp='$CXXCPP $CPPFLAGS' ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_cxx_compiler_gnu cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { putenv((const char *)0); ; return 0; } _ACEOF if ac_fn_cxx_try_compile "$LINENO"; then : rcl_putenv_string_const="1" else rcl_putenv_string_const="0" fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test X$rcl_putenv_string_const = X1 ; then $as_echo "#define PUTENV_ARG_CONST 1" >>confdefs.h fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu #### Look for Xapian. Done in a strange way to work around autoconf # cache XAPIAN_CONFIG=${XAPIAN_CONFIG:-no} if test "$XAPIAN_CONFIG" = "no"; then # Extract the first word of "xapian-config", so it can be a program name with args. set dummy xapian-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... 
" >&6; } if ${ac_cv_path_XAPIAN_CONFIG0+:} false; then : $as_echo_n "(cached) " >&6 else case $XAPIAN_CONFIG0 in [\\/]* | ?:[\\/]*) ac_cv_path_XAPIAN_CONFIG0="$XAPIAN_CONFIG0" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XAPIAN_CONFIG0="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XAPIAN_CONFIG0" && ac_cv_path_XAPIAN_CONFIG0="no" ;; esac fi XAPIAN_CONFIG0=$ac_cv_path_XAPIAN_CONFIG0 if test -n "$XAPIAN_CONFIG0"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XAPIAN_CONFIG0" >&5 $as_echo "$XAPIAN_CONFIG0" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi XAPIAN_CONFIG=$XAPIAN_CONFIG0 fi if test "$XAPIAN_CONFIG" = "no"; then # Extract the first word of "xapian-config-1.3", so it can be a program name with args. set dummy xapian-config-1.3; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XAPIAN_CONFIG1+:} false; then : $as_echo_n "(cached) " >&6 else case $XAPIAN_CONFIG1 in [\\/]* | ?:[\\/]*) ac_cv_path_XAPIAN_CONFIG1="$XAPIAN_CONFIG1" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XAPIAN_CONFIG1="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XAPIAN_CONFIG1" && ac_cv_path_XAPIAN_CONFIG1="no" ;; esac fi XAPIAN_CONFIG1=$ac_cv_path_XAPIAN_CONFIG1 if test -n "$XAPIAN_CONFIG1"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XAPIAN_CONFIG1" >&5 $as_echo "$XAPIAN_CONFIG1" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi XAPIAN_CONFIG=$XAPIAN_CONFIG1 fi if test "$XAPIAN_CONFIG" = "no"; then # Extract the first word of "xapian-config-1.1", so it can be a program name with args. set dummy xapian-config-1.1; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XAPIAN_CONFIG2+:} false; then : $as_echo_n "(cached) " >&6 else case $XAPIAN_CONFIG2 in [\\/]* | ?:[\\/]*) ac_cv_path_XAPIAN_CONFIG2="$XAPIAN_CONFIG2" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XAPIAN_CONFIG2="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XAPIAN_CONFIG2" && ac_cv_path_XAPIAN_CONFIG2="no" ;; esac fi XAPIAN_CONFIG2=$ac_cv_path_XAPIAN_CONFIG2 if test -n "$XAPIAN_CONFIG2"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XAPIAN_CONFIG2" >&5 $as_echo "$XAPIAN_CONFIG2" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi XAPIAN_CONFIG=$XAPIAN_CONFIG2 fi if test "$XAPIAN_CONFIG" = "no" ; then as_fn_error $? "Cannot find xapian-config command in $PATH. Is xapian-core installed ?" "$LINENO" 5 exit 1 fi LIBXAPIAN=`$XAPIAN_CONFIG --libs` # The --static thing fails with older Xapians. Happily enough they don't # need it either (because there are no needed libraries (no uuid and we # deal explicitly with libz) LIBXAPIANSTATICEXTRA=`$XAPIAN_CONFIG --static --libs 2> /dev/null` # Workaround for problem in xapian-config in some versions: wrongly lists # libstdc++.la in the lib list for i in $LIBXAPIAN ; do case $i in *stdc++*|-lm|-lgcc_s|-lc);; *) tmpxaplib="$tmpxaplib $i";; esac done LIBXAPIAN=$tmpxaplib LIBXAPIANDIR=`$XAPIAN_CONFIG --libs | awk '{print $1}'` case A"$LIBXAPIANDIR" in A-L*) LIBXAPIANDIR=`echo $LIBXAPIANDIR | sed -e 's/-L//'`;; *) LIBXAPIANDIR="";; esac XAPIANCXXFLAGS=`$XAPIAN_CONFIG --cxxflags` #echo XAPIAN_CONFIG: $XAPIAN_CONFIG #echo LIBXAPIAN: $LIBXAPIAN #echo LIBXAPIANDIR: $LIBXAPIANDIR #echo LIBXAPIANSTATICEXTRA: $LIBXAPIANSTATICEXTRA #echo XAPIANCXXFLAGS: $XAPIANCXXFLAGS XSLT_CONFIG=${XSLT_CONFIG:-no} if test "$XSLT_CONFIG" = "no"; then # Extract the first word of "xslt-config", so it can be a program name with args. set dummy xslt-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_XSLT_CONFIG0+:} false; then : $as_echo_n "(cached) " >&6 else case $XSLT_CONFIG0 in [\\/]* | ?:[\\/]*) ac_cv_path_XSLT_CONFIG0="$XSLT_CONFIG0" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_XSLT_CONFIG0="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_XSLT_CONFIG0" && ac_cv_path_XSLT_CONFIG0="no" ;; esac fi XSLT_CONFIG0=$ac_cv_path_XSLT_CONFIG0 if test -n "$XSLT_CONFIG0"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $XSLT_CONFIG0" >&5 $as_echo "$XSLT_CONFIG0" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi XSLT_CONFIG=$XSLT_CONFIG0 fi if test "$XSLT_CONFIG" = "no" ; then as_fn_error $? "Cannot find xslt-config command in $PATH. Is libxslt installed ?" "$LINENO" 5 exit 1 fi XSLT_CFLAGS=`xslt-config --cflags` XSLT_LINKADD=`xslt-config --libs` # Check whether --enable-xadump was given. if test "${enable_xadump+set}" = set; then : enableval=$enable_xadump; enableXADUMP=$enableval else enableXADUMP="no" fi if test X$enableXADUMP = Xyes; then MAKEXADUMP_TRUE= MAKEXADUMP_FALSE='#' else MAKEXADUMP_TRUE='#' MAKEXADUMP_FALSE= fi # Check whether --enable-userdoc was given. 
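# --- Editor's note (not generated by autoconf; illustrative only) ---
# The Xapian and libxslt checks above only search $PATH while XAPIAN_CONFIG
# (resp. XSLT_CONFIG) is still at its "no" default, so either variable can
# be pre-seeded from the environment to select a specific installation.
# A minimal sketch, with a hypothetical path:
#
#   XAPIAN_CONFIG=/opt/xapian/bin/xapian-config ./configure
#
# Note that, as coded above, the libxslt flags (XSLT_CFLAGS, XSLT_LINKADD)
# are still taken from the plain "xslt-config" found on $PATH even when
# XSLT_CONFIG is pre-set; only the presence test honours the variable.
# ---------------------------------------------------------------------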
if test "${enable_userdoc+set}" = set; then : enableval=$enable_userdoc; enableUserdoc=$enableval else enableUserdoc="yes" fi if test X$enableUserdoc = Xyes; then MAKEUSERDOC_TRUE= MAKEUSERDOC_FALSE='#' else MAKEUSERDOC_TRUE='#' MAKEUSERDOC_FALSE= fi #### QT # The way qt and its tools (qmake especially) are installed is very # different between systems (and maybe qt versions) # # In general we need QTDIR to be set, because it is used inside the # qmake-generated makefiles. But there are exceptions: ie on debian3.1 (at # least on the sourceforge compile farm), QTDIR is not needed because qmake # generates hard paths (and is installed in /usr/bin). We don't want to # force the user to set QTDIR if it is not needed. # # The logic is then to first look for qmake, possibly using QTDIR if it is # set. # # If QTDIR is not set, we then generate a bogus qt project and check if # QTDIR is needed in the Makefile, in which case we complain. # # QMAKESPEC: on most Linux system, there is a 'default' link inside the # mkspecs directory, so that QMAKESPEC is not needed. # If QMAKESPEC is not set and needed, the qmake test at the previous test # will have failed, and we tell the user to check his environment. # # Check whether --enable-qtgui was given. if test "${enable_qtgui+set}" = set; then : enableval=$enable_qtgui; enableQT=$enableval else enableQT="yes" fi if test X$enableQT = Xyes; then MAKEQT_TRUE= MAKEQT_FALSE='#' else MAKEQT_TRUE='#' MAKEQT_FALSE= fi # Check whether --enable-recollq was given. if test "${enable_recollq+set}" = set; then : enableval=$enable_recollq; enableRECOLLQ=$enableval else enableRECOLLQ="no" fi if test X"$enableRECOLLQ" != X ; then if test X$enableRECOLLQ = Xyes; then MAKECMDLINE_TRUE= MAKECMDLINE_FALSE='#' else MAKECMDLINE_TRUE='#' MAKECMDLINE_FALSE= fi else if test X$enableQT = Xno; then MAKECMDLINE_TRUE= MAKECMDLINE_FALSE='#' else MAKECMDLINE_TRUE='#' MAKECMDLINE_FALSE= fi fi if test X$enableQT = Xyes ; then if test X$QTDIR != X ; then PATH=$PATH:$QTDIR/bin export PATH fi if test X$QMAKE = X ; then QMAKE=qmake fi case $QMAKE in */*) QMAKEPATH=$QMAKE;; *) # Extract the first word of "$QMAKE", so it can be a program name with args. set dummy $QMAKE; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } if ${ac_cv_path_QMAKEPATH+:} false; then : $as_echo_n "(cached) " >&6 else case $QMAKEPATH in [\\/]* | ?:[\\/]*) ac_cv_path_QMAKEPATH="$QMAKEPATH" # Let the user override the test with a path. ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then ac_cv_path_QMAKEPATH="$as_dir/$ac_word$ac_exec_ext" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done done IFS=$as_save_IFS test -z "$ac_cv_path_QMAKEPATH" && ac_cv_path_QMAKEPATH="NOTFOUND" ;; esac fi QMAKEPATH=$ac_cv_path_QMAKEPATH if test -n "$QMAKEPATH"; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $QMAKEPATH" >&5 $as_echo "$QMAKEPATH" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi ;; esac if test X$QMAKEPATH = XNOTFOUND ; then as_fn_error $? "Cannot find the qmake program. Maybe you need to install qt development files and tools and/or set the QTDIR environment variable?" 
"$LINENO" 5 fi QMAKE=$QMAKEPATH # Check Qt version qmakevers="`${QMAKE} --version 2>&1`" #echo "qmake version: $qmakevers" v4=`expr "$qmakevers" : '.*Qt *version *4.*'` v5=`expr "$qmakevers" : '.*Qt *version *5.*'` if test X$v4 = X0 -a X$v5 = X0; then as_fn_error $? "Bad qt/qmake version string (not 4 or 5?): $qmakevers" "$LINENO" 5 else if test X$v4 != X0 ; then { $as_echo "$as_me:${as_lineno-$LINENO}: using qt version 4 user interface" >&5 $as_echo "$as_me: using qt version 4 user interface" >&6;} else { $as_echo "$as_me:${as_lineno-$LINENO}: using qt version 5 user interface" >&5 $as_echo "$as_me: using qt version 5 user interface" >&6;} fi QTGUI=qtgui fi ##### Using Qt webkit for reslist display? Else Qt textbrowser # Check whether --enable-webkit was given. if test "${enable_webkit+set}" = set; then : enableval=$enable_webkit; enableWebkit=$enableval else enableWebkit="yes" fi if test "$enableWebkit" = "yes" ; then QMAKE_ENABLE_WEBKIT="" QMAKE_DISABLE_WEBKIT="#" else QMAKE_ENABLE_WEBKIT="#" QMAKE_DISABLE_WEBKIT="" fi # Check whether --enable-webengine was given. if test "${enable_webengine+set}" = set; then : enableval=$enable_webengine; enableWebengine=$enableval else enableWebengine="no" fi if test "$enableWebengine" = "yes" ; then QMAKE_ENABLE_WEBENGINE="" QMAKE_DISABLE_WEBENGINE="#" QMAKE_ENABLE_WEBKIT="#" QMAKE_DISABLE_WEBKIT="" else QMAKE_ENABLE_WEBENGINE="#" QMAKE_DISABLE_WEBENGINE="" fi ##### Using QZeitGeist lib ? Default no for now # Check whether --with-qzeitgeist was given. if test "${with_qzeitgeist+set}" = set; then : withval=$with_qzeitgeist; withQZeitgeist=$withval else withQZeitgeist="no" fi case "$withQZeitgeist" in no) LIBQZEITGEIST=;; yes) LIBQZEITGEIST=-lqzeitgeist;; *) LIBQZEITGEIST=$withQZeitgeist;; esac if test "$withQZeitgeist" != "no" ; then QMAKE_ENABLE_ZEITGEIST="" QMAKE_DISABLE_ZEITGEIST="#" else QMAKE_ENABLE_ZEITGEIST="#" QMAKE_DISABLE_ZEITGEIST="" fi ac_config_files="$ac_config_files $QTGUI/recoll.pro" ##################### End QT stuff fi ### X11: this is needed for the session monitoring code (in recollindex -m) # Check whether --enable-x11mon was given. if test "${enable_x11mon+set}" = set; then : enableval=$enable_x11mon; enableX11mon=$enableval else enableX11mon="yes" fi if test X$withInotify = Xno -a X$withFam = Xno ; then enableX11mon=no fi if test "$enableX11mon" = "yes" ; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for X" >&5 $as_echo_n "checking for X... " >&6; } # Check whether --with-x was given. if test "${with_x+set}" = set; then : withval=$with_x; fi # $have_x is `yes', `no', `disabled', or empty when we do not yet know. if test "x$with_x" = xno; then # The user explicitly disabled X. have_x=disabled else case $x_includes,$x_libraries in #( *\'*) as_fn_error $? "cannot use X directory names containing '" "$LINENO" 5;; #( *,NONE | NONE,*) if ${ac_cv_have_x+:} false; then : $as_echo_n "(cached) " >&6 else # One or both of the vars are not set, and there is no cached value. ac_x_includes=no ac_x_libraries=no rm -f -r conftest.dir if mkdir conftest.dir; then cd conftest.dir cat >Imakefile <<'_ACEOF' incroot: @echo incroot='${INCROOT}' usrlibdir: @echo usrlibdir='${USRLIBDIR}' libdir: @echo libdir='${LIBDIR}' _ACEOF if (export CC; ${XMKMF-xmkmf}) >/dev/null 2>/dev/null && test -f Makefile; then # GNU make sometimes prints "make[1]: Entering ...", which would confuse us. 
for ac_var in incroot usrlibdir libdir; do eval "ac_im_$ac_var=\`\${MAKE-make} $ac_var 2>/dev/null | sed -n 's/^$ac_var=//p'\`" done # Open Windows xmkmf reportedly sets LIBDIR instead of USRLIBDIR. for ac_extension in a so sl dylib la dll; do if test ! -f "$ac_im_usrlibdir/libX11.$ac_extension" && test -f "$ac_im_libdir/libX11.$ac_extension"; then ac_im_usrlibdir=$ac_im_libdir; break fi done # Screen out bogus values from the imake configuration. They are # bogus both because they are the default anyway, and because # using them would break gcc on systems where it needs fixed includes. case $ac_im_incroot in /usr/include) ac_x_includes= ;; *) test -f "$ac_im_incroot/X11/Xos.h" && ac_x_includes=$ac_im_incroot;; esac case $ac_im_usrlibdir in /usr/lib | /usr/lib64 | /lib | /lib64) ;; *) test -d "$ac_im_usrlibdir" && ac_x_libraries=$ac_im_usrlibdir ;; esac fi cd .. rm -f -r conftest.dir fi # Standard set of common directories for X headers. # Check X11 before X11Rn because it is often a symlink to the current release. ac_x_header_dirs=' /usr/X11/include /usr/X11R7/include /usr/X11R6/include /usr/X11R5/include /usr/X11R4/include /usr/include/X11 /usr/include/X11R7 /usr/include/X11R6 /usr/include/X11R5 /usr/include/X11R4 /usr/local/X11/include /usr/local/X11R7/include /usr/local/X11R6/include /usr/local/X11R5/include /usr/local/X11R4/include /usr/local/include/X11 /usr/local/include/X11R7 /usr/local/include/X11R6 /usr/local/include/X11R5 /usr/local/include/X11R4 /usr/X386/include /usr/x386/include /usr/XFree86/include/X11 /usr/include /usr/local/include /usr/unsupported/include /usr/athena/include /usr/local/x11r5/include /usr/lpp/Xamples/include /usr/openwin/include /usr/openwin/share/include' if test "$ac_x_includes" = no; then # Guess where to find include files, by looking for Xlib.h. # First, try using that file with no special directory specified. cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include _ACEOF if ac_fn_c_try_cpp "$LINENO"; then : # We can compile using X headers with no special include directory. ac_x_includes= else for ac_dir in $ac_x_header_dirs; do if test -r "$ac_dir/X11/Xlib.h"; then ac_x_includes=$ac_dir break fi done fi rm -f conftest.err conftest.i conftest.$ac_ext fi # $ac_x_includes = no if test "$ac_x_libraries" = no; then # Check for the libraries. # See if we find them without any special options. # Don't add to $LIBS permanently. ac_save_LIBS=$LIBS LIBS="-lX11 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include int main () { XrmInitialize () ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : LIBS=$ac_save_LIBS # We can link X programs with no special library path. ac_x_libraries= else LIBS=$ac_save_LIBS for ac_dir in `$as_echo "$ac_x_includes $ac_x_header_dirs" | sed s/include/lib/g` do # Don't even attempt the hair of trying to link an X program! for ac_extension in a so sl dylib la dll; do if test -r "$ac_dir/libX11.$ac_extension"; then ac_x_libraries=$ac_dir break 2 fi done done fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi # $ac_x_libraries = no case $ac_x_includes,$ac_x_libraries in #( no,* | *,no | *\'*) # Didn't find X, or a directory has "'" in its name. ac_cv_have_x="have_x=no";; #( *) # Record where we found X for the cache. 
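# Minimal sketch of what the X header probe above amounts to (an illustration
# only, not part of the generated test; the file name is hypothetical):
#   printf '#include <X11/Xlib.h>\n' > xprobe.c
#   if cc -E xprobe.c >/dev/null 2>&1; then
#       : # headers usable with no extra -I flag
#   else
#       for d in $ac_x_header_dirs; do
#           test -r "$d/X11/Xlib.h" && ac_x_includes=$d && break
#       done
#   fi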
ac_cv_have_x="have_x=yes\ ac_x_includes='$ac_x_includes'\ ac_x_libraries='$ac_x_libraries'" esac fi ;; #( *) have_x=yes;; esac eval "$ac_cv_have_x" fi # $with_x != no if test "$have_x" != yes; then { $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_x" >&5 $as_echo "$have_x" >&6; } no_x=yes else # If each of the values was on the command line, it overrides each guess. test "x$x_includes" = xNONE && x_includes=$ac_x_includes test "x$x_libraries" = xNONE && x_libraries=$ac_x_libraries # Update the cache value to reflect the command line values. ac_cv_have_x="have_x=yes\ ac_x_includes='$x_includes'\ ac_x_libraries='$x_libraries'" { $as_echo "$as_me:${as_lineno-$LINENO}: result: libraries $x_libraries, headers $x_includes" >&5 $as_echo "libraries $x_libraries, headers $x_includes" >&6; } fi if test "$no_x" = yes; then # Not all programs may use this symbol, but it does not hurt to define it. $as_echo "#define X_DISPLAY_MISSING 1" >>confdefs.h X_CFLAGS= X_PRE_LIBS= X_LIBS= X_EXTRA_LIBS= else if test -n "$x_includes"; then X_CFLAGS="$X_CFLAGS -I$x_includes" fi # It would also be nice to do this for all -L options, not just this one. if test -n "$x_libraries"; then X_LIBS="$X_LIBS -L$x_libraries" # For Solaris; some versions of Sun CC require a space after -R and # others require no space. Words are not sufficient . . . . { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -R must be followed by a space" >&5 $as_echo_n "checking whether -R must be followed by a space... " >&6; } ac_xsave_LIBS=$LIBS; LIBS="$LIBS -R$x_libraries" ac_xsave_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } X_LIBS="$X_LIBS -R$x_libraries" else LIBS="$ac_xsave_LIBS -R $x_libraries" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ int main () { ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 $as_echo "yes" >&6; } X_LIBS="$X_LIBS -R $x_libraries" else { $as_echo "$as_me:${as_lineno-$LINENO}: result: neither works" >&5 $as_echo "neither works" >&6; } fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext ac_c_werror_flag=$ac_xsave_c_werror_flag LIBS=$ac_xsave_LIBS fi # Check for system-dependent libraries X programs must link with. # Do this before checking for the system-independent R6 libraries # (-lICE), since we may need -lsocket or whatever for X linking. if test "$ISC" = yes; then X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl_s -linet" else # Martyn Johnson says this is needed for Ultrix, if the X # libraries were built with DECnet support. And Karl Berry says # the Alpha needs dnet_stub (dnet does not exist). ac_xsave_LIBS="$LIBS"; LIBS="$LIBS $X_LIBS -lX11" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char XOpenDisplay (); int main () { return XOpenDisplay (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : else { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet" >&5 $as_echo_n "checking for dnet_ntoa in -ldnet... 
" >&6; } if ${ac_cv_lib_dnet_dnet_ntoa+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldnet $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dnet_ntoa (); int main () { return dnet_ntoa (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dnet_dnet_ntoa=yes else ac_cv_lib_dnet_dnet_ntoa=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_dnet_ntoa" >&5 $as_echo "$ac_cv_lib_dnet_dnet_ntoa" >&6; } if test "x$ac_cv_lib_dnet_dnet_ntoa" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet" fi if test $ac_cv_lib_dnet_dnet_ntoa = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dnet_ntoa in -ldnet_stub" >&5 $as_echo_n "checking for dnet_ntoa in -ldnet_stub... " >&6; } if ${ac_cv_lib_dnet_stub_dnet_ntoa+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-ldnet_stub $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char dnet_ntoa (); int main () { return dnet_ntoa (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_dnet_stub_dnet_ntoa=yes else ac_cv_lib_dnet_stub_dnet_ntoa=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dnet_stub_dnet_ntoa" >&5 $as_echo "$ac_cv_lib_dnet_stub_dnet_ntoa" >&6; } if test "x$ac_cv_lib_dnet_stub_dnet_ntoa" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -ldnet_stub" fi fi fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS="$ac_xsave_LIBS" # msh@cis.ufl.edu says -lnsl (and -lsocket) are needed for his 386/AT, # to get the SysV transport functions. # Chad R. Larson says the Pyramis MIS-ES running DC/OSx (SVR4) # needs -lnsl. # The nsl library prevents programs from opening the X display # on Irix 5.2, according to T.E. Dickey. # The functions gethostbyname, getservbyname, and inet_addr are # in -lbsd on LynxOS 3.0.1/i386, according to Lars Hecking. ac_fn_c_check_func "$LINENO" "gethostbyname" "ac_cv_func_gethostbyname" if test "x$ac_cv_func_gethostbyname" = xyes; then : fi if test $ac_cv_func_gethostbyname = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lnsl" >&5 $as_echo_n "checking for gethostbyname in -lnsl... " >&6; } if ${ac_cv_lib_nsl_gethostbyname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_nsl_gethostbyname=yes else ac_cv_lib_nsl_gethostbyname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } if test "x$ac_cv_lib_nsl_gethostbyname" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lnsl" fi if test $ac_cv_lib_nsl_gethostbyname = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lbsd" >&5 $as_echo_n "checking for gethostbyname in -lbsd... " >&6; } if ${ac_cv_lib_bsd_gethostbyname+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lbsd $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char gethostbyname (); int main () { return gethostbyname (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_bsd_gethostbyname=yes else ac_cv_lib_bsd_gethostbyname=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_bsd_gethostbyname" >&5 $as_echo "$ac_cv_lib_bsd_gethostbyname" >&6; } if test "x$ac_cv_lib_bsd_gethostbyname" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lbsd" fi fi fi # lieder@skyler.mavd.honeywell.com says without -lsocket, # socket/setsockopt and other routines are undefined under SCO ODT # 2.0. But -lsocket is broken on IRIX 5.2 (and is not necessary # on later versions), says Simon Leinen: it contains gethostby* # variants that don't use the name server (or something). -lsocket # must be given before -lnsl if both are needed. We assume that # if connect needs -lnsl, so does gethostbyname. ac_fn_c_check_func "$LINENO" "connect" "ac_cv_func_connect" if test "x$ac_cv_func_connect" = xyes; then : fi if test $ac_cv_func_connect = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for connect in -lsocket" >&5 $as_echo_n "checking for connect in -lsocket... " >&6; } if ${ac_cv_lib_socket_connect+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsocket $X_EXTRA_LIBS $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char connect (); int main () { return connect (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_socket_connect=yes else ac_cv_lib_socket_connect=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_connect" >&5 $as_echo "$ac_cv_lib_socket_connect" >&6; } if test "x$ac_cv_lib_socket_connect" = xyes; then : X_EXTRA_LIBS="-lsocket $X_EXTRA_LIBS" fi fi # Guillermo Gomez says -lposix is necessary on A/UX. 
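# Minimal sketch of the "checking for FUNC in -lLIB" pattern used above (an
# illustration only; the real test goes through ac_fn_c_try_link and runs
# only when the plain link without the candidate library failed):
#   echo 'char connect(); int main() { return connect(); }' > linktest.c
#   cc linktest.c -lsocket $X_EXTRA_LIBS >/dev/null 2>&1 \
#       && X_EXTRA_LIBS="-lsocket $X_EXTRA_LIBS"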
ac_fn_c_check_func "$LINENO" "remove" "ac_cv_func_remove" if test "x$ac_cv_func_remove" = xyes; then : fi if test $ac_cv_func_remove = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for remove in -lposix" >&5 $as_echo_n "checking for remove in -lposix... " >&6; } if ${ac_cv_lib_posix_remove+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lposix $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char remove (); int main () { return remove (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_posix_remove=yes else ac_cv_lib_posix_remove=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_posix_remove" >&5 $as_echo "$ac_cv_lib_posix_remove" >&6; } if test "x$ac_cv_lib_posix_remove" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lposix" fi fi # BSDI BSD/OS 2.1 needs -lipc for XOpenDisplay. ac_fn_c_check_func "$LINENO" "shmat" "ac_cv_func_shmat" if test "x$ac_cv_func_shmat" = xyes; then : fi if test $ac_cv_func_shmat = no; then { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shmat in -lipc" >&5 $as_echo_n "checking for shmat in -lipc... " >&6; } if ${ac_cv_lib_ipc_shmat+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lipc $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. */ #ifdef __cplusplus extern "C" #endif char shmat (); int main () { return shmat (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ipc_shmat=yes else ac_cv_lib_ipc_shmat=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ipc_shmat" >&5 $as_echo "$ac_cv_lib_ipc_shmat" >&6; } if test "x$ac_cv_lib_ipc_shmat" = xyes; then : X_EXTRA_LIBS="$X_EXTRA_LIBS -lipc" fi fi fi # Check for libraries that X11R6 Xt/Xaw programs need. ac_save_LDFLAGS=$LDFLAGS test -n "$x_libraries" && LDFLAGS="$LDFLAGS -L$x_libraries" # SM needs ICE to (dynamically) link under SunOS 4.x (so we have to # check for ICE first), but we must link in the order -lSM -lICE or # we get undefined symbols. So assume we have SM if we have ICE. # These have to be linked with before -lX11, unlike the other # libraries we check for below, so use a different variable. # John Interrante, Karl Berry { $as_echo "$as_me:${as_lineno-$LINENO}: checking for IceConnectionNumber in -lICE" >&5 $as_echo_n "checking for IceConnectionNumber in -lICE... " >&6; } if ${ac_cv_lib_ICE_IceConnectionNumber+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lICE $X_EXTRA_LIBS $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC builtin and then its argument prototype would still apply. 
*/ #ifdef __cplusplus extern "C" #endif char IceConnectionNumber (); int main () { return IceConnectionNumber (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : ac_cv_lib_ICE_IceConnectionNumber=yes else ac_cv_lib_ICE_IceConnectionNumber=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ICE_IceConnectionNumber" >&5 $as_echo "$ac_cv_lib_ICE_IceConnectionNumber" >&6; } if test "x$ac_cv_lib_ICE_IceConnectionNumber" = xyes; then : X_PRE_LIBS="$X_PRE_LIBS -lSM -lICE" fi LDFLAGS=$ac_save_LDFLAGS fi X_LIBX11=-lX11 else $as_echo "#define DISABLE_X11MON 1" >>confdefs.h X_LIBX11="" fi #echo X_CFLAGS "'$X_CFLAGS'" X_PRE_LIBS "'$X_PRE_LIBS'" X_LIBS \ # "'$X_LIBS'" X_LIBX11 "'$X_LIBX11'" X_EXTRA_LIBS "'$X_EXTRA_LIBS'" # For communicating the value of RECOLL_DATADIR to non-make-based # subpackages like python-recoll, we have to expand prefix in here, because # things like "datadir = ${prefix}/share" (which is what we'd get by # expanding @datadir@) don't mean a thing in Python... I guess we could # have a piece of shell-script text to be substituted into and executed by # setup.py for getting the value of pkgdatadir, but really... m_prefix=$prefix test "X$m_prefix" = "XNONE" && m_prefix=/usr/local m_datadir=${m_prefix}/share RECOLL_DATADIR=${m_datadir}/recoll RCLVERSION=$PACKAGE_VERSION RCLLIBVERSION=$RCLVERSION ac_config_files="$ac_config_files Makefile python/recoll/setup.py python/pychm/setup.py" if test X$buildtestmains = Xyes ; then ac_config_files="$ac_config_files testmains/Makefile" fi cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure # tests run on this system so they can be shared between configure # scripts and configure runs, see configure's option --config-cache. # It is not useful on other systems. If it contains results you don't # want to keep, you may remove or edit it. # # config.status only pays attention to the cache file if you give it # the --recheck option to rerun configure. # # `ac_cv_env_foo' variables (set or unset) will be overridden when # loading this file, other *unset* `ac_cv_foo' will be assigned the # following values. _ACEOF # The following way of writing the cache mishandles newlines in values, # but we know of no workaround that is simple, portable, and efficient. # So, we kill variables containing newlines. # Ultrix sh set writes to stderr and can't be redirected directly, # and sets the high bit in the cache file unless we assign to the vars. ( for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do eval ac_val=\$$ac_var case $ac_val in #( *${as_nl}*) case $ac_var in #( *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( *) { eval $ac_var=; unset $ac_var;} ;; esac ;; esac done (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) # `set' does not quote correctly, so add quotes: double-quote # substitution turns \\\\ into \\, and sed turns \\ into \. sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" ;; #( *) # `set' quotes correctly as required by POSIX, so do not add quotes. 
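# Worked example of the prefix expansion above (illustrative only): with no
# --prefix given, m_prefix falls back to /usr/local and RECOLL_DATADIR becomes
# /usr/local/share/recoll; with, say,
#   ./configure --prefix=/opt/recoll
# the value substituted into python/recoll/setup.py is
# /opt/recoll/share/recoll.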
sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" ;; esac | sort ) | sed ' /^ac_cv_env_/b end t clear :clear s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ t end s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ :end' >>confcache if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then if test "x$cache_file" != "x/dev/null"; then { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} if test ! -f "$cache_file" || test -h "$cache_file"; then cat confcache >"$cache_file" else case $cache_file in #( */* | ?:*) mv -f confcache "$cache_file"$$ && mv -f "$cache_file"$$ "$cache_file" ;; #( *) mv -f confcache "$cache_file" ;; esac fi fi else { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi rm -f confcache test "x$prefix" = xNONE && prefix=$ac_default_prefix # Let make expand exec_prefix. test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' DEFS=-DHAVE_CONFIG_H ac_libobjs= ac_ltlibobjs= U= for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue # 1. Remove the extension, and $U if already installed. ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs LTLIBOBJS=$ac_ltlibobjs { $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5 $as_echo_n "checking that generated files are newer than configure... " >&6; } if test -n "$am_sleep_pid"; then # Hide warnings about reused PIDs. wait $am_sleep_pid 2>/dev/null fi { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5 $as_echo "done" >&6; } if test -n "$EXEEXT"; then am__EXEEXT_TRUE= am__EXEEXT_FALSE='#' else am__EXEEXT_TRUE='#' am__EXEEXT_FALSE= fi if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then as_fn_error $? "conditional \"AMDEP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${NOTHREADS_TRUE}" && test -z "${NOTHREADS_FALSE}"; then as_fn_error $? "conditional \"NOTHREADS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${COND_TESTMAINS_TRUE}" && test -z "${COND_TESTMAINS_FALSE}"; then as_fn_error $? "conditional \"COND_TESTMAINS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKEPYTHON_TRUE}" && test -z "${MAKEPYTHON_FALSE}"; then as_fn_error $? "conditional \"MAKEPYTHON\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKEPYTHONCHM_TRUE}" && test -z "${MAKEPYTHONCHM_FALSE}"; then as_fn_error $? "conditional \"MAKEPYTHONCHM\" was never defined. Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 fi if test -z "${MAKEXADUMP_TRUE}" && test -z "${MAKEXADUMP_FALSE}"; then as_fn_error $? "conditional \"MAKEXADUMP\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKEUSERDOC_TRUE}" && test -z "${MAKEUSERDOC_FALSE}"; then as_fn_error $? "conditional \"MAKEUSERDOC\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKEQT_TRUE}" && test -z "${MAKEQT_FALSE}"; then as_fn_error $? "conditional \"MAKEQT\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKECMDLINE_TRUE}" && test -z "${MAKECMDLINE_FALSE}"; then as_fn_error $? "conditional \"MAKECMDLINE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi if test -z "${MAKECMDLINE_TRUE}" && test -z "${MAKECMDLINE_FALSE}"; then as_fn_error $? "conditional \"MAKECMDLINE\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi : "${CONFIG_STATUS=./config.status}" ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} as_write_fail=0 cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. # Compiler output produced by configure, useful for debugging # configure, is in config.log if it exists. debug=false ac_cs_recheck=false ac_cs_silent=false SHELL=\${CONFIG_SHELL-$SHELL} export SHELL _ASEOF cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 ## -------------------- ## ## M4sh Initialization. ## ## -------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which # is contrary to our usage. Disable this feature. alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else case `(set -o) 2>/dev/null` in #( *posix*) : set -o posix ;; #( *) : ;; esac fi as_nl=' ' export as_nl # Printing a long string crashes Solaris 7 /usr/bin/printf. as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo # Prefer a ksh shell builtin over an external printf program on Solaris, # but without wasting forks for bash or zsh. if test -z "$BASH_VERSION$ZSH_VERSION" \ && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='print -r --' as_echo_n='print -rn --' elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' as_echo_n='/usr/ucb/echo -n' else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; case $arg in #( *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; esac; expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" ' export as_echo_n_body as_echo_n='sh -c $as_echo_n_body as_echo' fi export as_echo_body as_echo='sh -c $as_echo_body as_echo' fi # The user is always right. 
if test "${PATH_SEPARATOR+set}" != set; then PATH_SEPARATOR=: (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || PATH_SEPARATOR=';' } fi # IFS # We need space, tab and new line, in precisely that order. Quoting is # there to prevent editors from complaining about space-tab. # (If _AS_PATH_WALK were called with IFS unset, it would disable word # splitting by setting IFS to empty value.) IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. as_myself= case $0 in #(( *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break done IFS=$as_save_IFS ;; esac # We did not find ourselves, most probably we were run as `sh COMMAND' # in which case we are not to be found in the path. if test "x$as_myself" = x; then as_myself=$0 fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 exit 1 fi # Unset variables that we do not need and which cause bugs (e.g. in # pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" # suppresses any "Segmentation fault" message there. '((' could # trigger a bug in pdksh 5.2.14. for as_var in BASH_ENV ENV MAIL MAILPATH do eval test x\${$as_var+set} = xset \ && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : done PS1='$ ' PS2='> ' PS4='+ ' # NLS nuisances. LC_ALL=C export LC_ALL LANGUAGE=C export LANGUAGE # CDPATH. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH # as_fn_error STATUS ERROR [LINENO LOG_FD] # ---------------------------------------- # Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are # provided, also output the error to LOG_FD, referencing LINENO. Then exit the # script with STATUS, using 1 if that was 0. as_fn_error () { as_status=$1; test $as_status -eq 0 && as_status=1 if test "$4"; then as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 fi $as_echo "$as_me: error: $2" >&2 as_fn_exit $as_status } # as_fn_error # as_fn_set_status STATUS # ----------------------- # Set $? to STATUS, without forking. as_fn_set_status () { return $1 } # as_fn_set_status # as_fn_exit STATUS # ----------------- # Exit the shell with STATUS, even in a "trap 0" or "set -e" context. as_fn_exit () { set +e as_fn_set_status $1 exit $1 } # as_fn_exit # as_fn_unset VAR # --------------- # Portably unset VAR. as_fn_unset () { { eval $1=; unset $1;} } as_unset=as_fn_unset # as_fn_append VAR VALUE # ---------------------- # Append the text in VALUE to the end of the definition contained in VAR. Take # advantage of any shell optimizations that allow amortized linear growth over # repeated appends, instead of the typical quadratic growth present in naive # implementations. if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : eval 'as_fn_append () { eval $1+=\$2 }' else as_fn_append () { eval $1=\$$1\$2 } fi # as_fn_append # as_fn_arith ARG... # ------------------ # Perform arithmetic evaluation on the ARGs, and store the result in the # global $as_val. Take advantage of shells that can avoid forks. The arguments # must be portable across $(()) and expr. if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : eval 'as_fn_arith () { as_val=$(( $* )) }' else as_fn_arith () { as_val=`expr "$@" || test $? 
-eq 1` } fi # as_fn_arith if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr else as_expr=false fi if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then as_basename=basename else as_basename=false fi if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then as_dirname=dirname else as_dirname=false fi as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ X"$0" : 'X\(/\)' \| . 2>/dev/null || $as_echo X/"$0" | sed '/^.*\/\([^/][^/]*\)\/*$/{ s//\1/ q } /^X\/\(\/\/\)$/{ s//\1/ q } /^X\/\(\/\).*/{ s//\1/ q } s/.*/./; q'` # Avoid depending upon Character Ranges. as_cr_letters='abcdefghijklmnopqrstuvwxyz' as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' as_cr_Letters=$as_cr_letters$as_cr_LETTERS as_cr_digits='0123456789' as_cr_alnum=$as_cr_Letters$as_cr_digits ECHO_C= ECHO_N= ECHO_T= case `echo -n x` in #((((( -n*) case `echo 'xy\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. xy) ECHO_C='\c';; *) echo `echo ksh88 bug on AIX 6.1` > /dev/null ECHO_T=' ';; esac;; *) ECHO_N='-n';; esac rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then rm -f conf$$.dir/conf$$.file else rm -f conf$$.dir mkdir conf$$.dir 2>/dev/null fi if (echo >conf$$.file) 2>/dev/null; then if ln -s conf$$.file conf$$ 2>/dev/null; then as_ln_s='ln -s' # ... but there are two gotchas: # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. # In both cases, we have to default to `cp -pR'. ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || as_ln_s='cp -pR' elif ln conf$$.file conf$$ 2>/dev/null; then as_ln_s=ln else as_ln_s='cp -pR' fi else as_ln_s='cp -pR' fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null # as_fn_mkdir_p # ------------- # Create "$as_dir" as a directory, including parents if necessary. as_fn_mkdir_p () { case $as_dir in #( -*) as_dir=./$as_dir;; esac test -d "$as_dir" || eval $as_mkdir_p || { as_dirs= while :; do case $as_dir in #( *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( *) as_qdir=$as_dir;; esac as_dirs="'$as_qdir' $as_dirs" as_dir=`$as_dirname -- "$as_dir" || $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$as_dir" : 'X\(//\)[^/]' \| \ X"$as_dir" : 'X\(//\)$' \| \ X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$as_dir" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` test -d "$as_dir" && break done test -z "$as_dirs" || eval "mkdir $as_dirs" } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" } # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then as_mkdir_p='mkdir -p "$as_dir"' else test -d ./-p && rmdir ./-p as_mkdir_p=false fi # as_fn_executable_p FILE # ----------------------- # Test if FILE is an executable regular file. as_fn_executable_p () { test -f "$1" && test -x "$1" } # as_fn_executable_p as_test_x='test -x' as_executable_p=as_fn_executable_p # Sed expression to map a string onto a valid CPP name. as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" # Sed expression to map a string onto a valid variable name. as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 ## ----------------------------------- ## ## Main body of $CONFIG_STATUS script. 
## ## ----------------------------------- ## _ASEOF test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Save the log message, to keep $0 and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by Recoll $as_me 1.26.3, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS CONFIG_LINKS = $CONFIG_LINKS CONFIG_COMMANDS = $CONFIG_COMMANDS $ $0 $@ on `(hostname || uname -n) 2>/dev/null | sed 1q` " _ACEOF case $ac_config_files in *" "*) set x $ac_config_files; shift; ac_config_files=$*;; esac case $ac_config_headers in *" "*) set x $ac_config_headers; shift; ac_config_headers=$*;; esac cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # Files that config.status was made for. config_files="$ac_config_files" config_headers="$ac_config_headers" config_commands="$ac_config_commands" _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ \`$as_me' instantiates files and other configuration actions from templates according to the current configuration. Unless the files and actions are specified as TAGs, all are instantiated by default. Usage: $0 [OPTION]... [TAG]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files --recheck update $as_me by reconfiguring in the same conditions --file=FILE[:TEMPLATE] instantiate the configuration file FILE --header=FILE[:TEMPLATE] instantiate the configuration header FILE Configuration files: $config_files Configuration headers: $config_headers Configuration commands: $config_commands Report bugs to the package provider." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ Recoll config.status 1.26.3 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" Copyright (C) 2012 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." ac_pwd='$ac_pwd' srcdir='$srcdir' INSTALL='$INSTALL' MKDIR_P='$MKDIR_P' AWK='$AWK' test -n "\$AWK" || AWK=awk _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # The default lists apply if the user does not specify any file. ac_need_defaults=: while test $# != 0 do case $1 in --*=?*) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` ac_shift=: ;; --*=) ac_option=`expr "X$1" : 'X\([^=]*\)='` ac_optarg= ac_shift=: ;; *) ac_option=$1 ac_optarg=$2 ac_shift=shift ;; esac case $ac_option in # Handling of the options. -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; --config | --confi | --conf | --con | --co | --c ) $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; '') as_fn_error $? 
"missing file argument" ;; esac as_fn_append CONFIG_FILES " '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac as_fn_append CONFIG_HEADERS " '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header as_fn_error $? "ambiguous option: \`$1' Try \`$0 --help' for more information.";; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ | -silent | --silent | --silen | --sile | --sil | --si | --s) ac_cs_silent=: ;; # This is an error. -*) as_fn_error $? "unrecognized option: \`$1' Try \`$0 --help' for more information." ;; *) as_fn_append ac_config_targets " $1" ac_need_defaults=false ;; esac shift done ac_configure_extra_args= if $ac_cs_silent; then exec 6>/dev/null ac_configure_extra_args="$ac_configure_extra_args --silent" fi _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 if \$ac_cs_recheck; then set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion shift \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 CONFIG_SHELL='$SHELL' export CONFIG_SHELL exec "\$@" fi _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 exec 5>>config.log { echo sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX ## Running $as_me. ## _ASBOX $as_echo "$ac_log" } >&5 _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 # # INIT-COMMANDS # AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" # The HP-UX ksh and POSIX shell print the target directory to stdout # if CDPATH is set. (unset CDPATH) >/dev/null 2>&1 && unset CDPATH sed_quote_subst='$sed_quote_subst' double_quote_subst='$double_quote_subst' delay_variable_subst='$delay_variable_subst' enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`' SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' 
exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`' lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`' nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`' objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' 
lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`' no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' need_lib_prefix='`$ECHO "$need_lib_prefix" 
| $SED "$delay_single_quote_subst"`' need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`' configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`' hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' 
enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' LTCC='$LTCC' LTCFLAGS='$LTCFLAGS' compiler='$compiler_DEFAULT' # A function that is used when there is no print builtin or printf. 
func_fallback_echo () { eval 'cat <<_LTECHO_EOF \$1 _LTECHO_EOF' } # Quote evaled strings. for var in SHELL \ ECHO \ PATH_SEPARATOR \ SED \ GREP \ EGREP \ FGREP \ LD \ NM \ LN_S \ lt_SP2NL \ lt_NL2SP \ reload_flag \ OBJDUMP \ deplibs_check_method \ file_magic_cmd \ file_magic_glob \ want_nocaseglob \ DLLTOOL \ sharedlib_from_linklib_cmd \ AR \ AR_FLAGS \ archiver_list_spec \ STRIP \ RANLIB \ CC \ CFLAGS \ compiler \ lt_cv_sys_global_symbol_pipe \ lt_cv_sys_global_symbol_to_cdecl \ lt_cv_sys_global_symbol_to_import \ lt_cv_sys_global_symbol_to_c_name_address \ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ lt_cv_nm_interface \ nm_file_list_spec \ lt_cv_truncate_bin \ lt_prog_compiler_no_builtin_flag \ lt_prog_compiler_pic \ lt_prog_compiler_wl \ lt_prog_compiler_static \ lt_cv_prog_compiler_c_o \ need_locks \ MANIFEST_TOOL \ DSYMUTIL \ NMEDIT \ LIPO \ OTOOL \ OTOOL64 \ shrext_cmds \ export_dynamic_flag_spec \ whole_archive_flag_spec \ compiler_needs_object \ with_gnu_ld \ allow_undefined_flag \ no_undefined_flag \ hardcode_libdir_flag_spec \ hardcode_libdir_separator \ exclude_expsyms \ include_expsyms \ file_list_spec \ variables_saved_for_relink \ libname_spec \ library_names_spec \ soname_spec \ install_override_mode \ finish_eval \ old_striplib \ striplib \ compiler_lib_search_dirs \ predep_objects \ postdep_objects \ predeps \ postdeps \ compiler_lib_search_path \ LD_CXX \ reload_flag_CXX \ compiler_CXX \ lt_prog_compiler_no_builtin_flag_CXX \ lt_prog_compiler_pic_CXX \ lt_prog_compiler_wl_CXX \ lt_prog_compiler_static_CXX \ lt_cv_prog_compiler_c_o_CXX \ export_dynamic_flag_spec_CXX \ whole_archive_flag_spec_CXX \ compiler_needs_object_CXX \ with_gnu_ld_CXX \ allow_undefined_flag_CXX \ no_undefined_flag_CXX \ hardcode_libdir_flag_spec_CXX \ hardcode_libdir_separator_CXX \ exclude_expsyms_CXX \ include_expsyms_CXX \ file_list_spec_CXX \ compiler_lib_search_dirs_CXX \ predep_objects_CXX \ postdep_objects_CXX \ predeps_CXX \ postdeps_CXX \ compiler_lib_search_path_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done # Double-quote double-evaled strings. for var in reload_cmds \ old_postinstall_cmds \ old_postuninstall_cmds \ old_archive_cmds \ extract_expsyms_cmds \ old_archive_from_new_cmds \ old_archive_from_expsyms_cmds \ archive_cmds \ archive_expsym_cmds \ module_cmds \ module_expsym_cmds \ export_symbols_cmds \ prelink_cmds \ postlink_cmds \ postinstall_cmds \ postuninstall_cmds \ finish_cmds \ sys_lib_search_path_spec \ configure_time_dlsearch_path \ configure_time_lt_sys_library_path \ reload_cmds_CXX \ old_archive_cmds_CXX \ old_archive_from_new_cmds_CXX \ old_archive_from_expsyms_cmds_CXX \ archive_cmds_CXX \ archive_expsym_cmds_CXX \ module_cmds_CXX \ module_expsym_cmds_CXX \ export_symbols_cmds_CXX \ prelink_cmds_CXX \ postlink_cmds_CXX; do case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in *[\\\\\\\`\\"\\\$]*) eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes ;; *) eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" ;; esac done ac_aux_dir='$ac_aux_dir' # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes INIT. 
if test -n "\${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi PACKAGE='$PACKAGE' VERSION='$VERSION' RM='$RM' ofile='$ofile' _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # Handling of arguments. for ac_config_target in $ac_config_targets do case $ac_config_target in "common/autoconfig.h") CONFIG_HEADERS="$CONFIG_HEADERS common/autoconfig.h" ;; "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; "$QTGUI/recoll.pro") CONFIG_FILES="$CONFIG_FILES $QTGUI/recoll.pro" ;; "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; "python/recoll/setup.py") CONFIG_FILES="$CONFIG_FILES python/recoll/setup.py" ;; "python/pychm/setup.py") CONFIG_FILES="$CONFIG_FILES python/pychm/setup.py" ;; "testmains/Makefile") CONFIG_FILES="$CONFIG_FILES testmains/Makefile" ;; *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac done # If the user did not use the arguments to specify the items to instantiate, # then the envvar interface is used. Set only those that are not. # We use the long form for the default assignment because of an extremely # bizarre bug on SunOS 4.1.3. if $ac_need_defaults; then test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands fi # Have a temporary directory for convenience. Make it in the build tree # simply because there is no reason against having it here, and in addition, # creating and moving files from /tmp can sometimes cause problems. # Hook for its removal unless debugging. # Note that there is a small window in which the directory will not be cleaned: # after its creation but before its name has been assigned to `$tmp'. $debug || { tmp= ac_tmp= trap 'exit_status=$? : "${ac_tmp:=$tmp}" { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status ' 0 trap 'as_fn_exit 1' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. { tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && test -d "$tmp" } || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") } || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 ac_tmp=$tmp # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. # This happens for instance with `./config.status config.h'. if test -n "$CONFIG_FILES"; then ac_cr=`echo X | tr X '\015'` # On cygwin, bash can eat \r inside `` if the user requested igncr. # But we know of no other shell where ac_cr would be empty at this # point, so we can use a bashism as a fallback. if test "x$ac_cr" = x; then eval ac_cr=\$\'\\r\' fi ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi echo 'BEGIN {' >"$ac_tmp/subs1.awk" && _ACEOF { echo "cat >conf$$subs.awk <<_ACEOF" && echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" fi done rm -f conf$$subs.sh cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && _ACEOF sed -n ' h s/^/S["/; s/!.*/"]=/ p g s/^[^!]*!// :repl t repl s/'"$ac_delim"'$// t delim :nl h s/\(.\{148\}\)..*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p n b repl :more1 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t nl :delim h s/\(.\{148\}\)..*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p b :more2 s/["\\]/\\&/g; s/^/"/; s/$/"\\/ p g s/.\{148\}// t delim ' >$CONFIG_STATUS || ac_write_fail=1 rm -f conf$$subs.awk cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && for (key in S) S_is_set[key] = 1 FS = "" } { line = $ 0 nfields = split(line, field, "@") substed = 0 len = length(field[1]) for (i = 2; i < nfields; i++) { key = field[i] keylen = length(key) if (S_is_set[key]) { value = S[key] line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) len += length(value) + length(field[++i]) substed = 1 } else len += 1 + keylen } print line } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" else cat fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 _ACEOF # VPATH may cause trouble with some makes, so we remove sole $(srcdir), # ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and # trailing colons and then remove the whole line if VPATH becomes empty # (actually we leave an empty line to preserve line numbers). if test "x$srcdir" = x.; then ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ h s/// s/^/:/ s/[ ]*$/:/ s/:\$(srcdir):/:/g s/:\${srcdir}:/:/g s/:@srcdir@:/:/g s/^:*// s/:*$// x s/\(=[ ]*\).*/\1/ G s/\n// s/^[^=]*=[ ]*$// }' fi cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 fi # test -n "$CONFIG_FILES" # Set up the scripts for CONFIG_HEADERS section. # No need to generate them if there are no CONFIG_HEADERS. # This happens for instance with `./config.status Makefile'. if test -n "$CONFIG_HEADERS"; then cat >"$ac_tmp/defines.awk" <<\_ACAWK || BEGIN { _ACEOF # Transform confdefs.h into an awk script `defines.awk', embedded as # here-document in config.status, that substitutes the proper values into # config.h.in to produce config.h. # Create a delimiter string that does not exist in confdefs.h, to ease # handling of long lines. ac_delim='%!_!# ' for ac_last_try in false false :; do ac_tt=`sed -n "/$ac_delim/p" confdefs.h` if test -z "$ac_tt"; then break elif $ac_last_try; then as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi done # For the awk script, D is an array of macro values keyed by name, # likewise P contains macro parameters if any. Preserve backslash # newline sequences. 
ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* sed -n ' s/.\{148\}/&'"$ac_delim"'/g t rset :rset s/^[ ]*#[ ]*define[ ][ ]*/ / t def d :def s/\\$// t bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3"/p s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p d :bsnl s/["\\]/\\&/g s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ D["\1"]=" \3\\\\\\n"\\/p t cont s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p t cont d :cont n s/.\{148\}/&'"$ac_delim"'/g t clear :clear s/\\$// t bsnlc s/["\\]/\\&/g; s/^/"/; s/$/"/p d :bsnlc s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p b cont ' >$CONFIG_STATUS || ac_write_fail=1 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 for (key in D) D_is_set[key] = 1 FS = "" } /^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { line = \$ 0 split(line, arg, " ") if (arg[1] == "#") { defundef = arg[2] mac1 = arg[3] } else { defundef = substr(arg[1], 2) mac1 = arg[2] } split(mac1, mac2, "(") #) macro = mac2[1] prefix = substr(line, 1, index(line, defundef) - 1) if (D_is_set[macro]) { # Preserve the white space surrounding the "#". print prefix "define", macro P[macro] D[macro] next } else { # Replace #undef with comments. This is necessary, for example, # in the case of _POSIX_SOURCE, which is predefined and required # on some systems where configure will not decide to define it. if (defundef == "undef") { print "/*", prefix defundef, macro, "*/" next } } } { print } _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 fi # test -n "$CONFIG_HEADERS" eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" shift for ac_tag do case $ac_tag in :[FHLC]) ac_mode=$ac_tag; continue;; esac case $ac_mode$ac_tag in :[FHL]*:*);; :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac ac_save_IFS=$IFS IFS=: set x $ac_tag IFS=$ac_save_IFS shift ac_file=$1 shift case $ac_mode in :L) ac_source=$1;; :[FH]) ac_file_inputs= for ac_f do case $ac_f in -) ac_f="$ac_tmp/stdin";; *) # Look for the file first in the build tree, then in the source tree # (if the path is not absolute). The absolute path cannot be DOS-style, # because $ac_f cannot contain `:'. test -f "$ac_f" || case $ac_f in [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac as_fn_append ac_file_inputs " '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't # use $as_me), people would be surprised to read: # /* config.h. Generated by config.status. */ configure_input='Generated from '` $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. case $configure_input in #( *\&* | *\|* | *\\* ) ac_sed_conf_input=`$as_echo "$configure_input" | sed 's/[\\\\&|]/\\\\&/g'`;; #( *) ac_sed_conf_input=$configure_input;; esac case $ac_tag in *:-:* | *:-) cat >"$ac_tmp/stdin" \ || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 ;; esac ;; esac ac_dir=`$as_dirname -- "$ac_file" || $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$ac_file" : 'X\(//\)[^/]' \| \ X"$ac_file" : 'X\(//\)$' \| \ X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$ac_file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir="$ac_dir"; as_fn_mkdir_p ac_builddir=. case "$ac_dir" in .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` # A ".." for each directory in $ac_dir_suffix. ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` case $ac_top_builddir_sub in "") ac_top_builddir_sub=. ac_top_build_prefix= ;; *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; esac ;; esac ac_abs_top_builddir=$ac_pwd ac_abs_builddir=$ac_pwd$ac_dir_suffix # for backward compatibility: ac_top_builddir=$ac_top_build_prefix case $srcdir in .) # We are building in place. ac_srcdir=. ac_top_srcdir=$ac_top_builddir_sub ac_abs_top_srcdir=$ac_pwd ;; [\\/]* | ?:[\\/]* ) # Absolute name. ac_srcdir=$srcdir$ac_dir_suffix; ac_top_srcdir=$srcdir ac_abs_top_srcdir=$srcdir ;; *) # Relative name. ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix ac_top_srcdir=$ac_top_build_prefix$srcdir ac_abs_top_srcdir=$ac_pwd/$srcdir ;; esac ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix case $ac_mode in :F) # # CONFIG_FILE # case $INSTALL in [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; esac ac_MKDIR_P=$MKDIR_P case $MKDIR_P in [\\/$]* | ?:[\\/]* ) ;; */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; esac _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. ac_datarootdir_hack=; ac_datarootdir_seen= ac_sed_dataroot=' /datarootdir/ { p q } /@datadir@/p /@docdir@/p /@infodir@/p /@localedir@/p /@mandir@/p' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_datarootdir_hack=' s&@datadir@&$datadir&g s&@docdir@&$docdir&g s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF # Neutralize VPATH when `$srcdir' = `.'. # Shell code in configure.ac might set extrasub. # FIXME: do we really want to maintain this feature? 
cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_sed_extra="$ac_vpsub $extrasub _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 :t /@[a-zA-Z_][a-zA-Z_0-9]*@/!b s|@configure_input@|$ac_sed_conf_input|;t t s&@top_builddir@&$ac_top_builddir_sub&;t t s&@top_build_prefix@&$ac_top_build_prefix&;t t s&@srcdir@&$ac_srcdir&;t t s&@abs_srcdir@&$ac_abs_srcdir&;t t s&@top_srcdir@&$ac_top_srcdir&;t t s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t s&@builddir@&$ac_builddir&;t t s&@abs_builddir@&$ac_abs_builddir&;t t s&@abs_top_builddir@&$ac_abs_top_builddir&;t t s&@INSTALL@&$ac_INSTALL&;t t s&@MKDIR_P@&$ac_MKDIR_P&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ "$ac_tmp/out"`; test -z "$ac_out"; } && { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined" >&2;} rm -f "$ac_tmp/stdin" case $ac_file in -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; esac \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; :H) # # CONFIG_HEADER # if test x"$ac_file" != x-; then { $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" } >"$ac_tmp/config.h" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$ac_tmp/config.h" "$ac_file" \ || as_fn_error $? "could not create $ac_file" "$LINENO" 5 fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ || as_fn_error $? "could not create -" "$LINENO" 5 fi # Compute "$ac_file"'s index in $config_headers. _am_arg="$ac_file" _am_stamp_count=1 for _am_header in $config_headers :; do case $_am_header in $_am_arg | $_am_arg:* ) break ;; * ) _am_stamp_count=`expr $_am_stamp_count + 1` ;; esac done echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$_am_arg" : 'X\(//\)[^/]' \| \ X"$_am_arg" : 'X\(//\)$' \| \ X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$_am_arg" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'`/stamp-h$_am_stamp_count ;; :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 $as_echo "$as_me: executing $ac_file commands" >&6;} ;; esac case $ac_file$ac_mode in "depfiles":C) test x"$AMDEP_TRUE" != x"" || { # Older Autoconf quotes --file arguments for eval, but not when files # are listed without --file. Let's play safe and only enable the eval # if we detect the quoting. case $CONFIG_FILES in *\'*) eval set x "$CONFIG_FILES" ;; *) set x $CONFIG_FILES ;; esac shift for mf do # Strip MF so we end up with the name of the file. mf=`echo "$mf" | sed -e 's/:.*$//'` # Check whether this is an Automake generated Makefile or not. 
# We used to match only the files named 'Makefile.in', but # some people rename them; so instead we look at the file content. # Grep'ing the first line is not enough: some people post-process # each Makefile.in and add a new line on top of each file to say so. # Grep'ing the whole file is not good either: AIX grep has a line # limit of 2048, but all sed's we know have understand at least 4000. if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then dirpart=`$as_dirname -- "$mf" || $as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$mf" : 'X\(//\)[^/]' \| \ X"$mf" : 'X\(//\)$' \| \ X"$mf" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$mf" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` else continue fi # Extract the definition of DEPDIR, am__include, and am__quote # from the Makefile without running 'make'. DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` test -z "$DEPDIR" && continue am__include=`sed -n 's/^am__include = //p' < "$mf"` test -z "$am__include" && continue am__quote=`sed -n 's/^am__quote = //p' < "$mf"` # Find all dependency output files, they are included files with # $(DEPDIR) in their names. We invoke sed twice because it is the # simplest approach to changing $(DEPDIR) to its actual value in the # expansion. for file in `sed -n " s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g'`; do # Make sure the directory exists. test -f "$dirpart/$file" && continue fdir=`$as_dirname -- "$file" || $as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ X"$file" : 'X\(//\)[^/]' \| \ X"$file" : 'X\(//\)$' \| \ X"$file" : 'X\(/\)' \| . 2>/dev/null || $as_echo X"$file" | sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ s//\1/ q } /^X\(\/\/\)[^/].*/{ s//\1/ q } /^X\(\/\/\)$/{ s//\1/ q } /^X\(\/\).*/{ s//\1/ q } s/.*/./; q'` as_dir=$dirpart/$fdir; as_fn_mkdir_p # echo "creating $dirpart/$file" echo '# dummy' > "$dirpart/$file" done done } ;; "libtool":C) # See if we are running on zsh, and set the options that allow our # commands through without removal of \ escapes. if test -n "${ZSH_VERSION+set}"; then setopt NO_GLOB_SUBST fi cfgfile=${ofile}T trap "$RM \"$cfgfile\"; exit 1" 1 2 15 $RM "$cfgfile" cat <<_LT_EOF >> "$cfgfile" #! $SHELL # Generated automatically by $as_me ($PACKAGE) $VERSION # NOTE: Changes made to this file will be lost: look at ltmain.sh. # Provide generalized library-building support services. # Written by Gordon Matzigkeit, 1996 # Copyright (C) 2014 Free Software Foundation, Inc. # This is free software; see the source for copying conditions. There is NO # warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # GNU Libtool is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of of the License, or # (at your option) any later version. # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program or library that is built # using GNU Libtool, you may include this file under the same # distribution terms that you use for the rest of that program. # # GNU Libtool is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program. If not, see . # The names of the tagged configurations supported by this script. available_tags='CXX ' # Configured defaults for sys_lib_dlsearch_path munging. : \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"} # ### BEGIN LIBTOOL CONFIG # Whether or not to build static libraries. build_old_libs=$enable_static # Which release of libtool.m4 was used? macro_version=$macro_version macro_revision=$macro_revision # Whether or not to build shared libraries. build_libtool_libs=$enable_shared # What type of objects to build. pic_mode=$pic_mode # Whether or not to optimize for fast installation. fast_install=$enable_fast_install # Shared archive member basename,for filename based shared library versioning on AIX. shared_archive_member_spec=$shared_archive_member_spec # Shell to use when invoking shell scripts. SHELL=$lt_SHELL # An echo program that protects backslashes. ECHO=$lt_ECHO # The PATH separator for the build system. PATH_SEPARATOR=$lt_PATH_SEPARATOR # The host system. host_alias=$host_alias host=$host host_os=$host_os # The build system. build_alias=$build_alias build=$build build_os=$build_os # A sed program that does not truncate output. SED=$lt_SED # Sed that helps us avoid accidentally triggering echo(1) options like -n. Xsed="\$SED -e 1s/^X//" # A grep program that handles long lines. GREP=$lt_GREP # An ERE matcher. EGREP=$lt_EGREP # A literal string matcher. FGREP=$lt_FGREP # A BSD- or MS-compatible name lister. NM=$lt_NM # Whether we need soft or hard links. LN_S=$lt_LN_S # What is the maximum length of a command? max_cmd_len=$max_cmd_len # Object file suffix (normally "o"). objext=$ac_objext # Executable file suffix (normally ""). exeext=$exeext # whether the shell understands "unset". lt_unset=$lt_unset # turn spaces into newlines. SP2NL=$lt_lt_SP2NL # turn newlines into spaces. NL2SP=$lt_lt_NL2SP # convert \$build file names to \$host format. to_host_file_cmd=$lt_cv_to_host_file_cmd # convert \$build files to toolchain format. to_tool_file_cmd=$lt_cv_to_tool_file_cmd # An object symbol dumper. OBJDUMP=$lt_OBJDUMP # Method to check whether dependent libraries are shared objects. deplibs_check_method=$lt_deplibs_check_method # Command to use when deplibs_check_method = "file_magic". file_magic_cmd=$lt_file_magic_cmd # How to find potential files when deplibs_check_method = "file_magic". file_magic_glob=$lt_file_magic_glob # Find potential files using nocaseglob when deplibs_check_method = "file_magic". want_nocaseglob=$lt_want_nocaseglob # DLL creation program. DLLTOOL=$lt_DLLTOOL # Command to associate shared and link libraries. sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd # The archiver. AR=$lt_AR # Flags to create an archive. AR_FLAGS=$lt_AR_FLAGS # How to feed a file listing to the archiver. archiver_list_spec=$lt_archiver_list_spec # A symbol stripping program. STRIP=$lt_STRIP # Commands used to install an old-style archive. RANLIB=$lt_RANLIB old_postinstall_cmds=$lt_old_postinstall_cmds old_postuninstall_cmds=$lt_old_postuninstall_cmds # Whether to use a lock for old archive extraction. lock_old_archive_extraction=$lock_old_archive_extraction # A C compiler. LTCC=$lt_CC # LTCC compiler flags. LTCFLAGS=$lt_CFLAGS # Take the output of nm and produce a listing of raw symbols and C names. global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe # Transform the output of nm in a proper C declaration. 
global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl # Transform the output of nm into a list of symbols to manually relocate. global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import # Transform the output of nm in a C name address pair. global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address # Transform the output of nm in a C name address pair when lib prefix is needed. global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix # The name lister interface. nm_interface=$lt_lt_cv_nm_interface # Specify filename containing input files for \$NM. nm_file_list_spec=$lt_nm_file_list_spec # The root where to search for dependent libraries,and where our libraries should be installed. lt_sysroot=$lt_sysroot # Command to truncate a binary pipe. lt_truncate_bin=$lt_lt_cv_truncate_bin # The name of the directory that contains temporary libtool files. objdir=$objdir # Used to examine libraries when file_magic_cmd begins with "file". MAGIC_CMD=$MAGIC_CMD # Must we lock files when doing compilation? need_locks=$lt_need_locks # Manifest tool. MANIFEST_TOOL=$lt_MANIFEST_TOOL # Tool to manipulate archived DWARF debug symbol files on Mac OS X. DSYMUTIL=$lt_DSYMUTIL # Tool to change global to local symbols on Mac OS X. NMEDIT=$lt_NMEDIT # Tool to manipulate fat objects and archives on Mac OS X. LIPO=$lt_LIPO # ldd/readelf like tool for Mach-O binaries on Mac OS X. OTOOL=$lt_OTOOL # ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. OTOOL64=$lt_OTOOL64 # Old archive suffix (normally "a"). libext=$libext # Shared library suffix (normally ".so"). shrext_cmds=$lt_shrext_cmds # The commands to extract the exported symbol list from a shared archive. extract_expsyms_cmds=$lt_extract_expsyms_cmds # Variables whose values should be saved in libtool wrapper scripts and # restored at link time. variables_saved_for_relink=$lt_variables_saved_for_relink # Do we need the "lib" prefix for modules? need_lib_prefix=$need_lib_prefix # Do we need a version for libraries? need_version=$need_version # Library versioning type. version_type=$version_type # Shared library runtime path variable. runpath_var=$runpath_var # Shared library path variable. shlibpath_var=$shlibpath_var # Is shlibpath searched before the hard-coded library search path? shlibpath_overrides_runpath=$shlibpath_overrides_runpath # Format of library name prefix. libname_spec=$lt_libname_spec # List of archive names. First name is the real one, the rest are links. # The last name is the one that the linker finds with -lNAME library_names_spec=$lt_library_names_spec # The coded name of the library, if different from the real name. soname_spec=$lt_soname_spec # Permission mode override for installation of shared libraries. install_override_mode=$lt_install_override_mode # Command to use after installation of a shared archive. postinstall_cmds=$lt_postinstall_cmds # Command to use after uninstallation of a shared archive. postuninstall_cmds=$lt_postuninstall_cmds # Commands used to finish a libtool library installation in a directory. finish_cmds=$lt_finish_cmds # As "finish_cmds", except a single script fragment to be evaled but # not shown. finish_eval=$lt_finish_eval # Whether we should hardcode library paths into libraries. hardcode_into_libs=$hardcode_into_libs # Compile-time system search path for libraries. sys_lib_search_path_spec=$lt_sys_lib_search_path_spec # Detected run-time system search path for libraries. 
sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path # Explicit LT_SYS_LIBRARY_PATH set during ./configure time. configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path # Whether dlopen is supported. dlopen_support=$enable_dlopen # Whether dlopen of programs is supported. dlopen_self=$enable_dlopen_self # Whether dlopen of statically linked programs is supported. dlopen_self_static=$enable_dlopen_self_static # Commands to strip libraries. old_striplib=$lt_old_striplib striplib=$lt_striplib # The linker used to build libraries. LD=$lt_LD # How to create reloadable object files. reload_flag=$lt_reload_flag reload_cmds=$lt_reload_cmds # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds # A language specific compiler. CC=$lt_compiler # Is the compiler the GNU compiler? with_gcc=$GCC # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds archive_expsym_cmds=$lt_archive_expsym_cmds # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds module_expsym_cmds=$lt_module_expsym_cmds # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary. hardcode_direct=$hardcode_direct # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. 
hardcode_minus_L=$hardcode_minus_L # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms # Symbols that must always be exported. include_expsyms=$lt_include_expsyms # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds # Specify filename containing input files. file_list_spec=$lt_file_list_spec # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects postdep_objects=$lt_postdep_objects predeps=$lt_predeps postdeps=$lt_postdeps # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path # ### END LIBTOOL CONFIG _LT_EOF cat <<'_LT_EOF' >> "$cfgfile" # ### BEGIN FUNCTIONS SHARED WITH CONFIGURE # func_munge_path_list VARIABLE PATH # ----------------------------------- # VARIABLE is name of variable containing _space_ separated list of # directories to be munged by the contents of PATH, which is string # having a format: # "DIR[:DIR]:" # string "DIR[ DIR]" will be prepended to VARIABLE # ":DIR[:DIR]" # string "DIR[ DIR]" will be appended to VARIABLE # "DIRP[:DIRP]::[DIRA:]DIRA" # string "DIRP[ DIRP]" will be prepended to VARIABLE and string # "DIRA[ DIRA]" will be appended to VARIABLE # "DIR[:DIR]" # VARIABLE will be replaced by "DIR[ DIR]" func_munge_path_list () { case x$2 in x) ;; *:) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\" ;; x:*) eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\" ;; *::*) eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\" eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\" ;; *) eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\" ;; esac } # Calculate cc_basename. Skip known compiler wrappers and cross-prefix. func_cc_basename () { for cc_temp in $*""; do case $cc_temp in compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; \-*) ;; *) break;; esac done func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` } # ### END FUNCTIONS SHARED WITH CONFIGURE _LT_EOF case $host_os in aix3*) cat <<\_LT_EOF >> "$cfgfile" # AIX sometimes has problems with the GCC collect2 program. For some # reason, if we set the COLLECT_NAMES environment variable, the problems # vanish in a puff of smoke. 
if test set != "${COLLECT_NAMES+set}"; then COLLECT_NAMES= export COLLECT_NAMES fi _LT_EOF ;; esac ltmain=$ac_aux_dir/ltmain.sh # We use sed instead of cat because bash on DJGPP gets confused if # if finds mixed CR/LF and LF-only lines. Since sed operates in # text mode, it properly converts lines to CR/LF. This bash problem # is reportedly fixed, but why not run on old versions too? sed '$q' "$ltmain" >> "$cfgfile" \ || (rm -f "$cfgfile"; exit 1) mv -f "$cfgfile" "$ofile" || (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") chmod +x "$ofile" cat <<_LT_EOF >> "$ofile" # ### BEGIN LIBTOOL TAG CONFIG: CXX # The linker used to build libraries. LD=$lt_LD_CXX # How to create reloadable object files. reload_flag=$lt_reload_flag_CXX reload_cmds=$lt_reload_cmds_CXX # Commands used to build an old-style archive. old_archive_cmds=$lt_old_archive_cmds_CXX # A language specific compiler. CC=$lt_compiler_CXX # Is the compiler the GNU compiler? with_gcc=$GCC_CXX # Compiler flag to turn off builtin functions. no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX # Additional compiler flags for building library objects. pic_flag=$lt_lt_prog_compiler_pic_CXX # How to pass a linker flag through the compiler. wl=$lt_lt_prog_compiler_wl_CXX # Compiler flag to prevent dynamic linking. link_static_flag=$lt_lt_prog_compiler_static_CXX # Does compiler simultaneously support -c and -o options? compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX # Whether or not to add -lc for building shared libraries. build_libtool_need_lc=$archive_cmds_need_lc_CXX # Whether or not to disallow shared libs when runtime libs are static. allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX # Compiler flag to allow reflexive dlopens. export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX # Compiler flag to generate shared objects directly from archives. whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX # Whether the compiler copes with passing no objects directly. compiler_needs_object=$lt_compiler_needs_object_CXX # Create an old-style archive from a shared archive. old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX # Create a temporary old-style archive to link instead of a shared archive. old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX # Commands used to build a shared archive. archive_cmds=$lt_archive_cmds_CXX archive_expsym_cmds=$lt_archive_expsym_cmds_CXX # Commands used to build a loadable module if different from building # a shared archive. module_cmds=$lt_module_cmds_CXX module_expsym_cmds=$lt_module_expsym_cmds_CXX # Whether we are building with GNU ld or not. with_gnu_ld=$lt_with_gnu_ld_CXX # Flag that allows shared libraries with undefined symbols to be built. allow_undefined_flag=$lt_allow_undefined_flag_CXX # Flag that enforces no undefined symbols. no_undefined_flag=$lt_no_undefined_flag_CXX # Flag to hardcode \$libdir into a binary during linking. # This must work even if \$libdir does not exist hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX # Whether we need a single "-rpath" flag with a separated argument. hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary. 
hardcode_direct=$hardcode_direct_CXX # Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes # DIR into the resulting binary and the resulting library dependency is # "absolute",i.e impossible to change by setting \$shlibpath_var if the # library is relocated. hardcode_direct_absolute=$hardcode_direct_absolute_CXX # Set to "yes" if using the -LDIR flag during linking hardcodes DIR # into the resulting binary. hardcode_minus_L=$hardcode_minus_L_CXX # Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR # into the resulting binary. hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX # Set to "yes" if building a shared library automatically hardcodes DIR # into the library and all subsequent libraries and executables linked # against it. hardcode_automatic=$hardcode_automatic_CXX # Set to yes if linker adds runtime paths of dependent libraries # to runtime path list. inherit_rpath=$inherit_rpath_CXX # Whether libtool must link a program against all its dependency libraries. link_all_deplibs=$link_all_deplibs_CXX # Set to "yes" if exported symbols are required. always_export_symbols=$always_export_symbols_CXX # The commands to list exported symbols. export_symbols_cmds=$lt_export_symbols_cmds_CXX # Symbols that should not be listed in the preloaded symbols. exclude_expsyms=$lt_exclude_expsyms_CXX # Symbols that must always be exported. include_expsyms=$lt_include_expsyms_CXX # Commands necessary for linking programs (against libraries) with templates. prelink_cmds=$lt_prelink_cmds_CXX # Commands necessary for finishing linking programs. postlink_cmds=$lt_postlink_cmds_CXX # Specify filename containing input files. file_list_spec=$lt_file_list_spec_CXX # How to hardcode a shared library path into an executable. hardcode_action=$hardcode_action_CXX # The directories searched by this compiler when creating a shared library. compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX # Dependencies to place before and after the objects being linked to # create a shared library. predep_objects=$lt_predep_objects_CXX postdep_objects=$lt_postdep_objects_CXX predeps=$lt_predeps_CXX postdeps=$lt_postdeps_CXX # The library search path used internally by the compiler when linking # a shared library. compiler_lib_search_path=$lt_compiler_lib_search_path_CXX # ### END LIBTOOL TAG CONFIG: CXX _LT_EOF ;; esac done # for ac_tag as_fn_exit 0 _ACEOF ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 # configure is writing to config.log, and then calls config.status. # config.status does its own redirection, appending to config.log. # Unfortunately, on DOS this fails, as config.log is still kept open # by configure, so config.status won't be able to write to it; its # output is simply discarded. So we exec the FD to /dev/null, # effectively closing config.log, so it can be properly (re)opened and # appended to by config.status. When coming back to configure, we # need to make the FD available again. if test "$no_create" != yes; then ac_cs_success=: ac_config_status_args= test "$silent" = yes && ac_config_status_args="$ac_config_status_args --quiet" exec 5>/dev/null $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. 
$ac_cs_success || as_fn_exit 1 fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi recoll-1.26.3/common/0000755000175000017500000000000013570165407011364 500000000000000recoll-1.26.3/common/rclconfig.h0000644000175000017500000004337513533651561013437 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RCLCONFIG_H_INCLUDED_ #define _RCLCONFIG_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include #include #include using std::string; using std::vector; using std::pair; using std::set; using std::map; #include "conftree.h" #include "smallut.h" class RclConfig; // Cache parameter string values for params which need computation and // which can change with the keydir. Minimize work by using the // keydirgen and a saved string to avoid unneeded recomputations: // keydirgen is incremented in RclConfig with each setKeyDir(). We // compare our saved value with the current one. If it did not change // no get() is needed. If it did change, but the resulting param get() // string value is identical, no recomputation is needed. class ParamStale { public: ParamStale() {} ParamStale(RclConfig *rconf, const string& nm) : parent(rconf), paramnames(vector(1, nm)), savedvalues(1) { } ParamStale(RclConfig *rconf, const vector& nms) : parent(rconf), paramnames(nms), savedvalues(nms.size()) { } void init(ConfNull *cnf); bool needrecompute(); const string& getvalue(unsigned int i = 0) const; private: // The config we belong to. RclConfig *parent{0}; // The configuration file we search for values. This is a borrowed // pointer belonging to the parent, we do not manage it. ConfNull *conffile{0}; vector paramnames; vector savedvalues; // Check at init if the configuration defines our vars at all. No // further processing is needed if it does not. 
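    //
    // Usage sketch for the caching scheme described in the class comment
    // above (illustrative only, assuming the needrecompute()/getvalue()
    // semantics stated there; the member names m_skpnstate/m_skpnlist are
    // taken from RclConfig below, but the body is a guessed example, not
    // the actual implementation):
    //
    //   vector<string>& RclConfig::getSkippedNames()
    //   {
    //       if (m_skpnstate.needrecompute()) {
    //           // keydir changed and the parameter string differs:
    //           // re-split the raw value into the cached vector.
    //           m_skpnlist.clear();
    //           stringToStrings(m_skpnstate.getvalue(), m_skpnlist);
    //       }
    //       return m_skpnlist;
    //   }
    //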
bool active{false}; int savedkeydirgen{-1}; }; // Hold the description for an external metadata-gathering command struct MDReaper { string fieldname; vector cmdv; }; // Data associated to a indexed field name: struct FieldTraits { string pfx; // indexing prefix, uint32_t valueslot{0}; enum ValueType {STR, INT}; ValueType valuetype{STR}; int valuelen{0}; int wdfinc{1}; // Index time term frequency increment (default 1) double boost{1.0}; // Query time boost (default 1.0) bool pfxonly{false}; // Suppress prefix-less indexing bool noterms{false}; // Don't add term to highlight data (e.g.: rclbes) }; class RclConfig { public: // Constructor: we normally look for a configuration file, except // if this was specified on the command line and passed through // argcnf RclConfig(const string *argcnf = 0); RclConfig(const RclConfig &r); ~RclConfig() { freeAll(); } // Return a writable clone of the main config. This belongs to the // caller (must delete it when done) ConfNull *cloneMainConfig(); /** (re)Read recoll.conf */ bool updateMainConfig(); bool ok() const {return m_ok;} const string &getReason() const {return m_reason;} /** Return the directory where this configuration is stored. * This was possibly silently created by the rclconfig * constructor it it is the default one (~/.recoll) and it did * not exist yet. */ string getConfDir() const {return m_confdir;} string getCacheDir() const; /** Check if the config files were modified since we read them */ bool sourceChanged() const; /** Returns true if this is ~/.recoll */ bool isDefaultConfig() const; /** Get the local value for /usr/local/share/recoll/ */ const string& getDatadir() const {return m_datadir;} /** Set current directory reference, and fetch automatic parameters. */ void setKeyDir(const string &dir); string getKeyDir() const {return m_keydir;} /** Get generic configuration parameter according to current keydir */ bool getConfParam(const string &name, string &value, bool shallow=false) const { if (m_conf == 0) return false; return m_conf->get(name, value, m_keydir, shallow); } /** Variant with autoconversion to int */ bool getConfParam(const string &name, int *value, bool shallow=false) const; /** Variant with autoconversion to bool */ bool getConfParam(const string &name, bool *value, bool shallow=false) const; /** Variant with conversion to vector * (stringToStrings). Can fail if the string is malformed. */ bool getConfParam(const string &name, vector *value, bool shallow=false) const; /** Variant with conversion to unordered_set * (stringToStrings). Can fail if the string is malformed. */ bool getConfParam(const string &name, std::unordered_set *v, bool shallow=false) const; /** Variant with conversion to vector */ bool getConfParam(const string &name, vector *value, bool shallow=false) const; enum ThrStage {ThrIntern=0, ThrSplit=1, ThrDbWrite=2}; pair getThrConf(ThrStage who) const; /** * Get list of config names under current sk, with possible * wildcard filtering */ vector getConfNames(const char *pattern = 0) const { return m_conf->getNames(m_keydir, pattern); } /** Check if name exists anywhere in config */ bool hasNameAnywhere(const string& nm) const { return m_conf? m_conf->hasNameAnywhere(nm) : false; } /** Get default charset for current keydir (was set during setKeydir) * filenames are handled differently */ const string &getDefCharset(bool filename = false) const; /** Get list of top directories. This is needed from a number of places * and needs some cleaning-up code. 
An empty list is always an error, no * need for other status * @param formonitor if set retrieve the list for real time monitoring * (if the monitor list does not exist we return the normal one). */ vector getTopdirs(bool formonitor = false) const; string getConfdirPath(const char *varname, const char *dflt) const; string getCachedirPath(const char *varname, const char *dflt) const; /** Get database and other directories */ string getDbDir() const; string getWebcacheDir() const; string getMboxcacheDir() const; string getAspellcacheDir() const; /** Get stoplist file name */ string getStopfile() const; /** Get synonym groups file name */ string getSynGroupsFile() const; /** Get indexing pid file name */ string getPidfile() const; /** Get indexing status file name */ string getIdxStatusFile() const; string getIdxStopFile() const; /** Do path translation according to the ptrans table */ void urlrewrite(const string& dbdir, string& url) const; ConfSimple *getPTrans() { return m_ptrans; } /** Get Web Queue directory name */ string getWebQueueDir() const; /** Get list of skipped file names for current keydir */ vector& getSkippedNames(); /** Get list of file name filters for current keydir (only those names indexed) */ vector& getOnlyNames(); /** Get list of skipped paths patterns. Doesn't depend on the keydir */ vector getSkippedPaths() const; /** Get list of skipped paths patterns, daemon version (may add some) Doesn't depend on the keydir */ vector getDaemSkippedPaths() const; /** Return list of no content suffixes. Used by confgui, indexing uses inStopSuffixes() for testing suffixes */ std::vector& getStopSuffixes(); /** * mimemap: Check if file name should be ignored because of suffix * * The list of ignored suffixes is initialized on first call, and * not changed for subsequent setKeydirs. */ bool inStopSuffixes(const string& fn); /** * Check in mimeconf if input mime type is a compressed one, and * return command to uncompress if it is. * * The returned command has substitutable places for input file name * and temp dir name, and will return output name */ bool getUncompressor(const string &mtpe, vector& cmd) const; /** mimemap: compute mimetype */ string getMimeTypeFromSuffix(const string &suffix) const; /** mimemap: get a list of all indexable mime types defined */ vector getAllMimeTypes() const; /** mimemap: Get appropriate suffix for mime type. This is inefficient */ string getSuffixFromMimeType(const string &mt) const; /** mimeconf: get input filter for mimetype */ string getMimeHandlerDef(const string &mimetype, bool filtertypes=false); /** For lines like: "name = some value; attr1 = value1; attr2 = val2" * Separate the value and store the attributes in a ConfSimple * @param whole the raw value. No way to escape a semi-colon in there. */ static bool valueSplitAttributes(const string& whole, string& value, ConfSimple& attrs) ; /** Compute difference between 'base' and 'changed', as elements to be * added and substracted from base. Input and output strings are in * stringToStrings() format. */ static void setPlusMinus( const std::string& base, const std::set& changed, std::string& plus, std::string& minus); /** Return the locale's character set */ static const std::string& getLocaleCharset(); /** Return icon path for mime type and tag */ string getMimeIconPath(const string &mt, const string& apptag) const; /** mimeconf: get list of file categories */ bool getMimeCategories(vector&) const; /** mimeconf: is parameter one of the categories ? 
*/ bool isMimeCategory(string&) const; /** mimeconf: get list of mime types for category */ bool getMimeCatTypes(const string& cat, vector&) const; /** mimeconf: get list of gui filters (doc cats by default */ bool getGuiFilterNames(vector&) const; /** mimeconf: get query lang frag for named filter */ bool getGuiFilter(const string& filtername, string& frag) const; /** fields: get field prefix from field name. Use additional query aliases if isquery is set */ bool getFieldTraits(const string& fldname, const FieldTraits **, bool isquery = false) const; const set& getStoredFields() const {return m_storedFields;} set getIndexedFields() const; /** Get canonic name for possible alias */ string fieldCanon(const string& fld) const; /** Get canonic name for possible alias, including query-only aliases */ string fieldQCanon(const string& fld) const; /** Get xattr name to field names translations */ const map& getXattrToField() const {return m_xattrtofld;} /** Get value of a parameter inside the "fields" file. Only some filters * use this (ie: mh_mail). The information specific to a given filter * is typically stored in a separate section(ie: [mail]) */ vector getFieldSectNames(const string &sk, const char* = 0) const; bool getFieldConfParam(const string &name, const string &sk, string &value) const; /** mimeview: get/set external viewer exec string(s) for mimetype(s) */ string getMimeViewerDef(const string &mimetype, const string& apptag, bool useall) const; set getMimeViewerAllEx() const; bool setMimeViewerAllEx(const set& allex); bool getMimeViewerDefs(vector >&) const; bool setMimeViewerDef(const string& mimetype, const string& cmd); /** Check if mime type is designated as needing no uncompress before view * (if a file of this type is found compressed). Default is true, * exceptions are found in the nouncompforviewmts mimeview list */ bool mimeViewerNeedsUncomp(const string &mimetype) const; /** Retrieve extra metadata-gathering commands */ const vector& getMDReapers(); /** Store/retrieve missing helpers description string */ bool getMissingHelperDesc(string&) const; void storeMissingHelperDesc(const string &s); /** Find exec file for external filter. * * If the input is an absolute path, we just return it. Else We * look in $RECOLL_FILTERSDIR, "filtersdir" from the config file, * $RECOLL_CONFDIR/. If nothing is found, we return the input with * the assumption that this will be used with a PATH-searching * exec. * * @param cmd is normally the command name from the command string * returned by getMimeHandlerDef(), but this could be used for any * command. If cmd begins with a /, we return cmd without * further processing. */ string findFilter(const string& cmd) const; /** Thread config init is not done automatically because not all programs need it and it uses the debug log so that it's better to call it after primary init */ void initThrConf(); const string& getOrigCwd() { return o_origcwd; } RclConfig& operator=(const RclConfig &r) { if (this != &r) { freeAll(); initFrom(r); } return *this; } friend class ParamStale; private: int m_ok; string m_reason; // Explanation for bad state string m_confdir; // User directory where the customized files are stored // Normally same as confdir. Set to store all bulk data elsewhere. // Provides defaults top location for dbdir, webcachedir, // mboxcachedir, aspellDictDir, which can still be used to // override. string m_cachedir; string m_datadir; // Example: /usr/local/share/recoll string m_keydir; // Current directory used for parameter fetches. 
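    //
    // Usage sketch for findFilter() documented above (illustrative only;
    // "rclaudio" is just an example command name, and the lookup order is
    // the one stated in the findFilter() comment, not re-checked here):
    //
    //   std::string execpath = config->findFilter("rclaudio");
    //   // If the filter was found under $RECOLL_FILTERSDIR, the
    //   // "filtersdir" configuration variable, or $RECOLL_CONFDIR, this
    //   // is now an absolute path. Otherwise the input name is returned
    //   // unchanged and the caller must rely on a PATH-searching exec.
    //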
    int m_keydirgen;    // To help with knowing when to update computed data.

    vector<string> m_cdirs; // directory stack for the confstacks

    map<string, FieldTraits> m_fldtotraits; // Field to field params
    map<string, string> m_aliastocanon;
    map<string, string> m_aliastoqcanon;
    set<string> m_storedFields;
    map<string, string> m_xattrtofld;

    unsigned int m_maxsufflen;
    ParamStale m_oldstpsuffstate; // Values from user mimemap, now obsolete
    ParamStale m_stpsuffstate;
    vector<string> m_stopsuffvec;

    // skippedNames state
    ParamStale m_skpnstate;
    vector<string> m_skpnlist;

    // onlyNames state
    ParamStale m_onlnstate;
    vector<string> m_onlnlist;

    // Original current working directory. Set once at init before we do any
    // chdir'ing and used for converting user args to absolute paths.
    static string o_origcwd;

    // Parameters auto-fetched on setkeydir
    string m_defcharset;
    static string o_localecharset;

    // Limiting set of mime types to be processed. Normally empty.
    ParamStale m_rmtstate;
    std::unordered_set<std::string> m_restrictMTypes;
    // Exclusion set of mime types. Normally empty
    ParamStale m_xmtstate;
    std::unordered_set<std::string> m_excludeMTypes;

    vector<pair<int, int> > m_thrConf;

    // Same idea with the metadata-gathering external commands,
    // (e.g. used to reap tagging info: "tmsu tags %f")
    ParamStale m_mdrstate;
    vector<MDReaper> m_mdreapers;

    //////////////////
    // Members needing explicit processing when copying
    void *m_stopsuffixes;
    ConfStack *m_conf;   // Parsed configuration files
    ConfStack *mimemap;  // The files don't change with keydir,
    ConfStack *mimeconf; // but their content may depend on it.
    ConfStack *mimeview; //
    ConfStack *m_fields;
    ConfSimple *m_ptrans; // Paths translations
    ///////////////////

    /** Create initial user configuration */
    bool initUserConfig();
    /** Init all ParamStale members */
    void initParamStale(ConfNull *cnf, ConfNull *mimemap);
    /** Copy from other */
    void initFrom(const RclConfig& r);
    /** Init pointers to 0 */
    void zeroMe();
    /** Free data then zero pointers */
    void freeAll();
    bool readFieldsConfig(const string& errloc);
};

// This global variable defines if we are running with an index
// stripped of accents and case or a raw one. Ideally, it should be
// constant, but it needs to be initialized from the configuration, so
// there is no way to do this. It never changes after initialization
// of course. Changing the value on a given index imposes a
// reset. When using multiple indexes, all must have the same value
extern bool o_index_stripchars;

// Store document text in index. Allows extracting snippets from text
// instead of building them from index position data. Has become
// necessary for versions of Xapian 1.6, which have dropped support
// for the chert index format, and adopted a setup which renders our
// use of positions list unacceptably slow in some cases. The text is just
// translated from its original format to UTF-8 plain text, and is not
// stripped of upper-case, diacritics, or punctuation signs. Defaults to true.
extern bool o_index_storedoctext;

// This global variable defines if we use mtime instead of ctime for
// up-to-date tests. This is mostly incompatible with xattr indexing,
// in addition to other issues. See recoll.conf comments.
extern bool o_uptodate_test_use_mtime;

#endif /* _RCLCONFIG_H_INCLUDED_ */
recoll-1.26.3/common/webstore.h0000644000175000017500000000300513533651561013305 00000000000000/* Copyright (C) 2009 J.F.Dockes
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _webstore_h_included_
#define _webstore_h_included_

#include <string>

class RclConfig;
namespace Rcl {
class Db;
class Doc;
}
class CirCache;

/**
 * Manage the CirCache for the Web Queue indexer. Separated from the main
 * indexer code because it's also used for querying (getting the data for a
 * preview)
 */
class WebStore {
public:
    WebStore(RclConfig *config);
    ~WebStore();
    bool getFromCache(const std::string& udi, Rcl::Doc &doc, std::string& data,
                      std::string *hittype = 0);
    // We could write proxies for all the circache ops, but why bother?
    CirCache *cc() {return m_cache;}
private:
    CirCache *m_cache;
};

extern const std::string cstr_bgc_mimetype;

#endif /* _webstore_h_included_ */
recoll-1.26.3/common/autoconfig.h.in0000644000175000017500000001125113566731742014226 00000000000000/* common/autoconfig.h.in. Generated from configure.ac by autoheader. */

/* Define if building universal (internal helper macro) */
#undef AC_APPLE_UNIVERSAL_BUILD

/* Path to the aspell api include file */
#undef ASPELL_INCLUDE

/* Path to the aspell program */
#undef ASPELL_PROG

/* No X11 session monitoring support */
#undef DISABLE_X11MON

/* Path to the fam api include file */
#undef FAM_INCLUDE

/* Path to the file program */
#undef FILE_PROG

/* "Have C++0x" */
#undef HAVE_CXX0X_UNORDERED

/* Define to 1 if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H

/* dlopen function is available */
#undef HAVE_DLOPEN

/* Define if you have the iconv() function and it works. */
#undef HAVE_ICONV

/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H

/* Define to 1 if you have the `kqueue' function. */
#undef HAVE_KQUEUE

/* Define to 1 if you have the `chm' library (-lchm). */
#undef HAVE_LIBCHM

/* Define to 1 if you have the `pthread' library (-lpthread). */
#undef HAVE_LIBPTHREAD

/* Define to 1 if you have the `z' library (-lz). */
#undef HAVE_LIBZ

/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H

/* Define to 1 if you have the `mkdtemp' function. */
#undef HAVE_MKDTEMP

/* Define to 1 if you have the `posix_spawn' function. */
#undef HAVE_POSIX_SPAWN

/* Define to 1 if you have the `setrlimit' function. */
#undef HAVE_SETRLIMIT

/* Has std::shared_ptr */
#undef HAVE_SHARED_PTR_STD

/* Has std::tr1::shared_ptr */
#undef HAVE_SHARED_PTR_TR1

/* Define to 1 if you have the <spawn.h> header file. */
#undef HAVE_SPAWN_H

/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H

/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H

/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H

/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H

/* Define to 1 if you have the <sys/mount.h> header file. */
#undef HAVE_SYS_MOUNT_H

/* Define to 1 if you have the <sys/param.h> header file. */
#undef HAVE_SYS_PARAM_H_

/* Define to 1 if you have the <sys/statfs.h> header file. */
#undef HAVE_SYS_STATFS_H

/* Define to 1 if you have the <sys/statvfs.h> header file. */
#undef HAVE_SYS_STATVFS_H

/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H

/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H

/* Define to 1 if you have the <sys/vfs.h> header file.
*/ #undef HAVE_SYS_VFS_H /* "Have tr1" */ #undef HAVE_TR1_UNORDERED /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H /* Define to 1 if you have the `vsnprintf' function. */ #undef HAVE_VSNPRINTF /* Define as const if the declaration of iconv() needs const. */ #undef ICONV_CONST /* Use multiple threads for indexing */ #undef IDX_THREADS /* Define to the sub-directory where libtool stores uninstalled libraries. */ #undef LT_OBJDIR /* Define to the address where bug reports for this package should be sent. */ #undef PACKAGE_BUGREPORT /* Define to the full name of this package. */ #undef PACKAGE_NAME /* Define to the full name and version of this package. */ #undef PACKAGE_STRING /* Define to the one symbol short name of this package. */ #undef PACKAGE_TARNAME /* Define to the home page for this package. */ #undef PACKAGE_URL /* Define to the version of this package. */ #undef PACKAGE_VERSION /* putenv parameter is const */ #undef PUTENV_ARG_CONST /* Real time monitoring option */ #undef RCL_MONITOR /* Split camelCase words */ #undef RCL_SPLIT_CAMELCASE /* Compile the aspell interface */ #undef RCL_USE_ASPELL /* Compile the fam interface */ #undef RCL_USE_FAM /* Compile the inotify interface */ #undef RCL_USE_INOTIFY /* Define to 1 if you have the ANSI C header files. */ #undef STDC_HEADERS /* Use posix_spawn() */ #undef USE_POSIX_SPAWN /* Enable using the system's 'file' command to id mime if we fail internally */ #undef USE_SYSTEM_FILE_COMMAND /* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most significant byte first (like Motorola and SPARC, unlike Intel). */ #if defined AC_APPLE_UNIVERSAL_BUILD # if defined __BIG_ENDIAN__ # define WORDS_BIGENDIAN 1 # endif #else # ifndef WORDS_BIGENDIAN # undef WORDS_BIGENDIAN # endif #endif /* Define to 1 if the X Window System is missing or not being used. */ #undef X_DISPLAY_MISSING /* Enable large inode numbers on Mac OS X 10.5. */ #ifndef _DARWIN_USE_64_BIT_INODE # define _DARWIN_USE_64_BIT_INODE 1 #endif /* Number of bits in a file offset, on hosts where this is settable. */ #undef _FILE_OFFSET_BITS /* Define for large files, on AIX-style hosts. */ #undef _LARGE_FILES #include "conf_post.h" recoll-1.26.3/common/cstr.h0000644000175000017500000000650113566424763012442 00000000000000/* Copyright (C) 2011-2018 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _CSTR_H_INCLUDED_ #define _CSTR_H_INCLUDED_ // recoll mostly uses STL strings. In many places we had automatic // conversion from a C string to an STL one. This costs, and can // become significant if used often. // // This file and the associated .cpp file declares/defines constant // strings used in the program. Strings are candidates for a move here // when they are used in a fast loop or are shared. 
#include // The following slightly hacky preprocessing directives and the // companion code in the cpp file looks complicated, but it just // ensures that we only have to write the strings once to get the // extern declaration and the definition. #ifdef RCLIN_CSTR_CPPFILE #undef DEF_CSTR #define DEF_CSTR(NM, STR) const std::string cstr_##NM(STR) #else #define DEF_CSTR(NM, STR) extern const std::string cstr_##NM #endif DEF_CSTR(caption, "caption"); DEF_CSTR(colon, ":"); DEF_CSTR(dmtime, "dmtime"); DEF_CSTR(dquote, "\""); DEF_CSTR(fbytes, "fbytes"); DEF_CSTR(fileu, "file://"); DEF_CSTR(fmtime, "fmtime"); DEF_CSTR(iso_8859_1, "ISO-8859-1"); DEF_CSTR(utf8, "UTF-8"); DEF_CSTR(cp1252, "CP1252"); DEF_CSTR(minwilds, "*?["); DEF_CSTR(newline, "\n"); DEF_CSTR(null, ""); DEF_CSTR(plus, "+"); DEF_CSTR(textplain, "text/plain"); DEF_CSTR(texthtml, "text/html"); DEF_CSTR(url, "url"); // Marker for HTML format fields DEF_CSTR(fldhtm, "\007"); // Characters that can -begin- a wildcard or regexp expression. DEF_CSTR(wildSpecStChars, "*?["); DEF_CSTR(regSpecStChars, "(.[{"); // Values used as keys inside Dijon::Filter::metaData[]. // The document data. DEF_CSTR(dj_keycontent, "content"); // These fields go from the topmost handler (text/plain) into the // Rcl::Doc::meta, possibly with some massaging. DEF_CSTR(dj_keyanc, "rclanc"); DEF_CSTR(dj_keyorigcharset, "origcharset"); DEF_CSTR(dj_keyds, "description"); DEF_CSTR(dj_keyabstract, "abstract"); // Built or inherited along the handler stack, then copied to doc DEF_CSTR(dj_keyipath, "ipath"); DEF_CSTR(dj_keyfn, "filename"); DEF_CSTR(dj_keyauthor, "author"); DEF_CSTR(dj_keymd, "modificationdate"); // charset and mimetype are explicitly blocked from going into the doc meta DEF_CSTR(dj_keycharset, "charset"); DEF_CSTR(dj_keymt, "mimetype"); // All other meta fields are directly copied from // Dijon::Filter::metaData to Rcl::Doc::meta. The defininitions which // follow are just for well-known names, with no particular processing // in internfile. DEF_CSTR(dj_keytitle, "title"); DEF_CSTR(dj_keyrecipient, "recipient"); DEF_CSTR(dj_keymsgid, "msgid"); DEF_CSTR(dj_keymd5, "md5"); #endif /* _CSTR_H_INCLUDED_ */ recoll-1.26.3/common/rclconfig.cpp0000644000175000017500000016101313566450615013763 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #ifndef _WIN32 #include #include #else #include "wincodepages.h" #endif #include #include "safesysstat.h" #include "safeunistd.h" #ifdef __FreeBSD__ #include #endif #include #include #include #include #include #include #include #include "cstr.h" #include "pathut.h" #include "rclutil.h" #include "rclconfig.h" #include "conftree.h" #include "log.h" #include "smallut.h" #include "readfile.h" #include "fstreewalk.h" #include "cpuconf.h" #include "execmd.h" using namespace std; // Static, logically const, RclConfig members or module static // variables are initialized once from the first object build during // process initialization. // We default to a case- and diacritics-less index for now bool o_index_stripchars = true; // Default to storing the text contents for generating snippets. This // is only an approximate 10% bigger index and produces nicer // snippets. bool o_index_storedoctext = true; bool o_uptodate_test_use_mtime = false; string RclConfig::o_localecharset; string RclConfig::o_origcwd; // We build this once. Used to ensure that the suffix used for a temp // file of a given MIME type is the FIRST one from the mimemap config // file. Previously it was the first in alphabetic (map) order, with // sometimes strange results. static unordered_map mime_suffixes; // Compute the difference of 1st to 2nd sets and return as plus/minus // sets. Some args are std::set and some others stringToString() // strings for convenience void RclConfig::setPlusMinus(const string& sbase, const set& upd, string& splus, string& sminus) { set base; stringToStrings(sbase, base); vector diff; auto it = set_difference(base.begin(), base.end(), upd.begin(), upd.end(), std::inserter(diff, diff.begin())); sminus = stringsToString(diff); diff.clear(); it = set_difference(upd.begin(), upd.end(), base.begin(), base.end(), std::inserter(diff, diff.begin())); splus = stringsToString(diff); } /* Compute result of substracting strminus and adding strplus to base string. All string represent sets of values to be computed with stringToStrings() */ static void computeBasePlusMinus(set& res, const string& strbase, const string& strplus, const string& strminus) { set plus, minus; res.clear(); stringToStrings(strbase, res); stringToStrings(strplus, plus); stringToStrings(strminus, minus); for (auto& it : minus) { auto it1 = res.find(it); if (it1 != res.end()) { res.erase(it1); } } for (auto& it : plus) { res.insert(it); } } bool ParamStale::needrecompute() { LOGDEB1("ParamStale:: needrecompute. 
parent gen " << parent->m_keydirgen << " mine " << savedkeydirgen << "\n"); if (!conffile) { LOGDEB("ParamStale::needrecompute: conffile not set\n"); return false; } bool needrecomp = false; if (active && parent->m_keydirgen != savedkeydirgen) { savedkeydirgen = parent->m_keydirgen; for (unsigned int i = 0; i < paramnames.size(); i++) { string newvalue; conffile->get(paramnames[i], newvalue, parent->m_keydir); LOGDEB1("ParamStale::needrecompute: " << paramnames[i] << " -> " << newvalue << " keydir " << parent->m_keydir << endl); if (newvalue.compare(savedvalues[i])) { savedvalues[i] = newvalue; needrecomp = true; } } } return needrecomp; } const string& ParamStale::getvalue(unsigned int i) const { if (i < savedvalues.size()) { return savedvalues[i]; } else { static string nll; return nll; } } void ParamStale::init(ConfNull *cnf) { conffile = cnf; active = false; if (conffile) { for (auto& nm : paramnames) { if (conffile->hasNameAnywhere(nm)) { active = true; break; } } } savedkeydirgen = -1; } bool RclConfig::isDefaultConfig() const { string defaultconf = path_cat(path_homedata(), path_defaultrecollconfsubdir()); path_catslash(defaultconf); string specifiedconf = path_canon(m_confdir); path_catslash(specifiedconf); return !defaultconf.compare(specifiedconf); } RclConfig::RclConfig(const RclConfig &r) : m_oldstpsuffstate(this, "recoll_noindex"), m_stpsuffstate(this, {"noContentSuffixes", "noContentSuffixes+", "noContentSuffixes-"}), m_skpnstate(this, {"skippedNames", "skippedNames+", "skippedNames-"}), m_onlnstate(this, "onlyNames"), m_rmtstate(this, "indexedmimetypes"), m_xmtstate(this, "excludedmimetypes"), m_mdrstate(this, "metadatacmds") { initFrom(r); } RclConfig::RclConfig(const string *argcnf) : m_oldstpsuffstate(this, "recoll_noindex"), m_stpsuffstate(this, {"noContentSuffixes", "noContentSuffixes+", "noContentSuffixes-"}), m_skpnstate(this, {"skippedNames", "skippedNames+", "skippedNames-"}), m_onlnstate(this, "onlyNames"), m_rmtstate(this, "indexedmimetypes"), m_xmtstate(this, "excludedmimetypes"), m_mdrstate(this, "metadatacmds") { zeroMe(); if (o_origcwd.empty()) { char buf[MAXPATHLEN]; if (getcwd(buf, MAXPATHLEN)) { o_origcwd = string(buf); } else { fprintf(stderr, "recollxx: can't retrieve current working " "directory: relative path translations will fail\n"); } } // Compute our data dir name, typically /usr/local/share/recoll m_datadir = path_pkgdatadir(); // We only do the automatic configuration creation thing for the default // config dir, not if it was specified through -c or RECOLL_CONFDIR bool autoconfdir = false; // Command line config name overrides environment if (argcnf && !argcnf->empty()) { m_confdir = path_absolute(*argcnf); if (m_confdir.empty()) { m_reason = string("Cant turn [") + *argcnf + "] into absolute path"; return; } } else { const char *cp = getenv("RECOLL_CONFDIR"); if (cp) { m_confdir = path_canon(cp); } else { autoconfdir = true; m_confdir=path_cat(path_homedata(), path_defaultrecollconfsubdir()); } } // Note: autoconfdir and isDefaultConfig() are normally the same. We just // want to avoid the imperfect test in isDefaultConfig() if we actually know // this is the default conf if (!autoconfdir && !isDefaultConfig()) { if (!path_exists(m_confdir)) { m_reason = "Explicitly specified configuration " "directory must exist" " (won't be automatically created). Use mkdir first"; return; } } if (!path_exists(m_confdir)) { if (!initUserConfig()) return; } // This can't change once computed inside a process. 
It would be // nicer to move this to a static class initializer to avoid // possible threading issues but this doesn't work (tried) as // things would not be ready. In practise we make sure that this // is called from the main thread at once, by constructing a config // from recollinit if (o_localecharset.empty()) { #ifdef _WIN32 o_localecharset = winACPName(); #elif defined(__APPLE__) o_localecharset = "UTF-8"; #else const char *cp; cp = nl_langinfo(CODESET); // We don't keep US-ASCII. It's better to use a superset // Ie: me have a C locale and some french file names, and I // can't imagine a version of iconv that couldn't translate // from iso8859? // The 646 thing is for solaris. if (cp && *cp && strcmp(cp, "US-ASCII") #ifdef sun && strcmp(cp, "646") #endif ) { o_localecharset = string(cp); } else { // Use cp1252 instead of iso-8859-1, it's a superset. o_localecharset = string(cstr_cp1252); } #endif LOGDEB1("RclConfig::getDefCharset: localecharset [" << o_localecharset << "]\n"); } const char *cp; // Additional config directory, values override user ones if ((cp = getenv("RECOLL_CONFTOP"))) { m_cdirs.push_back(cp); } // User config m_cdirs.push_back(m_confdir); // Additional config directory, overrides system's, overridden by user's if ((cp = getenv("RECOLL_CONFMID"))) { m_cdirs.push_back(cp); } // Base/installation config m_cdirs.push_back(path_cat(m_datadir, "examples")); string cnferrloc; for (const auto& dir : m_cdirs) { cnferrloc += "[" + dir + "] or "; } if (cnferrloc.size() > 4) { cnferrloc.erase(cnferrloc.size()-4); } // Read and process "recoll.conf" if (!updateMainConfig()) { m_reason = string("No/bad main configuration file in: ") + cnferrloc; return; } // Other files mimemap = new ConfStack("mimemap", m_cdirs, true); if (mimemap == 0 || !mimemap->ok()) { m_reason = string("No or bad mimemap file in: ") + cnferrloc; return; } // Maybe create the MIME to suffix association reverse map. Do it // in file order so that we can control what suffix is used when // there are several. This only uses the distributed file, not any // local customization (too complicated). 
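    // Illustrative sketch (the suffix/type pairs below are assumptions, not
    // actual mimemap content): if the distributed mimemap listed, in this
    // order,
    //     .htm = text/html
    //     .html = text/html
    // then mime_suffixes would end up holding only {"text/html" -> ".htm"},
    // because map::insert() is a no-op for a key which is already present,
    // so the first suffix seen in file order wins.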
if (mime_suffixes.empty()) { ConfSimple mm( path_cat(path_cat(m_datadir, "examples"), "mimemap").c_str()); vector order = mm.getlines(); for (const auto& entry: order) { if (entry.m_kind == ConfLine::CFL_VAR) { LOGDEB1("CONFIG: " << entry.m_data << " -> " << entry.m_value << endl); // Remember: insert() only does anything for new keys, // so we only have the first value in the map mime_suffixes.insert( pair(entry.m_value, entry.m_data)); } } } mimeconf = new ConfStack("mimeconf", m_cdirs, true); if (mimeconf == 0 || !mimeconf->ok()) { m_reason = string("No/bad mimeconf in: ") + cnferrloc; return; } mimeview = new ConfStack("mimeview", m_cdirs, false); if (mimeview == 0) mimeview = new ConfStack("mimeview", m_cdirs, true); if (mimeview == 0 || !mimeview->ok()) { m_reason = string("No/bad mimeview in: ") + cnferrloc; return; } if (!readFieldsConfig(cnferrloc)) return; // Default is no threading m_thrConf = {{-1, 0}, {-1, 0}, {-1, 0}}; m_ptrans = new ConfSimple(path_cat(m_confdir, "ptrans").c_str()); m_ok = true; setKeyDir(cstr_null); initParamStale(m_conf, mimemap); return; } bool RclConfig::updateMainConfig() { ConfStack *newconf = new ConfStack("recoll.conf", m_cdirs, true); if (newconf == 0 || !newconf->ok()) { if (m_conf) return false; m_ok = false; initParamStale(0, 0); return false; } delete m_conf; m_conf = newconf; initParamStale(m_conf, mimemap); setKeyDir(cstr_null); bool bvalue = true; if (getConfParam("skippedPathsFnmPathname", &bvalue) && bvalue == false) { FsTreeWalker::setNoFnmPathname(); } string nowalkfn; getConfParam("nowalkfn", nowalkfn); if (!nowalkfn.empty()) { FsTreeWalker::setNoWalkFn(nowalkfn); } static int m_index_stripchars_init = 0; if (!m_index_stripchars_init) { getConfParam("indexStripChars", &o_index_stripchars); getConfParam("indexStoreDocText", &o_index_storedoctext); getConfParam("testmodifusemtime", &o_uptodate_test_use_mtime); m_index_stripchars_init = 1; } if (getConfParam("cachedir", m_cachedir)) { m_cachedir = path_canon(path_tildexpand(m_cachedir)); } return true; } ConfNull *RclConfig::cloneMainConfig() { ConfNull *conf = new ConfStack("recoll.conf", m_cdirs, false); if (conf == 0 || !conf->ok()) { m_reason = string("Can't read config"); return 0; } return conf; } // Remember what directory we're under (for further conf->get()s), and // prefetch a few common values. 
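// A minimal usage sketch (hypothetical caller code, not part of this file):
// the indexer typically points the configuration at the directory currently
// being processed so that per-directory overrides take effect, e.g.:
//
//     config->setKeyDir(path_getfather(filepath));
//     string cs = config->getDefCharset(false);  // may differ per directory
//     // ... process the file ...
//     config->setKeyDir(cstr_null);              // back to global values
//
// path_getfather() is from pathut.h; the exact call sequence above is an
// assumption for illustration only.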
void RclConfig::setKeyDir(const string &dir) { if (!dir.compare(m_keydir)) return; m_keydirgen++; m_keydir = dir; if (m_conf == 0) return; if (!m_conf->get("defaultcharset", m_defcharset, m_keydir)) m_defcharset.erase(); } bool RclConfig::getConfParam(const string &name, int *ivp, bool shallow) const { string value; if (!getConfParam(name, value, shallow)) return false; errno = 0; long lval = strtol(value.c_str(), 0, 0); if (lval == 0 && errno) return 0; if (ivp) *ivp = int(lval); return true; } bool RclConfig::getConfParam(const string &name, bool *bvp, bool shallow) const { if (!bvp) return false; *bvp = false; string s; if (!getConfParam(name, s, shallow)) return false; *bvp = stringToBool(s); return true; } bool RclConfig::getConfParam(const string &name, vector *svvp, bool shallow) const { if (!svvp) return false; svvp->clear(); string s; if (!getConfParam(name, s, shallow)) return false; return stringToStrings(s, *svvp); } bool RclConfig::getConfParam(const string &name, unordered_set *out, bool shallow) const { vector v; if (!out || !getConfParam(name, &v, shallow)) { return false; } out->clear(); out->insert(v.begin(), v.end()); return true; } bool RclConfig::getConfParam(const string &name, vector *vip, bool shallow) const { if (!vip) return false; vip->clear(); vector vs; if (!getConfParam(name, &vs, shallow)) return false; vip->reserve(vs.size()); for (unsigned int i = 0; i < vs.size(); i++) { char *ep; vip->push_back(strtol(vs[i].c_str(), &ep, 0)); if (ep == vs[i].c_str()) { LOGDEB("RclConfig::getConfParam: bad int value in [" << name << "]\n"); return false; } } return true; } void RclConfig::initThrConf() { // Default is no threading m_thrConf = {{-1, 0}, {-1, 0}, {-1, 0}}; vector vq; vector vt; if (!getConfParam("thrQSizes", &vq)) { LOGINFO("RclConfig::initThrConf: no thread info (queues)\n"); goto out; } // If the first queue size is 0, autoconf is requested. if (vq.size() > 0 && vq[0] == 0) { CpuConf cpus; if (!getCpuConf(cpus) || cpus.ncpus < 1) { LOGERR("RclConfig::initThrConf: could not retrieve cpu conf\n"); cpus.ncpus = 1; } if (cpus.ncpus != 1) { LOGDEB("RclConfig::initThrConf: autoconf requested. " << cpus.ncpus << " concurrent threads available.\n"); } // Arbitrarily set threads config based on number of CPUS. This also // depends on the IO setup actually, so we're bound to be wrong... if (cpus.ncpus == 1) { // Somewhat counter-intuitively (because of possible IO//) // it seems that the best config here is no threading } else if (cpus.ncpus < 4) { // Untested so let's guess... 
m_thrConf = {{2, 2}, {2, 2}, {2, 1}}; } else if (cpus.ncpus < 6) { m_thrConf = {{2, 4}, {2, 2}, {2, 1}}; } else { m_thrConf = {{2, 5}, {2, 3}, {2, 1}}; } goto out; } else if (vq.size() > 0 && vq[0] < 0) { // threads disabled by config goto out; } if (!getConfParam("thrTCounts", &vt) ) { LOGINFO("RclConfig::initThrConf: no thread info (threads)\n"); goto out; } if (vq.size() != 3 || vt.size() != 3) { LOGINFO("RclConfig::initThrConf: bad thread info vector sizes\n"); goto out; } // Normal case: record info from config m_thrConf.clear(); for (unsigned int i = 0; i < 3; i++) { m_thrConf.push_back({vq[i], vt[i]}); } out: ostringstream sconf; for (unsigned int i = 0; i < 3; i++) { sconf << "(" << m_thrConf[i].first << ", " << m_thrConf[i].second << ") "; } LOGDEB("RclConfig::initThrConf: chosen config (ql,nt): " << sconf.str() << "\n"); } pair RclConfig::getThrConf(ThrStage who) const { if (m_thrConf.size() != 3) { LOGERR("RclConfig::getThrConf: bad data in rclconfig\n"); return pair(-1,-1); } return m_thrConf[who]; } vector RclConfig::getTopdirs(bool formonitor) const { vector tdl; if (formonitor) { if (!getConfParam("monitordirs", &tdl)) { getConfParam("topdirs", &tdl); } } else { getConfParam("topdirs", &tdl); } if (tdl.empty()) { LOGERR("RclConfig::getTopdirs: nothing to index: topdirs/monitordirs " " are not set or have a bad list format\n"); return tdl; } for (auto& dir : tdl) { dir = path_canon(path_tildexpand(dir)); } return tdl; } const string& RclConfig::getLocaleCharset() { return o_localecharset; } // Get charset to be used for transcoding to utf-8 if unspecified by doc // For document contents: // If defcharset was set (from the config or a previous call, this // is done in setKeydir), use it. // Else, try to guess it from the locale // Use cp1252 (as a superset of iso8859-1) as ultimate default // // For filenames, same thing except that we do not use the config file value // (only the locale). const string& RclConfig::getDefCharset(bool filename) const { if (filename) { return o_localecharset; } else { return m_defcharset.empty() ? o_localecharset : m_defcharset; } } // Get all known document mime values. We get them from the mimeconf // 'index' submap. // It's quite possible that there are other mime types in the index // (defined in mimemap and not mimeconf, or output by "file -i"). We // just ignore them, because there may be myriads, and their contents // are not indexed. // // This unfortunately means that searches by file names and mime type // filtering don't work well together. vector RclConfig::getAllMimeTypes() const { return mimeconf ? mimeconf->getNames("index") : vector(); } // Things for suffix comparison. We define a string class and string // comparison with suffix-only sensitivity class SfString { public: SfString(const string& s) : m_str(s) {} bool operator==(const SfString& s2) const { string::const_reverse_iterator r1 = m_str.rbegin(), re1 = m_str.rend(), r2 = s2.m_str.rbegin(), re2 = s2.m_str.rend(); while (r1 != re1 && r2 != re2) { if (*r1 != *r2) { return 0; } ++r1; ++r2; } return 1; } string m_str; }; class SuffCmp { public: int operator()(const SfString& s1, const SfString& s2) const { //cout << "Comparing " << s1.m_str << " and " << s2.m_str << endl; string::const_reverse_iterator r1 = s1.m_str.rbegin(), re1 = s1.m_str.rend(), r2 = s2.m_str.rbegin(), re2 = s2.m_str.rend(); while (r1 != re1 && r2 != re2) { if (*r1 != *r2) { return *r1 < *r2 ? 
1 : 0; } ++r1; ++r2; } return 0; } }; typedef multiset SuffixStore; #define STOPSUFFIXES ((SuffixStore *)m_stopsuffixes) vector& RclConfig::getStopSuffixes() { bool needrecompute = m_stpsuffstate.needrecompute(); needrecompute = m_oldstpsuffstate.needrecompute() || needrecompute; if (needrecompute || m_stopsuffixes == 0) { // Need to initialize the suffixes // Let the old customisation have priority: if recoll_noindex from // mimemap is set, it the user's (the default value is gone). Else // use the new variable if (!m_oldstpsuffstate.getvalue(0).empty()) { stringToStrings(m_oldstpsuffstate.getvalue(0), m_stopsuffvec); } else { std::set ss; computeBasePlusMinus(ss, m_stpsuffstate.getvalue(0), m_stpsuffstate.getvalue(1), m_stpsuffstate.getvalue(2)); m_stopsuffvec = vector(ss.begin(), ss.end()); } // Compute the special suffixes store delete STOPSUFFIXES; if ((m_stopsuffixes = new SuffixStore) == 0) { LOGERR("RclConfig::inStopSuffixes: out of memory\n"); return m_stopsuffvec; } m_maxsufflen = 0; for (const auto& entry : m_stopsuffvec) { STOPSUFFIXES->insert(SfString(stringtolower(entry))); if (m_maxsufflen < entry.length()) m_maxsufflen = int(entry.length()); } } LOGDEB1("RclConfig::getStopSuffixes: ->" << stringsToString(m_stopsuffvec) << endl); return m_stopsuffvec; } bool RclConfig::inStopSuffixes(const string& fni) { LOGDEB2("RclConfig::inStopSuffixes(" << fni << ")\n"); // Call getStopSuffixes() to possibly update state, ignore result getStopSuffixes(); // Only need a tail as long as the longest suffix. int pos = MAX(0, int(fni.length() - m_maxsufflen)); string fn(fni, pos); stringtolower(fn); SuffixStore::const_iterator it = STOPSUFFIXES->find(fn); if (it != STOPSUFFIXES->end()) { LOGDEB2("RclConfig::inStopSuffixes: Found (" << fni << ") [" << ((*it).m_str) << "]\n"); return true; } else { LOGDEB2("RclConfig::inStopSuffixes: not found [" << fni << "]\n"); return false; } } string RclConfig::getMimeTypeFromSuffix(const string& suff) const { string mtype; mimemap->get(suff, mtype, m_keydir); return mtype; } string RclConfig::getSuffixFromMimeType(const string &mt) const { // First try from standard data, ensuring that we can control the value // from the order in the configuration file. auto rclsuff = mime_suffixes.find(mt); if (rclsuff != mime_suffixes.end()) { return rclsuff->second; } // Try again from local data. The map is in the wrong direction, // have to walk it. 
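    // Hypothetical example: if a user's personal mimemap added
    //     .fb2 = application/x-fictionbook
    // (a suffix absent from the distributed file, so not in mime_suffixes),
    // the reverse lookup for that MIME type falls through to this loop and
    // returns ".fb2".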
vector sfs = mimemap->getNames(cstr_null); for (const auto& suff : sfs) { string mt1; if (mimemap->get(suff, mt1, cstr_null) && !stringicmp(mt, mt1)) { return suff; } } return cstr_null; } /** Get list of file categories from mimeconf */ bool RclConfig::getMimeCategories(vector& cats) const { if (!mimeconf) return false; cats = mimeconf->getNames("categories"); return true; } bool RclConfig::isMimeCategory(string& cat) const { vectorcats; getMimeCategories(cats); for (vector::iterator it = cats.begin(); it != cats.end(); it++) { if (!stringicmp(*it,cat)) return true; } return false; } /** Get list of mime types for category from mimeconf */ bool RclConfig::getMimeCatTypes(const string& cat, vector& tps) const { tps.clear(); if (!mimeconf) return false; string slist; if (!mimeconf->get(cat, slist, "categories")) return false; stringToStrings(slist, tps); return true; } string RclConfig::getMimeHandlerDef(const string &mtype, bool filtertypes) { string hs; if (filtertypes) { if(m_rmtstate.needrecompute()) { m_restrictMTypes.clear(); stringToStrings(stringtolower((const string&)m_rmtstate.getvalue()), m_restrictMTypes); } if (m_xmtstate.needrecompute()) { m_excludeMTypes.clear(); stringToStrings(stringtolower((const string&)m_xmtstate.getvalue()), m_excludeMTypes); } if (!m_restrictMTypes.empty() && !m_restrictMTypes.count(stringtolower(mtype))) { LOGDEB2("RclConfig::getMimeHandlerDef: not in mime type list\n"); return hs; } if (!m_excludeMTypes.empty() && m_excludeMTypes.count(stringtolower(mtype))) { LOGDEB2("RclConfig::getMimeHandlerDef: in excluded mime list\n"); return hs; } } if (!mimeconf->get(mtype, hs, "index")) { LOGDEB1("getMimeHandlerDef: no handler for '" << mtype << "'\n"); } return hs; } const vector& RclConfig::getMDReapers() { string hs; if (m_mdrstate.needrecompute()) { m_mdreapers.clear(); // New value now stored in m_mdrstate.getvalue(0) const string& sreapers = m_mdrstate.getvalue(0); if (sreapers.empty()) return m_mdreapers; string value; ConfSimple attrs; valueSplitAttributes(sreapers, value, attrs); vector nmlst = attrs.getNames(cstr_null); for (vector::const_iterator it = nmlst.begin(); it != nmlst.end(); it++) { MDReaper reaper; reaper.fieldname = fieldCanon(*it); string s; attrs.get(*it, s); stringToStrings(s, reaper.cmdv); m_mdreapers.push_back(reaper); } } return m_mdreapers; } bool RclConfig::getGuiFilterNames(vector& cats) const { if (!mimeconf) return false; cats = mimeconf->getNamesShallow("guifilters"); return true; } bool RclConfig::getGuiFilter(const string& catfiltername, string& frag) const { frag.clear(); if (!mimeconf) return false; if (!mimeconf->get(catfiltername, frag, "guifilters")) return false; return true; } bool RclConfig::valueSplitAttributes(const string& whole, string& value, ConfSimple& attrs) { /* There is currently no way to escape a semi-colon */ string::size_type semicol0 = whole.find_first_of(";"); value = whole.substr(0, semicol0); trimstring(value); string attrstr; if (semicol0 != string::npos && semicol0 < whole.size() - 1) { attrstr = whole.substr(semicol0+1); } // Handle additional attributes. 
We substitute the semi-colons // with newlines and use a ConfSimple if (!attrstr.empty()) { for (string::size_type i = 0; i < attrstr.size(); i++) { if (attrstr[i] == ';') attrstr[i] = '\n'; } attrs.reparse(attrstr); } else { attrs.clear(); } return true; } bool RclConfig::getMissingHelperDesc(string& out) const { string fmiss = path_cat(getConfDir(), "missing"); out.clear(); if (!file_to_string(fmiss, out)) return false; return true; } void RclConfig::storeMissingHelperDesc(const string &s) { string fmiss = path_cat(getCacheDir(), "missing"); FILE *fp = fopen(fmiss.c_str(), "w"); if (fp) { if (s.size() > 0 && fwrite(s.c_str(), s.size(), 1, fp) != 1) { LOGERR("storeMissingHelperDesc: fwrite failed\n"); } fclose(fp); } } // Read definitions for field prefixes, aliases, and hierarchy and arrange // things for speed (theses are used a lot during indexing) bool RclConfig::readFieldsConfig(const string& cnferrloc) { LOGDEB2("RclConfig::readFieldsConfig\n"); m_fields = new ConfStack("fields", m_cdirs, true); if (m_fields == 0 || !m_fields->ok()) { m_reason = string("No/bad fields file in: ") + cnferrloc; return false; } // Build a direct map avoiding all indirections for field to // prefix translation // Add direct prefixes from the [prefixes] section vector tps = m_fields->getNames("prefixes"); for (const auto& fieldname : tps) { string val; m_fields->get(fieldname, val, "prefixes"); ConfSimple attrs; FieldTraits ft; // fieldname = prefix ; attr1=val;attr2=val... if (!valueSplitAttributes(val, ft.pfx, attrs)) { LOGERR("readFieldsConfig: bad config line for [" << fieldname << "]: [" << val << "]\n"); return 0; } string tval; if (attrs.get("wdfinc", tval)) ft.wdfinc = atoi(tval.c_str()); if (attrs.get("boost", tval)) ft.boost = atof(tval.c_str()); if (attrs.get("pfxonly", tval)) ft.pfxonly = stringToBool(tval); if (attrs.get("noterms", tval)) ft.noterms = stringToBool(tval); m_fldtotraits[stringtolower(fieldname)] = ft; LOGDEB2("readFieldsConfig: [" << fieldname << "] -> [" << ft.pfx << "] " << ft.wdfinc << " " << ft.boost << "\n"); } // Values section tps = m_fields->getNames("values"); for (const auto& fieldname : tps) { string canonic = stringtolower(fieldname); // canonic name string val; m_fields->get(fieldname, val, "values"); ConfSimple attrs; string svslot; // fieldname = valueslot ; attr1=val;attr2=val... if (!valueSplitAttributes(val, svslot, attrs)) { LOGERR("readFieldsConfig: bad value line for [" << fieldname << "]: [" << val << "]\n"); return 0; } uint32_t valueslot = uint32_t(atoi(svslot.c_str())); if (valueslot == 0) { LOGERR("readFieldsConfig: found 0 value slot for [" << fieldname << "]: [" << val << "]\n"); continue; } string tval; FieldTraits::ValueType valuetype{FieldTraits::STR}; if (attrs.get("type", tval)) { if (tval == "string") { valuetype = FieldTraits::STR; } else if (tval == "int") { valuetype = FieldTraits::INT; } else { LOGERR("readFieldsConfig: bad type for value for " << fieldname << " : " << tval << endl); return 0; } } int valuelen{0}; if (attrs.get("len", tval)) { valuelen = atoi(tval.c_str()); } // Find or insert traits entry const auto pit = m_fldtotraits.insert( pair(canonic, FieldTraits())).first; pit->second.valueslot = valueslot; pit->second.valuetype = valuetype; pit->second.valuelen = valuelen; } // Add prefixes for aliases and build alias-to-canonic map while // we're at it. Having the aliases in the prefix map avoids an // additional indirection at index time. 
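    // Illustrative fields file fragment (assumed, not the shipped defaults):
    //     [aliases]
    //     title = caption subject
    // With this, "caption" and "subject" get a copy of the FieldTraits
    // already registered for "title" (if any), and m_aliastocanon maps both
    // back to the canonic name "title".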
tps = m_fields->getNames("aliases"); for (const auto& fieldname : tps) { string canonic = stringtolower(fieldname); // canonic name FieldTraits ft; const auto pit = m_fldtotraits.find(canonic); if (pit != m_fldtotraits.end()) { ft = pit->second; } string aliases; m_fields->get(canonic, aliases, "aliases"); vector l; stringToStrings(aliases, l); for (const auto& alias : l) { if (pit != m_fldtotraits.end()) m_fldtotraits[stringtolower(alias)] = ft; m_aliastocanon[stringtolower(alias)] = canonic; } } // Query aliases map tps = m_fields->getNames("queryaliases"); for (const auto& entry: tps) { string canonic = stringtolower(entry); // canonic name string aliases; m_fields->get(canonic, aliases, "queryaliases"); vector l; stringToStrings(aliases, l); for (const auto& alias : l) { m_aliastoqcanon[stringtolower(alias)] = canonic; } } #if 0 for (map::const_iterator it = m_fldtotraits.begin(); it != m_fldtotraits.end(); it++) { LOGDEB("readFieldsConfig: [" << entry << "] -> [" << it->second.pfx << "] " << it->second.wdfinc << " " << it->second.boost << "\n"); } #endif vector sl = m_fields->getNames("stored"); for (const auto& fieldname : sl) { m_storedFields.insert(fieldCanon(stringtolower(fieldname))); } // Extended file attribute to field translations vectorxattrs = m_fields->getNames("xattrtofields"); for (const auto& xattr : xattrs) { string val; m_fields->get(xattr, val, "xattrtofields"); m_xattrtofld[xattr] = val; } return true; } // Return specifics for field name: bool RclConfig::getFieldTraits(const string& _fld, const FieldTraits **ftpp, bool isquery) const { string fld = isquery ? fieldQCanon(_fld) : fieldCanon(_fld); map::const_iterator pit = m_fldtotraits.find(fld); if (pit != m_fldtotraits.end()) { *ftpp = &pit->second; LOGDEB1("RclConfig::getFieldTraits: [" << _fld << "]->[" << pit->second.pfx << "]\n"); return true; } else { LOGDEB1("RclConfig::getFieldTraits: no prefix for field [" << fld << "]\n"); *ftpp = 0; return false; } } set RclConfig::getIndexedFields() const { set flds; if (m_fields == 0) return flds; vector sl = m_fields->getNames("prefixes"); flds.insert(sl.begin(), sl.end()); return flds; } string RclConfig::fieldCanon(const string& f) const { string fld = stringtolower(f); map::const_iterator it = m_aliastocanon.find(fld); if (it != m_aliastocanon.end()) { LOGDEB1("RclConfig::fieldCanon: [" << f << "] -> [" << it->second << "]\n"); return it->second; } LOGDEB1("RclConfig::fieldCanon: [" << (f) << "] -> [" << (fld) << "]\n"); return fld; } string RclConfig::fieldQCanon(const string& f) const { string fld = stringtolower(f); map::const_iterator it = m_aliastoqcanon.find(fld); if (it != m_aliastoqcanon.end()) { LOGDEB1("RclConfig::fieldQCanon: [" << f << "] -> [" << it->second << "]\n"); return it->second; } return fieldCanon(f); } vector RclConfig::getFieldSectNames(const string &sk, const char* patrn) const { if (m_fields == 0) return vector(); return m_fields->getNames(sk, patrn); } bool RclConfig::getFieldConfParam(const string &name, const string &sk, string &value) const { if (m_fields == 0) return false; return m_fields->get(name, value, sk); } set RclConfig::getMimeViewerAllEx() const { set res; if (mimeview == 0) return res; string base, plus, minus; mimeview->get("xallexcepts", base, ""); LOGDEB1("RclConfig::getMimeViewerAllEx(): base: " << s << endl); mimeview->get("xallexcepts+", plus, ""); LOGDEB1("RclConfig::getMimeViewerAllEx(): plus: " << plus << endl); mimeview->get("xallexcepts-", minus, ""); LOGDEB1("RclConfig::getMimeViewerAllEx(): minus: " << minus << 
endl); computeBasePlusMinus(res, base, plus, minus); LOGDEB1("RclConfig::getMimeViewerAllEx(): res: " << stringsToString(res) << endl); return res; } bool RclConfig::setMimeViewerAllEx(const set& allex) { if (mimeview == 0) return false; string sbase; mimeview->get("xallexcepts", sbase, ""); string splus, sminus; setPlusMinus(sbase, allex, splus, sminus); if (!mimeview->set("xallexcepts-", sminus, "")) { m_reason = string("RclConfig:: cant set value. Readonly?"); return false; } if (!mimeview->set("xallexcepts+", splus, "")) { m_reason = string("RclConfig:: cant set value. Readonly?"); return false; } return true; } string RclConfig::getMimeViewerDef(const string &mtype, const string& apptag, bool useall) const { LOGDEB2("RclConfig::getMimeViewerDef: mtype [" << mtype << "] apptag [" << apptag << "]\n"); string hs; if (mimeview == 0) return hs; if (useall) { // Check for exception set allex = getMimeViewerAllEx(); bool isexcept = false; for (auto& it : allex) { vector mita; stringToTokens(it, mita, "|"); if ((mita.size() == 1 && apptag.empty() && mita[0] == mtype) || (mita.size() == 2 && mita[1] == apptag && mita[0] == mtype)) { // Exception to x-all isexcept = true; break; } } if (isexcept == false) { mimeview->get("application/x-all", hs, "view"); return hs; } // Fallthrough to normal case. } if (apptag.empty() || !mimeview->get(mtype + string("|") + apptag, hs, "view")) mimeview->get(mtype, hs, "view"); return hs; } bool RclConfig::getMimeViewerDefs(vector >& defs) const { if (mimeview == 0) return false; vectortps = mimeview->getNames("view"); for (vector::const_iterator it = tps.begin(); it != tps.end();it++) { defs.push_back(pair(*it, getMimeViewerDef(*it, "", 0))); } return true; } bool RclConfig::setMimeViewerDef(const string& mt, const string& def) { if (mimeview == 0) return false; bool status; if (!def.empty()) status = mimeview->set(mt, def, "view"); else status = mimeview->erase(mt, "view"); if (!status) { m_reason = string("RclConfig:: cant set value. Readonly?"); return false; } return true; } bool RclConfig::mimeViewerNeedsUncomp(const string &mimetype) const { string s; vector v; if (mimeview != 0 && mimeview->get("nouncompforviewmts", s, "") && stringToStrings(s, v) && find_if(v.begin(), v.end(), StringIcmpPred(mimetype)) != v.end()) return false; return true; } string RclConfig::getMimeIconPath(const string &mtype, const string &apptag) const { string iconname; if (!apptag.empty()) mimeconf->get(mtype + string("|") + apptag, iconname, "icons"); if (iconname.empty()) mimeconf->get(mtype, iconname, "icons"); if (iconname.empty()) iconname = "document"; string iconpath; #if defined (__FreeBSD__) && __FreeBSD_version < 500000 // gcc 2.95 dies if we call getConfParam here ?? if (m_conf) m_conf->get(string("iconsdir"), iconpath, m_keydir); #else getConfParam("iconsdir", iconpath); #endif if (iconpath.empty()) { iconpath = path_cat(m_datadir, "images"); } else { iconpath = path_tildexpand(iconpath); } return path_cat(iconpath, iconname) + ".png"; } // Return path defined by varname. 
May be absolute or relative to // confdir, with default in confdir string RclConfig::getConfdirPath(const char *varname, const char *dflt) const { string result; if (!getConfParam(varname, result)) { result = path_cat(getConfDir(), dflt); } else { result = path_tildexpand(result); // If not an absolute path, compute relative to config dir if (!path_isabsolute(result)) { result = path_cat(getConfDir(), result); } } return path_canon(result); } string RclConfig::getCacheDir() const { return m_cachedir.empty() ? getConfDir() : m_cachedir; } // Return path defined by varname. May be absolute or relative to // confdir, with default in confdir string RclConfig::getCachedirPath(const char *varname, const char *dflt) const { string result; if (!getConfParam(varname, result)) { result = path_cat(getCacheDir(), dflt); } else { result = path_tildexpand(result); // If not an absolute path, compute relative to cache dir if (!path_isabsolute(result)) { result = path_cat(getCacheDir(), result); } } return path_canon(result); } string RclConfig::getDbDir() const { return getCachedirPath("dbdir", "xapiandb"); } string RclConfig::getWebcacheDir() const { return getCachedirPath("webcachedir", "webcache"); } string RclConfig::getMboxcacheDir() const { return getCachedirPath("mboxcachedir", "mboxcache"); } string RclConfig::getAspellcacheDir() const { return getCachedirPath("aspellDicDir", ""); } string RclConfig::getStopfile() const { return getConfdirPath("stoplistfile", "stoplist.txt"); } string RclConfig::getSynGroupsFile() const { return getConfdirPath("syngroupsfile", "syngroups.txt"); } // The index status file is fast changing, so it's possible to put it outside // of the config directory (for ssds, not sure this is really useful). // To enable being quite xdg-correct we should add a getRundirPath() string RclConfig::getIdxStatusFile() const { return getCachedirPath("idxstatusfile", "idxstatus.txt"); } string RclConfig::getPidfile() const { return path_cat(getCacheDir(), "index.pid"); } string RclConfig::getIdxStopFile() const { return path_cat(getCacheDir(), "index.stop"); } /* Eliminate the common leaf part of file paths p1 and p2. Example: * /mnt1/common/part /mnt2/common/part -> /mnt1 /mnt2. This is used * for computing translations for paths when the dataset has been * moved. Of course this could be done more efficiently than by splitting * into vectors, but we don't care.*/ static string path_diffstems(const string& p1, const string& p2, string& r1, string& r2) { string reason; r1.clear(); r2.clear(); vector v1, v2; stringToTokens(p1, v1, "/"); stringToTokens(p2, v2, "/"); unsigned int l1 = v1.size(); unsigned int l2 = v2.size(); // Search for common leaf part unsigned int cl = 0; for (; cl < MIN(l1, l2); cl++) { if (v1[l1-cl-1] != v2[l2-cl-1]) { break; } } //cerr << "Common length = " << cl << endl; if (cl == 0) { reason = "Input paths are empty or have no common part"; return reason; } for (unsigned i = 0; i < l1 - cl; i++) { r1 += "/" + v1[i]; } for (unsigned i = 0; i < l2 - cl; i++) { r2 += "/" + v2[i]; } return reason; } void RclConfig::urlrewrite(const string& dbdir, string& url) const { LOGDEB1("RclConfig::urlrewrite: dbdir [" << dbdir << "] url [" << url << "]\n"); // If orgidxconfdir is set, we assume that this index is for a // movable dataset, with the configuration directory stored inside // the dataset tree. This allows computing automatic path // translations if the dataset has been moved. 
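    // Worked example with assumed values: suppose the index was built with
    //     orgidxconfdir = /media/olddisk/data/recollconf
    // and the dataset is now mounted so that the same configuration lives in
    //     /home/me/data/recollconf   (curidxconfdir, or the current confdir)
    // path_diffstems() then yields the stems /media/olddisk and /home/me, and
    // a stored URL like file:///media/olddisk/data/doc.pdf is rewritten below
    // to file:///home/me/data/doc.pdf.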
string orig_confdir; string cur_confdir; string confstemorg, confstemrep; if (m_conf->get("orgidxconfdir", orig_confdir, "")) { if (!m_conf->get("curidxconfdir", cur_confdir, "")) { cur_confdir = m_confdir; } LOGDEB1("RclConfig::urlrewrite: orgidxconfdir: " << orig_confdir << " cur_confdir " << cur_confdir << endl); string reason = path_diffstems(orig_confdir, cur_confdir, confstemorg, confstemrep); if (!reason.empty()) { LOGERR("urlrewrite: path_diffstems failed: " << reason << " : orig_confdir [" << orig_confdir << "] cur_confdir [" << cur_confdir << endl); confstemorg = confstemrep = ""; } } // Do path translations exist for this index ? bool needptrans = true; if (m_ptrans == 0 || !m_ptrans->hasSubKey(dbdir)) { LOGDEB2("RclConfig::urlrewrite: no paths translations (m_ptrans " << m_ptrans << ")\n"); needptrans = false; } if (!needptrans && confstemorg.empty()) { return; } bool computeurl = false; string path = fileurltolocalpath(url); if (path.empty()) { LOGDEB2("RclConfig::urlrewrite: not file url\n"); return; } // Do the movable volume thing. if (!confstemorg.empty() && confstemorg.size() <= path.size() && !path.compare(0, confstemorg.size(), confstemorg)) { path = path.replace(0, confstemorg.size(), confstemrep); computeurl = true; } if (needptrans) { // For each translation check if the prefix matches the input path, // replace and return the result if it does. vector opaths = m_ptrans->getNames(dbdir); for (const auto& opath: opaths) { if (opath.size() <= path.size() && !path.compare(0, opath.size(), opath)) { string npath; // Key comes from getNames()=> call must succeed if (m_ptrans->get(opath, npath, dbdir)) { path = path_canon(path.replace(0, opath.size(), npath)); computeurl = true; } break; } } } if (computeurl) { url = path_pathtofileurl(path); } } bool RclConfig::sourceChanged() const { if (m_conf && m_conf->sourceChanged()) return true; if (mimemap && mimemap->sourceChanged()) return true; if (mimeconf && mimeconf->sourceChanged()) return true; if (mimeview && mimeview->sourceChanged()) return true; if (m_fields && m_fields->sourceChanged()) return true; if (m_ptrans && m_ptrans->sourceChanged()) return true; return false; } string RclConfig::getWebQueueDir() const { string webqueuedir; if (!getConfParam("webqueuedir", webqueuedir)) { #ifdef _WIN32 webqueuedir = "~/AppData/Local/RecollWebQueue"; #else webqueuedir = "~/.recollweb/ToIndex/"; #endif } webqueuedir = path_tildexpand(webqueuedir); return webqueuedir; } vector& RclConfig::getSkippedNames() { if (m_skpnstate.needrecompute()) { set ss; computeBasePlusMinus(ss, m_skpnstate.getvalue(0), m_skpnstate.getvalue(1), m_skpnstate.getvalue(2)); m_skpnlist = vector(ss.begin(), ss.end()); } return m_skpnlist; } vector& RclConfig::getOnlyNames() { if (m_onlnstate.needrecompute()) { stringToStrings(m_onlnstate.getvalue(), m_onlnlist); } return m_onlnlist; } vector RclConfig::getSkippedPaths() const { vector skpl; getConfParam("skippedPaths", &skpl); // Always add the dbdir and confdir to the skipped paths. This is // especially important for the rt monitor which will go into a loop if we // don't do this. 
skpl.push_back(getDbDir()); skpl.push_back(getConfDir()); #ifdef _WIN32 skpl.push_back(TempFile::rcltmpdir()); #endif if (getCacheDir().compare(getConfDir())) { skpl.push_back(getCacheDir()); } // And the web queue dir skpl.push_back(getWebQueueDir()); for (vector::iterator it = skpl.begin(); it != skpl.end(); it++) { *it = path_tildexpand(*it); *it = path_canon(*it); } sort(skpl.begin(), skpl.end()); vector::iterator uit = unique(skpl.begin(), skpl.end()); skpl.resize(uit - skpl.begin()); return skpl; } vector RclConfig::getDaemSkippedPaths() const { vector dskpl; getConfParam("daemSkippedPaths", &dskpl); for (vector::iterator it = dskpl.begin(); it != dskpl.end(); it++) { *it = path_tildexpand(*it); *it = path_canon(*it); } vector skpl1 = getSkippedPaths(); vector skpl; if (dskpl.empty()) { skpl = skpl1; } else { sort(dskpl.begin(), dskpl.end()); merge(dskpl.begin(), dskpl.end(), skpl1.begin(), skpl1.end(), skpl.begin()); vector::iterator uit = unique(skpl.begin(), skpl.end()); skpl.resize(uit - skpl.begin()); } return skpl; } // Look up an executable filter. We add $RECOLL_FILTERSDIR, // and filtersdir from the config file to the PATH, then use execmd::which() string RclConfig::findFilter(const string &icmd) const { // If the path is absolute, this is it if (path_isabsolute(icmd)) return icmd; const char *cp = getenv("PATH"); if (!cp) //?? cp = ""; string PATH(cp); // For historical reasons: check in personal config directory PATH = getConfDir() + path_PATHsep() + PATH; string temp; // Prepend $datadir/filters temp = path_cat(m_datadir, "filters"); PATH = temp + path_PATHsep() + PATH; #ifdef _WIN32 // Windows only: use the bundled Python temp = path_cat(m_datadir, "filters"); temp = path_cat(temp, "python"); PATH = temp + path_PATHsep() + PATH; #endif // Prepend possible configuration parameter? if (getConfParam(string("filtersdir"), temp)) { temp = path_tildexpand(temp); PATH = temp + path_PATHsep() + PATH; } // Prepend possible environment variable if ((cp = getenv("RECOLL_FILTERSDIR"))) { PATH = string(cp) + path_PATHsep() + PATH; } string cmd; if (ExecCmd::which(icmd, cmd, PATH.c_str())) { return cmd; } else { // Let the shell try to find it... return icmd; } } /** * Return decompression command line for given mime type */ bool RclConfig::getUncompressor(const string &mtype, vector& cmd) const { string hs; mimeconf->get(mtype, hs, cstr_null); if (hs.empty()) return false; vector tokens; stringToStrings(hs, tokens); if (tokens.empty()) { LOGERR("getUncompressor: empty spec for mtype " << mtype << "\n"); return false; } vector::iterator it = tokens.begin(); if (tokens.size() < 2) return false; if (stringlowercmp("uncompress", *it++)) return false; cmd.clear(); cmd.push_back(findFilter(*it)); // Special-case python and perl on windows: we need to also locate the // first argument which is the script name "python somescript.py". // On Unix, thanks to #!, we usually just run "somescript.py", but need // the same change if we ever want to use the same cmdling as windows if (!stringlowercmp("python", *it) || !stringlowercmp("perl", *it)) { it++; if (tokens.size() < 3) { LOGERR("getUncpressor: python/perl cmd: no script?. 
[" << mtype << "]\n"); } else { *it = findFilter(*it); } } else { it++; } cmd.insert(cmd.end(), it, tokens.end()); return true; } static const char blurb0[] = "# The system-wide configuration files for recoll are located in:\n" "# %s\n" "# The default configuration files are commented, you should take a look\n" "# at them for an explanation of what can be set (you could also take a look\n" "# at the manual instead).\n" "# Values set in this file will override the system-wide values for the file\n" "# with the same name in the central directory. The syntax for setting\n" "# values is identical.\n" ; // We just use path_max to print the path to /usr/share/recoll/examples // inside the config file. At worse, the text is truncated (using // snprintf). But 4096 should be enough :) #ifndef PATH_MAX #define MYPATHALLOC 4096 #else #define MYPATHALLOC PATH_MAX #endif // Use uni2ascii -a K to generate these from the utf-8 strings // Swedish and Danish. static const char swedish_ex[] = "unac_except_trans = \303\244\303\244 \303\204\303\244 \303\266\303\266 \303\226\303\266 \303\274\303\274 \303\234\303\274 \303\237ss \305\223oe \305\222oe \303\246ae \303\206ae \357\254\201fi \357\254\202fl \303\245\303\245 \303\205\303\245"; // German: static const char german_ex[] = "unac_except_trans = \303\244\303\244 \303\204\303\244 \303\266\303\266 \303\226\303\266 \303\274\303\274 \303\234\303\274 \303\237ss \305\223oe \305\222oe \303\246ae \303\206ae \357\254\201fi \357\254\202fl"; // Create initial user config by creating commented empty files static const char *configfiles[] = {"recoll.conf", "mimemap", "mimeconf", "mimeview", "fields"}; static int ncffiles = sizeof(configfiles) / sizeof(char *); bool RclConfig::initUserConfig() { // Explanatory text const int bs = sizeof(blurb0)+MYPATHALLOC+1; char blurb[bs]; string exdir = path_cat(m_datadir, "examples"); snprintf(blurb, bs, blurb0, exdir.c_str()); // Use protective 700 mode to create the top configuration // directory: documents can be reconstructed from index data. 
if (!path_exists(m_confdir) && mkdir(m_confdir.c_str(), 0700) < 0) { m_reason += string("mkdir(") + m_confdir + ") failed: " + strerror(errno); return false; } string lang = localelang(); for (int i = 0; i < ncffiles; i++) { string dst = path_cat(m_confdir, string(configfiles[i])); if (!path_exists(dst)) { FILE *fp = fopen(dst.c_str(), "w"); if (fp) { fprintf(fp, "%s\n", blurb); if (!strcmp(configfiles[i], "recoll.conf")) { // Add improved unac_except_trans for some languages if (lang == "se" || lang == "dk" || lang == "no" || lang == "fi") { fprintf(fp, "%s\n", swedish_ex); } else if (lang == "de") { fprintf(fp, "%s\n", german_ex); } } fclose(fp); } else { m_reason += string("fopen ") + dst + ": " + strerror(errno); return false; } } } return true; } void RclConfig::zeroMe() { m_ok = false; m_keydirgen = 0; m_conf = 0; mimemap = 0; mimeconf = 0; mimeview = 0; m_fields = 0; m_ptrans = 0; m_stopsuffixes = 0; m_maxsufflen = 0; initParamStale(0, 0); } void RclConfig::freeAll() { delete m_conf; delete mimemap; delete mimeconf; delete mimeview; delete m_fields; delete m_ptrans; delete STOPSUFFIXES; // just in case zeroMe(); } void RclConfig::initFrom(const RclConfig& r) { zeroMe(); if (!(m_ok = r.m_ok)) return; // Copyable fields m_ok = r.m_ok; m_reason = r.m_reason; m_confdir = r.m_confdir; m_cachedir = r.m_cachedir; m_datadir = r.m_datadir; m_keydir = r.m_keydir; m_keydirgen = r.m_keydirgen; m_cdirs = r.m_cdirs; m_fldtotraits = r.m_fldtotraits; m_aliastocanon = r.m_aliastocanon; m_aliastoqcanon = r.m_aliastoqcanon; m_storedFields = r.m_storedFields; m_xattrtofld = r.m_xattrtofld; m_maxsufflen = r.m_maxsufflen; m_skpnlist = r.m_skpnlist; m_onlnlist = r.m_onlnlist; m_stopsuffixes = r.m_stopsuffixes; m_defcharset = r.m_defcharset; m_restrictMTypes = r.m_restrictMTypes; m_excludeMTypes = r.m_excludeMTypes; m_thrConf = r.m_thrConf; m_mdreapers = r.m_mdreapers; // Special treatment if (r.m_conf) m_conf = new ConfStack(*(r.m_conf)); if (r.mimemap) mimemap = new ConfStack(*(r.mimemap)); if (r.mimeconf) mimeconf = new ConfStack(*(r.mimeconf)); if (r.mimeview) mimeview = new ConfStack(*(r.mimeview)); if (r.m_fields) m_fields = new ConfStack(*(r.m_fields)); if (r.m_ptrans) m_ptrans = new ConfSimple(*(r.m_ptrans)); if (r.m_stopsuffixes) m_stopsuffixes = new SuffixStore(*((SuffixStore*)r.m_stopsuffixes)); initParamStale(m_conf, mimemap); } void RclConfig::initParamStale(ConfNull *cnf, ConfNull *mimemap) { m_oldstpsuffstate.init(mimemap); m_stpsuffstate.init(cnf); m_skpnstate.init(cnf); m_onlnstate.init(cnf); m_rmtstate.init(cnf); m_xmtstate.init(cnf); m_mdrstate.init(cnf); } recoll-1.26.3/common/utf8fn.h0000644000175000017500000000057513303776060012673 00000000000000#ifndef _UTF8FN_H_ #define _UTF8FN_H_ #include class RclConfig; // Translate file name/path to utf8 for indexing. // // @param simple If true we extract and process only the simple file name // (ignore the path) std::string compute_utf8fn(const RclConfig *config, const std::string& ifn, bool simple); #endif // _UTF8FN_H_ recoll-1.26.3/common/rclinit.h0000644000175000017500000000555413533651561013132 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _RCLINIT_H_INCLUDED_ #define _RCLINIT_H_INCLUDED_ #include class RclConfig; /** * Initialize by reading configuration, opening log file, etc. * * This must be called from the main thread before starting any others. It sets * up the global signal handling. other threads must call recoll_threadinit() * when starting. * * @param flags misc modifiers. These are currently only used to customize * the log file and verbosity. * @param cleanup function to call before exiting (atexit) * @param sigcleanup function to call on terminal signal (INT/HUP...) This * should typically set a flag which tells the program (recoll, * recollindex etc.. to exit as soon as possible (after closing the db, * etc.). cleanup will then be called by exit(). * @param reason in case of error: output string explaining things * @param argcnf Configuration directory name from the command line (overriding * default and environment * @return the parsed configuration. */ enum RclInitFlags {RCLINIT_NONE = 0, RCLINIT_DAEMON = 1, RCLINIT_IDX = 2, RCLINIT_PYTHON = 4}; // Kinds of termination requests, in addition to the normal signal // values. Passed as type int to sigcleanup() when it is not invoked // directly as a sig handler. Note that because of the existence of // sigset_t, we are pretty sure that no signals can have a high value enum RclSigKind { // System resume from sleep RCLSIG_RESUME = 1002}; extern RclConfig *recollinit(int flags, void (*cleanup)(void), void (*sigcleanup)(int), std::string& reason, const std::string *argcnf = 0); // Threads need to call this to block signals. // The main thread handles all signals. extern void recoll_threadinit(); // Check if main thread extern bool recoll_ismainthread(); // Should be called while exiting asap when critical cleanup (db // close) has been performed. Only useful for the indexer (writes to // the db), and only actually does something on Windows. extern void recoll_exitready(); #endif /* _RCLINIT_H_INCLUDED_ */ recoll-1.26.3/common/webstore.cpp0000644000175000017500000000506613533651561013651 00000000000000/* Copyright (C) 2011 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include "webstore.h" #include #include "cstr.h" #include "circache.h" #include "log.h" #include "rclconfig.h" #include "pathut.h" #include "rcldoc.h" const string cstr_bgc_mimetype("mimetype"); WebStore::WebStore(RclConfig *cnf) { string ccdir = cnf->getWebcacheDir(); int maxmbs = 40; cnf->getConfParam("webcachemaxmbs", &maxmbs); if ((m_cache = new CirCache(ccdir)) == 0) { LOGERR("WebStore: cant create CirCache object\n" ); return; } if (!m_cache->create(int64_t(maxmbs)*1000*1024, CirCache::CC_CRUNIQUE)) { LOGERR("WebStore: cache file creation failed: " << m_cache->getReason() << "\n"); delete m_cache; m_cache = 0; return; } } WebStore::~WebStore() { delete m_cache; } // Read document from cache. Return the metadata as an Rcl::Doc // @param htt Web Hit Type bool WebStore::getFromCache(const string& udi, Rcl::Doc &dotdoc, string& data, string *htt) { string dict; if (m_cache == 0) { LOGERR("WebStore::getFromCache: cache is null\n"); return false; } if (!m_cache->get(udi, dict, &data)) { LOGDEB("WebStore::getFromCache: get failed\n"); return false; } ConfSimple cf(dict, 1); if (htt) cf.get(Rcl::Doc::keybght, *htt, cstr_null); // Build a doc from saved metadata cf.get(cstr_url, dotdoc.url, cstr_null); cf.get(cstr_bgc_mimetype, dotdoc.mimetype, cstr_null); cf.get(cstr_fmtime, dotdoc.fmtime, cstr_null); cf.get(cstr_fbytes, dotdoc.pcbytes, cstr_null); dotdoc.sig.clear(); vector names = cf.getNames(cstr_null); for (vector::const_iterator it = names.begin(); it != names.end(); it++) { cf.get(*it, dotdoc.meta[*it], cstr_null); } dotdoc.meta[Rcl::Doc::keyudi] = udi; return true; } recoll-1.26.3/common/conf_post.h0000644000175000017500000000174013533651561013451 00000000000000 #ifdef _WIN32 #include "safewindows.h" #ifdef _MSC_VER // gmtime is supposedly thread-safe on windows #define gmtime_r(A, B) gmtime(A) #define localtime_r(A,B) localtime(A) typedef int mode_t; #define fseeko _fseeki64 #define ftello (off_t)_ftelli64 #define ftruncate _chsize_s #define PATH_MAX MAX_PATH #define RCL_ICONV_INBUF_CONST 1 #define HAVE_STRUCT_TIMESPEC #define strdup _strdup #define timegm _mkgmtime #else // End _MSC_VER -> Gminw #undef RCL_ICONV_INBUF_CONST #define timegm portable_timegm #endif // GMinw only typedef int pid_t; inline int readlink(const char *a, void *b, int c) { a = a; b = b; c = c; return -1; } #define MAXPATHLEN PATH_MAX typedef DWORD32 u_int32_t; typedef DWORD64 u_int64_t; typedef unsigned __int8 u_int8_t; typedef int ssize_t; #define strncasecmp _strnicmp #define strcasecmp _stricmp #define chdir _chdir #define R_OK 4 #define W_OK 2 #ifndef X_OK #define X_OK 4 #endif #define S_ISLNK(X) false #define lstat stat #endif // _WIN32 recoll-1.26.3/common/textsplit.cpp0000644000175000017500000011667413566424763014077 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include #include #include #include #include #include "textsplit.h" #include "log.h" //#define UTF8ITER_CHECK #include "utf8iter.h" #include "uproplist.h" #include "smallut.h" #include "rclconfig.h" // Decide if we treat katakana as western scripts, splitting into // words instead of n-grams. This is not absurd (katakana is a kind of // alphabet, albeit phonetic and syllabic and is mostly used to // transcribe western words), but it does not work well because // japanese uses separator-less compound katakana words, and because // the plural terminaisons are irregular and would need a specialized // stemmer. So we for now process katakana as the rest of cjk, using // ngrams #undef KATAKANA_AS_WORDS // Same for Korean syllabic, and same problem, not used. #undef HANGUL_AS_WORDS using namespace std; /** * Splitting a text into words. The code in this file works with utf-8 * in a semi-clean way (see uproplist.h). Ascii still gets special * treatment in the sense that many special characters can only be * ascii (e.g. @, _,...). However, this compromise works quite well * while being much more light-weight than a full-blown Unicode * approach (ICU...) */ // Ascii character classes: we have three main groups, and then some chars // are their own class because they want special handling. // // We have an array with 256 slots where we keep the character types. // The array could be fully static, but we use a small function to fill it // once. // The array is actually a remnant of the original version which did no utf8. // Only the lower 127 slots are now used, but keep it at 256 // because it makes some tests in the code simpler. const unsigned int charclasses_size = 256; enum CharClass {LETTER=256, SPACE=257, DIGIT=258, WILD=259, A_ULETTER=260, A_LLETTER=261, SKIP=262}; static int charclasses[charclasses_size]; // Non-ascii UTF-8 characters are handled with sets holding all // characters with interesting properties. This is far from full-blown // management of Unicode properties, but seems to do the job well // enough in most common cases static vector vpuncblocks; static std::unordered_set spunc; static std::unordered_set visiblewhite; static std::unordered_set sskip; class CharClassInit { public: CharClassInit() { unsigned int i; // Set default value for all: SPACE for (i = 0 ; i < 256 ; i ++) charclasses[i] = SPACE; char digits[] = "0123456789"; for (i = 0; i < strlen(digits); i++) charclasses[int(digits[i])] = DIGIT; char upper[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"; for (i = 0; i < strlen(upper); i++) charclasses[int(upper[i])] = A_ULETTER; char lower[] = "abcdefghijklmnopqrstuvwxyz"; for (i = 0; i < strlen(lower); i++) charclasses[int(lower[i])] = A_LLETTER; char wild[] = "*?[]"; for (i = 0; i < strlen(wild); i++) charclasses[int(wild[i])] = WILD; // Characters with special treatment: // // The first ones are mostly span-constructing "glue" // characters, for example those typically allowing us to // search for an email address as a whole (bob@isp.org instead // of as a phrase "bob isp org" // // The case of the minus sign is a complicated one. It went // from glue to non-glue to glue along Recoll versions. 
// See minus-hyphen-dash.txt in doc/notes char special[] = ".@+-#'_\n\r\f"; for (i = 0; i < strlen(special); i++) charclasses[int(special[i])] = special[i]; for (i = 0; i < sizeof(unipunc) / sizeof(int); i++) { spunc.insert(unipunc[i]); } spunc.insert((unsigned int)-1); for (i = 0; i < sizeof(unipuncblocks) / sizeof(int); i++) { vpuncblocks.push_back(unipuncblocks[i]); } assert((vpuncblocks.size() % 2) == 0); for (i = 0; i < sizeof(avsbwht) / sizeof(int); i++) { visiblewhite.insert(avsbwht[i]); } for (i = 0; i < sizeof(uniskip) / sizeof(int); i++) { sskip.insert(uniskip[i]); } } }; static const CharClassInit charClassInitInstance; static inline int whatcc(unsigned int c, char *asciirep = nullptr) { if (c <= 127) { return charclasses[c]; } else { if (c == 0x2010) { // Special treatment for hyphen: handle as ascii minus. See // doc/notes/minus-hyphen-dash.txt if (asciirep) *asciirep = '-'; return c; } else if (c == 0x2019 || c == 0x275c || c == 0x02bc) { // Things sometimes replacing a single quote. Use single // quote so that span processing works ok if (asciirep) *asciirep = '\''; return c; } else if (sskip.find(c) != sskip.end()) { return SKIP; } else if (spunc.find(c) != spunc.end()) { return SPACE; } else { vector::iterator it = lower_bound(vpuncblocks.begin(), vpuncblocks.end(), c); if (it == vpuncblocks.end()) return LETTER; if (c == *it) return SPACE; if ((it - vpuncblocks.begin()) % 2 == 1) { return SPACE; } else { return LETTER; } } } } // testing whatcc... #if 0 unsigned int testvalues[] = {'a', '0', 0x80, 0xbf, 0xc0, 0x05c3, 0x1000, 0x2000, 0x2001, 0x206e, 0x206f, 0x20d0, 0x2399, 0x2400, 0x2401, 0x243f, 0x2440, 0xff65}; int ntest = sizeof(testvalues) / sizeof(int); for (int i = 0; i < ntest; i++) { int ret = whatcc(testvalues[i]); printf("Tested value 0x%x, returned value %d %s\n", testvalues[i], ret, ret == LETTER ? "LETTER" : ret == SPACE ? "SPACE" : "OTHER"); } #endif // CJK Unicode character detection: // // 1100..11FF; Hangul Jamo (optional: see UNICODE_IS_HANGUL) // 2E80..2EFF; CJK Radicals Supplement // 3000..303F; CJK Symbols and Punctuation // 3040..309F; Hiragana // 30A0..30FF; Katakana // 3100..312F; Bopomofo // 3130..318F; Hangul Compatibility Jamo (optional: see UNICODE_IS_HANGUL) // 3190..319F; Kanbun // 31A0..31BF; Bopomofo Extended // 31C0..31EF; CJK Strokes // 31F0..31FF; Katakana Phonetic Extensions // 3200..32FF; Enclosed CJK Letters and Months // 3300..33FF; CJK Compatibility // 3400..4DBF; CJK Unified Ideographs Extension A // 4DC0..4DFF; Yijing Hexagram Symbols // 4E00..9FFF; CJK Unified Ideographs // A700..A71F; Modifier Tone Letters // AC00..D7AF; Hangul Syllables (optional: see UNICODE_IS_HANGUL) // F900..FAFF; CJK Compatibility Ideographs // FE30..FE4F; CJK Compatibility Forms // FF00..FFEF; Halfwidth and Fullwidth Forms // 20000..2A6DF; CJK Unified Ideographs Extension B // 2F800..2FA1F; CJK Compatibility Ideographs Supplement #define UNICODE_IS_CJK(p) \ (((p) >= 0x1100 && (p) <= 0x11FF) || \ ((p) >= 0x2E80 && (p) <= 0x2EFF) || \ ((p) >= 0x3000 && (p) <= 0x9FFF) || \ ((p) >= 0xA700 && (p) <= 0xA71F) || \ ((p) >= 0xAC00 && (p) <= 0xD7AF) || \ ((p) >= 0xF900 && (p) <= 0xFAFF) || \ ((p) >= 0xFE30 && (p) <= 0xFE4F) || \ ((p) >= 0xFF00 && (p) <= 0xFFEF) || \ ((p) >= 0x20000 && (p) <= 0x2A6DF) || \ ((p) >= 0x2F800 && (p) <= 0x2FA1F)) // We should probably map 'fullwidth ascii variants' and 'halfwidth // katakana variants' to something else. Look up "Kuromoji" Lucene // filter, KuromojiNormalizeFilter.java // 309F is Hiragana. 
#ifdef KATAKANA_AS_WORDS #define UNICODE_IS_KATAKANA(p) \ ((p) != 0x309F && \ (((p) >= 0x3099 && (p) <= 0x30FF) || \ ((p) >= 0x31F0 && (p) <= 0x31FF))) #else #define UNICODE_IS_KATAKANA(p) false #endif #ifdef HANGUL_AS_WORDS #define UNICODE_IS_HANGUL(p) ( \ ((p) >= 0x1100 && (p) <= 0x11FF) || \ ((p) >= 0x3130 && (p) <= 0x318F) || \ ((p) >= 0x3200 && (p) <= 0x321e) || \ ((p) >= 0x3248 && (p) <= 0x327F) || \ ((p) >= 0x3281 && (p) <= 0x32BF) || \ ((p) >= 0xAC00 && (p) <= 0xD7AF) \ ) #else #define UNICODE_IS_HANGUL(p) false #endif bool TextSplit::isCJK(int c) { return UNICODE_IS_CJK(c) && !UNICODE_IS_KATAKANA(c) && !UNICODE_IS_HANGUL(c); } bool TextSplit::isKATAKANA(int c) { return UNICODE_IS_KATAKANA(c); } bool TextSplit::isHANGUL(int c) { return UNICODE_IS_HANGUL(c); } // This is used to detect katakana/other transitions, which must // trigger a word split (there is not always a separator, and katakana // is otherwise treated like other, in the same routine, unless cjk // which has its span reader causing a word break) enum CharSpanClass {CSC_HANGUL, CSC_CJK, CSC_KATAKANA, CSC_OTHER}; std::vector csc_names {CHARFLAGENTRY(CSC_HANGUL), CHARFLAGENTRY(CSC_CJK), CHARFLAGENTRY(CSC_KATAKANA), CHARFLAGENTRY(CSC_OTHER)}; bool TextSplit::o_processCJK{true}; unsigned int TextSplit::o_CJKNgramLen{2}; bool TextSplit::o_noNumbers{false}; bool TextSplit::o_deHyphenate{false}; int TextSplit::o_maxWordLength{40}; static const int o_CJKMaxNgramLen{5}; void TextSplit::staticConfInit(RclConfig *config) { config->getConfParam("maxtermlength", &o_maxWordLength); bool bvalue{false}; if (config->getConfParam("nocjk", &bvalue) && bvalue == true) { o_processCJK = false; } else { o_processCJK = true; int ngramlen; if (config->getConfParam("cjkngramlen", &ngramlen)) { o_CJKNgramLen = (unsigned int)(ngramlen <= o_CJKMaxNgramLen ? ngramlen : o_CJKMaxNgramLen); } } bvalue = false; if (config->getConfParam("nonumbers", &bvalue)) { o_noNumbers = bvalue; } bvalue = false; if (config->getConfParam("dehyphenate", &bvalue)) { o_deHyphenate = bvalue; } bvalue = false; if (config->getConfParam("backslashasletter", &bvalue)) { if (bvalue) { } else { charclasses[int('\\')] = SPACE; } } } // Final term checkpoint: do some checking (the kind which is simpler // to do here than in the main loop), then send term to our client. inline bool TextSplit::emitterm(bool isspan, string &w, int pos, size_t btstart, size_t btend) { LOGDEB2("TextSplit::emitterm: [" << w << "] pos " << pos << "\n"); int l = int(w.length()); #ifdef TEXTSPLIT_STATS // Update word length statistics. Do this before we filter out // long words because stats are used to detect bad text if (!isspan || m_wordLen == m_span.length()) m_stats.newsamp(m_wordChars); #endif if (l > 0 && l <= o_maxWordLength) { // 1 byte word: we index single ascii letters and digits, but // nothing else. We might want to turn this into a test for a // single utf8 character instead ? if (l == 1) { unsigned int c = ((unsigned int)w[0]) & 0xff; if (charclasses[c] != A_ULETTER && charclasses[c] != A_LLETTER && charclasses[c] != DIGIT && (!(m_flags & TXTS_KEEPWILD) || charclasses[c] != WILD) ) { //cerr << "ERASING single letter term " << c << endl; return true; } } if (pos != m_prevpos || l != m_prevlen) { bool ret = takeword(w, pos, int(btstart), int(btend)); m_prevpos = pos; m_prevlen = int(w.length()); return ret; } LOGDEB2("TextSplit::emitterm:dup: [" << w << "] pos " << pos << "\n"); } return true; } // Check for an acronym/abbreviation ie I.B.M. 
This only works with // ascii (no non-ascii utf-8 acronym are possible) bool TextSplit::span_is_acronym(string *acronym) { bool acron = false; if (m_wordLen != m_span.length() && m_span.length() > 2 && m_span.length() <= 20) { acron = true; // Check odd chars are '.' for (unsigned int i = 1 ; i < m_span.length(); i += 2) { if (m_span[i] != '.') { acron = false; break; } } if (acron) { // Check that even chars are letters for (unsigned int i = 0 ; i < m_span.length(); i += 2) { int c = m_span[i]; if (!((c >= 'a' && c <= 'z')||(c >= 'A' && c <= 'Z'))) { acron = false; break; } } } } if (acron) { for (unsigned int i = 0; i < m_span.length(); i += 2) { *acronym += m_span[i]; } } return acron; } // Generate terms from span. Have to take into account the // flags: ONLYSPANS, NOSPANS, noNumbers bool TextSplit::words_from_span(size_t bp) { #if 0 cerr << "Span: [" << m_span << "] " << " w_i_s size: " << m_words_in_span.size() << " : "; for (unsigned int i = 0; i < m_words_in_span.size(); i++) { cerr << " [" << m_words_in_span[i].first << " " << m_words_in_span[i].second << "] "; } cerr << endl; #endif int spanwords = int(m_words_in_span.size()); // It seems that something like: tv_combo-sample_util.Po@am_quote // can get the splitter to call doemit with a span of '@' and // words_in_span==0, which then causes a crash when accessing // words_in_span[0] if the stl assertions are active (e.g. Fedora // RPM build). Not too sure what the right fix would be, but for // now, just defend against it if (spanwords == 0) { return true; } int pos = m_spanpos; // Byte position of the span start size_t spboffs = bp - m_span.size(); if (o_deHyphenate && spanwords == 2 && m_span[m_words_in_span[0].second] == '-') { unsigned int s0 = m_words_in_span[0].first; unsigned int l0 = m_words_in_span[0].second - m_words_in_span[0].first; unsigned int s1 = m_words_in_span[1].first; unsigned int l1 = m_words_in_span[1].second - m_words_in_span[1].first; string word = m_span.substr(s0, l0) + m_span.substr(s1, l1); if (l0 && l1) emitterm(false, word, m_spanpos, spboffs, spboffs + m_words_in_span[1].second); } for (int i = 0; i < ((m_flags&TXTS_ONLYSPANS) ? 1 : spanwords); i++) { int deb = m_words_in_span[i].first; bool noposinc = m_words_in_span[i].second == deb; for (int j = ((m_flags&TXTS_ONLYSPANS) ? spanwords-1 : i); j < ((m_flags&TXTS_NOSPANS) ? i+1 : spanwords); j++) { int fin = m_words_in_span[j].second; //cerr << "i " << i << " j " << j << " deb " << deb << //" fin " << fin << endl; if (fin - deb > int(m_span.size())) break; string word(m_span.substr(deb, fin-deb)); if (!emitterm(j != i+1, word, pos, spboffs+deb, spboffs+fin)) return false; } if (!noposinc) ++pos; } return true; } /** * A method called at word boundaries (different places in * text_to_words()), to adjust the current state of the parser, and * possibly generate term(s). While inside a span (words linked by * glue characters), we just keep track of the word boundaries. Once * actual white-space is reached, we get called with spanerase set to * true, and we process the span, calling the emitterm() routine for * each generated term. * * The object flags can modify our behaviour, deciding if we only emit * single words (bill, recoll, org), only spans (bill@recoll.org), or * words and spans (bill@recoll.org, recoll.org, jf, recoll...) * * @return true if ok, false for error. Splitting should stop in this case. * @param spanerase Set if the current span is at its end. Process it. 
* @param bp The current BYTE position in the stream */ inline bool TextSplit::doemit(bool spanerase, size_t _bp) { int bp = int(_bp); LOGDEB2("TextSplit::doemit: sper " << spanerase << " bp " << bp << " spp " << m_spanpos << " spanwords " << m_words_in_span.size() << " wS " << m_wordStart << " wL " << m_wordLen << " inn " << m_inNumber << " span [" << m_span << "]\n"); if (m_wordLen) { // We have a current word. Remember it // Limit max span word count if (m_words_in_span.size() >= 6) { spanerase = true; } m_words_in_span.push_back(pair(m_wordStart, m_wordStart + m_wordLen)); m_wordpos++; m_wordLen = m_wordChars = 0; } if (spanerase) { // We encountered a span-terminating character. Produce terms. string acronym; if (span_is_acronym(&acronym)) { if (!emitterm(false, acronym, m_spanpos, bp - m_span.length(), bp)) return false; } // Maybe trim at end. These are chars that we might keep // inside a span, but not at the end. while (m_span.length() > 0) { switch (*(m_span.rbegin())) { case '.': case '-': case ',': case '@': case '_': case '\'': m_span.resize(m_span.length()-1); if (m_words_in_span.size() && m_words_in_span.back().second > int(m_span.size())) m_words_in_span.back().second = int(m_span.size()); if (--bp < 0) bp = 0; break; default: goto breaktrimloop; } } breaktrimloop: if (!words_from_span(bp)) { return false; } discardspan(); } else { m_wordStart = int(m_span.length()); } return true; } void TextSplit::discardspan() { m_span.clear(); m_words_in_span.clear(); m_spanpos = m_wordpos; m_wordStart = 0; m_wordLen = m_wordChars = 0; } static inline bool isalphanum(int what, unsigned int flgs) { return what == A_LLETTER || what == A_ULETTER || what == DIGIT || what == LETTER || ((flgs & TextSplit::TXTS_KEEPWILD) && what == WILD); } static inline bool isdigit(int what, unsigned int flgs) { return what == DIGIT || ((flgs & TextSplit::TXTS_KEEPWILD) && what == WILD); } #ifdef TEXTSPLIT_STATS #define STATS_INC_WORDCHARS ++m_wordChars #else #define STATS_INC_WORDCHARS #endif vector splitFlags{ {TextSplit::TXTS_NOSPANS, "nospans"}, {TextSplit::TXTS_ONLYSPANS, "onlyspans"}, {TextSplit::TXTS_KEEPWILD, "keepwild"} }; /** * Splitting a text into terms to be indexed. * We basically emit a word every time we see a separator, but some chars are * handled specially so that special cases, ie, c++ and jfd@recoll.com etc, * are handled properly, */ bool TextSplit::text_to_words(const string &in) { LOGDEB1("TextSplit::text_to_words: docjk " << o_processCJK << "(" << o_CJKNgramLen << ") " << flagsToString(splitFlags, m_flags) << " [" << in.substr(0,50) << "]\n"); if (in.empty()) return true; // Reset the data members relative to splitting state clearsplitstate(); bool pagepending = false; bool softhyphenpending = false; // Running count of non-alphanum chars. Reset when we see one; int nonalnumcnt = 0; Utf8Iter it(in); #if defined(KATAKANA_AS_WORDS) || defined(HANGUL_AS_WORDS) int prev_csc = -1; #endif for (; !it.eof(); it++) { unsigned int c = *it; nonalnumcnt++; if (c == (unsigned int)-1) { LOGERR("Textsplit: error occurred while scanning UTF-8 string\n"); return false; } CharSpanClass csc; if (UNICODE_IS_KATAKANA(c)) { csc = CSC_KATAKANA; } else if (UNICODE_IS_HANGUL(c)) { csc = CSC_HANGUL; } else if (UNICODE_IS_CJK(c)) { csc = CSC_CJK; } else { csc = CSC_OTHER; } if (o_processCJK && csc == CSC_CJK) { // CJK excluding Katakana character hit. // Do like at EOF with the current non-cjk data. 
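            // Illustrative example (assumed, default flags and cjkngramlen=2):
            // for input "abc" followed by two CJK characters and then "x",
            // the pending latin span "abc" is flushed just below,
            // cjk_to_words() then emits each CJK character plus the
            // 2-character ngram, and the main loop resumes at 'x'.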
if (m_wordLen || m_span.length()) { if (!doemit(true, it.getBpos())) return false; } // Hand off situation to the cjk routine. if (!cjk_to_words(&it, &c)) { LOGERR("Textsplit: scan error in cjk handler\n"); return false; } // Check for eof, else c contains the first non-cjk // character after the cjk sequence, just go on. if (it.eof()) break; } #if defined(KATAKANA_AS_WORDS) || defined(HANGUL_AS_WORDS) // Only needed if we have script transitions inside this // routine, else the call to cjk_to_words does the job (so do // nothing right after a CJK section). Because // katakana-western transitions sometimes have no whitespace // (and maybe hangul too, but probably not). if (prev_csc != CSC_CJK && csc != prev_csc && (m_wordLen || m_span.length())) { LOGDEB2("csc " << valToString(csc_names, csc) << " prev_csc " << valToString(csc_names, prev_csc) << " wl " << m_wordLen << " spl " << m_span.length() << endl); if (!doemit(true, it.getBpos())) { return false; } } prev_csc = csc; #endif char asciirep = 0; int cc = whatcc(c, &asciirep); switch (cc) { case SKIP: // Special-case soft-hyphen. To work, this depends on the // fact that only SKIP calls "continue" inside the // switch. All the others will do the softhyphenpending // reset after the switch if (c == 0xad) { softhyphenpending = true; } else { softhyphenpending = false; } // Skips the softhyphenpending reset continue; case DIGIT: nonalnumcnt = 0; if (m_wordLen == 0) m_inNumber = true; m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; case SPACE: nonalnumcnt = 0; SPACE: if (m_wordLen || m_span.length()) { if (!doemit(true, it.getBpos())) return false; m_inNumber = false; } if (pagepending) { pagepending = false; newpage(m_wordpos); } break; case WILD: if (m_flags & TXTS_KEEPWILD) goto NORMALCHAR; else goto SPACE; break; case '-': case '+': if (m_wordLen == 0) { // + or - don't start a term except if this looks like // it's going to be to be a number if (isdigit(whatcc(it[it.getCpos()+1]), m_flags)) { // -10 m_inNumber = true; m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } } else if (m_inNumber) { if ((m_span[m_span.length() - 1] == 'e' || m_span[m_span.length() - 1] == 'E')) { if (isdigit(whatcc(it[it.getCpos()+1]), m_flags)) { m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } } } else { if (cc == '+') { int nextc = it[it.getCpos()+1]; if (nextc == '+' || nextc == -1 || visiblewhite.find(nextc) != visiblewhite.end()) { // someword++[+...] ! m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } } else { // Treat '-' inside span as glue char if (!doemit(false, it.getBpos())) return false; m_inNumber = false; m_wordStart += it.appendchartostring(m_span); break; } } goto SPACE; case '.': { // Need a little lookahead here. At worse this gets the end null int nextc = it[it.getCpos()+1]; int nextwhat = whatcc(nextc); if (m_inNumber) { if (!isdigit(nextwhat, m_flags)) goto SPACE; m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } else { // Found '.' while not in number // Only letters and digits make sense after if (!isalphanum(nextwhat, m_flags)) goto SPACE; // Keep an initial '.' for catching .net, and .34 (aka // 0.34) but this adds quite a few spurious terms ! if (m_span.length() == 0) { // Check for number like .1 if (isdigit(nextwhat, m_flags)) { m_inNumber = true; m_wordLen += it.appendchartostring(m_span); } else { m_words_in_span. 
push_back(pair(m_wordStart, m_wordStart)); m_wordStart += it.appendchartostring(m_span); } STATS_INC_WORDCHARS; break; } // '.' between words: span glue if (m_wordLen) { if (!doemit(false, it.getBpos())) return false; m_wordStart += it.appendchartostring(m_span); } } } break; case 0x2010: case 0x2019: case 0x275c: case 0x02bc: // Unicode chars which we replace with ascii for // processing (2010 -> -,others -> '). It happens that // they all work as glue chars and use the same code, but // there might be cases needing different processing. // Hyphen is replaced with ascii minus if (m_wordLen) { // Inside span: glue char if (!doemit(false, it.getBpos())) return false; m_inNumber = false; m_span += asciirep; m_wordStart++; break; } goto SPACE; case '@': case '_': case '\'': // If in word, potential span: o'brien, jf@dockes.org, // else just ignore if (m_wordLen) { if (!doemit(false, it.getBpos())) return false; m_inNumber = false; m_wordStart += it.appendchartostring(m_span); } break; case '#': { int w = whatcc(it[it.getCpos()+1]); // Keep it only at the beginning of a word (hashtag), if (m_wordLen == 0 && isalphanum(w, m_flags)) { m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } // or at the end (special case for c# ...) if (m_wordLen > 0) { if (w == SPACE || w == '\n' || w == '\r') { m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } } goto SPACE; } break; case '\n': case '\r': if (m_span.length() && *m_span.rbegin() == '-') { // if '-' is the last char before end of line, we // strip it. We have no way to know if this is added // because of the line split or if it was part of an // actual compound word (would need a dictionary to // check). As soft-hyphen *should* be used if the '-' // is not part of the text, it is better to properly // process a real compound word, and produce wrong // output from wrong text. The word-emitting routine // will strip the trailing '-'. goto SPACE; } else if (softhyphenpending) { // Don't reset soft-hyphen continue; } else { // Normal case: EOL is white space goto SPACE; } break; case '\f': pagepending = true; goto SPACE; break; #ifdef RCL_SPLIT_CAMELCASE // Camelcase handling. // If we get uppercase ascii after lowercase ascii, emit word. // This emits "camel" when hitting the 'C' of camelCase // Not enabled by defaults as this makes phrase searches quite // confusing. // ie "MySQL manual" is matched by "MySQL manual" and // "my sql manual" but not "mysql manual" // A possibility would be to emit both my and sql at the // same position. All non-phrase searches would work, and // both "MySQL manual" and "mysql manual" phrases would // match too. "my sql manual" would not match, but this is // not an issue. case A_ULETTER: if (m_span.length() && charclasses[(unsigned char)m_span[m_span.length() - 1]] == A_LLETTER) { if (m_wordLen) { if (!doemit(false, it.getBpos())) return false; } } goto NORMALCHAR; // CamelCase handling. // If we get lowercase after uppercase and the current // word length is bigger than one, it means we had a // string of several upper-case letters: an // acronym (readHTML) or a single letter article (ALittleHelp). // Emit the uppercase word before proceeding case A_LLETTER: if (m_span.length() && charclasses[(unsigned char)m_span[m_span.length() - 1]] == A_ULETTER && m_wordLen > 1) { // Multiple upper-case letters. 
Single letter word // or acronym which we want to emit now m_wordLen--; if (!doemit(false, it.getBpos())) return false; // m_wordstart could be 0 here if the span was reset // for excessive length if (m_wordStart) m_wordStart--; m_wordLen++; } goto NORMALCHAR; #endif /* CAMELCASE */ default: NORMALCHAR: nonalnumcnt = 0; if (m_inNumber && c != 'e' && c != 'E') { m_inNumber = false; } m_wordLen += it.appendchartostring(m_span); STATS_INC_WORDCHARS; break; } softhyphenpending = false; } if (m_wordLen || m_span.length()) { if (!doemit(true, it.getBpos())) return false; } return true; } // Using an utf8iter pointer just to avoid needing its definition in // textsplit.h // // We output ngrams for exemple for char input a b c and ngramlen== 2, // we generate: a ab b bc c as words // // This is very different from the normal behaviour, so we don't use // the doemit() and emitterm() routines // // The routine is sort of a mess and goes to show that we'd probably // be better off converting the whole buffer to utf32 on entry... bool TextSplit::cjk_to_words(Utf8Iter *itp, unsigned int *cp) { LOGDEB1("cjk_to_words: m_wordpos " << m_wordpos << "\n"); Utf8Iter &it = *itp; // We use an offset buffer to remember the starts of the utf-8 // characters which we still need to use. assert(o_CJKNgramLen < o_CJKMaxNgramLen); unsigned int boffs[o_CJKMaxNgramLen+1]; string mybuf; unsigned int myboffs[o_CJKMaxNgramLen+1]; // Current number of valid offsets; unsigned int nchars = 0; unsigned int c = 0; for (; !it.eof(); it++) { c = *it; if (c == ' ' || c == '\t' || c == '\n') { continue; } if (!UNICODE_IS_CJK(c)) { // Return to normal handler break; } if (whatcc(c) == SPACE) { // Flush the ngram buffer and go on nchars = 0; continue; } if (nchars == o_CJKNgramLen) { // Offset buffer full, shift it. Might be more efficient // to have a circular one, but things are complicated // enough already... for (unsigned int i = 0; i < nchars-1; i++) { boffs[i] = boffs[i+1]; } for (unsigned int i = 0; i < nchars-1; i++) { myboffs[i] = myboffs[i+1]; } } else { nchars++; } // Copy to local buffer, and note local offset myboffs[nchars-1] = mybuf.size(); it.appendchartostring(mybuf); // Take note of document byte offset for this character. boffs[nchars-1] = int(it.getBpos()); // Output all new ngrams: they begin at each existing position // and end after the new character. onlyspans->only output // maximum words, nospans=> single chars if (!(m_flags & TXTS_ONLYSPANS) || nchars == o_CJKNgramLen) { int btend = int(it.getBpos() + it.getBlen()); int loopbeg = (m_flags & TXTS_NOSPANS) ? nchars-1 : 0; int loopend = (m_flags & TXTS_ONLYSPANS) ? 1 : nchars; for (int i = loopbeg; i < loopend; i++) { if (!takeword(mybuf.substr(myboffs[i], mybuf.size()-myboffs[i]), m_wordpos - (nchars-i-1), boffs[i], btend)) { return false; } } if ((m_flags & TXTS_ONLYSPANS)) { // Only spans: don't overlap: flush buffer nchars = 0; mybuf.clear(); } } // Increase word position by one, other words are at an // existing position. This could be subject to discussion... m_wordpos++; } // If onlyspans is set, there may be things to flush in the buffer // first if ((m_flags & TXTS_ONLYSPANS) && nchars > 0 && nchars != o_CJKNgramLen) { int btend = int(it.getBpos()); // Current char is out if (!takeword(mybuf.substr(myboffs[0], mybuf.size()-myboffs[0]), m_wordpos - nchars, boffs[0], btend)) { return false; } } // Reset state, saving term position, and return the found non-cjk // unicode character value. 
The current input byte offset is kept // in the utf8Iter int pos = m_wordpos; clearsplitstate(); m_spanpos = m_wordpos = pos; *cp = c; return true; } // Specialization for countWords class TextSplitCW : public TextSplit { public: int wcnt; TextSplitCW(Flags flags) : TextSplit(flags), wcnt(0) {} bool takeword(const string &, int, int, int) { wcnt++; return true; } }; int TextSplit::countWords(const string& s, TextSplit::Flags flgs) { TextSplitCW splitter(flgs); splitter.text_to_words(s); return splitter.wcnt; } bool TextSplit::hasVisibleWhite(const string &in) { Utf8Iter it(in); for (; !it.eof(); it++) { unsigned int c = (unsigned char)*it; if (c == (unsigned int)-1) { LOGERR("hasVisibleWhite: error while scanning UTF-8 string\n"); return false; } if (visiblewhite.find(c) != visiblewhite.end()) return true; } return false; } template bool u8stringToStrings(const string &s, T &tokens) { Utf8Iter it(s); string current; tokens.clear(); enum states {SPACE, TOKEN, INQUOTE, ESCAPE}; states state = SPACE; for (; !it.eof(); it++) { unsigned int c = *it; if (visiblewhite.find(c) != visiblewhite.end()) c = ' '; if (c == (unsigned int)-1) { LOGERR("TextSplit::stringToStrings: error while scanning UTF-8 " "string\n"); return false; } switch (c) { case '"': switch(state) { case SPACE: state = INQUOTE; continue; case TOKEN: goto push_char; case ESCAPE: state = INQUOTE; goto push_char; case INQUOTE: tokens.push_back(current);current.clear(); state = SPACE; continue; } break; case '\\': switch(state) { case SPACE: case TOKEN: state=TOKEN; goto push_char; case INQUOTE: state = ESCAPE; continue; case ESCAPE: state = INQUOTE; goto push_char; } break; case ' ': case '\t': case '\n': case '\r': switch(state) { case SPACE: continue; case TOKEN: tokens.push_back(current); current.clear(); state = SPACE; continue; case INQUOTE: case ESCAPE: goto push_char; } break; default: switch(state) { case ESCAPE: state = INQUOTE; break; case SPACE: state = TOKEN; break; case TOKEN: case INQUOTE: break; } push_char: it.appendchartostring(current); } } // End of string. Process residue, and possible error (unfinished quote) switch(state) { case SPACE: break; case TOKEN: tokens.push_back(current); break; case INQUOTE: case ESCAPE: return false; } return true; } bool TextSplit::stringToStrings(const string &s, vector &tokens) { return u8stringToStrings >(s, tokens); } recoll-1.26.3/common/unacpp.cpp0000644000175000017500000001101013566424763013277 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include #include #include #include #include "unacpp.h" #include "unac.h" #include "log.h" #include "utf8iter.h" bool unacmaybefold(const string &in, string &out, const char *encoding, UnacOp what) { char *cout = 0; size_t out_len; int status = -1; switch (what) { case UNACOP_UNAC: status = unac_string(encoding, in.c_str(), in.length(), &cout, &out_len); break; case UNACOP_UNACFOLD: status = unacfold_string(encoding, in.c_str(), in.length(), &cout, &out_len); break; case UNACOP_FOLD: status = fold_string(encoding, in.c_str(), in.length(), &cout, &out_len); break; } if (status < 0) { if (cout) free(cout); char cerrno[20]; sprintf(cerrno, "%d", errno); out = string("unac_string failed, errno : ") + cerrno; return false; } out.assign(cout, out_len); if (cout) free(cout); return true; } // Functions to determine upper-case or accented status could be implemented // hugely more efficiently inside the unac c code, but there only used for // testing user-entered terms, so we don't really care. bool unaciscapital(const string& in) { LOGDEB2("unaciscapital: [" << in << "]\n"); if (in.empty()) return false; Utf8Iter it(in); string shorter; it.appendchartostring(shorter); string lower; if (!unacmaybefold(shorter, lower, "UTF-8", UNACOP_FOLD)) { LOGINFO("unaciscapital: unac/fold failed for [" << in << "]\n"); return false; } Utf8Iter it1(lower); if (*it != *it1) return true; else return false; } // Check if input contains upper case characters. We used to case-fold // the input and look for a difference, but lowercasing and // casefolding are actually not exactly the same, for example german // sharp s folds to ss but lowercases to itself, and greek final sigma // folds to sigma. So an input containing one of these characters // would wrongly detected as containing upper case. We now handle a // few special cases explicitly, by folding them before performing // the lowercasing. There are actually quite a few other cases of // lowercase being transformed by casefolding, check Unicode // CaseFolding.txt for occurrences of SMALL. One more step towards // ditching everything and using icu... bool unachasuppercase(const string& _in) { LOGDEB("unachasuppercase: in [" << _in << "]\n"); if (_in.empty()) return false; string in; Utf8Iter it(_in); for (; !it.eof(); it++) { if (*it == 0xdf) { // s sharp -> ss in += 's'; in += 's'; } else if (*it == 0x3c2) { // final sigma -> sigma in.append("\xcf\x83"); } else { it.appendchartostring(in); } } LOGDEB("unachasuppercase: folded: [" << in << "]\n"); string lower; if (!unacmaybefold(in, lower, "UTF-8", UNACOP_FOLD)) { LOGINFO("unachasuppercase: unac/fold failed for [" << in << "]\n"); return false; } LOGDEB("unachasuppercase: lower [" << lower << "]\n"); if (lower != in) return true; else return false; } bool unachasaccents(const string& in) { LOGDEB("unachasaccents: in [" << in << "]\n"); if (in.empty()) return false; string noac; if (!unacmaybefold(in, noac, "UTF-8", UNACOP_UNAC)) { LOGINFO("unachasaccents: unac/unac failed for [" << (in) << "]\n" ); return false; } LOGDEB("unachasaccents: noac [" << noac << "]\n"); if (noac != in) return true; else return false; } recoll-1.26.3/common/syngroups.cpp0000644000175000017500000001303713533651561014065 00000000000000/* Copyright (C) 2014-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include "syngroups.h" #include "log.h" #include "smallut.h" #include "pathut.h" #include #include #include #include #include #include "safesysstat.h" using namespace std; // Note that we are storing each term twice. I don't think that the // size could possibly be a serious issue, but if it was, we could // reduce the storage a bit by storing (short hash)-> vector // correspondances in the direct map, and then checking all the // resulting groups for the input word. // // As it is, a word can only index one group (the last it is found // in). It can be part of several groups though (appear in // expansions). I really don't know what we should do with multiple // groups anyway class SynGroups::Internal { public: Internal() : ok(false) { } void setpath(const string& fn) { path = path_canon(fn); stat(path.c_str(), &st); } bool samefile(const string& fn) { string p1 = path_canon(fn); if (path != p1) { return false; } struct stat st1; if (stat(p1.c_str(), &st1) != 0) { return false; } return st.st_mtime == st1.st_mtime && st.st_size == st1.st_size; } bool ok; // Term to group num std::unordered_map terms; // Group num to group vector > groups; std::string path; struct stat st; }; bool SynGroups::ok() { return m && m->ok; } SynGroups::~SynGroups() { delete m; } SynGroups::SynGroups() : m(new Internal) { } bool SynGroups::setfile(const string& fn) { LOGDEB("SynGroups::setfile(" << fn << ")\n"); if (!m) { m = new Internal; if (!m) { LOGERR("SynGroups:setfile:: new Internal failed: no mem ?\n"); return false; } } if (fn.empty()) { delete m; m = 0; return true; } if (m->samefile(fn)) { LOGDEB("SynGroups::setfile: unchanged: " << fn << endl); return true; } LOGDEB("SynGroups::setfile: parsing file " << fn << endl); ifstream input; input.open(fn.c_str(), ios::in); if (!input.is_open()) { LOGSYSERR("SynGroups:setfile", "open", fn); return false; } string cline; bool appending = false; string line; bool eof = false; int lnum = 0; for (;;) { cline.clear(); getline(input, cline); if (!input.good()) { if (input.bad()) { LOGERR("Syngroup::setfile(" << fn << "):Parse: input.bad()\n"); return false; } // Must be eof ? But maybe we have a partial line which // must be processed. This happens if the last line before // eof ends with a backslash, or there is no final \n eof = true; } lnum++; { string::size_type pos = cline.find_last_not_of("\n\r"); if (pos == string::npos) { cline.clear(); } else if (pos != cline.length()-1) { cline.erase(pos+1); } } if (appending) line += cline; else line = cline; // Note that we trim whitespace before checking for backslash-eol // This avoids invisible whitespace problems. 
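        // For reference, an illustrative input accepted by this parser
        // (assumed sample, not shipped data): '#' starts a comment line,
        // each remaining line defines one group of equivalent terms, and a
        // trailing backslash continues a group on the next line:
        //   # spelling variants
        //   color colour \
        //   colors colours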
trimstring(line); if (line.empty() || line.at(0) == '#') { if (eof) break; continue; } if (line[line.length() - 1] == '\\') { line.erase(line.length() - 1); appending = true; continue; } appending = false; vector words; if (!stringToStrings(line, words)) { LOGERR("SynGroups:setfile: " << fn << ": bad line " << lnum << ": " << line << "\n"); continue; } if (words.empty()) continue; if (words.size() == 1) { LOGERR("Syngroup::setfile(" << fn << "):single term group at line " << lnum << " ??\n"); continue; } m->groups.push_back(words); for (const auto& word : words) { m->terms[word] = m->groups.size()-1; } LOGDEB1("SynGroups::setfile: group: [" << stringsToString(m->groups.back()) << "]\n"); } LOGDEB("SynGroups::setfile: got " << m->groups.size() << " distinct terms." << endl); m->ok = true; m->setpath(fn); return true; } vector SynGroups::getgroup(const string& term) { vector ret; if (!ok()) return ret; const auto it1 = m->terms.find(term); if (it1 == m->terms.end()) { LOGDEB0("SynGroups::getgroup: [" << term << "] not found in map\n"); return ret; } unsigned int idx = it1->second; if (idx >= m->groups.size()) { LOGERR("SynGroups::getgroup: line index higher than line count !\n"); return ret; } LOGDEB0("SynGroups::getgroup: result: " << stringsToString(m->groups[idx]) << endl); return m->groups[idx]; } recoll-1.26.3/common/textsplit.h0000644000175000017500000001541313566424763013531 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _TEXTSPLIT_H_INCLUDED_ #define _TEXTSPLIT_H_INCLUDED_ #include #include #include class Utf8Iter; class RclConfig; /** * Split text into words. * See comments at top of .cpp for more explanations. * This uses a callback function. It could be done with an iterator instead, * but 'ts much simpler this way... */ class TextSplit { public: enum Flags { // Default: will return spans and words (a_b, a, b) TXTS_NONE = 0, // Only return maximum spans (a@b.com, not a, b, or com) TXTS_ONLYSPANS = 1, // Special: Only return atomic words (a, b, com). This is not // used for indexing, but for position computation during // abstract generation, TXTS_NOSPANS = 2, // Handle wildcards as letters. This is used with ONLYSPANS // for parsing a user query (never alone). TXTS_KEEPWILD = 4 }; TextSplit(Flags flags = Flags(TXTS_NONE)) : m_flags(flags) {} virtual ~TextSplit() {} /** Call at program initialization to read non default values from the configuration */ static void staticConfInit(RclConfig *config); /** Split text, emit words and positions. */ virtual bool text_to_words(const std::string &in); /** Process one output word: to be implemented by the actual user class */ virtual bool takeword(const std::string& term, int pos, // term pos int bts, // byte offset of first char in term int bte // byte offset of first char after term ) = 0; /** Called when we encounter formfeed \f 0x0c. 
Override to use the event. * Mostly or exclusively used with pdftoxx output. Other filters mostly * just don't know about pages. */ virtual void newpage(int /*pos*/) {} // Static utility functions: /** Count words in string, as the splitter would generate them */ static int countWords(const std::string &in, Flags flgs = TXTS_ONLYSPANS); /** Check if this is visibly not a single block of text */ static bool hasVisibleWhite(const std::string &in); /** Split text span into strings, at white space, allowing for substrings * quoted with " . Escaping with \ works as usual inside the quoted areas. * This has to be kept separate from smallut.cpp's stringsToStrings, which * basically works only if whitespace is ascii, and which processes * non-utf-8 input (iso-8859 config files work ok). This hopefully * handles all Unicode whitespace, but needs correct utf-8 input */ static bool stringToStrings(const std::string &s, std::vector &tokens); /** Is char CJK ? (excluding Katakana) */ static bool isCJK(int c); static bool isKATAKANA(int c); static bool isHANGUL(int c); /** Statistics about word length (average and dispersion) can * detect bad data like undecoded base64 or other mis-identified * pieces of data taken as text. In practise, this keeps some junk out * of the index, but does not decrease the index size much, and is * probably not worth the trouble in general. Code kept because it * probably can be useful in special cases. Base64 data does has * word separators in it (+/) and is characterised by high average * word length (>10, often close to 20) and high word length * dispersion (avg/sigma > 0.8). In my tests, most natural * language text has average word lengths around 5-8 and avg/sigma * < 0.7 */ #ifdef TEXTSPLIT_STATS class Stats { public: Stats() { reset(); } void reset() { count = 0; totlen = 0; sigma_acc = 0; } void newsamp(unsigned int len) { ++count; totlen += len; double avglen = double(totlen) / double(count); sigma_acc += (avglen - len) * (avglen - len); } struct Values { int count; double avglen; double sigma; }; Values get() { Values v; v.count = count; v.avglen = double(totlen) / double(count); v.sigma = sqrt(sigma_acc / count); return v; } private: int count; int totlen; double sigma_acc; }; Stats::Values getStats() { return m_stats.get(); } void resetStats() { m_stats.reset(); } #endif // TEXTSPLIT_STATS private: static bool o_processCJK; // true static bool o_noNumbers; // false static bool o_deHyphenate; // false static unsigned int o_CJKNgramLen; // 2 static int o_maxWordLength; // 40 Flags m_flags; // Current span. Might be jf.dockes@wanadoo.f std::string m_span; std::vector > m_words_in_span; // Current word: no punctuation at all in there. Byte offset // relative to the current span and byte length int m_wordStart; unsigned int m_wordLen; // Currently inside number bool m_inNumber; // Term position of current word and span int m_wordpos; int m_spanpos; // It may happen that our cleanup would result in emitting the // same term twice. We try to avoid this int m_prevpos{-1}; int m_prevlen; #ifdef TEXTSPLIT_STATS // Stats counters. These are processed in TextSplit rather than by a // TermProc so that we can take very long words (not emitted) into // account. Stats m_stats; #endif // Word length in characters. 
Declared but not updated if !TEXTSPLIT_STATS unsigned int m_wordChars; void clearsplitstate() { m_span.clear(); m_words_in_span.clear(); m_inNumber = false; m_wordStart = m_wordLen = m_wordpos = m_spanpos = m_prevpos = m_prevlen = m_wordChars = 0; } // This processes cjk text: bool cjk_to_words(Utf8Iter *it, unsigned int *cp); bool emitterm(bool isspan, std::string &term, int pos, size_t bs,size_t be); bool doemit(bool spanerase, size_t bp); void discardspan(); bool span_is_acronym(std::string *acronym); bool words_from_span(size_t bp); }; #endif /* _TEXTSPLIT_H_INCLUDED_ */ recoll-1.26.3/common/utf8fn.cpp0000644000175000017500000000176713533651561013235 00000000000000#include "utf8fn.h" #include "rclconfig.h" #include "transcode.h" #include "log.h" using namespace std; string compute_utf8fn(const RclConfig *config, const string& ifn, bool simple) { string lfn(simple ? path_getsimple(ifn) : ifn); #ifdef _WIN32 // On windows file names are read as UTF16 wchar_t and converted to UTF-8 // while scanning directories return lfn; #else string charset = config->getDefCharset(true); string utf8fn; int ercnt; if (!transcode(lfn, utf8fn, charset, "UTF-8", &ercnt)) { LOGERR("compute_utf8fn: fn transcode failure from [" << charset << "] to UTF-8 for: [" << lfn << "]\n"); } else if (ercnt) { LOGDEB("compute_utf8fn: " << ercnt << " transcode errors from [" << charset << "] to UTF-8 for: [" << lfn << "]\n"); } LOGDEB1("compute_utf8fn: transcoded from [" << lfn << "] to [" << utf8fn << "] (" << charset << "->" << "UTF-8)\n"); return utf8fn; #endif } recoll-1.26.3/common/syngroups.h0000644000175000017500000000274513533651561013536 00000000000000/* Copyright (C) 2015 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _SYNGROUPS_H_INCLUDED_ #define _SYNGROUPS_H_INCLUDED_ #include #include // Manage synonym groups. This is very different from stemming and // case/diac expansion because there is no reference form: all terms // in a group are equivalent. class SynGroups { public: SynGroups(); ~SynGroups(); SynGroups(const SynGroups&) = delete; SynGroups& operator=(const SynGroups&) = delete; SynGroups(const SynGroups&&) = delete; SynGroups& operator=(const SynGroups&&) = delete; bool setfile(const std::string& fname); std::vector getgroup(const std::string& term); bool ok(); private: class Internal; Internal *m; }; #endif /* _SYNGROUPS_H_INCLUDED_ */ recoll-1.26.3/common/rclinit.cpp0000644000175000017500000003214213566424763013466 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #ifdef _WIN32 #include "safewindows.h" #endif #include #include #include #if !defined(PUTENV_ARG_CONST) #include #endif #include #include "log.h" #include "rclconfig.h" #include "rclinit.h" #include "pathut.h" #include "rclutil.h" #include "unac.h" #include "smallut.h" #include "execmd.h" #include "textsplit.h" #include "rcldb.h" std::thread::id mainthread_id; // Signal etc. processing. We want to be able to properly close the // index if we are currently writing to it. // // This is active if the sigcleanup parameter to recollinit is set, // which only recollindex does. We arrange for the handler to be // called when process termination is requested either by the system // or a user keyboard intr. // // The recollindex handler just sets a global termination flag (plus // the cancelcheck thing), which are tested in all timeout loops // etc. When the flag is seen, the main thread processing returns, and // recollindex calls exit(). // // The other parameter, to recollinit(), cleanup, is set as an // atexit() routine, it does the job of actually signalling the // workers to stop and tidy up. It's automagically called by exit(). #ifndef _WIN32 static void siglogreopen(int) { if (recoll_ismainthread()) Logger::getTheLog("")->reopen(""); } // We would like to block SIGCHLD globally, but we can't because // QT uses it. Have to block it inside execmd.cpp static const int catchedSigs[] = {SIGINT, SIGQUIT, SIGTERM, SIGUSR1, SIGUSR2}; void initAsyncSigs(void (*sigcleanup)(int)) { // We ignore SIGPIPE always. All pieces of code which can write to a pipe // must check write() return values. signal(SIGPIPE, SIG_IGN); // Install app signal handler if (sigcleanup) { struct sigaction action; action.sa_handler = sigcleanup; action.sa_flags = 0; sigemptyset(&action.sa_mask); for (unsigned int i = 0; i < sizeof(catchedSigs) / sizeof(int); i++) if (signal(catchedSigs[i], SIG_IGN) != SIG_IGN) { if (sigaction(catchedSigs[i], &action, 0) < 0) { perror("Sigaction failed"); } } } // Install log rotate sig handler { struct sigaction action; action.sa_handler = siglogreopen; action.sa_flags = 0; sigemptyset(&action.sa_mask); if (signal(SIGHUP, SIG_IGN) != SIG_IGN) { if (sigaction(SIGHUP, &action, 0) < 0) { perror("Sigaction failed"); } } } } void recoll_exitready() { } #else // _WIN32 -> // Windows signals etc. // // ^C can be caught by the signal() emulation, but not ^Break // apparently, which is why we use the native approach too // // When a keyboard interrupt occurs, windows creates a thread inside // the process and calls the handler. The process exits when the // handler returns or after at most 10S // // This should also work, with different signals (CTRL_LOGOFF_EVENT, // CTRL_SHUTDOWN_EVENT) when the user exits or the system shuts down). // // Unfortunately, this is not the end of the story. It seems that in // recent Windows version "some kinds" of apps will not reliably // receive the signals. "Some kind" is variably defined, for example a // simple test program works when built with vs 2015, but not // mingw. 
See the following discussion thread for tentative // explanations, it seems that importing or not from user32.dll is the // determining factor. // https://social.msdn.microsoft.com/Forums/windowsdesktop/en-US/abf09824-4e4c-4f2c-ae1e-5981f06c9c6e/windows-7-console-application-has-no-way-of-trapping-logoffshutdown-event?forum=windowscompatibility // In any case, it appears that the only reliable way to be advised of // system shutdown or user exit is to create an "invisible window" and // process window messages, which we now do. static void (*l_sigcleanup)(int); static HANDLE eWorkFinished = INVALID_HANDLE_VALUE; static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType) { LOGDEB("CtrlHandler\n" ); if (l_sigcleanup == 0) return FALSE; switch(fdwCtrlType) { case CTRL_C_EVENT: case CTRL_CLOSE_EVENT: case CTRL_BREAK_EVENT: case CTRL_LOGOFF_EVENT: case CTRL_SHUTDOWN_EVENT: { l_sigcleanup(SIGINT); LOGDEB0("CtrlHandler: waiting for exit ready\n" ); DWORD res = WaitForSingleObject(eWorkFinished, INFINITE); if (res != WAIT_OBJECT_0) { LOGERR("CtrlHandler: exit ack wait failed\n" ); } LOGDEB0("CtrlHandler: got exit ready event, exiting\n" ); return TRUE; } default: return FALSE; } } LRESULT CALLBACK MainWndProc(HWND hwnd , UINT msg , WPARAM wParam, LPARAM lParam) { switch (msg) { case WM_POWERBROADCAST: { LOGDEB("MainWndProc: got powerbroadcast message\n"); // This gets specific processing because we want to check the // state of topdirs on resuming indexing (in case a mounted // volume went away). if (l_sigcleanup) { if (wParam == PBT_APMRESUMEAUTOMATIC || wParam == PBT_APMRESUMESUSPEND) { l_sigcleanup(RCLSIG_RESUME); } } } break; case WM_QUERYENDSESSION: case WM_ENDSESSION: case WM_DESTROY: case WM_CLOSE: { if (l_sigcleanup) { l_sigcleanup(SIGINT); LOGDEB("MainWndProc: got end message, waiting for work finished\n"); DWORD res = WaitForSingleObject(eWorkFinished, INFINITE); if (res != WAIT_OBJECT_0) { LOGERR("MainWndProc: exit ack wait failed\n" ); } } LOGDEB("MainWindowProc: got exit ready event, exiting\n" ); } break; default: return DefWindowProc(hwnd, msg, wParam, lParam); } return TRUE; } bool CreateInvisibleWindow() { HWND hwnd; WNDCLASS wc = {0,0,0,0,0,0,0,0,0,0}; wc.lpfnWndProc = (WNDPROC)MainWndProc; wc.hInstance = GetModuleHandle(NULL); wc.hIcon = LoadIcon(GetModuleHandle(NULL), L"TestWClass"); wc.lpszClassName = L"TestWClass"; RegisterClass(&wc); hwnd = CreateWindowEx(0, L"TestWClass", L"TestWClass", WS_OVERLAPPEDWINDOW, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, CW_USEDEFAULT, (HWND) NULL, (HMENU) NULL, GetModuleHandle(NULL), (LPVOID) NULL); if (!hwnd) { return FALSE; } return TRUE; } DWORD WINAPI RunInvisibleWindowThread(LPVOID lpParam) { MSG msg; CreateInvisibleWindow(); while (GetMessage(&msg, (HWND) NULL , 0 , 0)) { TranslateMessage(&msg); DispatchMessage(&msg); } return 0; } static const int catchedSigs[] = {SIGINT, SIGTERM}; void initAsyncSigs(void (*sigcleanup)(int)) { DWORD tid; // Install app signal handler if (sigcleanup) { l_sigcleanup = sigcleanup; for (unsigned int i = 0; i < sizeof(catchedSigs) / sizeof(int); i++) { if (signal(catchedSigs[i], SIG_IGN) != SIG_IGN) { signal(catchedSigs[i], sigcleanup); } } } CreateThread(NULL, 0, RunInvisibleWindowThread, NULL, 0, &tid); SetConsoleCtrlHandler((PHANDLER_ROUTINE)CtrlHandler, TRUE); eWorkFinished = CreateEvent(NULL, TRUE, FALSE, NULL); if (eWorkFinished == INVALID_HANDLE_VALUE) { LOGERR("initAsyncSigs: error creating exitready event\n" ); } } void recoll_exitready() { LOGDEB("recoll_exitready()\n" ); if (!SetEvent(eWorkFinished)) { 
LOGERR("recoll_exitready: SetEvent failed\n" ); } } #endif RclConfig *recollinit(int flags, void (*cleanup)(void), void (*sigcleanup)(int), string &reason, const string *argcnf) { if (cleanup) atexit(cleanup); #ifdef MACPORTS // Apple keeps changing the way to set the environment (PATH) for // a desktop app (started by launchd or whatever). Life is too // short. const char *cp = getenv("PATH"); if (!cp) //?? cp = ""; string PATH(cp); PATH = string("/opt/local/bin/") + ":" + PATH; setenv("PATH", PATH.c_str(), 1); #endif // Make sure the locale is set. This is only for converting file names // to utf8 for indexing. setlocale(LC_CTYPE, ""); // Initially log to stderr, at error level. Logger::getTheLog("")->setLogLevel(Logger::LLERR); initAsyncSigs(sigcleanup); RclConfig *config = new RclConfig(argcnf); if (!config || !config->ok()) { reason = "Configuration could not be built:\n"; if (config) reason += config->getReason(); else reason += "Out of memory ?"; return 0; } TextSplit::staticConfInit(config); // Retrieve the log file name and level. Daemon and batch indexing // processes may use specific values, else fall back on common // ones. string logfilename, loglevel; if (flags & RCLINIT_DAEMON) { config->getConfParam(string("daemlogfilename"), logfilename); config->getConfParam(string("daemloglevel"), loglevel); } if (flags & RCLINIT_IDX) { if (logfilename.empty()) { config->getConfParam(string("idxlogfilename"), logfilename); } if (loglevel.empty()) { config->getConfParam(string("idxloglevel"), loglevel); } } if (flags & RCLINIT_PYTHON) { if (logfilename.empty()) { config->getConfParam(string("pylogfilename"), logfilename); } if (loglevel.empty()) { config->getConfParam(string("pyloglevel"), loglevel); } } if (logfilename.empty()) config->getConfParam(string("logfilename"), logfilename); if (loglevel.empty()) config->getConfParam(string("loglevel"), loglevel); // Initialize logging if (!logfilename.empty()) { logfilename = path_tildexpand(logfilename); // If not an absolute path or stderr, compute relative to config dir. if (!path_isabsolute(logfilename) && logfilename.compare("stderr")) { logfilename = path_cat(config->getConfDir(), logfilename); } Logger::getTheLog("")->reopen(logfilename); } if (!loglevel.empty()) { int lev = atoi(loglevel.c_str()); Logger::getTheLog("")->setLogLevel(Logger::LogLevel(lev)); } LOGINF(Rcl::version_string() << " [" << config->getConfDir() << "]\n"); // Make sure the locale charset is initialized (so that multiple // threads don't try to do it at once). config->getDefCharset(); mainthread_id = std::this_thread::get_id(); // Init smallut and pathut static values pathut_init_mt(); smallut_init_mt(); rclutil_init_mt(); // Init execmd.h static PATH and PATHELT splitting {string bogus; ExecCmd::which("nosuchcmd", bogus); } // Init Unac translation exceptions string unacex; if (config->getConfParam("unac_except_trans", unacex) && !unacex.empty()) unac_set_except_translations(unacex.c_str()); #ifndef IDX_THREADS ExecCmd::useVfork(true); #else // Keep threads init behind log init, but make sure it's done before // we do the vfork choice ! The latter is not used any more actually, // we always use vfork except if forbidden by config. 
if ((flags & RCLINIT_IDX)) { config->initThrConf(); } bool novfork; config->getConfParam("novfork", &novfork); if (novfork) { LOGDEB0("rclinit: will use fork() for starting commands\n" ); ExecCmd::useVfork(false); } else { LOGDEB0("rclinit: will use vfork() for starting commands\n" ); ExecCmd::useVfork(true); } #endif int flushmb; if (config->getConfParam("idxflushmb", &flushmb) && flushmb > 0) { LOGDEB1("rclinit: idxflushmb=" << flushmb << ", set XAPIAN_FLUSH_THRESHOLD to 10E6\n"); static const char *cp = "XAPIAN_FLUSH_THRESHOLD=1000000"; #ifdef PUTENV_ARG_CONST ::putenv(cp); #else ::putenv(strdup(cp)); #endif } return config; } // Signals are handled by the main thread. All others should call this // routine to block possible signals void recoll_threadinit() { #ifndef _WIN32 sigset_t sset; sigemptyset(&sset); for (unsigned int i = 0; i < sizeof(catchedSigs) / sizeof(int); i++) sigaddset(&sset, catchedSigs[i]); sigaddset(&sset, SIGHUP); pthread_sigmask(SIG_BLOCK, &sset, 0); #else // Not sure that this is needed at all or correct under windows. for (unsigned int i = 0; i < sizeof(catchedSigs) / sizeof(int); i++) { if (signal(catchedSigs[i], SIG_IGN) != SIG_IGN) { signal(catchedSigs[i], SIG_IGN); } } #endif } bool recoll_ismainthread() { return std::this_thread::get_id() == mainthread_id; } recoll-1.26.3/common/cstr.cpp0000644000175000017500000000013213303776060012754 00000000000000 #include "cstr.h" #define RCLIN_CSTR_CPPFILE #undef _CSTR_H_INCLUDED_ #include "cstr.h" recoll-1.26.3/common/unacpp.h0000644000175000017500000000310513533651561012742 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _UNACPP_H_INCLUDED_ #define _UNACPP_H_INCLUDED_ #include #ifndef NO_NAMESPACES using std::string; #endif /* NO_NAMESPACES */ // A small stringified wrapper for unac.c enum UnacOp {UNACOP_UNAC = 1, UNACOP_FOLD = 2, UNACOP_UNACFOLD = 3}; extern bool unacmaybefold(const string& in, string& out, const char *encoding, UnacOp what); // Utility function to determine if string begins with capital extern bool unaciscapital(const string& in); // Utility function to determine if string has upper-case anywhere extern bool unachasuppercase(const string& in); // Utility function to determine if any character is accented. This // approprialey ignores the characters from unac_except_chars which // are really separate letters extern bool unachasaccents(const string& in); #endif /* _UNACPP_H_INCLUDED_ */ recoll-1.26.3/common/uproplist.h0000644000175000017500000001667313533651561013533 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
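// A minimal usage sketch for the unacpp.h wrapper declared just above.
// Illustrative only, not part of the recoll sources; the "UTF-8" encoding
// argument and the unac_usage_sketch() function name are assumptions made for
// the example (unacmaybefold() takes the character encoding of its input as a
// parameter).
#include <iostream>
#include <string>
#include "unacpp.h"

static void unac_usage_sketch(const std::string& term)
{
    std::string folded;
    // UNACOP_UNACFOLD removes accents and folds case in a single pass;
    // UNACOP_UNAC and UNACOP_FOLD perform only one of the two operations.
    if (!unacmaybefold(term, folded, "UTF-8", UNACOP_UNACFOLD)) {
        std::cerr << "unac/fold failed for [" << term << "]\n";
        return;
    }
    std::cout << term << " -> " << folded << "\n";

    // The small predicates can drive term expansion decisions, for example
    // only keeping capitalized variants when the input itself is capitalized.
    if (unaciscapital(term)) {
        // ... restrict expansion to capitalized variants ...
    }
}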
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PROPLIST_H_INCLUDED_ #define _PROPLIST_H_INCLUDED_ /** * A subset of Unicode chars that we consider word breaks when we * split text in words. * * This is used as a quick fix to the ascii-based code, and is not correct. * the correct way would be to do what http://www.unicode.org/reports/tr29/ * says. */ // Punctuation chararacters blocks array. Each block is defined by a // starting and ending code point (both included). MUST BE SORTED. static const unsigned unipuncblocks[] = { // Start of latin-1 supplement block, up to capital A grave 0x0080, 0x00BF, // General punctuation 0x2000, 0x206F, // Superscripts and subscripts 0x2070, 0x209F, // Currency symbols 0x20A0, 0x20CF, // Letterlike symbols 0x2100, 0x214f, // Number forms 0x2150, 0x218F, // Arrows 0x2190, 0x21FF, // Mathematical Operators 0x2200, 0x22FF, // Miscellaneous Technical 0x2300, 0x23FF, // Control Pictures 0x2400, 0x243F, // Optical Character Recognition 0x2440, 0x245F, // Enclosed Alphanumerics 0x2460, 0x24FF, // Box Drawing 0x2500, 0x257F, // Block Elements 0x2580, 0x259F, // Geometric Shapes 0x25A0, 0x25FF, // Miscellaneous Symbols 0x2600, 0x26FF, // Dingbats 0x2700, 0x27BF, // Miscellaneous Mathematical Symbols-A 0x27C0, 0x27EF, // Supplemental Arrows-A 0x27F0, 0x27FF, // Supplemental Arrows-B 0x2900, 0x297F, // Miscellaneous Mathematical Symbols-B 0x2980, 0x29FF, // Supplemental Mathematical Operators 0x2A00, 0x2AFF, // Miscellaneous Symbols and Arrows 0x2B00, 0x2BFF, }; // Other punctuation characters list. Not all punctuation is in a // separate block some is found in the middle of alphanumeric codes. 
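// A minimal sketch, not the classification code recoll actually uses, showing
// why the unipuncblocks table above "MUST BE SORTED": with sorted, inclusive
// [start, end] pairs, a code point can be classified with a simple binary
// search over the blocks. The function name is hypothetical.
static bool in_punct_blocks_sketch(unsigned int c)
{
    unsigned int nblocks = sizeof(unipuncblocks) / sizeof(unipuncblocks[0]) / 2;
    unsigned int lo = 0, hi = nblocks;
    while (lo < hi) {
        unsigned int mid = (lo + hi) / 2;
        if (c < unipuncblocks[2 * mid]) {
            hi = mid;                    // below this block: search left half
        } else if (c > unipuncblocks[2 * mid + 1]) {
            lo = mid + 1;                // above this block: search right half
        } else {
            return true;                 // start <= c <= end: punctuation
        }
    }
    return false;
}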
static const unsigned int unipunc[] = { 0x00D7, /* MULTIPLICATION SIGN */ 0x00F7, /* DIVISION SIGN */ 0x037E, /* GREEK QUESTION MARK */ 0x0387, /* GREEK ANO TELEIA */ 0x055C, /* ARMENIAN EXCLAMATION MARK */ 0x055E, /* ARMENIAN QUESTION MARK */ 0x0589, /* ARMENIAN FULL STOP */ 0x058A, /* ARMENIAN HYPHEN */ 0x05C3, /* HEBREW PUNCTUATION SOF PASUQ */ 0x060C, /* ARABIC COMMA */ 0x061B, /* ARABIC SEMICOLON */ 0x061F, /* ARABIC QUESTION MARK */ 0x06D4, /* ARABIC FULL STOP */ 0x0964, /* DEVANAGARI DANDA */ 0x0965, /* DEVANAGARI DOUBLE DANDA */ 0x166E, /* CANADIAN SYLLABICS FULL STOP */ 0x1680, /* OGHAM SPACE MARK */ 0x16EB, /* RUNIC SINGLE PUNCTUATION */ 0x16EC, /* RUNIC MULTIPLE PUNCTUATION */ 0x16ED, /* RUNIC CROSS PUNCTUATION */ 0x1803, /* MONGOLIAN FULL STOP */ 0x1806, /* MONGOLIAN TODO SOFT HYPHEN */ 0x1809, /* MONGOLIAN MANCHU FULL STOP */ 0x180E, /* MONGOLIAN VOWEL SEPARATOR */ 0x2E2E, /* REVERSED QUESTION MARK;Po;0;ON;;;;;N;;;;; */ 0x3000, /* IDEOGRAPHIC SPACE*/ 0x3002, /* IDEOGRAPHIC FULL STOP*/ 0x300C, /* LEFT CORNER BRACKET*/ 0x300D, /* RIGHT CORNER BRACKET*/ 0x300E, /* LEFT WHITE CORNER BRACKET*/ 0x300F, /* RIGHT WHITE CORNER BRACKET*/ 0x301C, /* WAVE DASH*/ 0x301D, /* REVERSED DOUBLE PRIME QUOTATION MARK*/ 0x301E, /* LOW DOUBLE PRIME QUOTATION MARK*/ 0x3030, /* WAVY DASH*/ 0x30FB, /* KATAKANA MIDDLE DOT*/ 0xC2B6, /* PILCROW SIGN;So;0;ON;;;;;N;PARAGRAPH SIGN;;;; */ 0xC3B7, /* DIVISION SIGN;Sm;0;ON;;;;;N;;;;; */ 0xFE31, /* PRESENTATION FORM FOR VERTICAL EM DASH*/ 0xFE32, /* PRESENTATION FORM FOR VERTICAL EN DASH*/ 0xFE41, /* PRESENTATION FORM FOR VERTICAL LEFT CORNER BRACKET*/ 0xFE42, /* PRESENTATION FORM FOR VERTICAL RIGHT CORNER BRACKET*/ 0xFE43, /* PRESENTATION FORM FOR VERTICAL LEFT WHITE CORNER BRACKET*/ 0xFE44, /* PRESENTATION FORM FOR VERTICAL RIGHT WHITE CORNER BRACKET*/ 0xFE50, /* [3] SMALL COMMA..SMALL FULL STOP*/ 0xFE51, /* [3] SMALL COMMA..SMALL FULL STOP*/ 0xFE52, /* STOP*/ 0xFE52, /* [3] SMALL COMMA..SMALL FULL STOP*/ 0xFE54, /* [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK*/ 0xFE55, /* [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK*/ 0xFE56, /* [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK*/ 0xFE57, /* [4] SMALL SEMICOLON..SMALL EXCLAMATION MARK*/ 0xFE58, /* SMALL EM DASH */ 0xFE63, /* SMALL HYPHEN-MINUS */ 0xFF01, /* FULLWIDTH EXCLAMATION MARK */ 0xFF02, /* FULLWIDTH QUOTATION MARK */ 0xFF03, /* FULLWIDTH NUMBER SIGN */ 0xFF04, /* FULLWIDTH DOLLAR SIGN */ 0xFF05, /* FULLWIDTH PERCENT SIGN */ 0xFF06, /* FULLWIDTH AMPERSAND */ 0xFF07, /* FULLWIDTH APOSTROPHE */ 0xFF08, /* FULLWIDTH LEFT PARENTHESIS */ 0xFF09, /* FULLWIDTH RIGHT PARENTHESIS */ 0xFF0A, /* FULLWIDTH ASTERISK */ 0xFF0B, /* FULLWIDTH PLUS SIGN */ 0xFF0C, /* FULLWIDTH COMMA */ 0xFF0D, /* FULLWIDTH HYPHEN-MINUS */ 0xFF0E, /* FULLWIDTH FULL STOP */ 0xFF0F, /* FULLWIDTH SOLIDUS */ 0xFF1A, /* [2] FULLWIDTH COLON..FULLWIDTH SEMICOLON*/ 0xFF1B, /* [2] FULLWIDTH COLON..FULLWIDTH SEMICOLON*/ 0xFF1F, /* FULLWIDTH QUESTION MARK*/ 0xFF61, /* HALFWIDTH IDEOGRAPHIC FULL STOP*/ 0xFF62, /* HALFWIDTH LEFT CORNER BRACKET*/ 0xFF63, /* HALFWIDTH RIGHT CORNER BRACKET*/ 0xFF64, /* HALFWIDTH IDEOGRAPHIC COMMA*/ 0xFF65, /* HALFWIDTH KATAKANA MIDDLE DOT*/ }; // Characters that should just be discarded. Some of these are in the // above blocks, but this array is tested first, so it's not worth // breaking the blocks static const unsigned int uniskip[] = { 0x00AD, /* SOFT HYPHEN */ 0x034F, /* COMBINING GRAPHEME JOINER */ 0x2027, /* HYPHENATION POINT */ 0x200C, /* ZERO WIDTH NON-JOINER */ 0x200D, /* ZERO WIDTH JOINER */ 0x2060, /* WORD JOINER . 
Actually this should not be ignored but used to * prevent a word break... */ }; /* Things that would visibly break a block of text, rendering obvious the need * of quotation if a phrase search is wanted */ static const unsigned int avsbwht[] = { 0x0009, /* CHARACTER TABULATION */ 0x000A, /* LINE FEED */ 0x000D, /* CARRIAGE RETURN */ 0x0020, /* SPACE;Zs;0;WS */ 0x00A0, /* NO-BREAK SPACE;Zs;0;CS */ 0x1680, /* OGHAM SPACE MARK;Zs;0;WS */ 0x180E, /* MONGOLIAN VOWEL SEPARATOR;Zs;0;WS */ 0x2000, /* EN QUAD;Zs;0;WS */ 0x2001, /* EM QUAD;Zs;0;WS */ 0x2002, /* EN SPACE;Zs;0;WS */ 0x2003, /* EM SPACE;Zs;0;WS */ 0x2004, /* THREE-PER-EM SPACE;Zs;0;WS */ 0x2005, /* FOUR-PER-EM SPACE;Zs;0;WS */ 0x2006, /* SIX-PER-EM SPACE;Zs;0;WS */ 0x2007, /* FIGURE SPACE;Zs;0;WS */ 0x2008, /* PUNCTUATION SPACE;Zs;0;WS */ 0x2009, /* THIN SPACE;Zs;0;WS */ 0x200A, /* HAIR SPACE;Zs;0;WS */ 0x202F, /* NARROW NO-BREAK SPACE;Zs;0;CS */ 0x205F, /* MEDIUM MATHEMATICAL SPACE;Zs;0;WS */ 0x3000, /* IDEOGRAPHIC SPACE;Zs;0;WS */ }; #endif // _PROPLIST_H_INCLUDED_ recoll-1.26.3/config.guess0000755000175000017500000012564413570165161012345 00000000000000#! /bin/sh # Attempt to guess a canonical system name. # Copyright 1992-2016 Free Software Foundation, Inc. timestamp='2016-10-02' # This file is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see . # # As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that # program. This Exception is an additional permission under section 7 # of the GNU General Public License, version 3 ("GPLv3"). # # Originally written by Per Bothner; maintained since 2000 by Ben Elliston. # # You can get the latest version of this script from: # http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess # # Please send patches to . me=`echo "$0" | sed -e 's,.*/,,'` usage="\ Usage: $0 [OPTION] Output the configuration name of the system \`$me' is run on. Operation modes: -h, --help print this help, then exit -t, --time-stamp print date of last modification, then exit -v, --version print version number, then exit Report bugs and patches to ." version="\ GNU config.guess ($timestamp) Originally written by Per Bothner. Copyright 1992-2016 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." help=" Try \`$me --help' for more information." # Parse command line while test $# -gt 0 ; do case $1 in --time-stamp | --time* | -t ) echo "$timestamp" ; exit ;; --version | -v ) echo "$version" ; exit ;; --help | --h* | -h ) echo "$usage"; exit ;; -- ) # Stop option processing shift; break ;; - ) # Use stdin as input. 
break ;; -* ) echo "$me: invalid option $1$help" >&2 exit 1 ;; * ) break ;; esac done if test $# != 0; then echo "$me: too many arguments$help" >&2 exit 1 fi trap 'exit 1' 1 2 15 # CC_FOR_BUILD -- compiler used by this script. Note that the use of a # compiler to aid in system detection is discouraged as it requires # temporary files to be created and, as you can see below, it is a # headache to deal with in a portable fashion. # Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still # use `HOST_CC' if defined, but it is deprecated. # Portable tmp directory creation inspired by the Autoconf team. set_cc_for_build=' trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; : ${TMPDIR=/tmp} ; { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; dummy=$tmp/dummy ; tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; case $CC_FOR_BUILD,$HOST_CC,$CC in ,,) echo "int x;" > $dummy.c ; for c in cc gcc c89 c99 ; do if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then CC_FOR_BUILD="$c"; break ; fi ; done ; if test x"$CC_FOR_BUILD" = x ; then CC_FOR_BUILD=no_compiler_found ; fi ;; ,,*) CC_FOR_BUILD=$CC ;; ,*,*) CC_FOR_BUILD=$HOST_CC ;; esac ; set_cc_for_build= ;' # This is needed to find uname on a Pyramid OSx when run in the BSD universe. # (ghazi@noc.rutgers.edu 1994-08-24) if (test -f /.attbin/uname) >/dev/null 2>&1 ; then PATH=$PATH:/.attbin ; export PATH fi UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown case "${UNAME_SYSTEM}" in Linux|GNU|GNU/*) # If the system lacks a compiler, then just pick glibc. # We could probably try harder. LIBC=gnu eval $set_cc_for_build cat <<-EOF > $dummy.c #include #if defined(__UCLIBC__) LIBC=uclibc #elif defined(__dietlibc__) LIBC=dietlibc #else LIBC=gnu #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC' | sed 's, ,,g'` ;; esac # Note: order is significant - the case branches are not exclusive. case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in *:NetBSD:*:*) # NetBSD (nbsd) targets should (where applicable) match one or # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently # switched to ELF, *-*-netbsd* would select the old # object file format. This provides both forward # compatibility and a consistent mechanism for selecting the # object file format. # # Note: NetBSD doesn't particularly care about the vendor # portion of the name. We always set it to "unknown". 
sysctl="sysctl -n hw.machine_arch" UNAME_MACHINE_ARCH=`(uname -p 2>/dev/null || \ /sbin/$sysctl 2>/dev/null || \ /usr/sbin/$sysctl 2>/dev/null || \ echo unknown)` case "${UNAME_MACHINE_ARCH}" in armeb) machine=armeb-unknown ;; arm*) machine=arm-unknown ;; sh3el) machine=shl-unknown ;; sh3eb) machine=sh-unknown ;; sh5el) machine=sh5le-unknown ;; earmv*) arch=`echo ${UNAME_MACHINE_ARCH} | sed -e 's,^e\(armv[0-9]\).*$,\1,'` endian=`echo ${UNAME_MACHINE_ARCH} | sed -ne 's,^.*\(eb\)$,\1,p'` machine=${arch}${endian}-unknown ;; *) machine=${UNAME_MACHINE_ARCH}-unknown ;; esac # The Operating System including object format, if it has switched # to ELF recently (or will in the future) and ABI. case "${UNAME_MACHINE_ARCH}" in earm*) os=netbsdelf ;; arm*|i386|m68k|ns32k|sh3*|sparc|vax) eval $set_cc_for_build if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ELF__ then # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). # Return netbsd for either. FIX? os=netbsd else os=netbsdelf fi ;; *) os=netbsd ;; esac # Determine ABI tags. case "${UNAME_MACHINE_ARCH}" in earm*) expr='s/^earmv[0-9]/-eabi/;s/eb$//' abi=`echo ${UNAME_MACHINE_ARCH} | sed -e "$expr"` ;; esac # The OS release # Debian GNU/NetBSD machines have a different userland, and # thus, need a distinct triplet. However, they do not need # kernel version information, so it can be replaced with a # suitable tag, in the style of linux-gnu. case "${UNAME_VERSION}" in Debian*) release='-gnu' ;; *) release=`echo ${UNAME_RELEASE} | sed -e 's/[-_].*//' | cut -d. -f1,2` ;; esac # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: # contains redundant information, the shorter form: # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. echo "${machine}-${os}${release}${abi}" exit ;; *:Bitrig:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/Bitrig.//'` echo ${UNAME_MACHINE_ARCH}-unknown-bitrig${UNAME_RELEASE} exit ;; *:OpenBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} exit ;; *:LibertyBSD:*:*) UNAME_MACHINE_ARCH=`arch | sed 's/^.*BSD\.//'` echo ${UNAME_MACHINE_ARCH}-unknown-libertybsd${UNAME_RELEASE} exit ;; *:ekkoBSD:*:*) echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} exit ;; *:SolidBSD:*:*) echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} exit ;; macppc:MirBSD:*:*) echo powerpc-unknown-mirbsd${UNAME_RELEASE} exit ;; *:MirBSD:*:*) echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} exit ;; *:Sortix:*:*) echo ${UNAME_MACHINE}-unknown-sortix exit ;; alpha:OSF1:*:*) case $UNAME_RELEASE in *4.0) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` ;; *5.*) UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` ;; esac # According to Compaq, /usr/sbin/psrinfo has been available on # OSF/1 and Tru64 systems produced since 1995. I hope that # covers most systems running today. This code pipes the CPU # types through head -n 1, so we only detect the type of CPU 0. 
ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` case "$ALPHA_CPU_TYPE" in "EV4 (21064)") UNAME_MACHINE=alpha ;; "EV4.5 (21064)") UNAME_MACHINE=alpha ;; "LCA4 (21066/21068)") UNAME_MACHINE=alpha ;; "EV5 (21164)") UNAME_MACHINE=alphaev5 ;; "EV5.6 (21164A)") UNAME_MACHINE=alphaev56 ;; "EV5.6 (21164PC)") UNAME_MACHINE=alphapca56 ;; "EV5.7 (21164PC)") UNAME_MACHINE=alphapca57 ;; "EV6 (21264)") UNAME_MACHINE=alphaev6 ;; "EV6.7 (21264A)") UNAME_MACHINE=alphaev67 ;; "EV6.8CB (21264C)") UNAME_MACHINE=alphaev68 ;; "EV6.8AL (21264B)") UNAME_MACHINE=alphaev68 ;; "EV6.8CX (21264D)") UNAME_MACHINE=alphaev68 ;; "EV6.9A (21264/EV69A)") UNAME_MACHINE=alphaev69 ;; "EV7 (21364)") UNAME_MACHINE=alphaev7 ;; "EV7.9 (21364A)") UNAME_MACHINE=alphaev79 ;; esac # A Pn.n version is a patched version. # A Vn.n version is a released version. # A Tn.n version is a released field test version. # A Xn.n version is an unreleased experimental baselevel. # 1.2 uses "1.2" for uname -r. echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` # Reset EXIT trap before exiting to avoid spurious non-zero exit code. exitcode=$? trap '' 0 exit $exitcode ;; Alpha\ *:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # Should we change UNAME_MACHINE based on the output of uname instead # of the specific Alpha model? echo alpha-pc-interix exit ;; 21064:Windows_NT:50:3) echo alpha-dec-winnt3.5 exit ;; Amiga*:UNIX_System_V:4.0:*) echo m68k-unknown-sysv4 exit ;; *:[Aa]miga[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-amigaos exit ;; *:[Mm]orph[Oo][Ss]:*:*) echo ${UNAME_MACHINE}-unknown-morphos exit ;; *:OS/390:*:*) echo i370-ibm-openedition exit ;; *:z/VM:*:*) echo s390-ibm-zvmoe exit ;; *:OS400:*:*) echo powerpc-ibm-os400 exit ;; arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) echo arm-acorn-riscix${UNAME_RELEASE} exit ;; arm*:riscos:*:*|arm*:RISCOS:*:*) echo arm-unknown-riscos exit ;; SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) echo hppa1.1-hitachi-hiuxmpp exit ;; Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. if test "`(/bin/universe) 2>/dev/null`" = att ; then echo pyramid-pyramid-sysv3 else echo pyramid-pyramid-bsd fi exit ;; NILE*:*:*:dcosx) echo pyramid-pyramid-svr4 exit ;; DRS?6000:unix:4.0:6*) echo sparc-icl-nx6 exit ;; DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) case `/usr/bin/uname -p` in sparc) echo sparc-icl-nx7; exit ;; esac ;; s390x:SunOS:*:*) echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4H:SunOS:5.*:*) echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) echo i386-pc-auroraux${UNAME_RELEASE} exit ;; i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) eval $set_cc_for_build SUN_ARCH=i386 # If there is a compiler, see if it is configured for 64-bit objects. # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. # This test works for both compilers. 
if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then SUN_ARCH=x86_64 fi fi echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:6*:*) # According to config.sub, this is the proper way to canonicalize # SunOS6. Hard to guess exactly what SunOS6 will be like, but # it's likely to be more like Solaris than SunOS4. echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; sun4*:SunOS:*:*) case "`/usr/bin/arch -k`" in Series*|S4*) UNAME_RELEASE=`uname -v` ;; esac # Japanese Language versions have a version number like `4.1.3-JL'. echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` exit ;; sun3*:SunOS:*:*) echo m68k-sun-sunos${UNAME_RELEASE} exit ;; sun*:*:4.2BSD:*) UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` test "x${UNAME_RELEASE}" = x && UNAME_RELEASE=3 case "`/bin/arch`" in sun3) echo m68k-sun-sunos${UNAME_RELEASE} ;; sun4) echo sparc-sun-sunos${UNAME_RELEASE} ;; esac exit ;; aushp:SunOS:*:*) echo sparc-auspex-sunos${UNAME_RELEASE} exit ;; # The situation for MiNT is a little confusing. The machine name # can be virtually everything (everything which is not # "atarist" or "atariste" at least should have a processor # > m68000). The system name ranges from "MiNT" over "FreeMiNT" # to the lowercase version "mint" (or "freemint"). Finally # the system name "TOS" denotes a system which is actually not # MiNT. But MiNT is downward compatible to TOS, so this should # be no problem. atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) echo m68k-atari-mint${UNAME_RELEASE} exit ;; milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) echo m68k-milan-mint${UNAME_RELEASE} exit ;; hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) echo m68k-hades-mint${UNAME_RELEASE} exit ;; *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) echo m68k-unknown-mint${UNAME_RELEASE} exit ;; m68k:machten:*:*) echo m68k-apple-machten${UNAME_RELEASE} exit ;; powerpc:machten:*:*) echo powerpc-apple-machten${UNAME_RELEASE} exit ;; RISC*:Mach:*:*) echo mips-dec-mach_bsd4.3 exit ;; RISC*:ULTRIX:*:*) echo mips-dec-ultrix${UNAME_RELEASE} exit ;; VAX*:ULTRIX*:*:*) echo vax-dec-ultrix${UNAME_RELEASE} exit ;; 2020:CLIX:*:* | 2430:CLIX:*:*) echo clipper-intergraph-clix${UNAME_RELEASE} exit ;; mips:*:*:UMIPS | mips:*:*:RISCos) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #ifdef __cplusplus #include /* for printf() prototype */ int main (int argc, char *argv[]) { #else int main (argc, argv) int argc; char *argv[]; { #endif #if defined (host_mips) && defined (MIPSEB) #if defined (SYSTYPE_SYSV) printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_SVR4) printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); #endif #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); #endif #endif exit (-1); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` && SYSTEM_NAME=`$dummy $dummyarg` && { echo "$SYSTEM_NAME"; exit; } echo mips-mips-riscos${UNAME_RELEASE} exit ;; Motorola:PowerMAX_OS:*:*) echo powerpc-motorola-powermax exit ;; Motorola:*:4.3:PL8-*) 
echo powerpc-harris-powermax exit ;; Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) echo powerpc-harris-powermax exit ;; Night_Hawk:Power_UNIX:*:*) echo powerpc-harris-powerunix exit ;; m88k:CX/UX:7*:*) echo m88k-harris-cxux7 exit ;; m88k:*:4*:R4*) echo m88k-motorola-sysv4 exit ;; m88k:*:3*:R3*) echo m88k-motorola-sysv3 exit ;; AViiON:dgux:*:*) # DG/UX returns AViiON for all architectures UNAME_PROCESSOR=`/usr/bin/uname -p` if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] then if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ [ ${TARGET_BINARY_INTERFACE}x = x ] then echo m88k-dg-dgux${UNAME_RELEASE} else echo m88k-dg-dguxbcs${UNAME_RELEASE} fi else echo i586-dg-dgux${UNAME_RELEASE} fi exit ;; M88*:DolphinOS:*:*) # DolphinOS (SVR3) echo m88k-dolphin-sysv3 exit ;; M88*:*:R3*:*) # Delta 88k system running SVR3 echo m88k-motorola-sysv3 exit ;; XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) echo m88k-tektronix-sysv3 exit ;; Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) echo m68k-tektronix-bsd exit ;; *:IRIX*:*:*) echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` exit ;; ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' i*86:AIX:*:*) echo i386-ibm-aix exit ;; ia64:AIX:*:*) if [ -x /usr/bin/oslevel ] ; then IBM_REV=`/usr/bin/oslevel` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} exit ;; *:AIX:2:3) if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include main() { if (!__power_pc()) exit(1); puts("powerpc-ibm-aix3.2.5"); exit(0); } EOF if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` then echo "$SYSTEM_NAME" else echo rs6000-ibm-aix3.2.5 fi elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then echo rs6000-ibm-aix3.2.4 else echo rs6000-ibm-aix3.2 fi exit ;; *:AIX:*:[4567]) IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then IBM_ARCH=rs6000 else IBM_ARCH=powerpc fi if [ -x /usr/bin/lslpp ] ; then IBM_REV=`/usr/bin/lslpp -Lqc bos.rte.libc | awk -F: '{ print $3 }' | sed s/[0-9]*$/0/` else IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} fi echo ${IBM_ARCH}-ibm-aix${IBM_REV} exit ;; *:AIX:*:*) echo rs6000-ibm-aix exit ;; ibmrt:4.4BSD:*|romp-ibm:BSD:*) echo romp-ibm-bsd4.4 exit ;; ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to exit ;; # report: romp-ibm BSD 4.3 *:BOSX:*:*) echo rs6000-bull-bosx exit ;; DPX/2?00:B.O.S.:*:*) echo m68k-bull-sysv3 exit ;; 9000/[34]??:4.3bsd:1.*:*) echo m68k-hp-bsd exit ;; hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) echo m68k-hp-bsd4.4 exit ;; 9000/[34678]??:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` case "${UNAME_MACHINE}" in 9000/31? ) HP_ARCH=m68000 ;; 9000/[34]?? 
) HP_ARCH=m68k ;; 9000/[678][0-9][0-9]) if [ -x /usr/bin/getconf ]; then sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` case "${sc_cpu_version}" in 523) HP_ARCH=hppa1.0 ;; # CPU_PA_RISC1_0 528) HP_ARCH=hppa1.1 ;; # CPU_PA_RISC1_1 532) # CPU_PA_RISC2_0 case "${sc_kernel_bits}" in 32) HP_ARCH=hppa2.0n ;; 64) HP_ARCH=hppa2.0w ;; '') HP_ARCH=hppa2.0 ;; # HP-UX 10.20 esac ;; esac fi if [ "${HP_ARCH}" = "" ]; then eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #define _HPUX_SOURCE #include #include int main () { #if defined(_SC_KERNEL_BITS) long bits = sysconf(_SC_KERNEL_BITS); #endif long cpu = sysconf (_SC_CPU_VERSION); switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0"); break; case CPU_PA_RISC1_1: puts ("hppa1.1"); break; case CPU_PA_RISC2_0: #if defined(_SC_KERNEL_BITS) switch (bits) { case 64: puts ("hppa2.0w"); break; case 32: puts ("hppa2.0n"); break; default: puts ("hppa2.0"); break; } break; #else /* !defined(_SC_KERNEL_BITS) */ puts ("hppa2.0"); break; #endif default: puts ("hppa1.0"); break; } exit (0); } EOF (CCOPTS="" $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` test -z "$HP_ARCH" && HP_ARCH=hppa fi ;; esac if [ ${HP_ARCH} = hppa2.0w ] then eval $set_cc_for_build # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler # generating 64-bit code. GNU and HP use different nomenclature: # # $ CC_FOR_BUILD=cc ./config.guess # => hppa2.0w-hp-hpux11.23 # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess # => hppa64-hp-hpux11.23 if echo __LP64__ | (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | grep -q __LP64__ then HP_ARCH=hppa2.0w else HP_ARCH=hppa64 fi fi echo ${HP_ARCH}-hp-hpux${HPUX_REV} exit ;; ia64:HP-UX:*:*) HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` echo ia64-hp-hpux${HPUX_REV} exit ;; 3050*:HI-UX:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #include int main () { long cpu = sysconf (_SC_CPU_VERSION); /* The order matters, because CPU_IS_HP_MC68K erroneously returns true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct results, however. 
*/ if (CPU_IS_PA_RISC (cpu)) { switch (cpu) { case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; default: puts ("hppa-hitachi-hiuxwe2"); break; } } else if (CPU_IS_HP_MC68K (cpu)) puts ("m68k-hitachi-hiuxwe2"); else puts ("unknown-hitachi-hiuxwe2"); exit (0); } EOF $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && { echo "$SYSTEM_NAME"; exit; } echo unknown-hitachi-hiuxwe2 exit ;; 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) echo hppa1.1-hp-bsd exit ;; 9000/8??:4.3bsd:*:*) echo hppa1.0-hp-bsd exit ;; *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) echo hppa1.0-hp-mpeix exit ;; hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) echo hppa1.1-hp-osf exit ;; hp8??:OSF1:*:*) echo hppa1.0-hp-osf exit ;; i*86:OSF1:*:*) if [ -x /usr/sbin/sysversion ] ; then echo ${UNAME_MACHINE}-unknown-osf1mk else echo ${UNAME_MACHINE}-unknown-osf1 fi exit ;; parisc*:Lites*:*:*) echo hppa1.1-hp-lites exit ;; C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) echo c1-convex-bsd exit ;; C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) if getsysinfo -f scalar_acc then echo c32-convex-bsd else echo c2-convex-bsd fi exit ;; C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) echo c34-convex-bsd exit ;; C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) echo c38-convex-bsd exit ;; C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) echo c4-convex-bsd exit ;; CRAY*Y-MP:*:*:*) echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*[A-Z]90:*:*:*) echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ -e 's/\.[^.]*$/.X/' exit ;; CRAY*TS:*:*:*) echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*T3E:*:*:*) echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; CRAY*SV1:*:*:*) echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; *:UNICOS/mp:*:*) echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' exit ;; F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) FUJITSU_PROC=`uname -m | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz` FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; 5000:UNIX_System_V:4.*:*) FUJITSU_SYS=`uname -p | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/\///'` FUJITSU_REL=`echo ${UNAME_RELEASE} | tr ABCDEFGHIJKLMNOPQRSTUVWXYZ abcdefghijklmnopqrstuvwxyz | sed -e 's/ /_/'` echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" exit ;; i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} exit ;; sparc*:BSD/OS:*:*) echo sparc-unknown-bsdi${UNAME_RELEASE} exit ;; *:BSD/OS:*:*) echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} exit ;; *:FreeBSD:*:*) UNAME_PROCESSOR=`/usr/bin/uname -p` case ${UNAME_PROCESSOR} in amd64) echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; *) echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; esac exit ;; i*:CYGWIN*:*) echo ${UNAME_MACHINE}-pc-cygwin exit ;; *:MINGW64*:*) echo ${UNAME_MACHINE}-pc-mingw64 exit ;; *:MINGW*:*) echo ${UNAME_MACHINE}-pc-mingw32 exit ;; *:MSYS*:*) echo ${UNAME_MACHINE}-pc-msys exit ;; i*:windows32*:*) # uname -m includes "-pc" on this system. 
echo ${UNAME_MACHINE}-mingw32 exit ;; i*:PW*:*) echo ${UNAME_MACHINE}-pc-pw32 exit ;; *:Interix*:*) case ${UNAME_MACHINE} in x86) echo i586-pc-interix${UNAME_RELEASE} exit ;; authenticamd | genuineintel | EM64T) echo x86_64-unknown-interix${UNAME_RELEASE} exit ;; IA64) echo ia64-unknown-interix${UNAME_RELEASE} exit ;; esac ;; [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) echo i${UNAME_MACHINE}-pc-mks exit ;; 8664:Windows_NT:*) echo x86_64-pc-mks exit ;; i*:Windows_NT*:* | Pentium*:Windows_NT*:*) # How do we know it's Interix rather than the generic POSIX subsystem? # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we # UNAME_MACHINE based on the output of uname instead of i386? echo i586-pc-interix exit ;; i*:UWIN*:*) echo ${UNAME_MACHINE}-pc-uwin exit ;; amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) echo x86_64-unknown-cygwin exit ;; p*:CYGWIN*:*) echo powerpcle-unknown-cygwin exit ;; prep*:SunOS:5.*:*) echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` exit ;; *:GNU:*:*) # the GNU system echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-${LIBC}`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` exit ;; *:GNU/*:*:*) # other systems with GNU libc and userland echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr "[:upper:]" "[:lower:]"``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-${LIBC} exit ;; i*86:Minix:*:*) echo ${UNAME_MACHINE}-pc-minix exit ;; aarch64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; aarch64_be:Linux:*:*) UNAME_MACHINE=aarch64_be echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; alpha:Linux:*:*) case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in EV5) UNAME_MACHINE=alphaev5 ;; EV56) UNAME_MACHINE=alphaev56 ;; PCA56) UNAME_MACHINE=alphapca56 ;; PCA57) UNAME_MACHINE=alphapca56 ;; EV6) UNAME_MACHINE=alphaev6 ;; EV67) UNAME_MACHINE=alphaev67 ;; EV68*) UNAME_MACHINE=alphaev68 ;; esac objdump --private-headers /bin/sh | grep -q ld.so.1 if test "$?" 
= 0 ; then LIBC=gnulibc1 ; fi echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arc:Linux:*:* | arceb:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; arm*:Linux:*:*) eval $set_cc_for_build if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_EABI__ then echo ${UNAME_MACHINE}-unknown-linux-${LIBC} else if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ | grep -q __ARM_PCS_VFP then echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabi else echo ${UNAME_MACHINE}-unknown-linux-${LIBC}eabihf fi fi exit ;; avr32*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; cris:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; crisv32:Linux:*:*) echo ${UNAME_MACHINE}-axis-linux-${LIBC} exit ;; e2k:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; frv:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; hexagon:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; ia64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; k1om:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m32r*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; m68*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; mips:Linux:*:* | mips64:Linux:*:*) eval $set_cc_for_build sed 's/^ //' << EOF >$dummy.c #undef CPU #undef ${UNAME_MACHINE} #undef ${UNAME_MACHINE}el #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) CPU=${UNAME_MACHINE}el #else #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) CPU=${UNAME_MACHINE} #else CPU= #endif #endif EOF eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` test x"${CPU}" != x && { echo "${CPU}-unknown-linux-${LIBC}"; exit; } ;; mips64el:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; openrisc*:Linux:*:*) echo or1k-unknown-linux-${LIBC} exit ;; or32:Linux:*:* | or1k*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; padre:Linux:*:*) echo sparc-unknown-linux-${LIBC} exit ;; parisc64:Linux:*:* | hppa64:Linux:*:*) echo hppa64-unknown-linux-${LIBC} exit ;; parisc:Linux:*:* | hppa:Linux:*:*) # Look for CPU level case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in PA7*) echo hppa1.1-unknown-linux-${LIBC} ;; PA8*) echo hppa2.0-unknown-linux-${LIBC} ;; *) echo hppa-unknown-linux-${LIBC} ;; esac exit ;; ppc64:Linux:*:*) echo powerpc64-unknown-linux-${LIBC} exit ;; ppc:Linux:*:*) echo powerpc-unknown-linux-${LIBC} exit ;; ppc64le:Linux:*:*) echo powerpc64le-unknown-linux-${LIBC} exit ;; ppcle:Linux:*:*) echo powerpcle-unknown-linux-${LIBC} exit ;; riscv32:Linux:*:* | riscv64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; s390:Linux:*:* | s390x:Linux:*:*) echo ${UNAME_MACHINE}-ibm-linux-${LIBC} exit ;; sh64*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sh*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; sparc:Linux:*:* | sparc64:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; tile*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; vax:Linux:*:*) echo ${UNAME_MACHINE}-dec-linux-${LIBC} exit ;; x86_64:Linux:*:*) echo ${UNAME_MACHINE}-pc-linux-${LIBC} exit ;; xtensa*:Linux:*:*) echo ${UNAME_MACHINE}-unknown-linux-${LIBC} exit ;; i*86:DYNIX/ptx:4*:*) # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. # earlier versions are messed up and put the nodename in both # sysname and nodename. 
echo i386-sequent-sysv4 exit ;; i*86:UNIX_SV:4.2MP:2.*) # Unixware is an offshoot of SVR4, but it has its own version # number series starting with 2... # I am not positive that other SVR4 systems won't match this, # I just have to hope. -- rms. # Use sysv4.2uw... so that sysv4* matches it. echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} exit ;; i*86:OS/2:*:*) # If we were able to find `uname', then EMX Unix compatibility # is probably installed. echo ${UNAME_MACHINE}-pc-os2-emx exit ;; i*86:XTS-300:*:STOP) echo ${UNAME_MACHINE}-unknown-stop exit ;; i*86:atheos:*:*) echo ${UNAME_MACHINE}-unknown-atheos exit ;; i*86:syllable:*:*) echo ${UNAME_MACHINE}-pc-syllable exit ;; i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) echo i386-unknown-lynxos${UNAME_RELEASE} exit ;; i*86:*DOS:*:*) echo ${UNAME_MACHINE}-pc-msdosdjgpp exit ;; i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} else echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} fi exit ;; i*86:*:5:[678]*) # UnixWare 7.x, OpenUNIX and OpenServer 6. case `/bin/uname -X | grep "^Machine"` in *486*) UNAME_MACHINE=i486 ;; *Pentium) UNAME_MACHINE=i586 ;; *Pent*|*Celeron) UNAME_MACHINE=i686 ;; esac echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} exit ;; i*86:*:3.2:*) if test -f /usr/options/cb.name; then UNAME_REL=`sed -n 's/.*Version //p' /dev/null >/dev/null ; then UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ && UNAME_MACHINE=i586 (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ && UNAME_MACHINE=i686 (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ && UNAME_MACHINE=i686 echo ${UNAME_MACHINE}-pc-sco$UNAME_REL else echo ${UNAME_MACHINE}-pc-sysv32 fi exit ;; pc:*:*:*) # Left here for compatibility: # uname -m prints for DJGPP always 'pc', but it prints nothing about # the processor, so we play safe by assuming i586. # Note: whatever this is, it MUST be the same as what config.sub # prints for the "djgpp" host, or else GDB configure will decide that # this is a cross-build. echo i586-pc-msdosdjgpp exit ;; Intel:Mach:3*:*) echo i386-pc-mach3 exit ;; paragon:*:*:*) echo i860-intel-osf1 exit ;; i860:*:4.*:*) # i860-SVR4 if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 else # Add other i860-SVR4 vendors below as they are discovered. 
echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 fi exit ;; mini*:CTIX:SYS*5:*) # "miniframe" echo m68010-convergent-sysv exit ;; mc68k:UNIX:SYSTEM5:3.51m) echo m68k-convergent-sysv exit ;; M680?0:D-NIX:5.3:*) echo m68k-diab-dnix exit ;; M68*:*:R3V[5678]*:*) test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) OS_REL='' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4; exit; } ;; NCR*:*:4.2:* | MPRAS*:*:4.2:*) OS_REL='.3' test -r /etc/.relid \ && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ && { echo i486-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) echo m68k-unknown-lynxos${UNAME_RELEASE} exit ;; mc68030:UNIX_System_V:4.*:*) echo m68k-atari-sysv4 exit ;; TSUNAMI:LynxOS:2.*:*) echo sparc-unknown-lynxos${UNAME_RELEASE} exit ;; rs6000:LynxOS:2.*:*) echo rs6000-unknown-lynxos${UNAME_RELEASE} exit ;; PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) echo powerpc-unknown-lynxos${UNAME_RELEASE} exit ;; SM[BE]S:UNIX_SV:*:*) echo mips-dde-sysv${UNAME_RELEASE} exit ;; RM*:ReliantUNIX-*:*:*) echo mips-sni-sysv4 exit ;; RM*:SINIX-*:*:*) echo mips-sni-sysv4 exit ;; *:SINIX-*:*:*) if uname -p 2>/dev/null >/dev/null ; then UNAME_MACHINE=`(uname -p) 2>/dev/null` echo ${UNAME_MACHINE}-sni-sysv4 else echo ns32k-sni-sysv fi exit ;; PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort # says echo i586-unisys-sysv4 exit ;; *:UNIX_System_V:4*:FTX*) # From Gerald Hewes . # How about differentiating between stratus architectures? -djm echo hppa1.1-stratus-sysv4 exit ;; *:*:*:FTX*) # From seanf@swdc.stratus.com. echo i860-stratus-sysv4 exit ;; i*86:VOS:*:*) # From Paul.Green@stratus.com. echo ${UNAME_MACHINE}-stratus-vos exit ;; *:VOS:*:*) # From Paul.Green@stratus.com. echo hppa1.1-stratus-vos exit ;; mc68*:A/UX:*:*) echo m68k-apple-aux${UNAME_RELEASE} exit ;; news*:NEWS-OS:6*:*) echo mips-sony-newsos6 exit ;; R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) if [ -d /usr/nec ]; then echo mips-nec-sysv${UNAME_RELEASE} else echo mips-unknown-sysv${UNAME_RELEASE} fi exit ;; BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. echo powerpc-be-beos exit ;; BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. echo powerpc-apple-beos exit ;; BePC:BeOS:*:*) # BeOS running on Intel PC compatible. echo i586-pc-beos exit ;; BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
echo i586-pc-haiku exit ;; x86_64:Haiku:*:*) echo x86_64-unknown-haiku exit ;; SX-4:SUPER-UX:*:*) echo sx4-nec-superux${UNAME_RELEASE} exit ;; SX-5:SUPER-UX:*:*) echo sx5-nec-superux${UNAME_RELEASE} exit ;; SX-6:SUPER-UX:*:*) echo sx6-nec-superux${UNAME_RELEASE} exit ;; SX-7:SUPER-UX:*:*) echo sx7-nec-superux${UNAME_RELEASE} exit ;; SX-8:SUPER-UX:*:*) echo sx8-nec-superux${UNAME_RELEASE} exit ;; SX-8R:SUPER-UX:*:*) echo sx8r-nec-superux${UNAME_RELEASE} exit ;; SX-ACE:SUPER-UX:*:*) echo sxace-nec-superux${UNAME_RELEASE} exit ;; Power*:Rhapsody:*:*) echo powerpc-apple-rhapsody${UNAME_RELEASE} exit ;; *:Rhapsody:*:*) echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} exit ;; *:Darwin:*:*) UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown eval $set_cc_for_build if test "$UNAME_PROCESSOR" = unknown ; then UNAME_PROCESSOR=powerpc fi if test `echo "$UNAME_RELEASE" | sed -e 's/\..*//'` -le 10 ; then if [ "$CC_FOR_BUILD" != no_compiler_found ]; then if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ (CCOPTS="" $CC_FOR_BUILD -E - 2>/dev/null) | \ grep IS_64BIT_ARCH >/dev/null then case $UNAME_PROCESSOR in i386) UNAME_PROCESSOR=x86_64 ;; powerpc) UNAME_PROCESSOR=powerpc64 ;; esac fi fi elif test "$UNAME_PROCESSOR" = i386 ; then # Avoid executing cc on OS X 10.9, as it ships with a stub # that puts up a graphical alert prompting to install # developer tools. Any system running Mac OS X 10.7 or # later (Darwin 11 and later) is required to have a 64-bit # processor. This is not true of the ARM version of Darwin # that Apple uses in portable devices. UNAME_PROCESSOR=x86_64 fi echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} exit ;; *:procnto*:*:* | *:QNX:[0123456789]*:*) UNAME_PROCESSOR=`uname -p` if test "$UNAME_PROCESSOR" = x86; then UNAME_PROCESSOR=i386 UNAME_MACHINE=pc fi echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} exit ;; *:QNX:*:4*) echo i386-pc-qnx exit ;; NEO-?:NONSTOP_KERNEL:*:*) echo neo-tandem-nsk${UNAME_RELEASE} exit ;; NSE-*:NONSTOP_KERNEL:*:*) echo nse-tandem-nsk${UNAME_RELEASE} exit ;; NSR-?:NONSTOP_KERNEL:*:*) echo nsr-tandem-nsk${UNAME_RELEASE} exit ;; *:NonStop-UX:*:*) echo mips-compaq-nonstopux exit ;; BS2000:POSIX*:*:*) echo bs2000-siemens-sysv exit ;; DS/*:UNIX_System_V:*:*) echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} exit ;; *:Plan9:*:*) # "uname -m" is not consistent, so use $cputype instead. 386 # is converted to i386 for consistency with other x86 # operating systems. 
if test "$cputype" = 386; then UNAME_MACHINE=i386 else UNAME_MACHINE="$cputype" fi echo ${UNAME_MACHINE}-unknown-plan9 exit ;; *:TOPS-10:*:*) echo pdp10-unknown-tops10 exit ;; *:TENEX:*:*) echo pdp10-unknown-tenex exit ;; KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) echo pdp10-dec-tops20 exit ;; XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) echo pdp10-xkl-tops20 exit ;; *:TOPS-20:*:*) echo pdp10-unknown-tops20 exit ;; *:ITS:*:*) echo pdp10-unknown-its exit ;; SEI:*:*:SEIUX) echo mips-sei-seiux${UNAME_RELEASE} exit ;; *:DragonFly:*:*) echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` exit ;; *:*VMS:*:*) UNAME_MACHINE=`(uname -p) 2>/dev/null` case "${UNAME_MACHINE}" in A*) echo alpha-dec-vms ; exit ;; I*) echo ia64-dec-vms ; exit ;; V*) echo vax-dec-vms ; exit ;; esac ;; *:XENIX:*:SysV) echo i386-pc-xenix exit ;; i*86:skyos:*:*) echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE} | sed -e 's/ .*$//'` exit ;; i*86:rdos:*:*) echo ${UNAME_MACHINE}-pc-rdos exit ;; i*86:AROS:*:*) echo ${UNAME_MACHINE}-pc-aros exit ;; x86_64:VMkernel:*:*) echo ${UNAME_MACHINE}-unknown-esx exit ;; amd64:Isilon\ OneFS:*:*) echo x86_64-unknown-onefs exit ;; esac cat >&2 </dev/null || echo unknown` uname -r = `(uname -r) 2>/dev/null || echo unknown` uname -s = `(uname -s) 2>/dev/null || echo unknown` uname -v = `(uname -v) 2>/dev/null || echo unknown` /usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` /bin/uname -X = `(/bin/uname -X) 2>/dev/null` hostinfo = `(hostinfo) 2>/dev/null` /bin/universe = `(/bin/universe) 2>/dev/null` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` /bin/arch = `(/bin/arch) 2>/dev/null` /usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` /usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` UNAME_MACHINE = ${UNAME_MACHINE} UNAME_RELEASE = ${UNAME_RELEASE} UNAME_SYSTEM = ${UNAME_SYSTEM} UNAME_VERSION = ${UNAME_VERSION} EOF exit 1 # Local variables: # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "timestamp='" # time-stamp-format: "%:y-%02m-%02d" # time-stamp-end: "'" # End: recoll-1.26.3/aspell/0000755000175000017500000000000013570165407011354 500000000000000recoll-1.26.3/aspell/rclaspell.cpp0000644000175000017500000004316413566424763014001 00000000000000/* Copyright (C) 2006-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #ifdef RCL_USE_ASPELL #include ASPELL_INCLUDE #include #include #include #include "dlib.h" #include "pathut.h" #include "execmd.h" #include "rclaspell.h" #include "log.h" #include "unacpp.h" #include "rclutil.h" using namespace std; // Aspell library entry points class AspellApi { public: struct AspellConfig *(*new_aspell_config)(); int (*aspell_config_replace)(struct AspellConfig *, const char * key, const char * value); struct AspellCanHaveError *(*new_aspell_speller)(struct AspellConfig *); void (*delete_aspell_config)(struct AspellConfig *); void (*delete_aspell_can_have_error)(struct AspellCanHaveError *); struct AspellSpeller * (*to_aspell_speller)(struct AspellCanHaveError *); struct AspellConfig * (*aspell_speller_config)(struct AspellSpeller *); const struct AspellWordList * (*aspell_speller_suggest) (struct AspellSpeller *, const char *, int); int (*aspell_speller_check)(struct AspellSpeller *, const char *, int); struct AspellStringEnumeration * (*aspell_word_list_elements) (const struct AspellWordList * ths); const char * (*aspell_string_enumeration_next) (struct AspellStringEnumeration * ths); void (*delete_aspell_string_enumeration)(struct AspellStringEnumeration *); const struct AspellError *(*aspell_error) (const struct AspellCanHaveError *); const char *(*aspell_error_message)(const struct AspellCanHaveError *); const char *(*aspell_speller_error_message)(const struct AspellSpeller *); void (*delete_aspell_speller)(struct AspellSpeller *); }; static AspellApi aapi; static std::mutex o_aapi_mutex; #define NMTOPTR(NM, TP) \ if ((aapi.NM = TP dlib_sym(m_data->m_handle, #NM)) == 0) { \ badnames += #NM + string(" "); \ } static const vector aspell_lib_suffixes { #if defined(__APPLE__) ".15.dylib", ".dylib", #elif defined(_WIN32) "-15.dll", #else ".so", ".so.15", #endif }; // Private rclaspell data class AspellData { public: ~AspellData() { LOGDEB2("~AspellData\n" ); if (m_handle) { dlib_close(m_handle); m_handle = nullptr; } if (m_speller) { // Dumps core if I do this?? //aapi.delete_aspell_speller(m_speller); m_speller = nullptr; LOGDEB2("~AspellData: speller done\n" ); } } void *m_handle{nullptr}; string m_exec; AspellSpeller *m_speller{nullptr}; #ifdef _WIN32 string m_datadir; #endif string m_addCreateParam; }; Aspell::Aspell(const RclConfig *cnf) : m_config(cnf) { } Aspell::~Aspell() { deleteZ(m_data); } bool Aspell::init(string &reason) { std::unique_lock locker(o_aapi_mutex); deleteZ(m_data); // Language: we get this from the configuration, else from the NLS // environment. The aspell language names used for selecting language // definition files (used to create dictionaries) are like en, fr if (!m_config->getConfParam("aspellLanguage", m_lang) || m_lang.empty()) { string lang = "en"; const char *cp; if ((cp = getenv("LC_ALL"))) lang = cp; else if ((cp = getenv("LANG"))) lang = cp; if (!lang.compare("C")) lang = "en"; m_lang = lang.substr(0, lang.find_first_of("_")); if (!m_lang.compare("ja")) { // Aspell has no support for Japanese. We substitute // english, as Japanese users often have texts with // interspersed english words or english texts. 
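The AspellApi structure and the NMTOPTR() macro above implement a small hand-rolled binding: every libaspell entry point is looked up by name at runtime, and the names that fail to resolve are collected into an error string. The following is a minimal, self-contained sketch of that pattern, assuming a POSIX system and plain dlopen()/dlsym() rather than recoll's portable dlib_open()/dlib_sym() wrappers; the two resolved functions and the abridged library-name list are illustrative only.

// Sketch only: resolve a couple of libaspell entry points at runtime and
// accumulate missing symbol names, as NMTOPTR() does into 'badnames'.
#include <dlfcn.h>
#include <string>

struct MiniAspellApi {
    struct AspellConfig *(*new_aspell_config)();
    void (*delete_aspell_config)(struct AspellConfig *);
};

static bool loadMiniApi(MiniAspellApi& api, std::string& reason)
{
    void *handle = nullptr;
    for (const char *lib : {"libaspell.so.15", "libaspell.so"}) {
        if ((handle = dlopen(lib, RTLD_LAZY)) != nullptr)
            break;
    }
    if (handle == nullptr) {
        const char *err = dlerror();
        reason = err ? err : "dlopen failed";
        return false;
    }
    std::string missing;
    api.new_aspell_config = reinterpret_cast<struct AspellConfig *(*)()>(
        dlsym(handle, "new_aspell_config"));
    if (!api.new_aspell_config) missing += "new_aspell_config ";
    api.delete_aspell_config = reinterpret_cast<void (*)(struct AspellConfig *)>(
        dlsym(handle, "delete_aspell_config"));
    if (!api.delete_aspell_config) missing += "delete_aspell_config ";
    if (!missing.empty()) {
        reason = "symbols not found: " + missing;
        return false;
    }
    return true;
}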
Japanese // parts of the text won't be sent to aspell (check // Rcl::Db::isSpellingCandidate()) m_lang = "en"; } } m_data = new AspellData; m_config->getConfParam("aspellAddCreateParam", m_data->m_addCreateParam); #ifdef _WIN32 m_data->m_datadir = path_cat( path_pkgdatadir(), "filters/aspell-installed/mingw32/lib/aspell-0.60"); if (m_data->m_addCreateParam.empty()) { m_data->m_addCreateParam = string("--local-data-dir=") + path_cat(m_config->getConfDir(), "aspell"); } #endif // WIN32 const char *aspell_prog_from_env = getenv("ASPELL_PROG"); if (aspell_prog_from_env && access(aspell_prog_from_env, X_OK) == 0) { m_data->m_exec = aspell_prog_from_env; } #ifdef ASPELL_PROG if (m_data->m_exec.empty()) { string cmd = m_config->findFilter(ASPELL_PROG); LOGDEB("rclaspell::init: findFilter returns " << cmd << endl); if (path_isabsolute(cmd)) { m_data->m_exec.swap(cmd); } } #endif // ASPELL_PROG if (m_data->m_exec.empty()) { ExecCmd::which("aspell", m_data->m_exec); } if (m_data->m_exec.empty()) { reason = "aspell program not found or not executable"; deleteZ(m_data); return false; } // Don't know what with Apple and (DY)LD_LIBRARY_PATH. Does not work // So we look in all ../lib in the PATH... #if defined(__APPLE__) vector path; const char *pp = getenv("PATH"); if (pp) { stringToTokens(pp, path, ":"); } #endif reason = "Could not open shared library "; string libbase("libaspell"); string lib; for (const auto& suff : aspell_lib_suffixes) { lib = libbase + suff; reason += string("[") + lib + "] "; if ((m_data->m_handle = dlib_open(lib)) != 0) { reason.erase(); goto found; } // Above was the normal lookup: let dlopen search the directories. // Also look in other places for Apple and Windows. #if defined(__APPLE__) for (const auto& dir : path) { string lib1 = path_canon(dir + "/../lib/" + lib); if ((m_data->m_handle = dlib_open(lib1)) != 0) { reason.erase(); lib=lib1; goto found; } } #endif #if defined(_WIN32) // Look in the directory of the aspell binary { string bindir = path_getfather(m_data->m_exec); string lib1 = path_cat(bindir, lib); if ((m_data->m_handle = dlib_open(lib1)) != 0) { reason.erase(); lib=lib1; goto found; } } #endif } found: if (m_data->m_handle == 0) { reason += string(" : ") + dlib_error(); deleteZ(m_data); return false; } string badnames; NMTOPTR(new_aspell_config, (struct AspellConfig *(*)())); NMTOPTR(aspell_config_replace, (int (*)(struct AspellConfig *, const char *, const char *))); NMTOPTR(new_aspell_speller, (struct AspellCanHaveError *(*)(struct AspellConfig *))); NMTOPTR(delete_aspell_config, (void (*)(struct AspellConfig *))); NMTOPTR(delete_aspell_can_have_error, (void (*)(struct AspellCanHaveError *))); NMTOPTR(to_aspell_speller, (struct AspellSpeller *(*)(struct AspellCanHaveError *))); NMTOPTR(aspell_speller_config, (struct AspellConfig *(*)(struct AspellSpeller *))); NMTOPTR(aspell_speller_suggest, (const struct AspellWordList *(*)(struct AspellSpeller *, const char *, int))); NMTOPTR(aspell_speller_check, (int (*)(struct AspellSpeller *, const char *, int))); NMTOPTR(aspell_word_list_elements, (struct AspellStringEnumeration *(*) (const struct AspellWordList *))); NMTOPTR(aspell_string_enumeration_next, (const char * (*)(struct AspellStringEnumeration *))); NMTOPTR(delete_aspell_string_enumeration, (void (*)(struct AspellStringEnumeration *))); NMTOPTR(aspell_error, (const struct AspellError*(*)(const struct AspellCanHaveError *))); NMTOPTR(aspell_error_message, (const char *(*)(const struct AspellCanHaveError *))); NMTOPTR(aspell_speller_error_message, (const 
char *(*)(const struct AspellSpeller *))); NMTOPTR(delete_aspell_speller, (void (*)(struct AspellSpeller *))); if (!badnames.empty()) { reason = string("Aspell::init: symbols not found:") + badnames; deleteZ(m_data); return false; } return true; } bool Aspell::ok() const { return m_data != 0 && m_data->m_handle != 0; } string Aspell::dicPath() { string ccdir = m_config->getAspellcacheDir(); return path_cat(ccdir, string("aspdict.") + m_lang + string(".rws")); } // The data source for the create dictionary aspell command. We walk // the term list, filtering out things that are probably not words. // Note that the manual for the current version (0.60) of aspell // states that utf-8 is not well supported, so that we should maybe // also filter all 8bit chars. Info is contradictory, so we only // filter out CJK which is definitely not supported (katakana would // make sense though, but currently no support). class AspExecPv : public ExecCmdProvide { public: string *m_input; // pointer to string used as input buffer to command Rcl::TermIter *m_tit; Rcl::Db &m_db; AspExecPv(string *i, Rcl::TermIter *tit, Rcl::Db &db) : m_input(i), m_tit(tit), m_db(db) {} void newData() { while (m_db.termWalkNext(m_tit, *m_input)) { LOGDEB2("Aspell::buildDict: term: [" << (m_input) << "]\n" ); if (!Rcl::Db::isSpellingCandidate(*m_input)) { LOGDEB2("Aspell::buildDict: SKIP\n" ); continue; } if (!o_index_stripchars) { string lower; if (!unacmaybefold(*m_input, lower, "UTF-8", UNACOP_FOLD)) continue; m_input->swap(lower); } // Got a non-empty sort-of appropriate term, let's send it to // aspell LOGDEB2("Apell::buildDict: SEND\n" ); m_input->append("\n"); return; } // End of data. Tell so. Exec will close cmd. m_input->erase(); } }; bool Aspell::buildDict(Rcl::Db &db, string &reason) { if (!ok()) return false; // We create the dictionary by executing the aspell command: // aspell --lang=[lang] create master [dictApath] string cmdstring(m_data->m_exec); ExecCmd aspell; vector args; args.push_back(string("--lang=")+ m_lang); cmdstring += string(" ") + string("--lang=") + m_lang; args.push_back("--encoding=utf-8"); cmdstring += string(" ") + "--encoding=utf-8"; #ifdef _WIN32 args.push_back(string("--data-dir=") + m_data->m_datadir); #endif if (!m_data->m_addCreateParam.empty()) { args.push_back(m_data->m_addCreateParam); cmdstring += string(" ") + m_data->m_addCreateParam; } args.push_back("create"); cmdstring += string(" ") + "create"; args.push_back("master"); cmdstring += string(" ") + "master"; args.push_back(dicPath()); cmdstring += string(" ") + dicPath(); // Have to disable stderr, as numerous messages about bad strings are // printed. We'd like to keep errors about missing databases though, so // make it configurable for diags bool keepStderr = false; m_config->getConfParam("aspellKeepStderr", &keepStderr); if (!keepStderr) aspell.setStderr("/dev/null"); Rcl::TermIter *tit = db.termWalkOpen(); if (tit == 0) { reason = "termWalkOpen failed\n"; return false; } string termbuf; AspExecPv pv(&termbuf, tit, db); aspell.setProvide(&pv); if (aspell.doexec(m_data->m_exec, args, &termbuf)) { ExecCmd cmd; args.clear(); args.push_back("dicts"); string dicts; bool hasdict = false; if (cmd.doexec(m_data->m_exec, args, 0, &dicts)) { vector vdicts; stringToTokens(dicts, vdicts, "\n\r\t "); if (find(vdicts.begin(), vdicts.end(), m_lang) != vdicts.end()) { hasdict = true; } } if (hasdict) { reason = string("\naspell dictionary creation command [") + cmdstring; reason += string( "] failed. 
Reason unknown.\n" "Try to set aspellKeepStderr = 1 in recoll.conf, and execute \n" "the indexing command in a terminal to see the aspell " "diagnostic output.\n"); } else { reason = string("aspell dictionary creation command failed:\n") + cmdstring + "\n" "One possible reason might be missing language " "data files for lang = " + m_lang + ". Maybe try to execute the command by hand for a better diag."; } return false; } db.termWalkClose(tit); return true; } static const unsigned int ldatadiroptsz = string("--local-data-dir=").size(); bool Aspell::make_speller(string& reason) { if (!ok()) return false; if (m_data->m_speller != 0) return true; AspellCanHaveError *ret; AspellConfig *config = aapi.new_aspell_config(); aapi.aspell_config_replace(config, "lang", m_lang.c_str()); aapi.aspell_config_replace(config, "encoding", "utf-8"); aapi.aspell_config_replace(config, "master", dicPath().c_str()); aapi.aspell_config_replace(config, "sug-mode", "fast"); #ifdef _WIN32 aapi.aspell_config_replace(config, "data-dir", m_data->m_datadir.c_str()); #endif if (m_data->m_addCreateParam.size() > ldatadiroptsz) { aapi.aspell_config_replace( config, "local-data-dir", m_data->m_addCreateParam.substr(ldatadiroptsz).c_str()); } // aapi.aspell_config_replace(config, "sug-edit-dist", "2"); ret = aapi.new_aspell_speller(config); aapi.delete_aspell_config(config); if (aapi.aspell_error(ret) != 0) { reason = aapi.aspell_error_message(ret); aapi.delete_aspell_can_have_error(ret); return false; } m_data->m_speller = aapi.to_aspell_speller(ret); return true; } bool Aspell::check(const string &iterm, string& reason) { LOGDEB("Aspell::check [" << iterm << "]\n"); string mterm(iterm); if (!Rcl::Db::isSpellingCandidate(mterm)) { LOGDEB0("Aspell::check: [" << mterm << " not spelling candidate, return true\n"); return true; } if (!ok() || !make_speller(reason)) return false; if (iterm.empty()) return true; //?? if (!o_index_stripchars) { string lower; if (!unacmaybefold(mterm, lower, "UTF-8", UNACOP_FOLD)) { LOGERR("Aspell::check: cant lowercase input\n"); return false; } mterm.swap(lower); } int ret = aapi.aspell_speller_check(m_data->m_speller, mterm.c_str(), mterm.length()); reason.clear(); switch (ret) { case 0: return false; case 1: return true; default: case -1: reason.append("Aspell error: "); reason.append(aapi.aspell_speller_error_message(m_data->m_speller)); return false; } } bool Aspell::suggest(Rcl::Db &db, const string &_term, list& suggestions, string& reason) { LOGDEB("Aspell::suggest: term [" << _term << "]\n"); if (!ok() || !make_speller(reason)) return false; string mterm(_term); if (mterm.empty()) return true; //?? if (!Rcl::Db::isSpellingCandidate(mterm)) { LOGDEB0("Aspell::suggest: [" << mterm << " not spelling candidate, return empty/true\n"); return true; } if (!o_index_stripchars) { string lower; if (!unacmaybefold(mterm, lower, "UTF-8", UNACOP_FOLD)) { LOGERR("Aspell::check : cant lowercase input\n"); return false; } mterm.swap(lower); } const AspellWordList *wl = aapi.aspell_speller_suggest(m_data->m_speller, mterm.c_str(), mterm.length()); if (wl == 0) { reason = aapi.aspell_speller_error_message(m_data->m_speller); return false; } AspellStringEnumeration *els = aapi.aspell_word_list_elements(wl); const char *word; while ((word = aapi.aspell_string_enumeration_next(els)) != 0) { LOGDEB0("Aspell::suggest: got [" << word << "]\n"); // Check that the word exists in the index (we don't want // aspell computed stuff, only exact terms from the // dictionary). 
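make_speller(), check() and suggest() above drive the aspell C library through the dynamically resolved aapi table. Below is a minimal sketch of the same call sequence for a program linked directly against libaspell, which is an assumption, as are the language and the sample word; the recoll code additionally points the "master" option at the dictionary produced by the aspell create-master run in buildDict().

// Sketch only: create a speller, check one word, print suggestions.
// Build with something like: c++ sketch.cpp -laspell (assumed setup, not part of recoll).
#include <aspell.h>
#include <cstdio>
#include <cstring>

int main()
{
    AspellConfig *config = new_aspell_config();
    aspell_config_replace(config, "lang", "en");
    aspell_config_replace(config, "encoding", "utf-8");

    AspellCanHaveError *ret = new_aspell_speller(config);
    delete_aspell_config(config);
    if (aspell_error(ret) != 0) {
        std::fprintf(stderr, "speller: %s\n", aspell_error_message(ret));
        delete_aspell_can_have_error(ret);
        return 1;
    }
    AspellSpeller *speller = to_aspell_speller(ret);

    const char *word = "recol";
    // 0: not in dictionary, 1: found, -1: error (same codes as in Aspell::check()).
    if (aspell_speller_check(speller, word, (int)std::strlen(word)) == 0) {
        const AspellWordList *wl =
            aspell_speller_suggest(speller, word, (int)std::strlen(word));
        if (wl != 0) {
            AspellStringEnumeration *els = aspell_word_list_elements(wl);
            const char *sug;
            while ((sug = aspell_string_enumeration_next(els)) != 0)
                std::printf("suggestion: %s\n", sug);
            delete_aspell_string_enumeration(els);
        }
    }
    delete_aspell_speller(speller);
    return 0;
}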
We used to also check that it stems // differently from the base word but this is complicated // (stemming on/off + language), so we now leave this to the // caller. if (db.termExists(word)) suggestions.push_back(word); } aapi.delete_aspell_string_enumeration(els); return true; } #endif // RCL_USE_ASPELL recoll-1.26.3/aspell/aspell-local.h0000644000175000017500000006320013303776060014013 00000000000000/* Automatically generated file. Do not edit directly. */ /* This file is part of The New Aspell * Copyright (C) 2001-2002 by Kevin Atkinson under the GNU LGPL * license version 2.0 or 2.1. You should have received a copy of the * LGPL license along with this library if you did not you can find it * at http://www.gnu.org/. */ #ifndef ASPELL_ASPELL__H #define ASPELL_ASPELL__H #ifdef __cplusplus extern "C" { #endif /******************************* type id *******************************/ union AspellTypeId { unsigned int num; char str[4]; }; typedef union AspellTypeId AspellTypeId; /************************** mutable container **************************/ typedef struct AspellMutableContainer AspellMutableContainer; int aspell_mutable_container_add(struct AspellMutableContainer * ths, const char * to_add); int aspell_mutable_container_remove(struct AspellMutableContainer * ths, const char * to_rem); void aspell_mutable_container_clear(struct AspellMutableContainer * ths); struct AspellMutableContainer * aspell_mutable_container_to_mutable_container(struct AspellMutableContainer * ths); /******************************* key info *******************************/ enum AspellKeyInfoType {AspellKeyInfoString, AspellKeyInfoInt, AspellKeyInfoBool, AspellKeyInfoList}; typedef enum AspellKeyInfoType AspellKeyInfoType; struct AspellKeyInfo { /* The name of the key. */ const char * name; /* The key type. */ enum AspellKeyInfoType type; /* The default value of the key. */ const char * def; /* A brief description of the key or NULL if internal value. */ const char * desc; int flags; int other_data; }; typedef struct AspellKeyInfo AspellKeyInfo; /******************************** config ********************************/ typedef struct AspellKeyInfoEnumeration AspellKeyInfoEnumeration; int aspell_key_info_enumeration_at_end(const struct AspellKeyInfoEnumeration * ths); const struct AspellKeyInfo * aspell_key_info_enumeration_next(struct AspellKeyInfoEnumeration * ths); void delete_aspell_key_info_enumeration(struct AspellKeyInfoEnumeration * ths); struct AspellKeyInfoEnumeration * aspell_key_info_enumeration_clone(const struct AspellKeyInfoEnumeration * ths); void aspell_key_info_enumeration_assign(struct AspellKeyInfoEnumeration * ths, const struct AspellKeyInfoEnumeration * other); typedef struct AspellConfig AspellConfig; struct AspellConfig * new_aspell_config(); void delete_aspell_config(struct AspellConfig * ths); struct AspellConfig * aspell_config_clone(const struct AspellConfig * ths); void aspell_config_assign(struct AspellConfig * ths, const struct AspellConfig * other); unsigned int aspell_config_error_number(const struct AspellConfig * ths); const char * aspell_config_error_message(const struct AspellConfig * ths); const struct AspellError * aspell_config_error(const struct AspellConfig * ths); /* Sets extra keys which this config class should * accept. begin and end are expected to point to * the beginning and ending of an array of Aspell * Key Info. 
*/ void aspell_config_set_extra(struct AspellConfig * ths, const struct AspellKeyInfo * begin, const struct AspellKeyInfo * end); /* Returns the KeyInfo object for the * corresponding key or returns NULL and sets * error_num to PERROR_UNKNOWN_KEY if the key is * not valid. The pointer returned is valid for * the lifetime of the object. */ const struct AspellKeyInfo * aspell_config_keyinfo(struct AspellConfig * ths, const char * key); /* Returns a newly allocated enumeration of all * the possible objects this config class uses. */ struct AspellKeyInfoEnumeration * aspell_config_possible_elements(struct AspellConfig * ths, int include_extra); /* Returns the default value for given key which * may involve substituting variables, thus it is * not the same as keyinfo(key)->def returns NULL * and sets error_num to PERROR_UNKNOWN_KEY if * the key is not valid. Uses the temporary * string. */ const char * aspell_config_get_default(struct AspellConfig * ths, const char * key); /* Returns a newly allocated enumeration of all * the key/value pairs. This DOES not include ones * which are set to their default values. */ struct AspellStringPairEnumeration * aspell_config_elements(struct AspellConfig * ths); /* Inserts an item, if the item already exists it * will be replaced. Returns TRUE if it succeeded * or FALSE on error. If the key in not valid it * sets error_num to PERROR_UNKNOWN_KEY, if the * value is not valid it will set error_num to * PERROR_BAD_VALUE, if the value can not be * changed it sets error_num to * PERROR_CANT_CHANGE_VALUE, and if the value is * a list and you are trying to set its directory, * it sets error_num to PERROR_LIST_SET */ int aspell_config_replace(struct AspellConfig * ths, const char * key, const char * value); /* Remove a key and returns TRUE if it exists * otherwise return FALSE. This effectively sets * the key to its default value. Calling replace * with a value of "" will also call * remove. If the key does not exist then it sets * error_num to 0 or PERROR_NOT, if the key is * not valid then it sets error_num to * PERROR_UNKNOWN_KEY, if the value can not be * changed then it sets error_num to * PERROR_CANT_CHANGE_VALUE */ int aspell_config_remove(struct AspellConfig * ths, const char * key); int aspell_config_have(const struct AspellConfig * ths, const char * key); /* Returns NULL on error. */ const char * aspell_config_retrieve(struct AspellConfig * ths, const char * key); int aspell_config_retrieve_list(struct AspellConfig * ths, const char * key, struct AspellMutableContainer * lst); /* Return -1 on error, 0 if false, 1 if true. */ int aspell_config_retrieve_bool(struct AspellConfig * ths, const char * key); /* Return -1 on error. 
*/ int aspell_config_retrieve_int(struct AspellConfig * ths, const char * key); /******************************** error ********************************/ struct AspellError { const char * mesg; const struct AspellErrorInfo * err; }; typedef struct AspellError AspellError; int aspell_error_is_a(const struct AspellError * ths, const struct AspellErrorInfo * e); struct AspellErrorInfo { const struct AspellErrorInfo * isa; const char * mesg; unsigned int num_parms; const char * parms[3]; }; typedef struct AspellErrorInfo AspellErrorInfo; /**************************** can have error ****************************/ typedef struct AspellCanHaveError AspellCanHaveError; unsigned int aspell_error_number(const struct AspellCanHaveError * ths); const char * aspell_error_message(const struct AspellCanHaveError * ths); const struct AspellError * aspell_error(const struct AspellCanHaveError * ths); void delete_aspell_can_have_error(struct AspellCanHaveError * ths); /******************************** errors ********************************/ extern const struct AspellErrorInfo * const aerror_other; extern const struct AspellErrorInfo * const aerror_operation_not_supported; extern const struct AspellErrorInfo * const aerror_cant_copy; extern const struct AspellErrorInfo * const aerror_unimplemented_method; extern const struct AspellErrorInfo * const aerror_file; extern const struct AspellErrorInfo * const aerror_cant_open_file; extern const struct AspellErrorInfo * const aerror_cant_read_file; extern const struct AspellErrorInfo * const aerror_cant_write_file; extern const struct AspellErrorInfo * const aerror_invalid_name; extern const struct AspellErrorInfo * const aerror_bad_file_format; extern const struct AspellErrorInfo * const aerror_dir; extern const struct AspellErrorInfo * const aerror_cant_read_dir; extern const struct AspellErrorInfo * const aerror_config; extern const struct AspellErrorInfo * const aerror_unknown_key; extern const struct AspellErrorInfo * const aerror_cant_change_value; extern const struct AspellErrorInfo * const aerror_bad_key; extern const struct AspellErrorInfo * const aerror_bad_value; extern const struct AspellErrorInfo * const aerror_duplicate; extern const struct AspellErrorInfo * const aerror_key_not_string; extern const struct AspellErrorInfo * const aerror_key_not_int; extern const struct AspellErrorInfo * const aerror_key_not_bool; extern const struct AspellErrorInfo * const aerror_key_not_list; extern const struct AspellErrorInfo * const aerror_no_value_reset; extern const struct AspellErrorInfo * const aerror_no_value_enable; extern const struct AspellErrorInfo * const aerror_no_value_disable; extern const struct AspellErrorInfo * const aerror_no_value_clear; extern const struct AspellErrorInfo * const aerror_language_related; extern const struct AspellErrorInfo * const aerror_unknown_language; extern const struct AspellErrorInfo * const aerror_unknown_soundslike; extern const struct AspellErrorInfo * const aerror_language_not_supported; extern const struct AspellErrorInfo * const aerror_no_wordlist_for_lang; extern const struct AspellErrorInfo * const aerror_mismatched_language; extern const struct AspellErrorInfo * const aerror_affix; extern const struct AspellErrorInfo * const aerror_corrupt_affix; extern const struct AspellErrorInfo * const aerror_invalid_cond; extern const struct AspellErrorInfo * const aerror_invalid_cond_strip; extern const struct AspellErrorInfo * const aerror_incorrect_encoding; extern const struct AspellErrorInfo * const aerror_encoding; 
extern const struct AspellErrorInfo * const aerror_unknown_encoding; extern const struct AspellErrorInfo * const aerror_encoding_not_supported; extern const struct AspellErrorInfo * const aerror_conversion_not_supported; extern const struct AspellErrorInfo * const aerror_pipe; extern const struct AspellErrorInfo * const aerror_cant_create_pipe; extern const struct AspellErrorInfo * const aerror_process_died; extern const struct AspellErrorInfo * const aerror_bad_input; extern const struct AspellErrorInfo * const aerror_invalid_string; extern const struct AspellErrorInfo * const aerror_invalid_word; extern const struct AspellErrorInfo * const aerror_invalid_affix; extern const struct AspellErrorInfo * const aerror_inapplicable_affix; extern const struct AspellErrorInfo * const aerror_unknown_unichar; extern const struct AspellErrorInfo * const aerror_word_list_flags; extern const struct AspellErrorInfo * const aerror_invalid_flag; extern const struct AspellErrorInfo * const aerror_conflicting_flags; extern const struct AspellErrorInfo * const aerror_version_control; extern const struct AspellErrorInfo * const aerror_bad_version_string; extern const struct AspellErrorInfo * const aerror_filter; extern const struct AspellErrorInfo * const aerror_cant_dlopen_file; extern const struct AspellErrorInfo * const aerror_empty_filter; extern const struct AspellErrorInfo * const aerror_no_such_filter; extern const struct AspellErrorInfo * const aerror_confusing_version; extern const struct AspellErrorInfo * const aerror_bad_version; extern const struct AspellErrorInfo * const aerror_identical_option; extern const struct AspellErrorInfo * const aerror_options_only; extern const struct AspellErrorInfo * const aerror_invalid_option_modifier; extern const struct AspellErrorInfo * const aerror_cant_describe_filter; extern const struct AspellErrorInfo * const aerror_filter_mode_file; extern const struct AspellErrorInfo * const aerror_mode_option_name; extern const struct AspellErrorInfo * const aerror_no_filter_to_option; extern const struct AspellErrorInfo * const aerror_bad_mode_key; extern const struct AspellErrorInfo * const aerror_expect_mode_key; extern const struct AspellErrorInfo * const aerror_mode_version_requirement; extern const struct AspellErrorInfo * const aerror_confusing_mode_version; extern const struct AspellErrorInfo * const aerror_bad_mode_version; extern const struct AspellErrorInfo * const aerror_missing_magic_expression; extern const struct AspellErrorInfo * const aerror_empty_file_ext; extern const struct AspellErrorInfo * const aerror_filter_mode_expand; extern const struct AspellErrorInfo * const aerror_unknown_mode; extern const struct AspellErrorInfo * const aerror_mode_extend_expand; extern const struct AspellErrorInfo * const aerror_filter_mode_magic; extern const struct AspellErrorInfo * const aerror_file_magic_pos; extern const struct AspellErrorInfo * const aerror_file_magic_range; extern const struct AspellErrorInfo * const aerror_missing_magic; extern const struct AspellErrorInfo * const aerror_bad_magic; extern const struct AspellErrorInfo * const aerror_expression; extern const struct AspellErrorInfo * const aerror_invalid_expression; /******************************* speller *******************************/ typedef struct AspellSpeller AspellSpeller; struct AspellCanHaveError * new_aspell_speller(struct AspellConfig * config); struct AspellSpeller * to_aspell_speller(struct AspellCanHaveError * obj); void delete_aspell_speller(struct AspellSpeller * ths); unsigned int 
aspell_speller_error_number(const struct AspellSpeller * ths); const char * aspell_speller_error_message(const struct AspellSpeller * ths); const struct AspellError * aspell_speller_error(const struct AspellSpeller * ths); struct AspellConfig * aspell_speller_config(struct AspellSpeller * ths); /* Returns 0 if it is not in the dictionary, * 1 if it is, or -1 on error. */ int aspell_speller_check(struct AspellSpeller * ths, const char * word, int word_size); /* Add this word to your own personal word list. */ int aspell_speller_add_to_personal(struct AspellSpeller * ths, const char * word, int word_size); /* Add this word to the current spelling session. */ int aspell_speller_add_to_session(struct AspellSpeller * ths, const char * word, int word_size); /* This is your own personal word list file plus * any extra words added during this session to * your own personal word list. */ const struct AspellWordList * aspell_speller_personal_word_list(struct AspellSpeller * ths); /* This is a list of words added to this session * that are not in the main word list or in your * own personal list but are considered valid for * this spelling session. */ const struct AspellWordList * aspell_speller_session_word_list(struct AspellSpeller * ths); /* This is the main list of words used during this * spelling session. */ const struct AspellWordList * aspell_speller_main_word_list(struct AspellSpeller * ths); int aspell_speller_save_all_word_lists(struct AspellSpeller * ths); int aspell_speller_clear_session(struct AspellSpeller * ths); /* Return NULL on error. * The word list returned by suggest is only * valid until the next call to suggest. */ const struct AspellWordList * aspell_speller_suggest(struct AspellSpeller * ths, const char * word, int word_size); int aspell_speller_store_replacement(struct AspellSpeller * ths, const char * mis, int mis_size, const char * cor, int cor_size); /******************************** filter ********************************/ typedef struct AspellFilter AspellFilter; void delete_aspell_filter(struct AspellFilter * ths); unsigned int aspell_filter_error_number(const struct AspellFilter * ths); const char * aspell_filter_error_message(const struct AspellFilter * ths); const struct AspellError * aspell_filter_error(const struct AspellFilter * ths); struct AspellFilter * to_aspell_filter(struct AspellCanHaveError * obj); /*************************** document checker ***************************/ struct AspellToken { unsigned int offset; unsigned int len; }; typedef struct AspellToken AspellToken; typedef struct AspellDocumentChecker AspellDocumentChecker; void delete_aspell_document_checker(struct AspellDocumentChecker * ths); unsigned int aspell_document_checker_error_number(const struct AspellDocumentChecker * ths); const char * aspell_document_checker_error_message(const struct AspellDocumentChecker * ths); const struct AspellError * aspell_document_checker_error(const struct AspellDocumentChecker * ths); /* Creates a new document checker. * The speller class is expected to last until * this class is destroyed. * If config is given it will be used to override * any relevent options set by this speller class. * The config class is not once this function is done. * If filter is given then it will take ownership of * the filter class and use it to do the filtering. * You are expected to free the checker when done. 
*/ struct AspellCanHaveError * new_aspell_document_checker(struct AspellSpeller * speller); struct AspellDocumentChecker * to_aspell_document_checker(struct AspellCanHaveError * obj); /* Reset the internal state of the filter. * Should be called whenever a new document is * being filtered. */ void aspell_document_checker_reset(struct AspellDocumentChecker * ths); /* Process a string. * The string passed in should only be split on * white space characters. Furthermore, between * calls to reset, each string should be passed * in exactly once and in the order they appeared * in the document. Passing in strings out of * order, skipping strings or passing them in * more than once may lead to undefined results. */ void aspell_document_checker_process(struct AspellDocumentChecker * ths, const char * str, int size); /* Returns the next misspelled word in the * processed string. If there are no more * misspelled words, then token.word will be * NULL and token.size will be 0 */ struct AspellToken aspell_document_checker_next_misspelling(struct AspellDocumentChecker * ths); /* Returns the underlying filter class. */ struct AspellFilter * aspell_document_checker_filter(struct AspellDocumentChecker * ths); /****************************** word list ******************************/ typedef struct AspellWordList AspellWordList; int aspell_word_list_empty(const struct AspellWordList * ths); unsigned int aspell_word_list_size(const struct AspellWordList * ths); struct AspellStringEnumeration * aspell_word_list_elements(const struct AspellWordList * ths); /************************** string enumeration **************************/ typedef struct AspellStringEnumeration AspellStringEnumeration; void delete_aspell_string_enumeration(struct AspellStringEnumeration * ths); struct AspellStringEnumeration * aspell_string_enumeration_clone(const struct AspellStringEnumeration * ths); void aspell_string_enumeration_assign(struct AspellStringEnumeration * ths, const struct AspellStringEnumeration * other); int aspell_string_enumeration_at_end(const struct AspellStringEnumeration * ths); const char * aspell_string_enumeration_next(struct AspellStringEnumeration * ths); /********************************* info *********************************/ struct AspellModuleInfo { const char * name; double order_num; const char * lib_dir; struct AspellStringList * dict_dirs; struct AspellStringList * dict_exts; }; typedef struct AspellModuleInfo AspellModuleInfo; struct AspellDictInfo { /* The Name to identify this dictionary by. */ const char * name; /* The language code to identify this dictionary. * A two letter UPPER-CASE ISO 639 language code * and an optional two letter ISO 3166 country * code after a dash or underscore. */ const char * code; /* Any extra information to distinguish this * variety of dictionary from other dictionaries * which may have the same language and size. */ const char * jargon; int size; /* A two char digit code describing the size of * the dictionary: 10=tiny, 20=really small, * 30=small, 40=med-small, 50=med, 60=med-large, * 70=large, 80=huge, 90=insane. Please check * the README in aspell-lang-200?????.tar.bz2 or * see SCOWL (http://wordlist.sourceforge.net) * for an example of how these sizes are used. 
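The comments above spell out the document-checker protocol: reset once per document, pass each chunk of text exactly once and in order, then iterate over misspellings until a zero-length token is returned. A minimal sketch of that loop follows, again assuming direct linking with libaspell and an already constructed AspellSpeller* (for instance the one from the earlier speller sketch); the function name is illustrative.

// Sketch only: report misspelled words in one chunk of text.
#include <aspell.h>
#include <cstdio>
#include <cstring>

static void checkChunk(AspellSpeller *speller, const char *text)
{
    AspellCanHaveError *ret = new_aspell_document_checker(speller);
    if (aspell_error(ret) != 0) {
        std::fprintf(stderr, "checker: %s\n", aspell_error_message(ret));
        delete_aspell_can_have_error(ret);
        return;
    }
    AspellDocumentChecker *checker = to_aspell_document_checker(ret);

    // One reset per document, then each chunk is passed exactly once, in order.
    aspell_document_checker_reset(checker);
    aspell_document_checker_process(checker, text, (int)std::strlen(text));

    AspellToken token;
    // A zero-length token marks the end of the misspellings for this chunk.
    while ((token = aspell_document_checker_next_misspelling(checker)).len != 0) {
        std::fprintf(stderr, "misspelled: %.*s\n",
                     (int)token.len, text + token.offset);
    }
    delete_aspell_document_checker(checker);
}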
*/ const char * size_str; struct AspellModuleInfo * module; }; typedef struct AspellDictInfo AspellDictInfo; typedef struct AspellModuleInfoList AspellModuleInfoList; struct AspellModuleInfoList * get_aspell_module_info_list(struct AspellConfig * config); int aspell_module_info_list_empty(const struct AspellModuleInfoList * ths); unsigned int aspell_module_info_list_size(const struct AspellModuleInfoList * ths); struct AspellModuleInfoEnumeration * aspell_module_info_list_elements(const struct AspellModuleInfoList * ths); typedef struct AspellDictInfoList AspellDictInfoList; struct AspellDictInfoList * get_aspell_dict_info_list(struct AspellConfig * config); int aspell_dict_info_list_empty(const struct AspellDictInfoList * ths); unsigned int aspell_dict_info_list_size(const struct AspellDictInfoList * ths); struct AspellDictInfoEnumeration * aspell_dict_info_list_elements(const struct AspellDictInfoList * ths); typedef struct AspellModuleInfoEnumeration AspellModuleInfoEnumeration; int aspell_module_info_enumeration_at_end(const struct AspellModuleInfoEnumeration * ths); const struct AspellModuleInfo * aspell_module_info_enumeration_next(struct AspellModuleInfoEnumeration * ths); void delete_aspell_module_info_enumeration(struct AspellModuleInfoEnumeration * ths); struct AspellModuleInfoEnumeration * aspell_module_info_enumeration_clone(const struct AspellModuleInfoEnumeration * ths); void aspell_module_info_enumeration_assign(struct AspellModuleInfoEnumeration * ths, const struct AspellModuleInfoEnumeration * other); typedef struct AspellDictInfoEnumeration AspellDictInfoEnumeration; int aspell_dict_info_enumeration_at_end(const struct AspellDictInfoEnumeration * ths); const struct AspellDictInfo * aspell_dict_info_enumeration_next(struct AspellDictInfoEnumeration * ths); void delete_aspell_dict_info_enumeration(struct AspellDictInfoEnumeration * ths); struct AspellDictInfoEnumeration * aspell_dict_info_enumeration_clone(const struct AspellDictInfoEnumeration * ths); void aspell_dict_info_enumeration_assign(struct AspellDictInfoEnumeration * ths, const struct AspellDictInfoEnumeration * other); /***************************** string list *****************************/ typedef struct AspellStringList AspellStringList; struct AspellStringList * new_aspell_string_list(); int aspell_string_list_empty(const struct AspellStringList * ths); unsigned int aspell_string_list_size(const struct AspellStringList * ths); struct AspellStringEnumeration * aspell_string_list_elements(const struct AspellStringList * ths); int aspell_string_list_add(struct AspellStringList * ths, const char * to_add); int aspell_string_list_remove(struct AspellStringList * ths, const char * to_rem); void aspell_string_list_clear(struct AspellStringList * ths); struct AspellMutableContainer * aspell_string_list_to_mutable_container(struct AspellStringList * ths); void delete_aspell_string_list(struct AspellStringList * ths); struct AspellStringList * aspell_string_list_clone(const struct AspellStringList * ths); void aspell_string_list_assign(struct AspellStringList * ths, const struct AspellStringList * other); /****************************** string map ******************************/ typedef struct AspellStringMap AspellStringMap; struct AspellStringMap * new_aspell_string_map(); int aspell_string_map_add(struct AspellStringMap * ths, const char * to_add); int aspell_string_map_remove(struct AspellStringMap * ths, const char * to_rem); void aspell_string_map_clear(struct AspellStringMap * ths); struct AspellMutableContainer 
* aspell_string_map_to_mutable_container(struct AspellStringMap * ths); void delete_aspell_string_map(struct AspellStringMap * ths); struct AspellStringMap * aspell_string_map_clone(const struct AspellStringMap * ths); void aspell_string_map_assign(struct AspellStringMap * ths, const struct AspellStringMap * other); int aspell_string_map_empty(const struct AspellStringMap * ths); unsigned int aspell_string_map_size(const struct AspellStringMap * ths); struct AspellStringPairEnumeration * aspell_string_map_elements(const struct AspellStringMap * ths); /* Insert a new element. * Will NOT overwrite an existing entry. * Returns FALSE if the element already exists. */ int aspell_string_map_insert(struct AspellStringMap * ths, const char * key, const char * value); /* Insert a new element. * Will overwrite an existing entry. * Always returns TRUE. */ int aspell_string_map_replace(struct AspellStringMap * ths, const char * key, const char * value); /* Looks up an element and returns the value. * Returns NULL if the element does not exist. * Returns an empty string if the element exists * but has a NULL value. */ const char * aspell_string_map_lookup(const struct AspellStringMap * ths, const char * key); /***************************** string pair *****************************/ struct AspellStringPair { const char * first; const char * second; }; typedef struct AspellStringPair AspellStringPair; /*********************** string pair enumeration ***********************/ typedef struct AspellStringPairEnumeration AspellStringPairEnumeration; int aspell_string_pair_enumeration_at_end(const struct AspellStringPairEnumeration * ths); struct AspellStringPair aspell_string_pair_enumeration_next(struct AspellStringPairEnumeration * ths); void delete_aspell_string_pair_enumeration(struct AspellStringPairEnumeration * ths); struct AspellStringPairEnumeration * aspell_string_pair_enumeration_clone(const struct AspellStringPairEnumeration * ths); void aspell_string_pair_enumeration_assign(struct AspellStringPairEnumeration * ths, const struct AspellStringPairEnumeration * other); /******************************** cache ********************************/ /* Reset the global cache(s) so that cache queries will * create a new object. If existing objects are still in * use they are not deleted. If which is NULL then all * caches will be reset. Current caches are "encode", * "decode", "dictionary", "language", and "keyboard". */ int aspell_reset_cache(const char * which); #ifdef __cplusplus } #endif #endif /* ASPELL_ASPELL__H */ recoll-1.26.3/aspell/rclaspell.h0000644000175000017500000000505113566424763013437 00000000000000/* Copyright (C) 2006 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _RCLASPELL_H_INCLUDED_ #define _RCLASPELL_H_INCLUDED_ /* autoconfig.h must be included before this file */ #ifdef RCL_USE_ASPELL /** * Aspell speller interface class. * * Aspell is used to let the user find about spelling variations that may * exist in the document set for a given word. * A specific aspell dictionary is created out of all the terms in the * xapian index, and we then use it to expand a term to spelling neighbours. * We use the aspell C api for term expansion, but have * to execute the program to create dictionaries. */ #include #include #include "rclconfig.h" #include "rcldb.h" class AspellData; class Aspell { public: Aspell(const RclConfig *cnf); ~Aspell(); /** Check health */ bool ok() const; /** Find the aspell command and shared library, init function pointers */ bool init(std::string &reason); /** Build dictionary out of index term list. This is done at the end * of an indexing pass. */ bool buildDict(Rcl::Db &db, std::string &reason); /** Check that word is in dictionary. Note that this would mean * that the EXACT word is: aspell just does a lookup, no * grammatical, case or diacritics magic of any kind * * @return true if word in dic, false if not. reason.size() -> error */ bool check(const std::string& term, std::string& reason); /** Return a list of possible expansions for a given word */ bool suggest(Rcl::Db &db, const std::string& term, std::list &suggestions, std::string &reason); private: std::string dicPath(); const RclConfig *m_config; std::string m_lang; AspellData *m_data{nullptr}; bool make_speller(std::string& reason); }; #endif /* RCL_USE_ASPELL */ #endif /* _RCLASPELL_H_INCLUDED_ */ recoll-1.26.3/ChangeLog0000644000175000017500000121412213303776060011566 000000000000002010-02-02 15:33 +0100 Jean-Francois Dockes (d11da0283f03 [tip]) * src/common/textsplit.cpp, src/common/textsplit.h, src/query/plaintorich.cpp, src/query/recollq.cpp, src/query/wasatorcl.cpp, src/rcldb/rcldb.cpp, src/rcldb/searchdata.cpp, src/rcldb/stoplist.cpp, src/rcldb/stoplist.h: cosmetics: use derived class for actual splitter instead of callback 2010-02-02 10:24 +0100 Jean-Francois Dockes (a8caf709bcd3) * src/qt4gui/rclmain.ui, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp: QT GUI: define accelerators for res list page movements 2010-02-02 08:20 +0100 Jean-Francois Dockes (ec31e285a553) * src/qtgui/reslist.cpp, src/query/reslistpager.h: Qt GUI: ensure that new page size is taken into account ASAP (no need for restarting app) 2010-02-01 17:51 +0100 Jean-Francois Dockes (db953bb94c7f) * src/qt4gui/rclmain.ui, src/qtgui/preview_w.cpp, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h: QT GUI: add fullscreen mode 2010-02-01 10:31 +0100 Jean-Francois Dockes (1eda55ae3be9) * src/mk/manifest.txt: new file. * src/excludefile, src/makesrcdist.sh, src/mk/manifest.txt: Making a source dist: check the new list against old reference + other checks 2010-01-31 19:53 +0100 Israel G. Lugo (74d4e25d43c2) * src/recollinstall.in: Install recollq and its manpage when in cmdline mode. Don't install the recoll.1 manpage when in cmdline mode. 
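Tying the pieces together, the Aspell class declared in rclaspell.h above is driven in a fixed order: init() loads the shared library and locates the aspell program, buildDict() is run once at the end of an indexing pass to create the aspdict.[lang].rws dictionary, and check()/suggest() are then used at query time. The sketch below shows that sequence under stated assumptions: it is not part of the distribution, 'config' and 'db' stand for an already initialized RclConfig and an open Rcl::Db, the function name is illustrative, and it only applies when recoll is built with RCL_USE_ASPELL.

// Sketch only: expand a query term to spelling neighbours found in the index.
#include <cstdio>
#include <list>
#include <string>
#include "rclconfig.h"
#include "rcldb.h"
#include "rclaspell.h"

static void spellingNeighbours(const RclConfig *config, Rcl::Db& db,
                               const std::string& term)
{
    Aspell speller(config);
    std::string reason;
    if (!speller.init(reason) || !speller.ok()) {
        std::fprintf(stderr, "aspell init failed: %s\n", reason.c_str());
        return;
    }
    // buildDict() is normally run once, at the end of an indexing pass:
    // speller.buildDict(db, reason);

    std::list<std::string> suggestions;
    if (speller.suggest(db, term, suggestions, reason)) {
        for (const auto& s : suggestions)
            std::fprintf(stderr, "  %s\n", s.c_str());
    } else {
        std::fprintf(stderr, "suggest failed: %s\n", reason.c_str());
    }
}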
2010-01-31 19:47 +0100 Jean-Francois Dockes (c88b0ef40512) * src/common/autoconfig.h.in: use 3-arg version of ac_define as the 1-arg one is being obsoleted 2010-01-31 19:45 +0100 Jean-Francois Dockes (1960435ccb68) * src/configure.ac: Dispense with the x11-monitoring when neither fam nor inotify are configured 2010-01-31 19:35 +0100 Jean-Francois Dockes (08e6abfc5fdf) * src/configure.ac: use 3-arg version of ac_define as the 1-arg one is being obsoleted 2010-01-31 19:34 +0100 Jean-Francois Dockes (c0add9dd8ad4) * website/download.html: none 2010-01-30 17:47 +0100 Jean-Francois Dockes (5ed138ff2230) * src/qtgui/spell_w.cpp, src/qtgui/spell_w.h: QT GUI: fix small problems in newly native qt4 term expander 2010-01-30 17:31 +0100 Jean-Francois Dockes (6ecf959a8e01) * src/qt4gui/spell.ui: new file. * src/qt4gui/recollmain.ui: deleted file. * .hgignore, src/qt4gui/recollmain.ui, src/qt4gui/spell.ui, src/qt4gui/uifrom3, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h: QT GUI: converted the qt4 term expander dialog to native qt4 2010-01-30 14:09 +0100 Jean-Francois Dockes (df8a91aaff88) * src/qt4gui/rclmain.ui: new file. * .hgignore, src/qt4gui/rclmain.ui, src/qt4gui/uifrom3, src/qtgui/confgui/confguiindex.h, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h: Qt GUI: made the Qt4 main window native Qt4 (no more use of Q3MainWindow) 2010-01-30 08:23 +0100 Jean-Francois Dockes (ed18703563b7) * .hgignore, src/ChangeLog, website/BUGS.html, website/devel.html, website/doc.html, website/download.html, website/index.html.en: none 2010-01-30 08:23 +0100 Jean-Francois Dockes (22044a3b2e2c) * src/qtgui/rclmain_w.h: Qt 4.6.1 Uic bug: change qt version test from == to <= as bug still here in 4.6.2 2010-01-30 08:21 +0100 Jean-Francois Dockes (b1cb8c664953) * src/common/autoconfig.h.in, src/configure, src/configure.ac, src/index/recollindex.cpp, src/utils/x11mon.cpp: Renamed WITHOUT_X11 to DISABLE_X11MON for clarification 2010-01-30 08:18 +0100 Israel G. Lugo (be03b72e1258) * src/configure.ac: Rename option without-gui to disable-qtgui. New option disable- x11mon. Separate control of creation of the gui and X11 session monitoring. 2010-01-29 19:00 +0100 Jean-Francois Dockes (d95c21312a15) * src/kde/kioslave/recoll/CMakeLists.txt: KIO slave: fixed CMakeList to configure Recoll with --enable-pic 2010-01-29 17:22 +0100 Jean-Francois Dockes (266941720a99) * src/python/README.txt: new file. * src/configure, src/configure.ac, src/doc/user/usermanual.sgml, src/lib/Makefile, src/lib/mkMake, src/mk/commondefs, src/mk/localdefs.in, src/php/00README.txt, src/python/README.txt, src/python/recoll/setup.py: Implemented configure --enable-pic flag to build the main lib with position-independant objects. This avoids having to edit localdefs by hand to build the new php extension, and voids the need for the Python module to recompile Recoll source files. 2010-01-29 15:47 +0100 Jean-Francois Dockes (69c42078b8d3) * src/php/00README.txt: new file. * src/php/00README.txt, src/php/recoll/make.sh, src/php/recoll/recoll.cpp: PHP extension by Wenqiang Song : make ready for external use. - added minimal doc - fixed build script to work around php/libtool issue - have the module default to Query Language (instead of AND) 2010-01-28 18:22 +0100 Jean-Francois Dockes (45e7ec5e16c5) * .hgignore, website/usermanual/README-dir.txt: new file. 
* packaging/debian/changelog, packaging/debian/compat, packaging/debian/control, packaging/debian/copyright, packaging/debian/docs, packaging/debian/menu, packaging/debian/rules, packaging/debian/watch: deleted file. * .hgignore, packaging/debian/changelog, packaging/debian/compat, packaging/debian/control, packaging/debian/copyright, packaging/debian/docs, packaging/debian/menu, packaging/debian/rules, packaging/debian/watch, src/makesrcdist.sh, website/usermanual/README-dir.txt: svn->mercurial modifications 2010-01-28 16:13 +0000 convert-repo (e85c82d42126) * .hgtags: new file. * .hgtags: update tags 2010-01-26 13:23 +0000 dockes (c0cb63a2702a) * last before trial switch to mercurial. really. Yeah 2010-01-26 13:22 +0000 dockes (c40e044c63dd) * tests/chm/chm.sh, tests/chm/chm.txt, tests/ics/ics.sh, tests/ics/ics.txt, tests/zip/mcKee.zip, tests/zip/zip.sh, tests/zip/zip.txt, website/download-1.12.html: new file. * tests/chm/chm.sh, tests/chm/chm.txt, tests/ics/ics.sh, tests/ics/ics.txt, tests/zip/mcKee.zip, tests/zip/zip.sh, tests/zip/zip.txt, website/download-1.12.html: last before trial switch to mercurial. really 2010-01-26 13:21 +0000 dockes (7918f7073757) * website/BUGS.html, website/CHANGES.html, website/download.html, website/index.html.en, website/index.html.fr: last before trial switch to mercurial 2010-01-26 07:06 +0000 dockes (0b5ec08c2ba2) * src/INSTALL, src/README: 2010-01-26 07:06 +0000 dockes (f6a420527382) * src/VERSION: 1.13.02 2010-01-26 06:50 +0000 dockes (b223f221578a [RECOLL_1_13_02]) * src/doc/user/usermanual.sgml: clarified --prefix et al 2010-01-25 20:43 +0000 dockes (7d69ae778654) * src/qt4gui/ui_rclmain.h-4.5: new file. * src/qt4gui/ui_rclmain.h-4.5, src/qtgui/rclmain_w.h: use older ui include file under qt 4.6.1, the one its uic generates is broken 2010-01-25 11:08 +0000 dockes (e2e5a1dd802d) * src/php/recoll/recollq.h: deleted file. * src/php/recoll/recollq.h: not used? 2010-01-25 11:06 +0000 dockes (1683475297c1) * src/php/recoll/config.m4, src/php/recoll/make.sh, src/php/recoll/php_recoll.h, src/php/recoll/recoll.cpp, src/php/recoll/recollq.h, src/php/sample/shell.php: new file. * src/php/recoll/config.m4, src/php/recoll/make.sh, src/php/recoll/php_recoll.h, src/php/recoll/recoll.cpp, src/php/recoll/recollq.h, src/php/sample/shell.php: initial import from W. Song 2010-01-20 07:42 +0000 dockes (4df8ebfbb72d) * packaging/rpm/recoll.spec, packaging/rpm/recollfedora.spec, packaging/rpm/recollfedora10.spec, packaging/rpm/recollmdk.spec: change mail address 2010-01-10 10:18 +0000 dockes (1c62f24a5ca4) * packaging/rpm/recollfedora.spec: updated for fc12: depend on xapian-core, use qt4 2010-01-07 15:20 +0000 dockes (01eb4176400c) * src/query/recollq.cpp: add option to print abstracts 2010-01-07 08:42 +0000 dockes (a41bbccff862) * src/VERSION: 1.13.01 2010-01-07 08:41 +0000 dockes (dde7b27846ef) * src/Makefile.in: distclean removes rclexecm.pyc 2010-01-07 08:34 +0000 dockes (324bea9902a4) * src/qtgui/main.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h: moved initial db open and possible message boxes from main.cpp to rclmain_w.cpp first post-init job to avoid random crashes apparently related to the dialogs being created before app.exec(). Exact cause not certain, but crashes gone... 
2010-01-07 08:29 +0000  dockes    (0629e02f12fe)

	* src/rcldb/searchdata.cpp:
	field values were not used in case term expansion was not performed
	(phrase or capitalized term)

2010-01-06 13:29 +0000  dockes    (e10bbaeefab5)

	* src/kde/kioslave/recoll/htmlif.cpp, src/query/recollq.cpp,
	src/query/xadump.cpp:
	adapt kio and recollq to the new internfile interface

2010-01-06 13:06 +0000  dockes    (0f2378be2603)

	* src/kde/kioslave/recoll/CMakeLists.txt:
	add libz

2010-01-05 15:00 +0000  dockes    (1ca577447878)

	* src/utils/closefrom.cpp, src/utils/fstreewalk.cpp,
	src/utils/fstreewalk.h, tests/Maildir/Maildir.txt,
	tests/andor/andor.sh, tests/andor/andor.txt, tests/cjk/cjk.sh,
	tests/cjk/cjk.txt, tests/mail/mail.txt, tests/msword/msword.txt,
	tests/txt/txt.txt, website/download.html:
	1.13 tests txt mods + solaris port (FNM_LEADING_DIR)

2010-01-05 13:27 +0000  dockes    (0ab6a2dfc2c3)

	* website/BUGS.html, website/CHANGES.html, website/copydocs,
	website/credits.html, website/download.html, website/features.html,
	website/index.html.en, website/index.html.fr:
	web update for 1.13

2010-01-05 07:14 +0000  dockes    (cb08729afcd2)

	* src/INSTALL, src/README:
	

2010-01-05 07:14 +0000  dockes    (a1ba9ba640f7)

	* src/VERSION, src/common/rclconfig.cpp, src/doc/man/recoll.conf.5,
	src/doc/user/usermanual.sgml:
	1.13.00: fixed doc orthographic typos

2009-12-31 08:20 +0000  dockes    (c2ae39772161)

	* src/INSTALL, src/README:
	

2009-12-31 08:15 +0000  dockes    (851e5b82f3d5)

	* src/VERSION:
	1.13.0

2009-12-31 08:15 +0000  dockes    (04512125010e)

	* src/recollinstall.in:
	handle --without-gui config inside recollinstall.in

2009-12-20 14:31 +0000  dockes    (2cbda11286c5)

	* src/configure, src/configure.ac:
	typo in WIHOUT_X11

2009-12-17 20:23 +0000  dockes    (95eb8a010525)

	* src/doc/user/usermanual.sgml:
	There was an error in the mimemap format in the config example

2009-12-14 10:33 +0000  dockes    (1e774739395e)

	* src/INSTALL, src/README:
	

2009-12-14 10:33 +0000  dockes    (49cdfe826199)

	* src/ChangeLog:
	daily snapshot

2009-12-14 10:23 +0000  dockes    (437be900fa14)

	* src/doc/user/Makefile, src/doc/user/usermanual.sgml:
	add --enable-camelcase doc + fix typo in doc Makefile comment

2009-12-14 10:10 +0000  dockes    (009ed00592fd)

	* src/common/autoconfig.h.in, src/common/textsplit.cpp,
	src/configure, src/configure.ac:
	add --enable-camelcase option to configure

2009-12-14 09:46 +0000  dockes    (1fabd736d16f)

	* src/doc/user/usermanual.sgml, src/index/fsindexer.cpp:
	use : as separator in localfields value before parsing as confsimple

2009-12-14 09:44 +0000  dockes    (2b09276dedc8)

	* src/utils/circache.cpp:
	fix pointer casting to make gcc happy

2009-12-14 09:44 +0000  dockes    (4ee0085fa59e)

	* src/sampleconf/fields:
	typo: keywords->keyword in prefixes

2009-12-14 09:43 +0000  dockes    (87b2caa6ec9c)

	* src/filters/rclabw, src/filters/rcldjvu, src/filters/rcldoc,
	src/filters/rcldvi, src/filters/rclflac, src/filters/rclgaim,
	src/filters/rclid3, src/filters/rclkwd, src/filters/rcllyx,
	src/filters/rclman, src/filters/rclogg, src/filters/rclopxml,
	src/filters/rclpdf, src/filters/rclppt, src/filters/rclps,
	src/filters/rclpurple, src/filters/rclrtf, src/filters/rclscribus,
	src/filters/rclsiduxman, src/filters/rclsoff, src/filters/rclsvg,
	src/filters/rcltex, src/filters/rcltext, src/filters/rclwpd,
	src/filters/rclxls, src/filters/recfiltcommon:
	iscmd: supplement -x with -d test not a dir

2009-12-14 07:26 +0000  dockes    (b8eceb552b3e)

	* src/INSTALL, src/README:
	

2009-12-14 07:25 +0000  dockes    (16dc2e0ed9fa)

	* src/makesrcdist.sh:
	

2009-12-14 07:13 +0000  dockes    (e5aae08ee26d)

	* src/Makefile.in:
	

2009-12-14 07:07 +0000  dockes    (c66c86594b35)

	* src/VERSION:
	

2009-12-14 07:06 +0000  dockes    (7229a431d686)

	* src/makesrcdist.sh:
	use different release name for beta versions

2009-12-13 21:40 +0000  dockes    (e0033b00df1e)

	* src/doc/user/usermanual.sgml:
	anacron

2009-12-13 16:16 +0000  dockes    (e148cd3f92c1)

	* src/sampleconf/recoll.conf.in:
	add localfields example

2009-12-13 16:13 +0000  dockes    (89ebf91076d8)

	* src/common/textsplit.cpp, src/internfile/mh_html.cpp,
	src/query/plaintorich.cpp, src/utils/base64.cpp:
	small amd64 fixes: 64 bits size_type, signed chars

2009-12-08 07:43 +0000  dockes    (026aa6df356f)

	* src/doc/man/recollindex.1, src/doc/man/recollq.1:
	clarify stemming options

2009-12-08 07:43 +0000  dockes    (0c698007055e)

	* src/query/recollq.cpp:
	add option -s to select stemming language

2009-12-08 07:42 +0000  dockes    (fcb5bca6adf8)

	* src/rcldb/stemdb.cpp:
	traces

2009-12-07 18:47 +0000  dockes    (6631c645c9df)

	* src/index/recollindex.cpp:
	use setpriority() to be a nice indexer

2009-12-07 17:43 +0000  dockes    (76128d18110e [RECOLL_1_13_0, RECOLL_20091214, RECOLL_1_13_01, RECOLL_1_13_00])

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	reimplemented Q3TextDocument::find() to be like the qt3 version

2009-12-07 14:32 +0000  dockes    (b02171ea3078)

	* src/qtgui/preview_w.cpp:
	switch preview qtextedit format back to plain text after loading so
	that selections copy plain text not html

2009-12-07 13:27 +0000  dockes    (3d37dc441cc9)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.cpp:
	Term expansion: handle field issues inside rcldb::termmatch,
	ensuring that we take the field name into account for all
	expansions. Ensures that File Name searches and filename: query
	language searches work the same, + overall better consistency

2009-12-07 13:24 +0000  dockes    (fe625ef90a21)

	* src/configure.ac:
	Israel G. Lugo: make sure that only one of inotify or FAM gets
	enabled, giving priority to inotify.

2009-11-30 10:04 +0000  dockes    (a75cd5af7c71)

	* src/VERSION, src/internfile/mimehandler.cpp,
	src/kde/kioslave/recoll/htmlif.cpp, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h, src/query/plaintorich.cpp,
	src/query/plaintorich.h, src/sampleconf/mimeconf:
	add <pre> tag to text/plain translated into qt html to preserve
	indentation. Removes need for rcltext (which did just this). Allow
	specifying any text/xxx as internal (allows having specific editor
	but no filter)

2009-11-30 06:34 +0000  dockes    (c4fdcda7df89)

	* src/index/rclmonrcv.cpp:
	compile either fam or inotify not both

2009-11-29 15:00 +0000  dockes    (6b9ed9ae0949)

	* src/doc/user/usermanual.sgml:
	change defaults for big text params

2009-11-29 12:56 +0000  dockes    (a04f5006fe89)

	* src/sampleconf/recoll.conf.in:
	add new 1.13 variables and defaults

2009-11-28 09:15 +0000  dockes    (4b56c2068545)

	* src/internfile/mh_execm.cpp, src/internfile/mh_text.cpp,
	src/query/docseqhist.cpp, src/utils/circache.cpp:
	new glibc missing includes

2009-11-28 08:45 +0000  dockes    (012b4b63e260)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts:
	

2009-11-28 08:14 +0000  dockes    (b0e70a20b7f1)

	* src/index/beaglequeue.cpp, src/internfile/mh_text.cpp,
	src/qtgui/confgui/confguiindex.cpp, src/qtgui/guiutils.cpp:
	set defaults usedesktoprefs, maxtext 20mb pagesz 1000k webcache 40m

2009-11-28 08:11 +0000  dockes    (ed3a574543f5)

	* src/doc/user/usermanual.sgml:
	clean-up + documented 1.13 new features

2009-11-28 06:39 +0000  dockes    (c45a690ee533)

	* src/internfile/mh_mbox.cpp:
	converted iostream to stdio because of mysterious read errors at the
	last position in the offsets file

2009-11-27 13:23 +0000  dockes    (7fa95cd57200)

	* src/internfile/mh_mail.cpp:
	add cnf(maildefcharset) to set specific mail default charset (mainly
	for readpst extracts which are utf-8 but have no charset set)

2009-11-27 13:11 +0000  dockes    (385305ee1820)

	* src/rcldb/rcldb.cpp:
	loglevel

2009-11-27 13:08 +0000  dockes    (8cc1ab099807)

	* src/internfile/mh_mbox.cpp:
	include sys/stat

2009-11-27 12:41 +0000  dockes    (c3039d4eab51)

	* src/internfile/Filter.h, src/internfile/internfile.cpp,
	src/internfile/mh_mbox.cpp, src/internfile/mh_mbox.h,
	src/internfile/mimehandler.h, src/query/docseqhist.cpp:
	implemented a cache for mbox message header offsets

2009-11-27 07:07 +0000  dockes    (a1a92e0952dd)

	* src/internfile/mh_mbox.cpp:
	Support From "bla bla" (quoted) From lines

2009-11-27 07:00 +0000  dockes    (64f09e3ad5a7)

	* src/internfile/internfile.cpp:
	update test driver

2009-11-26 14:03 +0000  dockes    (023c2a8520de)

	* src/aspell/rclaspell.cpp, src/aspell/rclaspell.h,
	src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/query/docseq.h,
	src/query/docseqdb.cpp, src/query/docseqdb.h,
	src/query/reslistpager.cpp, src/query/reslistpager.h,
	src/rcldb/rcldb.cpp, src/rcldb/searchdata.cpp,
	src/rcldb/searchdata.h:
	suggest alternate spellings if no results

2009-11-26 13:52 +0000  dockes    (4270622aa3e0)

	* src/qtgui/guiutils.cpp:
	suppressed core dump at exit on nonexistent config

2009-11-26 07:17 +0000  dockes    (f02bf2b6ea30)

	* src/rcldb/rcldb.cpp, src/rcldb/rclquery.cpp, src/rcldb/rclquery.h:
	use only match terms to build doc abstract, not all query terms
	(might save a little effort)

2009-11-26 07:15 +0000  dockes    (90776b10554c)

	* src/qtgui/rclmain_w.cpp:
	spell tool must be created even if USE_ASPELL is undefined

2009-11-25 14:37 +0000  dockes    (e3faedd237b8)

	* src/utils/md5.cpp:
	suppress unused parm warning

2009-11-25 11:07 +0000  dockes    (f8011c9579c8)

	* packaging/debian/debiankio/changelog,
	packaging/debian/debiankio/compat,
	packaging/debian/debiankio/control,
	packaging/debian/debiankio/copyright,
	packaging/debian/debiankio/dirs, packaging/debian/debiankio/docs,
	packaging/debian/debiankio/rules, packaging/debian/debiankio/watch,
	packaging/debian/debianrecoll/changelog,
	packaging/debian/debianrecoll/compat,
	packaging/debian/debianrecoll/control,
	packaging/debian/debianrecoll/copyright,
	packaging/debian/debianrecoll/docs,
	packaging/debian/debianrecoll/menu,
	packaging/debian/debianrecoll/rules,
	packaging/debian/debianrecoll/watch: new file.
	* packaging/debian/debiankio/changelog,
	packaging/debian/debiankio/compat,
	packaging/debian/debiankio/control,
	packaging/debian/debiankio/copyright,
	packaging/debian/debiankio/dirs, packaging/debian/debiankio/docs,
	packaging/debian/debiankio/rules, packaging/debian/debiankio/watch,
	packaging/debian/debianrecoll/changelog,
	packaging/debian/debianrecoll/compat,
	packaging/debian/debianrecoll/control,
	packaging/debian/debianrecoll/copyright,
	packaging/debian/debianrecoll/docs,
	packaging/debian/debianrecoll/menu,
	packaging/debian/debianrecoll/rules,
	packaging/debian/debianrecoll/watch:
	added debian dir to build kio-recoll

2009-11-24 10:25 +0000  dockes    (87057b6e2cba)

	* src/kde/kioslave/recoll/CMakeLists.txt:
	execute minimum recoll config inside cmakelists to create rclversion
	and autoconfig includes

2009-11-24 10:24 +0000  dockes    (a6e854084ffb)

	* src/utils/smallut.h:
	gcc4

2009-11-23 19:51 +0000  dockes    (42785e498950)

	* src/index/beaglequeue.cpp:
	store beagle fields before interning the file

2009-11-23 17:38 +0000  dockes    (aaccb7e813a8)

	* src/qtgui/preview_w.cpp:
	if text is empty, display fields by default

2009-11-23 17:37 +0000  dockes    (129654f22b3c)

	* src/internfile/internfile.cpp:
	in FileInterner::FileInterner(Rcl::Doc) (query), declare the
	BeagleQueue static so that the cache persists between FileInterner
	objects

2009-11-23 17:36 +0000  dockes    (2292efb797b4)

	* src/internfile/internfile.h:
	comments

2009-11-23 16:12 +0000  dockes    (a7ed9c85c313)

	* src/query/dynconf.cpp, src/query/dynconf.h: new file.
	* src/query/history.cpp, src/query/history.h: deleted file.
	* src/lib/Makefile, src/lib/mkMake, src/qtgui/guiutils.cpp,
	src/qtgui/main.cpp, src/qtgui/preview_w.cpp,
	src/qtgui/rclmain_w.cpp, src/qtgui/recoll.h,
	src/query/docseqhist.cpp, src/query/docseqhist.h,
	src/query/dynconf.cpp, src/query/dynconf.h, src/query/history.cpp,
	src/query/history.h:
	revamped history feature to be udi-based while supporting old format

2009-11-23 16:11 +0000  dockes    (8a494a30e71f)

	* src/rcldb/rcldb.cpp:
	set udi in meta from getDoc(udi)

2009-11-23 16:10 +0000  dockes    (c432dcb83d8f)

	* src/index/beaglequeue.cpp, src/utils/circache.cpp,
	src/utils/circache.h:
	Beaglequeue: simplify index from cache now that udi entries are
	unique in cache

2009-11-22 17:27 +0000  dockes    (112515ddfd1b)

	* src/index/beaglequeue.cpp, src/utils/circache.cpp,
	src/utils/circache.h:
	only keep the latest entry for a given udi in the cache

2009-11-22 17:26 +0000  dockes    (c47346e105ac)

	* src/utils/smallut.h:
	added tempbuf class

2009-11-21 13:36 +0000  dockes    (d497773469db)

	* src/internfile/mimehandler.cpp, src/qtgui/rclmain_w.cpp:
	allow setting attrs on mimeview defs, factorize some code with
	mhExecFactory

2009-11-21 13:35 +0000  dockes    (77639dc8a584)

	* src/common/rclconfig.cpp, src/common/rclconfig.h:
	added valueSplitAttributes() method

2009-11-21 11:18 +0000  dockes    (50c2c8c764bb)

	* src/internfile/mimehandler.cpp:
	use a confsimple to parse the additional filter attributes

2009-11-21 11:14 +0000  dockes    (ba1b73290998)

	* src/qtgui/guiutils.h:
	add ipath to default paragraph format

2009-11-18 15:32 +0000  dockes    (132c512aacde)

	* src/kde/kioslave/recoll/CMakeLists.txt:
	added beaglequeue/circache to kio build because of internfile
	dependency

2009-11-18 14:27 +0000  dockes    (d1587dd98290)

	* src/utils/circache.cpp:
	warning

2009-11-18 14:26 +0000  dockes    (812296ef15d8)

	* src/rcldb/rclquery.cpp:
	query::getrescnt() would only work once following 1.13 mods (affects
	python api)

2009-11-18 14:25 +0000  dockes    (cc1924f2d969)

	* src/python/samples/recollq.py:
	

2009-11-18 14:03 +0000  dockes    (e60f229404a4)

	* src/python/recoll/pyrecoll.cpp:
	add some casts to avoid kwargs const warnings

2009-11-18 13:46 +0000  dockes    (0e29576743b0)

	* src/index/rclmonrcv.cpp:
	typo

2009-11-18 12:33 +0000  dockes    (da553b8d1e93)

	* src/filters/rclchm, src/filters/rclexecm.py, src/filters/rclics,
	src/internfile/mh_execm.cpp, src/internfile/mh_execm.h:
	handle REFILTERROR in execm

2009-11-18 10:26 +0000  dockes    (f28392bec173)

	* src/internfile/mh_mail.cpp, src/rcldb/rcldb.cpp:
	mh_mail: use truncate_to_word to avoid cutting an utf8 char. rcldb:
	logdeb text_to_word errors

2009-11-18 08:24 +0000  dockes    (c9b8704e7ffa)

	* src/index/beaglequeue.cpp, src/mk/FreeBSD:
	beaglequeue fully functional, small fixes remaining?

2009-11-18 07:57 +0000  dockes    (0f863324690f)

	* src/index/beaglequeue.cpp:
	ok with compression

2009-11-18 07:46 +0000  dockes    (7925e58ac0d9)

	* src/utils/circache.cpp, src/utils/circache.h:
	compression works

2009-11-17 14:52 +0000  dockes    (122d9a523dc7)

	* src/utils/circache.cpp, src/utils/circache.h:
	circache ok

2009-11-16 16:18 +0000  dockes    (88021fc84abd)

	* src/internfile/internfile.cpp:
	Lack of error checking after have_document() in preview case could
	lead to looping, and cancellation was not checked to make things
	worse

2009-11-16 16:16 +0000  dockes    (22e0540453bc)

	* src/configure:
	--without-gui

2009-11-16 16:12 +0000  dockes    (d3e16fb089de)

	* src/qt4gui/recoll.pro.in:
	stupid mistake in previous cosmetic change

2009-11-16 16:11 +0000  dockes    (a422d8f6d6fd)

	* src/index/fsindexer.cpp:
	make very sure ~/.beagle is in the skippedPaths

2009-11-16 16:10 +0000  dockes    (effac8983ab5)

	* src/internfile/mh_mail.cpp:
	reason msg

2009-11-16 12:50 +0000  dockes    (bfc0df6ab067)

	* src/Makefile.in, src/common/autoconfig.h.in, src/configure.ac,
	src/index/Makefile, src/mk/localdefs.in, src/utils/x11mon.cpp:
	add --without-gui configure option

2009-11-15 16:41 +0000  dockes    (81edb2c4cef7)

	* src/index/beaglequeue.cpp, src/index/fsindexer.cpp,
	src/utils/circache.cpp:
	catch cancel exceptions thrown by internfile()

2009-11-15 14:39 +0000  dockes    (4539869b5761)

	* src/index/fsindexer.h, src/qtgui/rclmain_w.cpp,
	src/query/reslistpager.cpp, src/rcldb/rcldoc.cpp,
	src/rcldb/rcldoc.h, src/sampleconf/fields:
	changed apptag field name to rclaptg

2009-11-15 14:18 +0000  dockes    (b41678f5ad12)

	* src/qt4gui/recoll.pro.in, src/qtgui/recoll.pro.in:
	add -ldl -lX11 for binutils-gold

2009-11-15 08:38 +0000  dockes    (3801ee9a51c6)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/index/fsindexer.cpp, src/index/rclmonrcv.cpp,
	src/utils/fstreewalk.cpp, src/utils/fstreewalk.h,
	src/utils/smallut.cpp, src/utils/smallut.h:
	rationalized how we recompute things on setkeydir. recoll_noindex
	and skippedNames can now be changed at any point in the tree

2009-11-14 11:34 +0000  dockes    (a922eac98d16)

	* src/index/rclmonprc.cpp:
	monitor: accumulate mods for 30s before indexing

2009-11-14 10:29 +0000  dockes    (ea134de640e0)

	* src/index/beaglequeue.cpp, src/index/beaglequeue.h,
	src/index/fsindexer.cpp, src/index/indexer.cpp,
	src/index/rclmonrcv.cpp, src/index/recollindex.cpp,
	src/utils/circache.cpp:
	monitor the beagle queue

2009-11-14 10:25 +0000  dockes    (42421f027b94)

	* src/filters/rclchm, src/filters/rclics, src/filters/rcltext:
	emit helpernotfound

2009-11-14 08:21 +0000  dockes    (93baac7e87ac)

	* src/index/beaglequeue.cpp, src/index/beaglequeue.h,
	src/index/fsindexer.cpp, src/index/fsindexer.h,
	src/index/indexer.cpp, src/index/indexer.h,
	src/index/recollindex.cpp, src/index/recollindex.h,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	beaglequeue indexFiles

2009-11-13 13:29 +0000  dockes    (7d0c4d7a917c)

	* src/index/beaglequeue.cpp, src/index/beaglequeue.h,
	src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/confgui/confguiindex.cpp, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h, src/qtgui/rclmain_w.cpp,
	src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h, src/rcldb/rclquery.cpp,
	src/sampleconf/fields:
	1st beagle version with index/preview working

2009-11-13 09:08 +0000  dockes    (71f8c28cbeba)

	* src/qtgui/idxthread.cpp:
	integrate beaglequeueindexer for indexing. Work remains for
	indexfiles() at least

2009-11-13 09:08 +0000  dockes    (dda5121a7c45)

	* src/utils/circache.cpp, src/utils/circache.h:
	integrate beaglequeueindexer for indexing. Work remains for
	indexfiles() at least

2009-11-13 09:07 +0000  dockes    (364d46e16faf)

	* src/index/beaglequeue.cpp, src/index/beaglequeue.h,
	src/index/fsindexer.cpp, src/index/fsindexer.h,
	src/index/indexer.cpp, src/index/indexer.h,
	src/index/recollindex.cpp:
	integrate beaglequeueindexer for indexing. Work remains for
	indexfiles() at least

2009-11-13 09:04 +0000  dockes    (7e32466740a7)

	* src/configure.ac:
	Israel G. Lugo: give priority to the user's PATH when looking for
	qmake (fixes detecting the wrong qmake when more than one exists).

2009-11-13 09:01 +0000  dockes    (3503bfba6b70)

	* src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h:
	make dump const

2009-11-13 09:01 +0000  dockes    (b4c8330037e7)

	* src/lib/Makefile, src/lib/mkMake:
	add beaglequeue, fsindexer

2009-11-13 08:58 +0000  dockes    (63ee628229e7)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h:
	add panel for beaglequeue parameters + arrange so that a checkbox
	can enable/disable other params

2009-11-13 08:54 +0000  dockes    (5edf24b7552e)

	* src/sampleconf/fields:
	comments

2009-11-13 08:15 +0000  dockes    (a829fce15458)

	* src/filters/rclchm, src/filters/rclexecm.py, src/filters/rclics,
	src/filters/rclimg, src/filters/rclzip, src/internfile/mh_execm.cpp:
	dont use 0-sized doc to mean eof now

2009-11-11 18:09 +0000  dockes    (21b6ba1309c7)

	* src/filters/rclimg:
	send mimetype

2009-11-11 18:07 +0000  dockes    (7f2a7a7214fb)

	* src/internfile/mh_execm.cpp:
	set mimetype for the non-ipath case

2009-11-11 07:47 +0000  dockes    (75f9d10cf2f3)

	* src/index/fsindexer.cpp, src/index/fsindexer.h,
	src/index/indexer.cpp, src/index/indexer.h,
	src/index/recollindex.cpp:
	moved common db code from fsindexer to confindexer

2009-11-10 18:11 +0000  dockes    (e079c8ce273f)

	* src/index/beaglequeue.cpp, src/index/beaglequeue.h: new file.
	* src/index/beaglequeue.cpp, src/index/beaglequeue.h:
	

2009-11-10 18:10 +0000  dockes    (698e70099ec0)

	* src/index/fsindexer.cpp, src/index/fsindexer.h,
	src/index/recollindex.h: new file.
	* src/index/fsindexer.cpp, src/index/fsindexer.h,
	src/index/indexer.cpp, src/index/indexer.h, src/index/rclmonprc.cpp,
	src/index/recollindex.cpp, src/index/recollindex.h:
	dbindexer->fsindexer, split into its own file

2009-11-10 17:42 +0000  dockes    (ccf674432104)

	* src/ChangeLog:
	

2009-11-10 17:42 +0000  dockes    (065c40b8964d)

	* src/index/recollindex.cpp:
	small cleanups and add option to call beaglequeue

2009-11-10 17:41 +0000  dockes    (d4ff290d1615)

	* src/index/indexer.cpp:
	small cleanups and comments

2009-11-10 17:39 +0000  dockes    (00c5f0c09ef9)

	* src/index/indexer.h:
	comments

2009-11-10 17:38 +0000  dockes    (02b632bcbeca)

	* src/index/rclmonrcv.cpp:
	remove indexer.h include

2009-11-10 17:38 +0000  dockes    (ba2255ec8b62)

	* src/common/rclinit.h:
	comment

2009-11-10 17:37 +0000  dockes    (915bf923b8da)

	* src/utils/fstreewalk.cpp, src/utils/fstreewalk.h:
	add nocanon option

2009-11-10 17:34 +0000  dockes    (29b753cd1f78)

	* src/utils/circache.cpp, src/utils/circache.h:
	intermediary checkpoint (things work, no index, no compression)

2009-11-10 17:32 +0000  dockes    (16e0d5965055)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	removed the useless keep_updated flag

2009-11-10 17:31 +0000  dockes    (75878eb08588)

	* src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h:
	added dump function

2009-11-10 17:30 +0000  dockes    (35b43d00db47)

	* src/query/recollq.cpp:
	added explicit flag parameter to Internfile constructor for helping
	with beagle queue integration

2009-11-10 17:29 +0000  dockes    (75255bb8d7a0)

	* src/sampleconf/fields:
	add dc:description as keywords alias

2009-11-10 09:39 +0000  dockes    (ee6104876da9)

	* src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/kde/kioslave/recoll/htmlif.cpp, src/qtgui/preview_w.cpp:
	added explicit flag parameter to Internfile constructor for helping
	with beagle queue integration

2009-11-09 09:26 +0000  dockes    (7c3c0eed036b)

	* src/utils/circache.cpp, src/utils/circache.h: new file.
	* src/lib/Makefile, src/lib/mkMake, src/utils/Makefile,
	src/utils/circache.cpp, src/utils/circache.h:
	circache

2009-11-09 09:26 +0000  dockes    (877bb76973aa)

	* src/utils/conftree.cpp, src/utils/conftree.h:
	add some constness

2009-11-06 11:33 +0000  dockes    (944e0b9d1d53)

	* src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/reslist.h,
	src/query/docseq.h, src/query/docseqdb.cpp, src/query/docseqdb.h,
	src/query/docseqhist.cpp, src/query/docseqhist.h,
	src/utils/fileudi.h, src/utils/pathut.cpp, src/utils/pathut.h:
	allow opening parent/enclosing doc with native editor in reslist

2009-11-06 11:26 +0000  dockes    (1d9a5530d7bf)

	* src/sampleconf/mimeview:
	added okular as chm viewer

2009-11-04 13:52 +0000  dockes    (226d88ccb6c1)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp:
	store file names using local8bit qstring conversions

2009-11-04 13:43 +0000  dockes    (b57bd81d3e8e)

	* src/utils/fstreewalk.h:
	comment

2009-11-04 13:42 +0000  dockes    (2037ae120bcf)

	* src/utils/cancelcheck.h:
	comment

2009-10-31 09:00 +0000  dockes    (3ad7f6c85ce2)

	* src/internfile/mh_mail.cpp, src/internfile/mh_mail.h:
	extract msgid + generate abstract at start of txt, excluding headers

2009-10-31 08:59 +0000  dockes    (9e7ae93bd35b)

	* src/utils/mimeparse.cpp:
	change rfc2047 mail header decoding (=?iso-xx stuff) so that a start
	of encoding section can be recognized even when not after white space

2009-10-30 19:05 +0000  dockes    (eb9ed35f9fe0)

	* src/qtgui/rclmain_w.cpp:
	allow substituting all doc fields in viewer command line

2009-10-30 19:04 +0000  dockes    (d8065c96ceae)

	* src/qtgui/viewaction.ui:
	clarify using desktop defs in action choice dialog

2009-10-30 10:16 +0000  dockes    (4a744302db21)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/qtgui/rclmain_w.cpp, src/query/reslistpager.cpp,
	src/sampleconf/fields:
	Allow setting fields in fs subtree. Use for an application tag used
	for starting a specialized viewer

2009-10-30 08:59 +0000  dockes    (bdd54ae7a182)

	* src/VERSION, src/common/rclconfig.h, src/index/indexer.cpp,
	src/index/indexer.h, src/internfile/mimehandler.cpp,
	src/rcldb/rcldb.cpp, src/rcldb/rclquery.cpp, src/utils/conftree.cpp,
	src/utils/conftree.h:
	Allow fields local to a subtree to be set in the configuration

2009-10-30 08:53 +0000  dockes    (aa8c442a67ec)

	* src/configure.ac:
	use /bin/sh to execute recollinstall instead of making it executable

2009-10-30 08:53 +0000  dockes    (0faf1f6ccf5f)

	* src/Makefile.in, src/configure:
	use /bin/sh to execute recollinstall instead of making it executable

2009-10-29 18:11 +0000  dockes    (2338d18226f2)

	* src/qtgui/uiprefs.ui:
	move the use-desktop-preference checkbox close to the choose editors
	button

2009-10-29 18:10 +0000  dockes    (4b6f29c1e3c3)

	* src/sampleconf/mimeconf:
	

2009-10-29 18:09 +0000  dockes    (2de2f1804086)

	* src/common/rclconfig.cpp, src/common/rclconfig.h:
	support wildcard filtering in getConfNames() + implement config
	checking function in test driver

2009-10-29 18:08 +0000  dockes    (78c287d1d2da)

	* src/utils/conftree.cpp, src/utils/conftree.h:
	bugfix: if last line ended with backslash, entry was ignored. new
	function: filter by wildcard expr in getNames()

2009-10-29 13:44 +0000  dockes    (26ae4011727a)

	* src/sampleconf/mimeconf, src/sampleconf/mimemap:
	chm+comments

2009-10-29 13:34 +0000  dockes    (178273f496f2)

	* src/sampleconf/recoll.conf.in:
	comment

2009-10-28 13:08 +0000  dockes    (9435a56f1962)

	* src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	fix signal/slot type mismatch for setSortParams

2009-10-26 13:19 +0000  dockes    (2a369661c70c)

	* src/qtgui/uiprefs_w.cpp:
	disable app-choosing button when use-desktop-prefs is activated

2009-10-26 11:16 +0000  dockes    (8cdb908a253d)

	* src/qtgui/rclmain_w.cpp:
	qt4 sometimes doesnt display the status bar if its not created in
	init

2009-10-26 10:00 +0000  dockes    (758f39788d0c)

	* src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h:
	arrange to send pageup/down and shift-home to the reslist

2009-10-24 15:02 +0000  dockes    (7d98b5c330c1)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb_p.h, src/rcldb/rclquery.cpp,
	src/rcldb/rclquery_p.h:
	unified retrying for databaseModified errors

2009-10-24 11:00 +0000  dockes    (9d49d2991eed)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/rcldb_p.h,
	src/rcldb/rclquery.cpp:
	renamed fields for clarity

2009-10-24 06:37 +0000  dockes    (1486d8f630fc)

	* src/filters/rclchm, src/filters/rclics, src/filters/rclzip:
	cleanup

2009-10-24 06:17 +0000  dockes    (6a8a9821c17c)

	* src/filters/rclexecm.py, src/filters/rclzip:
	use python zipfile

2009-10-23 16:45 +0000  dockes    (436e03b2f0c1)

	* src/filters/rclchm:
	comments

2009-10-23 16:03 +0000  dockes    (be653b19dd28)

	* src/filters/rclchm: new file.
	* src/filters/rclchm:
	first working

2009-10-23 16:03 +0000  dockes    (99a819213c2a)

	* src/filters/rclzip:
	comment

2009-10-22 17:28 +0000  dockes    (e5f16d6d23db)

	* src/doc/user/usermanual.sgml:
	%(fldname) specs

2009-10-22 17:27 +0000  dockes    (3e37f6aac6c5)

	* src/doc/user/docbook.css:
	new freebsd version

2009-10-22 17:27 +0000  dockes    (1535c07dd8a6)

	* src/sampleconf/mimeconf:
	ics

2009-10-22 17:16 +0000  dockes    (deaef902d7e3)

	* src/sampleconf/mimeconf:
	add ics + more programming languages

2009-10-22 17:16 +0000  dockes    (98009bab1e61)

	* src/sampleconf/mimemap:
	add ics + more programming languages

2009-10-22 17:13 +0000  dockes    (bf9a0c5eeb5c)

	* src/filters/rclics: new file.
	* src/filters/rclexecm.py, src/filters/rclics:
	initial support for icalendar splitting

2009-10-22 12:24 +0000  dockes    (f97b91cb8153)

	* src/filters/rclexecm.py: new file.
	* src/filters/rclexecm.py, src/filters/rclzip:
	made rclexecm a class in a separate module

2009-10-22 11:58 +0000  dockes    (9361ab690eec)

	* src/filters/rclzip:
	fully extracted common code

2009-10-21 21:00 +0000  dockes    (39b12da95a76)

	* src/filters/rclzip: new file.
	* src/filters/rclzip:
	initial

2009-10-21 20:59 +0000  dockes    (ef17d33ea782)

	* website/download.html:
	1.12.2

2009-10-21 12:02 +0000  dockes    (2baccf2235b6)

	* src/qtgui/rclmain_w.cpp, src/query/docseqdb.cpp,
	src/query/docseqdb.h, src/query/reslistpager.cpp,
	src/query/reslistpager.h, src/utils/Makefile, src/utils/smallut.cpp,
	src/utils/smallut.h:
	fix queryBuildAbstract option functionality. Allow substituting
	%(fieldname) in reslist paragraph format

2009-10-21 12:00 +0000  dockes    (30a02a6bada8)

	* src/internfile/mimehandler.h:
	warning

2009-10-21 11:58 +0000  dockes    (ebc82bec7704)

	* src/kde/kioslave/recoll/CMakeLists.txt, src/utils/closefrom.cpp:
	linux

2009-10-21 11:32 +0000  dockes    (1cc979921a0d)

	* src/internfile/mh_text.cpp, src/utils/closefrom.cpp:
	gcc43+linux compile

2009-10-21 07:48 +0000  dockes    (72168c28c9bb)

	* src/makestaticdist.sh:
	cleanup .svn directories

2009-10-21 07:24 +0000  dockes    (a550073d34d4)

	* src/makestaticdist.sh:
	get makestaticdist to work with qt4

2009-10-21 07:15 +0000  dockes    (e44497010880)

	* packaging/debian/changelog, packaging/debian/control,
	packaging/debian/menu, packaging/debian/rules,
	packaging/rpm/recollmdk.spec, tests/lyx/lyx.txt:
	1.12.2 release fixes

2009-10-21 07:15 +0000  dockes    (cecbbb5e3c23)

	* website/pics/mario.png, website/pics/smile.png: new file.
	* website/mario.png, website/smile.png: deleted file.
	* website/BUGS.html, website/CHANGES.html, website/devel.html,
	website/download.html, website/features.html, website/index.html.en,
	website/index.html.fr, website/mario.png, website/pics/index.html,
	website/pics/mario.png, website/pics/recoll5-thumb.png,
	website/pics/recoll5.png, website/pics/smile.png, website/smile.png:
	1.12.2 release

2009-10-19 16:20 +0000  dockes    (b2a9b0c5fc47)

	* src/lib/Makefile, src/lib/mkMake:
	add closefrom

2009-10-19 16:19 +0000  dockes    (5b3c0f9438a9)

	* src/README, src/doc/man/recoll.conf.5, src/doc/man/recollindex.1,
	src/doc/user/usermanual.sgml, src/filters/rclsvg:
	explicit(e)ly errors again

2009-10-19 10:51 +0000  dockes    (70ed5ded2a5e)

	* src/qtgui/uiprefs.ui:
	move the use-desktop-preference checkbox close to the choose editors
	button

2009-10-19 07:30 +0000  dockes    (d25d7050d60c)

	* src/rcldb/rcldb.cpp, src/rcldb/rclquery.cpp:
	catch xapian exceptions in 2 more places.

2009-10-18 07:57 +0000  dockes    (cbcf397757a1)

	* src/qtgui/reslist.cpp:
	reslist: rightclick popup would not work inside table

2009-10-17 06:38 +0000  dockes    (cb08cd6b282b)

	* src/Makefile.in, src/index/recollindex.cpp, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	rclversion.h must not include xapian.h. Replace with
	Rcl::version_string()

2009-10-15 15:50 +0000  dockes    (6d01b54d3cf5)

	* src/qtgui/preview_w.cpp:
	compile with qt3

2009-10-15 12:32 +0000  dockes    (749d93d72709)

	* src/index/rclmonprc.cpp:
	only call x11IsAlive from the main thread

2009-10-15 12:32 +0000  dockes    (7339dd810b4c)

	* src/utils/Makefile, src/utils/conftree.cpp, src/utils/execmd.cpp:
	small linux include and makefile adjustments

2009-10-14 12:25 +0000  dockes    (4bfcb9f6483a)

	* src/utils/execmd.cpp, src/utils/execmd.h:
	m_cancelRequest->m_killRequest to avoid confusion with cancelcheck +
	close descriptors before exec

2009-10-14 12:24 +0000  dockes    (834b841865f0)

	* src/internfile/mh_exec.cpp:
	no timeout if filtermaxseconds is -1

2009-10-14 12:23 +0000  dockes    (894b94a986c2)

	* src/qtgui/confgui/confguiindex.cpp, src/sampleconf/recoll.conf.in:
	add filtermaxseconds to config

2009-10-14 12:22 +0000  dockes    (eec367c78b29)

	* src/utils/closefrom.cpp, src/utils/closefrom.h: new file.
	* src/utils/closefrom.cpp, src/utils/closefrom.h:
	

2009-10-14 06:21 +0000  dockes    (48782c4d99bd)

	* src/filters/rclimg, src/index/recollindex.cpp,
	src/internfile/mh_execm.cpp, src/internfile/mh_execm.h,
	src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/utils/execmd.cpp:
	execm first working zip version

2009-10-13 17:32 +0000  dockes    (ac8388c11bcb)

	* src/utils/idfile.cpp, src/utils/idfile.h:
	allow working on memory string

2009-10-13 16:37 +0000  dockes    (25cd49e5f3b2)

	* src/internfile/mh_exec.cpp:
	comments

2009-10-13 12:22 +0000  dockes    (f8f821415451)

	* src/internfile/mh_exec.cpp:
	handle interrupt requests and set timeout on execs

2009-10-13 12:21 +0000  dockes    (0ec65928f00f)

	* src/utils/execmd.cpp:
	use process group to control/kill execd processes

2009-10-13 12:20 +0000  dockes    (ad3f88e0578e)

	* src/sampleconf/recoll.conf.in:
	added loop.ps to skippedFiles

2009-10-12 16:27 +0000  dockes    (2d5321b8e32c)

	* src/common/rclinit.cpp:
	also block USR1 USR2

2009-10-09 13:58 +0000  dockes    (9ef52b9903d4)

	* src/internfile/mh_execm.cpp, src/internfile/mh_execm.h: new file.
	* src/filters/rclimg, src/internfile/mh_exec.cpp,
	src/internfile/mh_exec.h, src/internfile/mh_execm.cpp,
	src/internfile/mh_execm.h, src/internfile/mimehandler.cpp,
	src/lib/Makefile, src/lib/mkMake, src/sampleconf/mimeconf:
	execm persistent filters

2009-10-09 13:57 +0000  dockes    (94243b4ecca6)

	* src/common/textsplit.cpp:
	process camelCase

2009-10-09 13:34 +0000  dockes    (9129980cfe0e)

	* src/utils/execmd.cpp, src/utils/execmd.h, src/utils/netcon.cpp,
	src/utils/netcon.h:
	Execmd: added count parameter to receive(), and new getline()
	function Netcon: fix receive() to properly handle the case where
	there is initially data in the line buffer

2009-10-04 13:25 +0000  dockes    (f81cdfd36952)

	* src/utils/readfile.cpp:
	

2009-10-04 13:24 +0000  dockes    (fe1c983b582e)

	* src/mk/commondefs:
	remove -I/usr/local/include from commondefs!

2009-09-30 15:53 +0000  dockes    (401a53878320)

	* src/internfile/mh_text.cpp:
	dont set ipath for the first page in text files to avoid dual
	records for files under the page size

2009-09-30 15:45 +0000  dockes    (1ce015f48d3a)

	* src/internfile/mh_text.cpp, src/internfile/mh_text.h,
	src/qtgui/confgui/confguiindex.cpp, src/sampleconf/recoll.conf.in,
	src/utils/readfile.cpp, src/utils/readfile.h:
	implemented paged text files

2009-09-29 15:58 +0000  dockes    (b288f2d22754)

	* src/internfile/mh_text.cpp, src/qtgui/confgui/confguiindex.cpp,
	src/sampleconf/recoll.conf.in:
	textfilemaxmbs

2009-09-29 15:58 +0000  dockes    (a41ae31020fa)

	* src/utils/execmd.cpp:
	loglevels

2009-09-29 14:49 +0000  dockes    (89ab6fcd4bef)

	* src/utils/netcon.cpp, src/utils/netcon.h: new file.
	* src/utils/netcon.cpp, src/utils/netcon.h:
	

2009-09-29 14:49 +0000  dockes    (254aad5cdd17)

	* src/utils/netcon.cpp, src/utils/netcon.h: deleted file.
	* src/utils/netcon.cpp, src/utils/netcon.h:
	

2009-09-29 08:47 +0000  dockes    (302c0dd0dfa0)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	got rid of the preview tabdata array

2009-09-29 07:48 +0000  dockes    (f65d40e808c6)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	make print a slot in the editor, not the preview

2009-09-28 18:19 +0000  dockes    (5c03bd6d7d00)

	* src/doc/user/usermanual.sgml, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h:
	Preview printing

2009-09-28 17:53 +0000  dockes    (564c8022205f)

	* src/utils/execmd.cpp:
	adjust log levels

2009-09-26 09:30 +0000  dockes    (231f842cfa1a)

	* src/utils/netcon.cpp, src/utils/netcon.h: new file.
	* src/lib/Makefile, src/lib/mkMake, src/utils/execmd.cpp,
	src/utils/execmd.h, src/utils/netcon.cpp, src/utils/netcon.h:
	execmd uses netcon

2009-09-26 09:05 +0000  dockes    (3883518b318e)

	* src/rcldb/rclquery.cpp:
	dont abort on get_mset exception

2009-08-13 06:34 +0000  dockes    (71e1aa73c37e)

	* src/utils/refcntr.h:
	add release() method

2009-08-13 06:32 +0000  dockes    (75501a297534)

	* src/index/indexer.cpp, src/internfile/internfile.cpp,
	src/internfile/internfile.h, src/internfile/mimehandler.cpp,
	src/internfile/mimehandler.h:
	xattrs: make them work with non-text files. Use ctime for up to date
	checks

2009-08-13 06:29 +0000  dockes    (45721e5ace5a)

	* src/common/autoconfig.h.in:
	allow choosing the "file" command from configure

2009-08-13 06:28 +0000  dockes    (817bbeb36f34)

	* src/qtgui/rclmain_w.cpp:
	Make sure db is open at all times (caused problems when sorting
	query started from the command line)

2009-08-13 06:27 +0000  dockes    (05b809bbb7d0)

	* src/qtgui/preview_w.cpp:
	

2009-08-13 06:26 +0000  dockes    (b5b49b39dc8a)

	* src/configure, src/configure.ac, src/index/mimetype.cpp:
	allow choosing the "file" command from configure

2009-08-13 06:24 +0000  dockes    (902b5dc99b09)

	* src/ChangeLog:
	

2009-08-13 06:23 +0000  dockes    (3ee15899a458)

	* src/sampleconf/recoll.conf.in:
	add indexedmimetypes to sample file

2009-07-02 13:26 +0000  dockes    (a0f0be9546bb)

	* src/filters/rclman:
	

2009-07-02 10:26 +0000  dockes    (82d09aa4b256)

	* src/index/indexer.cpp, src/qtgui/rclmain_w.cpp:
	improve periodic indexing status reporting and timer processing

2009-07-02 06:17 +0000  dockes    (b8cdf0ab08a9)

	* src/qtgui/main.cpp, src/rcldb/searchdata.h, src/utils/mimeparse.cpp,
	src/utils/mimeparse.h:
	explicitely->explicitly

2009-06-26 09:25 +0000  dockes    (98153ad73366)

	* src/filters/rclman, src/sampleconf/mimemap:
	improve man page handling

2009-06-22 16:41 +0000  dockes    (5003fe921249)

	* src/qtgui/main.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h:
	moved periodic timer control from main.cpp to rclmain_w.cpp

2009-06-22 15:25 +0000  dockes    (a420554375c5)

	* src/qtgui/idxthread.cpp, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp:
	use proper locking/sleeping object for idx thread sync

2009-06-22 08:58 +0000  dockes    (d4fdc68fab47)

	* src/filters/rclman:
	use groff html output!

2009-06-22 08:57 +0000  dockes    (01a166e9f9e7)

	* src/index/indexer.cpp:
	debug trace

2009-06-01 06:32 +0000  dockes    (272067257953)

	* src/qtgui/main.cpp:
	fixed bug in handling remaining arguments as question pieces

2009-05-29 06:28 +0000  dockes    (091488ca1543)

	* src/bincimapmime/convert.h, src/utils/base64.cpp:
	change strchr() return parameter to const for new libc

2009-05-25 08:59 +0000  dockes    (6231c20d3e23)

	* src/filters/rcllyx:
	bug report from d.prost: spaces and accents in lyx file names

2009-05-04 08:06 +0000  dockes    (20f1f5746b3e)

	* src/qtgui/guiutils.cpp, src/qtgui/preview_w.h,
	src/qtgui/spell_w.cpp:
	gcc44

2009-04-27 11:49 +0000  dockes    (ba8db4a9fcf6)

	* packaging/rpm/recollfedora10.spec: new file.
	* packaging/rpm/recollfedora10.spec:
	

2009-04-27 11:42 +0000  dockes    (85e5723e268a)

	* tests/cjk/cjk.txt: new file.
	* tests/cjk/cjk.txt:
	

2009-04-27 09:40 +0000  dockes    (a7cf61bb3e6a)

	* website/BUGS.html, website/download.html, website/index.html.en,
	website/index.html.fr:
	1.12 release changes

2009-04-27 09:15 +0000  dockes    (eb2d1da3c9ee)

	* website/BUGS.html:
	

2009-04-27 08:05 +0000  dockes    (c26df870665c)

	* src/utils/md5.cpp, src/utils/readfile.cpp:
	gcc 4.4 includes fixes

2009-04-27 08:03 +0000  dockes    (5e892d5aa963)

	* src/python/recoll/setup.py:
	pathhash->fileudi

2009-02-24 18:30 +0000  dockes    (d897d4f128ce)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp:
	implement option to display the catg filter as a toolbar combobox

2009-02-23 07:57 +0000  dockes    (5eb3b91eca18)

	* src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts:
	new ru/uk translations from Michael

2009-02-06 16:49 +0000  dockes    (0946c032bea8)

	* src/utils/refcntr.h:
	make RefCntr(x*) explicit

2009-02-06 16:48 +0000  dockes    (1f50a0e7a3ac)

	* src/internfile/mimehandler.cpp:
	comments

2009-02-05 14:35 +0000  dockes    (1eb8b93ed85b)

	* src/utils/execmd.cpp, src/utils/execmd.h:
	1st execcmd cleanup

2009-01-30 13:27 +0000  dockes    (55d06dfa9b04)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts, src/qtgui/reslist.cpp:
	small problems with reslist translations

2009-01-30 11:43 +0000  dockes    (af28dae4f689)

	* src/INSTALL, src/README:
	

2009-01-30 11:43 +0000  dockes    (581a47458445 [RECOLL_1_12_0])

	* website/BUGS.html, website/CHANGES.html:
	1.12.0?

2009-01-30 11:42 +0000  dockes    (fd6cc84e76ce)

	* src/doc/user/usermanual.sgml:
	1.12 manual

2009-01-30 10:22 +0000  dockes    (f683b3907dd1)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts:
	updated message files, translated french

2009-01-29 14:24 +0000  dockes    (f09b8b421535)

	* src/filters/rcltext:
	simplified rcltext. No need for awk and no assumptions on charset

2009-01-29 11:27 +0000  dockes    (c8b882dea260)

	* src/ChangeLog, website/CHANGES.html, website/doc.html:
	

2009-01-29 11:04 +0000  dockes    (0bf58162416f)

	* src/VERSION:
	1.12.0 une

2009-01-29 10:47 +0000  dockes    (40e8e1f2f59b)

	* packaging/debian/changelog, packaging/rpm/recoll.spec,
	packaging/rpm/recollCooker.spec, packaging/rpm/recollfedora.spec,
	packaging/rpm/recollmdk.spec:
	

2009-01-29 10:08 +0000  dockes    (2af56852a361)

	* src/qtgui/main.cpp, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h:
	have ssearch install the lang help section when needed

2009-01-28 17:41 +0000  dockes    (8654c9b9d56d)

	* src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp:
	erase history would crash with empty reslist docsource

2009-01-28 17:21 +0000  dockes    (8b56ccfdd91b)

	* src/qtgui/rclmain_w.cpp:
	fixed status bar messages (were cleared by periodic100 every 100ms)

2009-01-28 17:05 +0000  dockes    (b435cf90abb0)

	* src/qtgui/rclhelp.cpp, src/qtgui/rclhelp.h: new file.
	* src/qtgui/rclhelp.cpp, src/qtgui/rclhelp.h:
	F1 context-enhanced help

2009-01-28 16:56 +0000  dockes    (e5410627d9d5)

	* src/qt4gui/recoll.pro.in:
	F1 context-enhanced help

2009-01-28 16:56 +0000  dockes    (741df5618110)

	* src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h,
	src/qtgui/main.cpp, src/qtgui/preview_w.cpp,
	src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/recoll.h,
	src/qtgui/recoll.pro.in, src/qtgui/reslist.cpp,
	src/qtgui/sort_w.cpp, src/qtgui/spell_w.cpp:
	F1 context-enhanced help

2009-01-28 14:58 +0000  dockes    (7e804d156dc5)

	* src/qtgui/rclmain_w.cpp:
	dont encode urls used for starting help browser

2009-01-28 14:22 +0000  dockes    (e696212a674c)

	* src/Makefile.in:
	add xapian version to version string

2009-01-28 08:45 +0000  dockes    (46251043fd88)

	* src/qtgui/advsearch.ui, src/qtgui/sort.ui, src/qtgui/spell.ui,
	src/qtgui/uiprefs.ui:
	avoid setting 0 sizes

2009-01-28 08:40 +0000  dockes    (1c551a065bdd)

	* src/configure, src/configure.ac:
	allow setting QMAKE in the environment

2009-01-27 18:12 +0000  dockes    (fb41a05985ed)

	* src/utils/pxattr.cpp:
	

2009-01-27 11:19 +0000  dockes    (3f5897bb4b8d)

	* tests/stemming/stemming.sh, tests/stemming/stemming.txt: new file.
	* tests/Maildir1/Maildir1.sh, tests/Maildir1/Maildir1.txt,
	tests/andor/andor.sh, tests/andor/andor.txt,
	tests/badsuffs/badsuffs.sh, tests/badsuffs/badsuffs.txt,
	tests/badsuffs1/badsuffs1.sh, tests/badsuffs1/badsuffs1.txt,
	tests/boolean/boolean.sh, tests/boolean/boolean.txt,
	tests/cjk/cjk.sh, tests/delete/delete.sh, tests/delete/delete.txt,
	tests/dirwithblanks/dirwithblanks.sh,
	tests/dirwithblanks/dirwithblanks.txt, tests/djvu/djvu.sh,
	tests/djvu/djvu.txt, tests/dvi/dvi.sh, tests/dvi/dvi.txt,
	tests/empty/empty.sh, tests/empty/empty.txt, tests/html/html.sh,
	tests/html/html.txt, tests/images/images.sh,
	tests/images/images.txt, tests/koi8r/koi8r.sh,
	tests/koi8r/koi8r.txt, tests/lyx/lyx.sh, tests/lyx/lyx.txt,
	tests/mail/mail.sh, tests/mail/mail.txt, tests/media/media.sh,
	tests/media/media.txt, tests/msword/msword.sh,
	tests/msword/msword.txt, tests/notypes/notypes.sh,
	tests/notypes/notypes.txt, tests/ooff/ooff.sh, tests/ooff/ooff.txt,
	tests/pdf/pdf.sh, tests/pdf/pdf.txt, tests/postscript/postscript.sh,
	tests/postscript/postscript.txt, tests/ppt/ppt.sh,
	tests/ppt/ppt.txt, tests/rfc2231/rfc2231.sh,
	tests/rfc2231/rfc2231.txt, tests/rtf/rtf.sh, tests/rtf/rtf.txt,
	tests/runtests.sh, tests/scribus/scribus.sh,
	tests/scribus/scribus.txt, tests/skipped/skipped.sh,
	tests/skipped/skipped.txt, tests/special/special.sh,
	tests/special/special.txt, tests/stemming/stemming.sh,
	tests/stemming/stemming.txt, tests/txt/txt.sh, tests/txt/txt.txt,
	tests/utf8/utf8.sh, tests/utf8/utf8.txt, tests/xls/xls.sh,
	tests/xls/xls.txt:
	remove recoll query text from compared test outputs

2009-01-27 10:25 +0000  dockes    (57dd90e8b55d)

	* src/common/textsplit.cpp, src/common/textsplit.h:
	Emit a_b intermediary span when splitting a_b.c

2009-01-26 18:30 +0000  dockes    (e2238061ec9d)

	* src/query/plaintorich.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h,
	src/rcldb/searchdata.cpp:
	modified the time at which we unaccent so that we can do the
	Capitalized->nostemming test on single words (this had been broken
	by the change of noac/split order done earlier to get japanese to
	work)

2009-01-26 18:26 +0000  dockes    (8529cb7d58c7)

	* tests/cjk/cjk.sh:
	

2009-01-26 17:52 +0000  dockes    (8a5b4971a703)

	* tests/cjk/cjk.sh: new file.
	* tests/cjk/cjk.sh:
	

2009-01-26 17:34 +0000  dockes    (e65566ba6690)

	* website/BUGS.html, website/CHANGES.html, website/features.html,
	website/index.html.en, website/index.html.fr,
	website/pics/index.html, website/pics/recoll-
	HTML_search_results.html, website/pics/recoll0.html,
	website/pics/recoll0.txt, website/pics/recoll1.html,
	website/pics/recoll2.html, website/pics/recoll3.html,
	website/pics/recoll4.html, website/pics/recoll5.html,
	website/pics/recoll_chinese.html:
	website

2009-01-26 13:29 +0000  dockes    (61198659243f)

	* src/utils/smallut.cpp, src/utils/smallut.h:
	add overloaded neutchars with different parameters

2009-01-26 13:27 +0000  dockes    (61567bc09eab)

	* src/utils/transcode.cpp:
	tested and decided against caching iconv_open

2009-01-23 15:56 +0000  dockes    (1998b1608eb0)

	* src/ChangeLog, src/qtgui/advsearch_w.cpp, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/qtgui/recoll.h:
	temp ckpt: need to test on real unix

2009-01-23 11:07 +0000  dockes    (3631372e04f1)

	* src/qtgui/uiprefs.ui:
	avoid name duplication

2009-01-23 11:03 +0000  dockes    (0dba2718e1aa)

	* src/qtgui/uiprefs.ui:
	one button for choosing native editors

2009-01-23 10:38 +0000  dockes    (167a153bcf3c)

	* src/kde/kioslave/recoll/data/searchable.html:
	simplified javascript: no ie here!

2009-01-23 09:41 +0000  dockes    (b71166d61782)

	* src/qtgui/rclmain_w.cpp:
	toLocal8Bit->local8bit

2009-01-23 09:30 +0000  dockes    (c3565b4a7244)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/qtgui/recoll.h, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h:
	use normal text/html ext app for viewing help

2009-01-23 09:27 +0000  dockes    (c025fa3fe99d)

	* src/utils/execmd.cpp, src/utils/execmd.h:
	accept additional path argument to execmd::which

2009-01-22 14:25 +0000  dockes    (967d5e013a33)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	allow toggle show text/fields in preview

2009-01-21 16:42 +0000  dockes    (f950b7d75e66)

	* src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h,
	src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	added saveToFile menu entry to reslist

2009-01-21 13:55 +0000  dockes    (033fe406a666)

	* src/utils/pxattr.cpp, src/utils/pxattr.h: new file.
	* src/common/autoconfig.h.in, src/common/rclconfig.cpp,
	src/common/rclconfig.h, src/configure, src/configure.ac,
	src/internfile/internfile.cpp, src/internfile/mh_exec.h,
	src/internfile/mh_html.cpp, src/internfile/mh_mail.cpp,
	src/internfile/mh_mbox.cpp, src/internfile/mh_text.cpp,
	src/internfile/mh_unknown.h, src/internfile/mimehandler.cpp,
	src/internfile/mimehandler.h, src/lib/Makefile, src/lib/mkMake,
	src/sampleconf/fields, src/utils/pxattr.cpp, src/utils/pxattr.h:
	added optional extended file attributes support

2009-01-21 11:11 +0000  dockes    (f269f00857ec)

	* src/sampleconf/mimeconf:
	comments

2009-01-21 11:11 +0000  dockes    (fda5a0a6fccb)

	* src/filters/rcldoc:
	try to use wvWare if present and antiword fails

2009-01-21 10:49 +0000  dockes    (394e160f7032)

	* src/utils/readfile.cpp:
	initialize the error buffer for gnu strerror_r

2009-01-21 10:24 +0000  dockes    (7580c4ed79ce)

	* src/utils/readfile.cpp:
	fix errno printing

2009-01-21 10:17 +0000  dockes    (f1dca213efee)

	* src/rcldb/rcldb.cpp:
	fixed typo that would prevent stopfile use

2009-01-17 14:57 +0000  dockes    (90f03bbd715c)

	* src/doc/man/recoll.conf.5, src/doc/user/usermanual.sgml:
	added compressedfilemaxkbs

2009-01-17 14:56 +0000  dockes    (78d1dd932d5b)

	* src/internfile/internfile.cpp, src/qtgui/confgui/confguiindex.cpp,
	src/sampleconf/recoll.conf.in:
	added compressedfilemaxkbs

2009-01-16 17:40 +0000  dockes    (fcc2539b18b4)

	* src/kde/kioslave/recoll/data/searchable.html:
	

2009-01-16 16:42 +0000  dockes    (11cc037db8a9)

	* src/kde/kioslave/recoll/00README.txt:
	

2009-01-16 11:32 +0000  dockes    (baaf38fdbca9)

	* src/kde/kioslave/recoll/00README.txt,
	src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/notes.txt:
	fixed docs + removed dead code

2009-01-15 17:07 +0000  dockes    (144b35bd64c0)

	* src/filters/rcluncomp, src/internfile/internfile.cpp:
	fixed handling of decompression errors, which was wrong but not
	catastrophically so in most cases

2009-01-15 17:05 +0000  dockes    (4b10b961d158)

	* src/qtgui/reslist.cpp:
	disable printing to tmp file

2009-01-15 14:37 +0000  dockes    (9392e278bb0a)

	* src/query/docseq.h, src/query/filtseq.cpp, src/query/filtseq.h,
	src/query/sortseq.cpp, src/query/sortseq.h:
	refactor operations delegated to subsequence by sortseq and filtspec
	into superclass

2009-01-15 09:47 +0000  dockes    (f02a34f835b4)

	* src/rcldb/rcldb.cpp:
	removed unused variable

2009-01-15 09:45 +0000  dockes    (2440f3259cd0)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/reslist.cpp,
	src/qtgui/uiprefs_w.cpp:
	ensure reslist parformat is refreshed after edit (1.11 bug)

2009-01-14 07:52 +0000  dockes    (b3c89a56c9a1)

	* src/qtgui/advsearch.ui, src/qtgui/rclmain_w.cpp,
	src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h,
	src/qtgui/viewaction.ui, src/qtgui/viewaction_w.cpp,
	src/qtgui/viewaction_w.h:
	arrange so that the select action dialog is preselected on the right
	mime type after missing action

2009-01-13 16:03 +0000  dockes    (2d8517785a8e)

	* src/common/textsplit.cpp:
	add _ to wordsep/spanglue chars. Add non-ascii test to isCJK for
	optimization

2009-01-13 16:02 +0000  dockes    (cbfb1f939c9d)

	* src/common/uproplist.h:
	small fix : remove diaeresis from seps + comments

2009-01-13 08:56 +0000  dockes    (ee8989c89330)

	* src/doc/user/usermanual.sgml:
	

2009-01-13 08:49 +0000  dockes    (93e74953ed0b)

	* src/doc/user/usermanual.sgml:
	update version

2009-01-13 08:02 +0000  dockes    (051bf6d49898)

	* src/rcldb/rcldb.h, src/rcldb/rcldb_p.h, src/rcldb/rclquery.h:
	minor visibility cleanup

2009-01-13 08:01 +0000  dockes    (c550fb351f5f)

	* src/qtgui/ssearchb.ui:
	fix obsolete tooltip message

2009-01-12 18:31 +0000  dockes    (3cefac6eb52d)

	* src/doc/user/usermanual.sgml:
	doc: better adv search explanation + duplicates collapsing

2009-01-12 17:50 +0000  dockes    (f8cb21911962)

	* src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h:
	simplified dialog structure, apparently allowed to get rid of size
	hacks

2009-01-12 16:42 +0000  dockes    (48ca278dcd42)

	* src/qtgui/advsearch.ui:
	suppressed unused vbox

2009-01-12 15:55 +0000  dockes    (b5486bd5b85d)

	* src/qtgui/advsearch.ui, src/qtgui/searchclause_w.cpp,
	src/qtgui/searchclause_w.h:
	suppressed unused layout in searchClause

2009-01-09 14:56 +0000  dockes    (073523a33ffe)

	* src/internfile/mh_exec.cpp, src/internfile/mh_html.cpp,
	src/internfile/mh_mail.cpp, src/internfile/mh_text.cpp,
	src/qtgui/guiutils.cpp, src/qtgui/guiutils.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h,
	src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h, src/rcldb/rclquery.cpp,
	src/rcldb/rclquery.h:
	compute md5 checksums for all docs and optionally collapse
	duplicates in results

2009-01-09 12:23 +0000  dockes    (f89119e58f79)

	* src/qtgui/reslist.cpp:
	add space/backspace as pager keys for reslist

2009-01-09 12:23 +0000  dockes    (36eb326513d5)

	* src/utils/Makefile, src/utils/md5.cpp, src/utils/md5.h,
	src/utils/readfile.cpp, src/utils/readfile.h:
	implement md5 convenience file and string wrappers. Modify readfile
	to support this

2009-01-09 07:27 +0000  dockes    (de3507d26de4)

	* src/rcldb/pathhash.cpp, src/rcldb/pathhash.h: deleted file.
	* src/lib/Makefile, src/lib/mkMake, src/rcldb/pathhash.cpp,
	src/rcldb/pathhash.h, src/rcldb/rcldb.cpp:
	got rid of pathhash in rcldb, not used since 11.0

2009-01-08 09:55 +0000  dockes    (1fc0cdb06859)

	* src/excludefile:
	adapt to svn

2009-01-08 09:50 +0000  dockes    (867f1a9f6b02)

	* src/makesrcdist.sh:
	adapt distrib script to svn

2009-01-08 09:40 +0000  dockes    (33a7fbc42386 [RECOLL_1_12_1exp5])

	* src/VERSION:
	

2009-01-06 18:48 +0000  dockes    (2e111dad7cba)

	* src/doc/user/bldloop: new file.
	* packaging/FreeBSD/recoll/Makefile,
	packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-
	plist, src/doc/user/bldloop, tests/koi8r/koi8r.sh, tests/shared.sh:
	*** empty log message ***

2009-01-06 18:40 +0000  dockes    (c82fbe0ee8fc)

	* packaging/debian/changelog, packaging/rpm/recoll.spec,
	packaging/rpm/recollfedora.spec, packaging/rpm/recollmdk.spec,
	src/ChangeLog, website/devel.html, website/download.html:
	*** empty log message ***

2009-01-06 18:40 +0000  dockes    (7ebc18a8b4d7)

	* unac/builder.in, unac/unac.c, unac/unac.h:
	new unac approach for japanese: dont decompose at all

2009-01-06 17:40 +0000  dockes    (a0b7ed1f2bda)

	* website/xapUpg100.html: new file.
	* website/BUGS.html, website/BUGS.txt, website/CHANGES.html,
	website/doc.html, website/download.html, website/index.html.en,
	website/index.html.fr, website/xapUpg100.html:
	*** empty log message ***

2009-01-06 17:30 +0000  dockes    (636e0f9f2a77)

	* website/howtos/index.html,
	website/howtos/prevent_indexing_a_directory/index.html,
	website/howtos/use_multiple_indexes/index.html,
	website/pics/piclist.txt, website/pics/recoll-HTML_search_results-
	thumb.png, website/pics/recoll-HTML_search_results.html,
	website/pics/recoll-HTML_search_results.png, website/pics/recoll-
	HTML_search_results.txt, website/pics/recoll0-thumb.png,
	website/pics/recoll0.html, website/pics/recoll0.png,
	website/pics/recoll0.txt, website/pics/recoll1-thumb.png,
	website/pics/recoll1.html, website/pics/recoll1.png,
	website/pics/recoll1.txt, website/pics/recoll2-thumb.png,
	website/pics/recoll2.html, website/pics/recoll2.png,
	website/pics/recoll2.txt, website/pics/recoll3-thumb.png,
	website/pics/recoll3.html, website/pics/recoll3.png,
	website/pics/recoll3.txt, website/pics/recoll4-thumb.png,
	website/pics/recoll4.html, website/pics/recoll4.png,
	website/pics/recoll4.txt, website/pics/recoll5-thumb.png,
	website/pics/recoll5.html, website/pics/recoll5.png,
	website/pics/recoll5.txt, website/pics/recoll_chinese-thumb.png,
	website/pics/recoll_chinese.html, website/pics/recoll_chinese.png,
	website/pics/recoll_chinese.txt: new file.
	* website/howtos/index.html,
	website/howtos/prevent_indexing_a_directory/index.html,
	website/howtos/use_multiple_indexes/index.html,
	website/pics/piclist.txt, website/pics/recoll-HTML_search_results-
	thumb.png, website/pics/recoll-HTML_search_results.html,
	website/pics/recoll-HTML_search_results.png, website/pics/recoll-
	HTML_search_results.txt, website/pics/recoll0-thumb.png,
	website/pics/recoll0.html, website/pics/recoll0.png,
	website/pics/recoll0.txt, website/pics/recoll1-thumb.png,
	website/pics/recoll1.html, website/pics/recoll1.png,
	website/pics/recoll1.txt, website/pics/recoll2-thumb.png,
	website/pics/recoll2.html, website/pics/recoll2.png,
	website/pics/recoll2.txt, website/pics/recoll3-thumb.png,
	website/pics/recoll3.html, website/pics/recoll3.png,
	website/pics/recoll3.txt, website/pics/recoll4-thumb.png,
	website/pics/recoll4.html, website/pics/recoll4.png,
	website/pics/recoll4.txt, website/pics/recoll5-thumb.png,
	website/pics/recoll5.html, website/pics/recoll5.png,
	website/pics/recoll5.txt, website/pics/recoll_chinese-thumb.png,
	website/pics/recoll_chinese.html, website/pics/recoll_chinese.png,
	website/pics/recoll_chinese.txt:
	*** empty log message ***

2008-12-21 13:17 +0000  dockes    (74da01dd27c2)

	* src/unac/unac.c, src/unac/unac.h:
	new unac approach for japanese: dont decompose at all

2008-12-21 13:05 +0000  dockes    (273dad0916bb)

	* unac/unac_version.h: new file.
	* unac/unac_version.h:
	*** empty log message ***

2008-12-19 09:55 +0000  dockes    (1a2dd90e07b4)

	* src/rcldb/rcldb.h, src/rcldb/rclquery.cpp, src/rcldb/searchdata.cpp:
	getMainConfig not actually needed and possibly harmful

2008-12-19 09:44 +0000  dockes    (3a16629b24f5)

	* src/rcldb/searchdata.cpp, src/unac/unac.c, src/unac/unac.h:
	dont unaccent japanese + fix bug in unac/split ordering in
	searchdata

2008-12-19 08:39 +0000  dockes    (b895714a6500)

	* src/python/recoll/setup.py:
	pyrecoll: port to linux, update

2008-12-18 14:11 +0000  dockes    (33bffc499e78)

	* src/query/xadump.cpp:
	diag: prevent char combination by inserting spaces

2008-12-18 11:58 +0000  dockes    (a3863a0c1f62)

	* unac/builder.in, unac/unac.c:
	no going out of the basic plane!

2008-12-18 11:12 +0000  dockes    (ac1315d2a94f)

	* unac/unac.c:
	added recoll memory allocation checks

2008-12-18 11:05 +0000  dockes    (cfb4210ce7d5)

	* unac/CaseFolding-5.1.0.txt, unac/UnicodeData-5.1.0.txt: new file.
	* unac/CaseFolding-5.1.0.txt, unac/UnicodeData-5.1.0.txt:
	*** empty log message ***

2008-12-18 11:04 +0000  dockes    (cc609462a402)

	* unac/builder.in, unac/configure, unac/configure.ac, unac/unac.c,
	unac/unac.h:
	use unicode 5.1.0 + dont unaccent katakana/hiragana. Main change in
	unicode is that letters ae and o with stroke dont decompose anymore
	into a+e and o+e; we may actually want to restore this if it proves
	a problem

2008-12-17 16:20 +0000  dockes    (65fd4fda84d3)

	* src/rcldb/rcldb.cpp:
	fix to previous abstract fix

2008-12-17 15:12 +0000  dockes    (9e9e84a23da6)

	* src/qtgui/reslist.cpp:
	use local hiliter

2008-12-17 14:26 +0000  dockes    (ada853f1e3b8)

	* src/common/Makefile, src/rcldb/rcldb.cpp:
	fix abstract generation when the match term is a multiword span
	(esp. for japanese)

2008-12-17 14:26 +0000  dockes    (9705bf172f13)

	* src/rcldb/searchdata.cpp:
	comment

2008-12-17 08:01 +0000  dockes    (42bc5b3b5abf)

	* src/index/indexer.cpp, src/index/indexer.h,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/python/recoll/pyrecoll.cpp, src/qtgui/main.cpp,
	src/query/recollq.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	simplified db open by getting rid of the illusion that we could have
	several writeable dbs per config

2008-12-16 17:43 +0000  dockes    (1d040e634db3 [RECOLL_1_12_1exp4])

	* src/README, src/VERSION,
	src/kde/kioslave/recoll/data/searchable.html,
	src/kde/kioslave/recoll/data/welcome.html:
	*** empty log message ***

2008-12-16 17:30 +0000  dockes    (18f65ef55dd6)

	* src/kde/kioslave/recoll/data/searchable.html:
	*** empty log message ***

2008-12-16 17:28 +0000  dockes    (e991bdd3d8c7)

	* src/kde/kioslave/recoll/data/searchable.html: new file.
	* src/kde/kioslave/recoll/data/searchable.html,
	src/kde/kioslave/recoll/data/welcome.html,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h:
	updated kioslave for small changes in reslistpager after main i/f
	integration. + javascript to search page

2008-12-16 14:20 +0000  dockes    (7bc14752b5f3)

	* src/qtgui/preview_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h,
	src/query/plaintorich.h, src/query/reslistpager.cpp,
	src/query/reslistpager.h, src/utils/debuglog.cpp,
	src/utils/debuglog.h:
	converted qt reslist to reslistpager

2008-12-16 08:54 +0000  dockes    (c702627139c8)

	* src/query/wasastringtoquery.cpp:
	OR chain longer than 2 would swallow preceding AND terms

2008-12-15 15:04 +0000  dockes    (62e1b7eaa7b9)

	* src/kde/kioslave/recoll/htmlif.cpp, src/query/reslistpager.cpp:
	kio: use right ipath for preview

2008-12-15 14:39 +0000  dockes    (30b71a18e961)

	* src/query/xadump.cpp, src/rcldb/searchdata.cpp:
	make gcc happy

2008-12-15 13:51 +0000  dockes    (f93dda12024f)

	* website/howtos/template.html:
	*** empty log message ***

2008-12-15 11:20 +0000  dockes    (4a74871e9823)

	* website/howtos/buildindex.sh, website/howtos/fragend.html,
	website/howtos/fraghead.html, website/howtos/newdir.sh,
	website/howtos/template.html: new file.
	* website/BUGS.html, website/BUGS.txt, website/CHANGES.html,
	website/copydocs, website/download.html,
	website/howtos/buildindex.sh, website/howtos/fragend.html,
	website/howtos/fraghead.html, website/howtos/newdir.sh,
	website/howtos/template.html, website/index.html.en,
	website/index.html.fr, website/pics/index.html:
	*** empty log message ***

2008-12-15 09:33 +0000  dockes    (afc0ef4911b2)

	* src/doc/user/usermanual.sgml:
	more search tips

2008-12-15 09:24 +0000  dockes    (59cd1bdd4d3f)

	* src/rcldb/searchdata.cpp:
	reorganize code + add boost to phrase element to match boost of
	original user terms

2008-12-12 11:53 +0000  dockes    (4121cbc09d70)

	* src/common/textsplit.cpp, src/common/textsplit.h,
	src/rcldb/rcldb.cpp:
	dont insert space in cjk abstracts

2008-12-12 11:02 +0000  dockes    (37fd1c31af49)

	* src/rcldb/rcldb.cpp:
	message level

2008-12-12 11:01 +0000  dockes    (d2a8c016d05c)

	* src/qtgui/reslist.cpp:
	add %i for displaying ipath

2008-12-12 11:00 +0000  dockes    (151d6a590152)

	* src/qtgui/main.cpp:
	add all extra cmd line args to the question

2008-12-08 17:43 +0000  dockes    (90b62656b326)

	* src/kde/kioslave/recoll/htmlif.cpp:
	set name as preview title

2008-12-08 17:42 +0000  dockes    (5717c313d23a)

	* src/kde/kioslave/recoll/dirif.cpp:
	removed a few traces

2008-12-08 14:34 +0000  dockes    (de392f657f81)

	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt:
	previews

2008-12-08 11:22 +0000  dockes    (877b674c328c)

	* src/utils/Makefile, src/utils/readfile.cpp:
	file_to_string: stat+reserve makes it faster

2008-12-05 13:15 +0000  dockes    (19ef9198e3d5)

	* src/VERSION:
	branched maintenance for 1.11; kio development on main, now 1.12

2008-12-05 11:09 +0000  dockes    (b27d4070bbf8)

	* src/common/textsplit.cpp, src/common/textsplit.h,
	src/common/uproplist.h, src/kde/kioslave/recoll/kio_recoll.cpp,
	src/qtgui/ssearch_w.cpp, src/query/recollq.cpp,
	src/query/wasatorcl.cpp, src/rcldb/searchdata.cpp:
	take care of splitting user string with respect to unicode white
	space, not only ascii

2008-12-05 07:38 +0000  dockes    (d102970d3aee)

	* src/utils/smallut.h:
	comment

2008-12-04 12:41 +0000  dockes    (a3f25963b2da [RECOLL_1_11_1exp3])

	* src/kde/kioslave/recoll/recollf.protocol: new file.
	* src/kde/kioslave/recoll/recollf.protocol:
	*** empty log message ***

2008-12-04 12:23 +0000  dockes    (adffbb42e449)

	* src/kde/kioslave/recoll/dirif.cpp:
	kde 4.0 compile

2008-12-04 11:50 +0000  dockes    (fef6cc6c4c97)

	* src/VERSION:
	*** empty log message ***

2008-12-04 11:49 +0000  dockes    (d1b1a426ddfa)

	* src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt, src/query/reslistpager.cpp,
	src/query/reslistpager.h, src/rcldb/rcldb.cpp, src/utils/pathut.h:
	kio_recoll: html/dir switching

2008-12-03 17:04 +0000  dockes    (a762165399a2)

	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt:
	cleaned up virtual tree and url handling. Drag to desktop now works
	with appropriate name. recollf protocol

2008-12-03 10:02 +0000  dockes    (127dbb400363)

	* src/kde/kioslave/recoll/dirif.cpp:
	better stat

2008-12-02 13:41 +0000  dockes    (6e55b23fb64f)

	* src/kde/kioslave/recoll/dirif.cpp:
	*** empty log message ***

2008-12-02 13:38 +0000  dockes    (66b031be3559)

	* src/kde/kioslave/recoll/dirif.cpp:
	*** empty log message ***

2008-12-02 13:16 +0000  dockes    (619e41b1537b)

	* src/INSTALL, src/README, src/VERSION, src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-12-02 13:14 +0000  dockes    (fff18d4ea953)

	* src/kde/kioslave/recoll/00README.txt,
	src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/data/welcome.html,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/notes.txt, src/query/reslistpager.cpp:
	kio goes to testing

2008-12-01 18:42 +0000  dockes    (714fdf15621e)

	* src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h:
	small cleanups and comments. Still some weirdness

2008-12-01 15:37 +0000  dockes    (8d9ea1f1c645)

	* src/kde/kioslave/recoll/cleancmakestuff.sh:
	*** empty log message ***

2008-12-01 15:36 +0000  dockes    (8504e2e278dd)

	* src/kde/kioslave/recoll/data/help.html: new file.
	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/cleancmakestuff.sh,
	src/kde/kioslave/recoll/data/help.html,
	src/kde/kioslave/recoll/data/welcome.html,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt:
	seems to work by re-running the search whenever it changes. Still had
	one crash, needs cleanup

2008-11-28 09:14 +0000  dockes    (ee6a7d32843e)

	* src/kde/kioslave/recoll/recollnolist.protocol: new file.
	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt,
	src/kde/kioslave/recoll/recoll.protocol,
	src/kde/kioslave/recoll/recollnolist.protocol:
	ensured compatibility with kde4.0

2008-11-27 17:48 +0000  dockes    (d461029ef29c)

	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt,
	src/kde/kioslave/recoll/recoll.protocol:
	bits of dual mode working

2008-11-27 14:05 +0000  dockes    (8cc177e8775a)

	* src/query/reslistpager.cpp:
	safety check

2008-11-27 13:35 +0000  dockes    (4d28c4942bc1)

	* src/sampleconf/mimeconf:
	*** empty log message ***

2008-11-27 09:49 +0000  dockes    (394d882caa0c)

	* src/sampleconf/mimeconf:
	remove obsolete [prefixes] section

2008-11-27 09:39 +0000  dockes    (0ec8260d8d7c)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-11-26 15:03 +0000  dockes    (b6a62dc24003)

	* src/kde/kioslave/recoll/cleancmakestuff.sh,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/notes.txt: new file.
	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/cleancmakestuff.sh,
	src/kde/kioslave/recoll/dirif.cpp,
	src/kde/kioslave/recoll/htmlif.cpp,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h,
	src/kde/kioslave/recoll/notes.txt,
	src/kde/kioslave/recoll/recoll.protocol:
	listdir doesn't work on kde 4.0 because of parent/child assumptions
	in kdirmodel; have to check on kde 4.1

2008-11-24 17:42 +0000  dockes    (9333f13ac4c7)

	* src/VERSION:
	*** empty log message ***

2008-11-24 17:38 +0000  dockes    (a761936ec65e)

	* src/kde/kioslave/recoll/CMakeLists.txt:
	check for dlopen

2008-11-24 16:42 +0000  dockes    (0f7e0292212f)

	* src/kde/kioslave/recoll/CMakeLists.txt:
	have to cc the pic objects, can't use librcl

2008-11-24 15:47 +0000  dockes    (d06dd2891012)

	* src/Makefile.in, src/aspell/Makefile, src/common/Makefile,
	src/common/autoconfig.h.in, src/common/rclconfig.h, src/configure,
	src/configure.ac, src/index/Makefile, src/internfile/Makefile,
	src/lib/Makefile, src/lib/mkMake, src/mk/Darwin, src/mk/FreeBSD,
	src/mk/OpenBSD, src/mk/SunOS, src/mk/commondefs,
	src/qt4gui/recoll.pro.in, src/qtgui/recoll.pro.in,
	src/query/Makefile, src/unac/unac.c, src/utils/pathut.cpp:
	make it easier to maintain the kio cmake by moving as much stuff as
	possible to autoconfig.h, merging libmime and librcl etc.

2008-11-24 15:23 +0000  dockes    (7d9add059cc1)

	* src/qtgui/confgui/main.cpp, src/qtgui/guiutils.cpp,
	src/qtgui/main.cpp, src/qtgui/recoll.h:
	replace local variable recoll_datadir with access to config

2008-11-24 14:54 +0000  dockes    (7005bf515a0b)

	* src/unac/unac_version.h: new file.
	* src/unac/unac.c, src/unac/unac_version.h:
	*** empty log message ***

2008-11-21 16:43 +0000  dockes    (5c4559fa9d49)

	* src/makesrcdist.sh:
	*** empty log message ***

2008-11-21 16:37 +0000  dockes    (e92347cad84d)

	* src/kde/kioslave/recoll/00README.txt, src/makesrcdist.sh:
	ccmake cleanup in kio_recoll

2008-11-21 16:02 +0000  dockes    (f691d6ad3333)

	* src/excludefile:
	*** empty log message ***

2008-11-20 18:00 +0000  dockes    (5063f4280d8d)

	* src/kde/kioslave/recoll/Makefile: deleted file.
	* src/kde/kioslave/recoll/Makefile:
	*** empty log message ***

2008-11-20 15:10 +0000  dockes    (dc45badd0c45)

	* src/VERSION:
	*** empty log message ***

2008-11-20 14:16 +0000  dockes    (c653773059df)

	* src/kde/kioslave/recoll/00README.txt:
	*** empty log message ***

2008-11-20 13:10 +0000  dockes    (8b5eea7103b5)

	* src/kde/kioslave/recoll/data/welcome.html: new file.
	* src/kde/kioslave/recoll/00README.txt,
	src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/data/welcome.html,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h, src/query/reslistpager.cpp,
	src/query/reslistpager.h:
	kioslave sort of works

2008-11-19 12:28 +0000  dockes    (93e6b483f5c4)

	* src/kde/kioslave/recoll/kio_recoll.cpp:
	*** empty log message ***

2008-11-19 12:19 +0000  dockes    (9b0d90b61574)

	* src/query/plaintorich.cpp, src/query/plaintorich.h,
	src/query/reslistpager.cpp, src/query/reslistpager.h: new file.
	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h: deleted file.
	* src/lib/Makefile, src/lib/mkMake, src/qt4gui/recoll.pro.in,
	src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/recoll.pro.in, src/query/plaintorich.cpp,
	src/query/plaintorich.h, src/query/reslistpager.cpp,
	src/query/reslistpager.h:
	moved plaintorich from qtgui/ to query/

2008-11-19 10:06 +0000  dockes    (350dd565c80d)

	* src/qtgui/reslist.cpp, src/utils/smallut.cpp, src/utils/smallut.h:
	moved code from qtgui to smallut

2008-11-18 13:51 +0000  dockes    (fae04b17c778)

	* src/utils/cancelcheck.h:
	comment

2008-11-18 13:25 +0000  dockes    (4d54c32dbee7)

	* src/index/csguess.cpp, src/index/mimetype.cpp,
	src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp,
	src/query/wasatorcl.cpp:
	add a few includes for new gcc version

2008-11-18 13:24 +0000  dockes    (9455c0affe0a)

	* src/utils/cancelcheck.h:
	comments

2008-11-18 10:23 +0000  dockes    (d09d14bf2e24)

	* src/utils/debuglog.h:
	*** empty log message ***

2008-11-17 14:51 +0000  dockes    (9d4e9515342e)

	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/Makefile.kde3: new file.
	* src/kde/kioslave/recoll/CMakeLists.txt,
	src/kde/kioslave/recoll/Makefile,
	src/kde/kioslave/recoll/Makefile.kde3,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h:
	1st kde test. cmake doesn't work, need to use the buildit script

2008-11-14 15:49 +0000  dockes    (13ca00d869a1)

	* src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.h:
	*** empty log message ***

2008-11-13 10:57 +0000  dockes    (5cd3ce5481df)

	* src/kde/kioslave/recoll/00README.txt,
	src/kde/kioslave/recoll/Makefile,
	src/kde/kioslave/recoll/kio_recoll.cpp,
	src/kde/kioslave/recoll/kio_recoll.la, src/query/docseqdb.cpp:
	got the kio_slave working again

2008-11-08 11:00 +0000  dockes    (81b9fe1d7644)

	* src/qtgui/reslist.cpp:
	Copy entries in the right-click menu now copy both to the selection
	and the clipboard

2008-10-18 07:04 +0000  dockes    (33b4eec42ac8)

	* website/BUGS.html: new file.
	* website/BUGS.html, website/download.html:
	*** empty log message ***

2008-10-18 06:51 +0000  dockes    (b885092a2488)

	* website/CHANGES.html: new file.
	* website/CHANGES.txt: deleted file.
	* website/BUGS.txt, website/CHANGES.html, website/CHANGES.txt,
	website/download.html, website/index.html.en, website/index.html.fr:
	*** empty log message ***

2008-10-15 08:30 +0000  dockes    (6657f5e0f698)

	* src/sampleconf/recoll.conf.in:
	add .git .hg .bzr to skipped

2008-10-14 07:50 +0000  dockes    (2321044edfb9 [RECOLL_1_11_0])

	* src/rcldb/searchdata.cpp, src/rcldb/searchdata.h,
	src/utils/refcntr.h:
	highlighting would not work with the category filter active because
	ClausSub did not implement getTerms

2008-10-14 06:07 +0000  dockes    (6ecc84bb82aa)

	* src/index/recollindex.cpp:
	print version in recollindex help

2008-10-13 11:46 +0000  dockes    (1cd1451bbb74)

	* src/excludefile, src/makesrcdist.sh:
	change in excludefile handling

2008-10-13 11:46 +0000  dockes    (609bbaa80120)

	* src/qtgui/ssearch_w.cpp, src/query/filtseq.cpp:
	warnings

2008-10-13 11:44 +0000  dockes    (809f8c3eb265)

	* src/qtgui/plaintorich.cpp:
	compil warn

2008-10-13 08:35 +0000  dockes    (a5d743b90fe8)

	* src/INSTALL, src/README:
	*** empty log message ***

2008-10-13 08:23 +0000  dockes    (5874f0e6fc82)

	* src/query/recollq.cpp:
	don't change recollq output, it is used for tests!

2008-10-13 07:57 +0000  dockes    (bf5637bbe652)

	* src/doc/user/usermanual.sgml, src/qtgui/i18n/recoll_de.ts,
	src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_tr.ts,
	src/qtgui/i18n/recoll_uk.ts, src/qtgui/i18n/recoll_xx.ts,
	src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp:
	messages and manual

2008-10-10 08:19 +0000  dockes    (d5a5fb9959b7)

	* src/doc/user/usermanual.sgml:
	added python api doc

2008-10-10 08:18 +0000  dockes    (4771280faa9c)

	* src/python/recoll/pyrecoll.cpp, src/python/samples/rclmbox.py,
	src/python/samples/recollqsd.py:
	fix executesd

2008-10-10 08:05 +0000  dockes    (f613511f9e1a)

	* src/python/recoll/pyrecoll.cpp:
	add delete purge

2008-10-10 08:04 +0000  dockes    (2547574a0242)

	* src/internfile/internfile.cpp:
	log levels

2008-10-09 09:36 +0000  dockes    (4fb973a50769)

	* src/python/recoll/pyrecoll.cpp:
	stemming went from query to searchdata

2008-10-09 09:21 +0000  dockes    (e112c834fca2)

	* src/filters/rclflac, src/filters/rclid3, src/sampleconf/mimeconf:
	improved mp3/flac filter. use pstotext directly

2008-10-09 09:19 +0000  dockes    (cf2e0559c3d9)

	* src/internfile/mh_exec.cpp, src/internfile/mimehandler.cpp:
	need to transcode text to utf-8

2008-10-09 09:19 +0000  dockes    (d250c2a0a26f)

	* src/utils/transcode.h:
	comments

2008-10-09 06:41 +0000  dockes    (721f4b3d08f4)

	* src/filters/rclimg:
	*** empty log message ***

2008-10-09 06:38 +0000  dockes    (e9d7fde008f9)

	* src/filters/rclimg:
	*** empty log message ***

2008-10-09 06:31 +0000  dockes    (4b76370655c3)

	* src/filters/rclimg:
	conform to filter error usual protocol

2008-10-08 16:15 +0000  dockes    (d60a26ce4397)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/filters/rcldvi, src/index/indexer.cpp, src/index/indexer.h,
	src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h, src/utils/smallut.cpp, src/utils/smallut.h:
	added menu to display missing helpers

2008-10-08 16:12 +0000  dockes    (30da9114943c)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-10-08 08:27 +0000  dockes    (8a78ee8cc158)

	* src/filters/rclabw, src/filters/rcldjvu, src/filters/rclid3,
	src/filters/rclkwd, src/filters/rclogg, src/filters/rclopxml,
	src/filters/rclppt, src/filters/rclsoff, src/filters/rclsvg,
	src/filters/rclxls, src/sampleconf/fields, src/sampleconf/mimeconf:
	improved rclid3 and rclogg

2008-10-07 16:19 +0000  dockes    (c922f7984106)

	* src/qtgui/preview_w.cpp:
	message

2008-10-07 08:07 +0000  dockes    (7e7e59b8a48f)

	* src/doc/user/usermanual.sgml:
	query language precisions

2008-10-07 06:52 +0000  dockes    (0b46df2d0a1d)

	* src/query/wasatorcl.cpp:
	*** empty log message ***

2008-10-07 06:44 +0000  dockes    (a6e8f2583e65)

	* src/ChangeLog, src/common/rclconfig.cpp,
	src/python/recoll/pyrecoll.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/rclquery.cpp, src/sampleconf/fields:
	let rclconfig take care of field name lowercasing

2008-10-06 06:22 +0000  dockes    (26eae5316b88)

	* src/internfile/mh_exec.cpp, src/internfile/mh_exec.h,
	src/internfile/mimehandler.cpp, src/utils/execmd.cpp:
	Disable filters with missing helpers for the whole indexing pass

2008-10-04 14:26 +0000  dockes    (556c7fa5998c)

	* src/index/indexer.cpp, src/internfile/Filter.h,
	src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/internfile/mh_exec.cpp, src/internfile/mh_exec.h,
	src/internfile/mh_html.h, src/internfile/mh_mail.cpp,
	src/internfile/mh_mail.h, src/internfile/mh_mbox.cpp,
	src/internfile/mh_mbox.h, src/internfile/mh_text.h,
	src/internfile/mh_unknown.h, src/internfile/mimehandler.cpp,
	src/internfile/mimehandler.h:
	allow specifying format and charset for ext filters. Cache and reuse
	filters

2008-10-03 16:02 +0000  dockes    (6f5d875c2923)

	* src/utils/Makefile:
	*** empty log message ***

2008-10-03 16:02 +0000  dockes    (8d1e930cc9e2)

	* src/qtgui/preview_w.cpp:
	message

2008-10-03 08:19 +0000  dockes    (cf75be4a88cf)

	* src/common/rclconfig.cpp:
	*** empty log message ***

2008-10-03 08:09 +0000  dockes    (068bc565bf8b)

	* src/common/rclconfig.cpp, src/qtgui/guiutils.cpp,
	src/qtgui/guiutils.h, src/qtgui/plaintorich.cpp,
	src/qtgui/plaintorich.h, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp:
	add option to preview html instead of plain text

2008-10-03 06:23 +0000  dockes    (bd1a6a560e25)

	* src/internfile/internfile.cpp, src/internfile/internfile.h:
	arrange for setting aside an html version when working for preview

2008-10-03 06:17 +0000  dockes    (b10d8b6906a0)

	* src/internfile/mh_html.cpp, src/internfile/mh_html.h:
	save transcoded html for preview

2008-10-02 13:30 +0000  dockes    (f469cf040425)

	* src/internfile/mh_exec.cpp, src/internfile/mh_exec.h:
	comments

2008-09-30 12:38 +0000  dockes    (6ff81f690928)

	* src/index/recollindex.cpp, src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/idxthread.cpp, src/qtgui/idxthread.h, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb_p.h:
	added index format version checking

2008-09-29 11:33 +0000  dockes    (2691a6abf645)

	* src/kde/kioslave/recoll/kio_recoll.cpp,
	src/python/recoll/pyrecoll.cpp, src/qtgui/rclmain_w.cpp,
	src/qtgui/reslist.cpp, src/query/docseq.h, src/query/docseqdb.cpp,
	src/query/docseqdb.h, src/query/filtseq.cpp, src/query/filtseq.h,
	src/query/recollq.cpp, src/query/sortseq.cpp, src/query/sortseq.h,
	src/rcldb/rclquery.cpp, src/rcldb/rclquery.h,
	src/rcldb/searchdata.cpp, src/rcldb/searchdata.h:
	move stemlang from RclQuery to SearchData. Allow DocSequences to do
	the sorting/filtering themselves

2008-09-29 08:59 +0000  dockes    (00bc43d91e91)

	* src/kde/kioslave/recoll/kio_recoll.cpp,
	src/python/recoll/pyrecoll.cpp, src/qtgui/reslist.cpp,
	src/query/docseq.cpp, src/query/docseq.h, src/query/docseqdb.cpp,
	src/query/docseqdb.h, src/query/docseqhist.cpp,
	src/query/docseqhist.h, src/query/filtseq.cpp, src/query/filtseq.h,
	src/query/recollq.cpp, src/query/sortseq.cpp, src/query/sortseq.h,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/rclquery.cpp,
	src/rcldb/rclquery.h:
	doc.pc is now the only place where relevancy is stored

2008-09-29 07:13 +0000  dockes    (da809a196cc5)

	* src/qtgui/reslist.h:
	comments

2008-09-29 06:58 +0000  dockes    (dccf6cb38207)

	* src/python/recoll/pyrecoll.cpp, src/query/recollq.cpp,
	src/rcldb/rclquery.cpp, src/rcldb/rclquery.h,
	src/rcldb/searchdata.cpp, src/rcldb/searchdata.h:
	move sort params from searchdata to rclquery

2008-09-28 14:20 +0000  dockes    (0ce1cca8cac2)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain.ui,
	src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h,
	src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/qtgui/sort_w.cpp,
	src/query/filtseq.h, src/query/sortseq.cpp, src/query/sortseq.h,
	src/sampleconf/mimeconf:
	1st impl of catg filtering in reslist

2008-09-28 07:40 +0000  dockes    (5e29feefc554)

	* src/query/filtseq.cpp, src/query/filtseq.h: new file.
	* src/lib/Makefile, src/lib/mkMake, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h,
	src/query/docseq.h, src/query/docseqdb.cpp, src/query/docseqdb.h,
	src/query/filtseq.cpp, src/query/filtseq.h:
	rearranged some reslist/rclmain functions + add but not use filtseq
	code

2008-09-25 09:08 +0000  dockes    (8588b8cc05d1)

	* src/python/samples/rcldlkp.py, src/python/samples/rclmbox.py:
	*** empty log message ***

2008-09-25 09:07 +0000  dockes    (40e028763fab)

	* src/python/xesam/xesam-recoll-service:
	stopped after the hackfest

2008-09-25 06:17 +0000  dockes    (811009efeb96)

	* src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_tr.ts,
	src/qtgui/i18n/recoll_uk.ts, src/qtgui/i18n/recoll_xx.ts:
	*** empty log message ***

2008-09-25 06:14 +0000  dockes    (ce29702ab7cc)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts:
	*** empty log message ***

2008-09-25 06:02 +0000  dockes    (a065c833e601)

	* src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts:
	new russian/ukrainian translations

2008-09-25 06:00 +0000  dockes    (ba80af83d32f)

	* src/qtgui/advsearch_w.cpp, src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts, src/qtgui/rclmain_w.cpp,
	src/qtgui/reslist.cpp, src/qtgui/uiprefs.ui:
	fixed typos

2008-09-24 06:50 +0000  dockes    (695914bd6d5d)

	* src/kde/recoll_applet/0README.Recoll:
	*** empty log message ***

2008-09-24 06:44 +0000  dockes    (48bbf0a115cc)

	* src/query/recollq.cpp:
	command line args must be processed as local 8 bit

2008-09-24 06:34 +0000  dockes    (e90ac2ed62fe)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-09-24 05:35 +0000  dockes    (36e2522b06b2)

	* src/qtgui/main.cpp:
	command line args must be processed as local 8 bit

2008-09-24 05:31 +0000  dockes    (9b420f1d25f8)

	* src/qtgui/main.cpp:
	command line args must be processed as local 8 bit

2008-09-23 14:32 +0000  dockes    (cd440e5917d3)

	* src/configure, src/configure.ac:
	use $QMAKE not qmake when checking version

2008-09-16 10:19 +0000  dockes    (2bc72ad13a9b)

	* src/python/recoll/pyrecoll.cpp:
	fields, indexing i/f

2008-09-16 10:13 +0000  dockes    (ff10e8072c66)

	* src/qtgui/rclmain_w.cpp:
	have to setkeydir before calling internfile when opening

2008-09-16 08:18 +0000  dockes    (c78945994f7c)

	* src/python/samples/recollqsd.py: new file.
	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/internfile/internfile.cpp, src/python/recoll/pyrecoll.cpp,
	src/python/recoll/setup.py, src/python/samples/recollqsd.py,
	src/python/xesam/xesam-recoll-service, src/query/recollq.cpp,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/rcldb_p.h,
	src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h, src/rcldb/rclquery.cpp,
	src/rcldb/rclquery.h, src/rcldb/searchdata.cpp,
	src/rcldb/searchdata.h, src/sampleconf/fields:
	general field name handling cleanup + sort facility in rclquery

2008-09-16 08:13 +0000  dockes    (bd4c0f6fd812)

	* src/internfile/mh_mail.cpp:
	emit field for recipients

2008-09-15 08:03 +0000  dockes    (11ba5592559e)

	* src/sampleconf/fields, src/sampleconf/mimeconf,
	src/sampleconf/mimemap, src/sampleconf/mimeview:
	added rcltext/python/purple

2008-09-15 08:02 +0000  dockes    (8af411ff9bf6)

	* src/filters/rclpurple: new file.
	* src/filters/rclpurple, src/qtgui/mtpics/README,
	src/utils/base64.cpp, src/utils/smallut.cpp, src/utils/smallut.h,
	src/utils/transcode.cpp:
	*** empty log message ***

2008-09-15 07:55 +0000  dockes    (49401228a5ef)

	* src/filters/rclpython, src/qtgui/mtpics/pidgin.png,
	src/qtgui/mtpics/text-x-python.png: new file.
	* src/filters/rclpython, src/qtgui/mtpics/pidgin.png,
	src/qtgui/mtpics/text-x-python.png:
	*** empty log message ***

2008-09-13 12:56 +0000  dockes    (299644545ca0)

	* src/python/xesam/xesam-recoll-service: new file.
	* src/python/xesam/xesam-recoll-service:
	*** empty log message ***

2008-09-12 11:35 +0000  dockes    (5c85f26d124d)

	* src/sampleconf/mimeconf:
	index c code with the new rcltext generic filter

2008-09-12 11:30 +0000  dockes    (b8277032f494)

	* src/filters/rcltext: new file.
	* src/filters/rcltext:
	*** empty log message ***

2008-09-09 12:58 +0000  dockes    (a3afe9b35b57)

	* src/rcldb/rcldb.cpp:
	debug messages

2008-09-08 16:49 +0000  dockes    (a18ab0c682a4)

	* src/rcldb/rcldoc.cpp, src/sampleconf/fields: new file.
	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/internfile/internfile.cpp, src/lib/Makefile, src/lib/mkMake,
	src/python/recoll/pyrecoll.cpp, src/python/samples/recollq.py,
	src/qtgui/preview_w.cpp, src/qtgui/reslist.cpp, src/query/docseq.h,
	src/query/docseqdb.cpp, src/query/recollq.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/rcldoc.cpp, src/rcldb/rcldoc.h, src/recollinstall.in,
	src/sampleconf/fields:
	foundation work for configurable stored/indexed fields

2008-09-08 15:47 +0000  dockes    (861e4211280b)

	* src/rcldb/searchdata.h:
	unused args warning

2008-09-08 15:47 +0000  dockes    (3f6468e20038)

	* src/utils/smallut.cpp:
	test driver

2008-09-08 15:46 +0000  dockes    (581ee503208b)

	* src/ChangeLog:
	*** empty log message ***

2008-09-07 07:22 +0000  dockes    (dfe4dd53d0b9)

	* src/Makefile.in, src/qt4gui/uifrom3:
	cleanup

2008-09-07 07:08 +0000  dockes    (95c2a94321a3)

	* src/Makefile.in:
	cleaning

2008-09-07 06:43 +0000  dockes    (6294638c2504)

	* src/Makefile.in, src/VERSION, src/mk/localdefs.in:
	improved cleaning

2008-09-05 11:45 +0000  dockes    (8532ebb84453)

	* src/rcldb/rclquery.cpp:
	gcc4.3

2008-09-05 10:36 +0000  dockes    (2ada099a7545)

	* src/internfile/internfile.cpp, src/internfile/internfile.h:
	streamline and restructure the doctree-exploring loop to make it
	close to understandable

2008-09-05 10:34 +0000  dockes    (404aa368d498)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb_p.h, src/rcldb/rclquery.cpp:
	add relevancyrating to the metadata when querying

2008-09-05 10:33 +0000  dockes    (bc0210deda18)

	* src/internfile/myhtmlparse.cpp:
	accept iso date format (2008-09-05T11:55:32)

2008-09-05 10:26 +0000  dockes    (4b17d6defb3c)

	* src/doc/man/recollindex.1:
	*** empty log message ***

2008-09-01 20:39 +0000  dockes    (39ff03712b54)

	* src/sampleconf/mimeconf, src/sampleconf/mimeview:
	openxml types

2008-09-01 17:31 +0000  dockes    (f0fde685acc8)

	* src/filters/rclopxml:
	sort of works

2008-09-01 17:21 +0000  dockes    (dfd3281994ff)

	* src/filters/rclopxml: new file.
	* src/filters/rclopxml:
	almost, almost ok, except it outputs some formatting directives for ppt

2008-08-31 15:28 +0000  dockes    (7756d792699d [RECOLL_1_11_1exp1, RECOLL_1_11_1exp2, RECOLL_1_11_1exp])

	* packaging/debian/changelog, packaging/debian/control,
	packaging/debian/rules:
	*** empty log message ***

2008-08-30 12:21 +0000  dockes    (60b122f6f4d6)

	* src/rcldb/rcldb.cpp:
	typo in xfsn len fix

2008-08-30 07:38 +0000  dockes    (d516181ad7a0)

	* src/rcldb/rcldb.cpp:
	truncate simple file names at max term length

2008-08-30 07:34 +0000  dockes    (59326d99e18d)

	* src/utils/smallut.cpp:
	utf8truncate

2008-08-30 07:31 +0000  dockes    (8f5c5fba53d1)

	* src/utils/smallut.cpp, src/utils/smallut.h:
	utf8truncate

2008-08-29 14:12 +0000  dockes    (41c405565cd4)

	* tests/boolean/boolean.sh:
	or->OR

2008-08-29 13:05 +0000  dockes    (6454f838026e)

	* src/internfile/mh_mbox.cpp:
	accept weird date format in From lines used by (old?) tbird

2008-08-29 09:51 +0000  dockes    (b830b6d6b04d)

	* src/index/recollindex.cpp:
	be more informative when monitoring not configured

2008-08-28 15:44 +0000  dockes    (27a9bf47f895)

	* src/python/recoll/pyrecoll.cpp, src/python/samples/rcldlkp.py,
	src/python/samples/rclmbox.py, src/rcldb/rcldb.cpp,
	src/sampleconf/mimeview:
	*** empty log message ***

2008-08-28 15:43 +0000  dockes    (d28eac37bdd9)

	* src/query/wasatorcl.cpp, src/rcldb/searchdata.h:
	use a refcntr for the sub SearchData

2008-08-28 15:42 +0000  dockes    (417a8f1346df)

	* src/rcldb/searchdata.cpp:
	ensure that a negative clause is not first or only in list

2008-08-27 12:34 +0000  dockes    (658ca4b955c8)

	* src/python/recoll/pyrecoll.cpp:
	reorganize+traces

2008-08-27 12:12 +0000  dockes    (37791b8e66aa)

	* src/python/recoll/pyrecoll.cpp:
	doc

2008-08-26 13:50 +0000  dockes    (af43f86ffe99)

	* src/query/wasastringtoquery.cpp:
	make AND and OR case-sensitive

2008-08-26 13:47 +0000  dockes    (bda91f767e32)

	* src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h,
	src/query/wasatorcl.cpp:
	try to parse the whole of Xesam user language 0.95

2008-08-26 07:56 +0000  dockes    (6a17726c7e41)

	* src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py,
	src/python/samples/rcldlkp.py, src/python/samples/rclmbox.py,
	src/python/samples/recollq.py:
	renamed a few things

2008-08-26 07:38 +0000  dockes    (c97de92889e3)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	copy author back from data record to Doc

2008-08-26 07:36 +0000  dockes    (d6e27e630844)

	* src/python/samples/rcldlkp.py, src/python/samples/rclmbox.py,
	src/python/samples/recollq.py: new file.
	* src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py,
	src/python/samples/rcldlkp.py, src/python/samples/rclmbox.py,
	src/python/samples/recollq.py:
	*** empty log message ***

2008-08-26 07:33 +0000  dockes    (1d6816c32358)

	* src/rcldb/rcldoc.h:
	comments

2008-08-26 07:33 +0000  dockes    (4e86d4c4f3d9)

	* src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/reslist.cpp:
	move ipath computations from reslist to internfile

2008-08-26 07:31 +0000  dockes    (b44f4950a084)

	* src/internfile/mh_exec.cpp, src/internfile/mh_exec.h:
	implement skip_to_document

2008-08-25 16:12 +0000  dockes    (936499917659)

	* src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	opxml formats

2008-07-30 13:16 +0000  dockes    (0f1387a8a565)

	* src/rcldb/stemdb.cpp:
	fixed an innocuous but nasty bad string value test

2008-07-29 08:25 +0000  dockes    (a7888d48c2a6)

	* src/rcldb/rcldb.h, src/rcldb/rcldoc.h:
	comments

2008-07-29 06:25 +0000  dockes    (28ebb7cac39d)

	* src/index/indexer.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h,
	src/rcldb/rcldb_p.h, src/rcldb/rcldoc.h:
	use explicit parent udi term instead of Qterm structure to express
	parent-child relationship

2008-07-28 12:24 +0000  dockes    (5cb926be362f)

	* src/index/indexer.cpp, src/lib/Makefile, src/lib/mkMake,
	src/query/docseqhist.cpp, src/rcldb/pathhash.cpp,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/rcldoc.h,
	src/utils/Makefile, src/utils/fileudi.cpp:
	replaced path|ipath with unique doc id in rcldb i/f. Still depends
	on udi structure for parent/child

2008-07-28 10:20 +0000  dockes    (07bc933efb70)

	* src/utils/fileudi.cpp, src/utils/fileudi.h: new file.
	* src/utils/fileudi.cpp, src/utils/fileudi.h:
	*** empty log message ***

2008-07-28 08:42 +0000  dockes    (825cb66f8be3)

	* src/index/indexer.cpp, src/qtgui/uiprefs_w.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/rcldb.h, src/rcldb/rcldb_p.h, src/rcldb/rcldoc.h,
	src/utils/base64.h:
	begin i/f cleanup: opacify doc uptodate sig (size+mtime)

2008-07-04 09:29 +0000  dockes    (6551cb55fa98 [RECOLL_1_10_3])

	* src/qtgui/plaintorich.cpp:
	turn dbg off

2008-07-01 13:00 +0000  dockes    (19e926f99256)

	* src/ChangeLog, src/VERSION:
	1.10.3: checkpoint for 1.10 branch maintenance

2008-07-01 12:11 +0000  dockes    (910f409cb0be)

	* src/bincimapmime/convert.h:
	suppressed a few wasteful string-cstr conversions

2008-07-01 11:57 +0000  dockes    (913963d84bc5)

	* src/bincimapmime/convert.cc, src/bincimapmime/convert.h,
	src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-
	printheader.cc:
	suppressed a few wasteful string-cstr conversions

2008-07-01 11:51 +0000  dockes    (54f3a868fb92)

	* src/bincimapmime/address.cc, src/internfile/mh_mail.cpp,
	src/query/wasastringtoquery.cpp, src/query/xadump.cpp,
	src/rcldb/rcldb.cpp, src/rcldb/rclquery.cpp, src/rcldb/searchdata.h,
	src/utils/conftree.cpp, src/utils/conftree.h, src/utils/idfile.cpp,
	src/utils/mimeparse.cpp, src/utils/pathut.cpp, src/utils/pathut.h,
	src/utils/smallut.cpp:
	suppressed a few wasteful string-cstr conversions

2008-07-01 10:29 +0000  dockes    (3e1aa9958af4)

	* src/index/mimetype.cpp, src/index/mimetype.h,
	src/internfile/mh_mail.cpp:
	mh_mail now uses mimetype() to try and better identify
	application/octet-stream

2008-07-01 08:31 +0000  dockes    (3665315a4fdd)

	* src/ChangeLog:
	*** empty log message ***

2008-07-01 08:31 +0000  dockes    (928e08cb2cc8)

	* src/rcldb/rclquery.cpp, src/rcldb/rclquery.h,
	src/rcldb/rclquery_p.h:
	small cleanups and comments

2008-07-01 08:28 +0000  dockes    (e5847d808877)

	* src/rcldb/rcldb.h:
	comments

2008-07-01 08:27 +0000  dockes    (97cd50050ecf)

	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/preview_w.cpp, src/qtgui/preview_w.h,
	src/qtgui/reslist.cpp:
	cleaned up plaintorich. Now a proper subclassable class + highlights
	multiple groups, not just the first

2008-07-01 08:27 +0000  dockes    (3ef1709e5955)

	* src/qtgui/confgui/confguiindex.cpp:
	typo

2008-07-01 08:26 +0000  dockes    (f6ddabbf59a2)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/utils/pathut.cpp,
	src/utils/pathut.h:
	moved printableUrl() to pathut

2008-07-01 08:24 +0000  dockes    (413a8a75c5af)

	* src/python/recoll/pyrecoll.cpp:
	added abstract i/f

2008-06-17 11:43 +0000  dockes    (009a912c3daf)

	* src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py:
	basic functionality ok, more funcs and options needed

2008-06-13 18:23 +0000  dockes    (58a4b54fa103)

	* src/utils/refcntr.h:
	separated rcldb and rclquery

2008-06-13 18:22 +0000  dockes    (a52ef2510839)

	* src/rcldb/rcldb_p.h, src/rcldb/rclquery.cpp, src/rcldb/rclquery.h,
	src/rcldb/rclquery_p.h: new file.
	* src/kde/kioslave/recoll/kio_recoll.cpp, src/lib/Makefile,
	src/lib/mkMake, src/python/recoll/pyrecoll.cpp,
	src/python/recoll/setup.py, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/query/docseq.h, src/query/docseqdb.cpp,
	src/query/docseqdb.h, src/query/recollq.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/rcldb.h, src/rcldb/rcldb_p.h, src/rcldb/rclquery.cpp,
	src/rcldb/rclquery.h, src/rcldb/rclquery_p.h,
	src/rcldb/searchdata.h, src/utils/pathut.cpp, src/utils/refcntr.h:
	separated rcldb and rclquery

2008-06-13 18:14 +0000  dockes    (e39af9faad92)

	* src/common/autoconfig.h.in, src/configure, src/configure.ac,
	src/mk/Darwin, src/mk/FreeBSD, src/mk/Linux, src/mk/OpenBSD:
	move few things from the mk/sys files to autoconf

2008-06-10 06:30 +0000  dockes    (822b88ae3d1f)

	* src/qtgui/mtpics/License_sidux.txt: new file.
	* src/qtgui/mtpics/License_sidux.txt:
	*** empty log message ***

2008-06-09 09:14 +0000  dockes    (c9953f1a54ee)

	* src/filters/rclsiduxman, src/qtgui/mtpics/sidux-book.png: new file.
	* src/filters/rclsiduxman, src/qtgui/mtpics/sidux-book.png,
	src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	sidux manual support

2008-05-27 10:46 +0000  dockes    (2afb8b8ec073)

	* 1.10.2

2008-05-27 10:45 +0000  dockes    (62c7f8ba0eb8)

	* packaging/debian/changelog, packaging/rpm/recoll.spec,
	packaging/rpm/recollfedora.spec, packaging/rpm/recollmdk.spec,
	src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py,
	website/BUGS.txt, website/CHANGES.txt, website/download.html,
	website/features.html, website/index.html.en, website/index.html.fr,
	website/styles/style.css:
	1.10.2

2008-05-27 06:47 +0000  dockes    (b120e7a059cd [RECOLL_1_10_2])

	* src/README:
	*** empty log message ***

2008-05-27 06:46 +0000  dockes    (70d9bb153b58)

	* src/VERSION:
	1.10.2

2008-05-27 06:18 +0000  dockes    (305829599fb1)

	* src/utils/pathut.cpp:
	suppress warning

2008-05-27 05:40 +0000  dockes    (f611211f012a)

	* src/internfile/internfile.cpp:
	log message

2008-05-26 09:07 +0000  dockes    (dbb469971d76)

	* src/ChangeLog:
	*** empty log message ***

2008-05-21 07:21 +0000  dockes    (b1ee79619cca)

	* src/qtgui/advsearch_w.cpp, src/qtgui/confgui/confgui.cpp,
	src/qtgui/confgui/confgui.h, src/qtgui/preview_w.cpp,
	src/qtgui/reslist.cpp, src/utils/idfile.cpp:
	openSuse 11 compile issues

2008-05-20 10:09 +0000  dockes    (f047b0f61753)

	* src/qtgui/advsearch_w.cpp, src/rcldb/rcldb.cpp:
	*** empty log message ***

2008-05-20 10:09 +0000  dockes    (f2e76fada01c)

	* src/unac/unac.c:
	make strict gcc happy

2008-05-09 12:34 +0000  dockes    (be08db2c226e)

	* src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py: new
	file.
	* src/python/recoll/pyrecoll.cpp, src/python/recoll/setup.py:
	*** empty log message ***

2008-05-08 10:00 +0000  dockes    (2ff9f42dc279)

	* src/rcldb/searchdata.h:
	comments

2008-05-08 09:57 +0000  dockes    (bd6106d7f9ab)

	* src/utils/smallut.cpp, src/utils/smallut.h:
	*** empty log message ***

2008-05-08 09:31 +0000  dockes    (70f8eab20535)

	* src/ChangeLog:
	*** empty log message ***

2008-05-07 06:14 +0000  dockes    (f3d36126287d)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-05-05 20:31 +0000  dockes    (d271616c4b99)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-05-05 20:28 +0000  dockes    (02b1484f3eee)

	* src/doc/user/usermanual.sgml, src/qtgui/guiutils.cpp,
	src/qtgui/guiutils.h, src/qtgui/preview_w.cpp,
	src/qtgui/reslist.cpp, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp:
	allow setting query term highlight color in prefs

2008-05-05 16:38 +0000  dockes    (763298305d15)

	* src/qtgui/reslist.cpp:
	Edit -> Open in links

2008-05-05 13:13 +0000  dockes    (d2fc5c651024)

	* src/bincimapmime/mime-parsefull.cc:
	part data was sometimes truncated because of bad handling of
	consecutive mime boundaries. Most common symptom: error in base64
	decoding

2008-04-18 11:41 +0000  dockes    (32155182993c)

	* src/mk/localdefs.in:
	get CXXFLAGS from autoconf

2008-04-18 11:39 +0000  dockes    (72073f033a45)

	* src/query/xadump.cpp:
	xadump would sometimes dump core with -b

2008-04-18 11:38 +0000  dockes    (ef6566c2ac8e)

	* src/qtgui/preview_w.cpp:
	walking the search-term hits backwards would go forward

2008-04-18 11:37 +0000  dockes    (018890cfdbd7)

	* src/utils/Makefile, src/utils/base64.cpp, src/utils/readfile.cpp:
	base64 testing code

2008-02-19 08:02 +0000  dockes    (34b45c5acd1c)

	* src/qtgui/main.cpp:
	make first sort after -q work

2008-02-19 08:02 +0000  dockes    (1293fc15412b)

	* src/qtgui/rclmain_w.cpp:
	comments+debug

2008-02-19 07:41 +0000  dockes    (efbaeed44ee9)

	* src/rcldb/rcldb.cpp:
	traces

2008-02-11 10:21 +0000  dockes    (81923201adc7)

	* src/utils/idfile.cpp:
	hack for Mark B.: allow treating (single-message) mbox files as
	message/rfc822

2008-02-08 08:37 +0000  dockes    (ddcce838e7d0)

	* src/qtgui/i18n/recoll_de.ts:
	update by Frank Thieme

2008-02-05 10:45 +0000  dockes    (51a501984fd4)

	* src/sampleconf/mimeconf:
	*** empty log message ***

2008-02-03 16:24 +0000  dockes    (825bb43d67ca)

	* src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	rclsvg

2008-02-03 16:05 +0000  dockes    (81794c3a6d9e)

	* src/filters/rclsvg:
	*** empty log message ***

2008-02-03 16:04 +0000  dockes    (40c35a7fb1bb)

	* src/filters/rclsvg: new file.
	* src/filters/rclsvg:
	*** empty log message ***

2008-01-29 10:14 +0000  dockes    (fd74eae7e8b4 [RECOLL_1_10_1])

	* src/README:
	*** empty log message ***

2008-01-29 10:11 +0000  dockes    (a1fee09bfc3d)

	* src/rcldb/searchdata.h:
	m_haveWildCards was sometimes not init

2008-01-29 08:41 +0000  dockes    (ebc971754f92)

	* src/ChangeLog:
	*** empty log message ***

2008-01-24 09:34 +0000  dockes    (301425122a56)

	* src/qtgui/main.cpp:
	*** empty log message ***

2008-01-17 11:15 +0000  dockes    (af11c991aff3)

	* src/qtgui/idxthread.cpp, src/qtgui/idxthread.h,
	src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h:
	allow stopping indexing through menu action

2008-01-17 11:14 +0000  dockes    (4c108ac6227a)

	* src/query/wasastringtoquery.h:
	comment

2008-01-17 11:13 +0000  dockes    (7b2a9225dbef)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-01-16 11:14 +0000  dockes    (ddfe49735bc2)

	* src/query/wasatorcl.cpp, src/rcldb/searchdata.cpp,
	src/rcldb/searchdata.h:
	express query language OR chains as rcldb subqueries so that field
	specs will work inside them

2008-01-16 10:52 +0000  dockes    (6487da12360f)

	* src/ChangeLog, src/doc/user/usermanual.sgml:
	*** empty log message ***

2008-01-16 08:43 +0000  dockes    (592d1258c5e4)

	* src/rcldb/searchdata.cpp:
	splitString filename queries

2007-12-20 09:08 +0000  dockes    (e99decc750eb)

	* src/index/indexer.cpp, src/rcldb/rcldb.cpp:
	ensure that the names of files with filter errors get indexed anyway

2007-12-13 06:58 +0000  dockes    (de422a0df409)

	* src/aspell/rclaspell.cpp, src/bincimapmime/convert.h,
	src/common/rclconfig.cpp, src/common/rclinit.cpp,
	src/common/textsplit.cpp, src/common/unacpp.cpp,
	src/index/csguess.cpp, src/index/indexer.cpp,
	src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp,
	src/index/recollindex.cpp, src/internfile/htmlparse.cpp,
	src/internfile/mh_mail.cpp, src/internfile/mh_mbox.cpp,
	src/internfile/myhtmlparse.cpp, src/query/docseqhist.cpp,
	src/query/history.cpp, src/query/recollq.cpp,
	src/rcldb/pathhash.cpp, src/rcldb/rcldb.cpp, src/utils/base64.cpp,
	src/utils/conftree.cpp, src/utils/copyfile.cpp,
	src/utils/fstreewalk.cpp, src/utils/idfile.cpp,
	src/utils/mimeparse.cpp, src/utils/pathut.cpp,
	src/utils/readfile.cpp, src/utils/wipedir.cpp:
	gcc 4 compat, thanks to Kartik Mistry

2007-12-04 10:17 +0000  dockes    (f2bd537aad87)

	* src/qtgui/rclmain_w.cpp:
	directly open editor action choice dialog when user says so

2007-12-04 10:16 +0000  dockes    (9a289ca30889)

	* src/qtgui/uiprefs_w.cpp, src/utils/utf8iter.cpp:
	*** empty log message ***

2007-11-25 07:29 +0000  dockes    (3782c85019d4)

	* src/qtgui/i18n/recoll_tr.ts:
	*** empty log message ***

2007-11-24 16:51 +0000  dockes    (a41099c58ac0)

	* src/qtgui/i18n/recoll_fr.ts:
	accents

2007-11-24 16:43 +0000  dockes    (eecb572a0935)

	* src/qtgui/confgui/confguiindex.h:
	make conftoppanelw a q_object for translations to work

2007-11-24 10:41 +0000  dockes    (343184d41f3b)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts:
	*** empty log message ***

2007-11-21 16:34 +0000  dockes    (966333a903a9)

	* src/VERSION:
	*** empty log message ***

2007-11-21 16:34 +0000  dockes    (aed5f0389421)

	* 1.10.0

2007-11-21 16:34 +0000  dockes    (4918fce7a71a)

	* packaging/debian/changelog, packaging/debian/control,
	packaging/debian/menu, packaging/debian/rules,
	packaging/rpm/recoll.spec, packaging/rpm/recollfedora.spec,
	packaging/rpm/recollmdk.spec, tests/shared.sh, website/CHANGES.txt,
	website/devel.html, website/download.html, website/features.html,
	website/fr/features.html, website/index.html.en,
	website/index.html.fr, website/pics/index.html,
	website/styles/style.css:
	1.10.0

2007-11-21 14:15 +0000  dockes    (9c57d53ad305 [RECOLL_1_10_0])

	* src/qtgui/confgui/confguiindex.cpp, src/qtgui/main.cpp,
	src/qtgui/rclmain_w.cpp, src/qtgui/recoll.h:
	allow opening config gui if no index on first start

2007-11-21 09:42 +0000  dockes    (b1db39055b6d)

	* src/excludefile:
	*** empty log message ***

2007-11-21 09:34 +0000  dockes    (cca64d1bdb79)

	* src/utils/conftree.cpp:
	explicitly detect lines beginning with #

2007-11-21 09:00 +0000  dockes    (2cb85a4bd555)

	* src/INSTALL, src/README:
	*** empty log message ***

2007-11-16 15:20 +0000  dockes    (1f90c7302746)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-11-16 14:28 +0000  dockes    (d7f21b7adf20)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/internfile/internfile.cpp, src/internfile/mimehandler.cpp,
	src/internfile/mimehandler.h:
	indexedmimetypes

2007-11-16 12:21 +0000  dockes    (8221e8f1ce4f)

	* src/query/wasastringtoquery.cpp, src/query/wasatorcl.cpp:
	very small effort to look like xesam simple query

2007-11-16 07:34 +0000  dockes    (1398d49de21d)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-11-16 07:19 +0000  dockes    (eedcef5d56b7)

	* src/qtgui/i18n/recoll_tr.ts:
	*** empty log message ***

2007-11-15 18:44 +0000  dockes    (99e585288200)

	* src/qtgui/preview_w.cpp:
	comment

2007-11-15 18:39 +0000  dockes    (1ee213030954)

	* src/qt4gui/q3richtext_p.h: new file.
	* src/qt4gui/q3richtext_p.h, src/qt4gui/recoll.pro.in:
	qt4 movetoanchor

2007-11-15 18:39 +0000  dockes    (335db8a5c8cb)

	* src/qtgui/i18n/recoll_it.ts:
	*** empty log message ***

2007-11-15 18:34 +0000  dockes    (b3bb7b017f2a)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	moveToAnchor qt4

2007-11-15 18:05 +0000  dockes    (1fe63dd4f268)

	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/preview_w.cpp, src/qtgui/preview_w.h,
	src/qtgui/recoll.pro.in:
	finally got anchors to work. qt3

2007-11-15 18:05 +0000  dockes    (3158e59fd92e)

	* src/qtgui/reslist.cpp:
	*** empty log message ***

2007-11-13 18:42 +0000  dockes    (1a7029e2dd4e)

	* src/doc/man/recoll.1:
	*** empty log message ***

2007-11-13 18:40 +0000  dockes    (a5a94cfbfa7d)

	* src/query/recollq.cpp:
	keep format constant

2007-11-13 18:40 +0000  dockes    (09f615e1a305)

	* src/doc/user/usermanual.sgml:
	text

2007-11-13 18:39 +0000  dockes    (ce5a12bb92bd)

	* tests/badsuffs1/badsuffs1.txt, tests/html/html.txt,
	tests/mail/mail.txt, tests/ooff/ooff.txt, tests/special/special.txt:
	1.10+small changes in dataset

2007-11-13 15:35 +0000  dockes    (3a8d3f5af0e8)

	* src/ChangeLog:
	*** empty log message ***

2007-11-13 15:34 +0000  dockes    (d23b6a94f4c0)

	* src/VERSION:
	1.10.0?

2007-11-13 10:07 +0000  dockes    (f3338fa8cb4e)

	* src/doc/man/recollq.1: new file.
	* src/doc/man/recollq.1:
	*** empty log message ***

2007-11-09 18:48 +0000  dockes    (7859ad070bfc)

	* src/qtgui/i18n/recoll_fr.ts:
	1.9 ?

2007-11-09 18:07 +0000  dockes    (557d4b9ce60a)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_tr.ts, src/qtgui/i18n/recoll_uk.ts,
	src/qtgui/i18n/recoll_xx.ts:
	*** empty log message ***

2007-11-09 15:56 +0000  dockes    (2c201bdce017)

	* src/filters/rcltex:
	*** empty log message ***

2007-11-09 15:46 +0000  dockes    (7960c1dd4d0a)

	* src/kde/kioslave/recoll/00README.txt,
	src/kde/kioslave/recoll/Makefile,
	src/kde/kioslave/recoll/kio_recoll.cpp:
	get things to compile with recoll 1.9 and suse + kde 3.5.5

2007-11-09 13:44 +0000  dockes    (6196dbaf0aec)

	* src/sampleconf/mimeview:
	tex

2007-11-09 11:55 +0000  dockes    (10ce7112596d)

	* src/filters/rcltex: new file.
	* src/filters/rclmedia: deleted file.
	* src/filters/rclmedia, src/filters/rcltex, src/sampleconf/mimeconf,
	src/sampleconf/mimemap:
	added support for indexing TeX text

2007-11-09 11:54 +0000  dockes    (5a35ec87ecf2)

	* src/filters/rclid3:
	comments

2007-11-08 09:35 +0000  dockes    (bdde14acf3bd)

	* src/query/recollq.h: new file.
	* src/lib/Makefile, src/lib/mkMake, src/qtgui/main.cpp,
	src/qtgui/recoll.pro.in, src/query/Makefile, src/query/recollq.cpp,
	src/query/recollq.h:
	allow recoll to be used as a recollq driver

2007-11-08 09:34 +0000  dockes    (06e94674b8e2)

	* src/utils/execmd.cpp:
	include pthread

2007-11-08 09:34 +0000  dockes    (d6e84478935d)

	* src/rcldb/stemdb.cpp:
	debug

2007-11-08 09:32 +0000  dockes    (9f3349e7358b)

	* src/qt4gui/recoll.pro.in:
	turkish

2007-11-08 09:31 +0000  dockes    (cd6b8b7d2a36)

	* src/mk/OpenBSD:
	*** empty log message ***

2007-11-08 07:54 +0000  dockes    (6e986b6d1e64)

	* src/query/recollq.cpp:
	add -b option to only output url list

2007-11-06 11:55 +0000  dockes    (2b0e2fc0dd88)

	* src/qtgui/i18n/recoll_tr.ts: new file.
	* src/qtgui/i18n/recoll_tr.ts:
	*** empty log message ***

2007-10-27 16:40 +0000  dockes    (e8ac0b8f6c46)

	* src/rcldb/rcldb.cpp:
	comment

2007-10-27 08:40 +0000  dockes    (2ccaf4ef243e)

	* src/ChangeLog, src/qtgui/i18n/recoll_de.ts,
	src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-10-27 08:40 +0000  dockes    (e647e4592daa)

	* src/filters/rcluncomp:
	allow uncompressing suffix-less files

2007-10-27 08:40 +0000  dockes    (dc6d97a86685)

	* src/internfile/internfile.cpp:
	use pcSubst

2007-10-27 08:39 +0000  dockes    (54ba3ef75586)

	* src/rcldb/rcldb.cpp:
	adjust MatchDecider return type according to xapian version

2007-10-27 07:06 +0000  dockes    (54b798d7fa02)

	* src/qtgui/i18n/recoll_xx.ts:
	sent to ning

2007-10-26 10:42 +0000  dockes    (acfa4e6c24ba)

	* src/doc/user/usermanual.sgml:
	index config gui

2007-10-25 15:51 +0000  dockes    (12d12311134a)

	* src/qtgui/confgui/confguiindex.cpp:
	labels

2007-10-25 15:51 +0000  dockes    (2a1d29582446)

	* src/qtgui/confgui/confgui.cpp:
	use new style combobox constructor

2007-10-25 15:50 +0000  dockes    (8b45d32c605c)

	* src/internfile/mh_exec.h:
	cleanup

2007-10-25 08:04 +0000  dockes    (0bf8540b6c22)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-10-25 07:27 +0000  dockes    (5d57c38993af)

	* src/query/recollq.cpp, src/query/wasatorcl.cpp:
	added option to query language for filtering on directory

2007-10-25 07:09 +0000  dockes    (d1adc7006d08)

	* src/rcldb/rcldb.cpp:
	add filter topdir to query description

2007-10-24 15:38 +0000  dockes    (5f1863c33239)

	* src/rcldb/rcldb.cpp:
	use a Xapian MatchDecider to filter on dir path

2007-10-24 08:42 +0000  dockes    (2d337545271f)

	* src/rcldb/rcldb.cpp:
	make filter a xapian::MatchDecider, don't change the mechanism

2007-10-19 15:25 +0000  dockes    (935a92d6db39)

	* src/qtgui/ssearch_w.cpp, src/utils/smallut.cpp:
	consider cr and lf as whitespace when splitting strings

2007-10-19 14:31 +0000  dockes    (bb88b5f4fc25)

	* src/qtgui/confgui/confgui.h, src/qtgui/confgui/confguiindex.cpp:
	small sizing adjustments

2007-10-18 10:39 +0000  dockes    (f34f0260a62a)

	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/preview_w.cpp, src/qtgui/reslist.cpp:
	let plaintorich do the chunking, easier to make sure we don't
	confuse textedit by cutting inside a tag

2007-10-18 10:15 +0000  dockes    (7c46f29559fe)

	* src/qtgui/confgui/confguiindex.cpp:
	qt3

2007-10-17 16:12 +0000  dockes    (41f711edeb0b)

	* src/qtgui/plaintorich.cpp:
	replaced utf8 cgj with good ole bel

2007-10-17 11:40 +0000  dockes    (102fcc4aa169)

	* src/internfile/mh_mail.cpp, src/internfile/mh_mail.h,
	src/utils/mimeparse.cpp:
	text/plain attachments were not transcoded to utf-8

2007-10-17 09:57 +0000  dockes    (dd33128e3a59)

	* src/common/rclconfig.cpp, src/internfile/internfile.cpp:
	*** empty log message ***

2007-10-15 13:08 +0000  dockes    (0a095e89bfb9)

	* src/kde/recoll_applet/0README.Recoll, src/kde/recoll_applet/AUTHORS,
	src/kde/recoll_applet/COPYING, src/kde/recoll_applet/ChangeLog,
	src/kde/recoll_applet/Doxyfile, src/kde/recoll_applet/INSTALL,
	src/kde/recoll_applet/Makefile.am,
	src/kde/recoll_applet/Makefile.cvs,
	src/kde/recoll_applet/Makefile.in, src/kde/recoll_applet/NEWS,
	src/kde/recoll_applet/README, src/kde/recoll_applet/TODO,
	src/kde/recoll_applet/acinclude.m4,
	src/kde/recoll_applet/aclocal.m4,
	src/kde/recoll_applet/admin/Doxyfile.am,
	src/kde/recoll_applet/admin/Doxyfile.global,
	src/kde/recoll_applet/admin/Makefile.common,
	src/kde/recoll_applet/admin/acinclude.m4.in,
	src/kde/recoll_applet/admin/am_edit,
	src/kde/recoll_applet/admin/bcheck.pl,
	src/kde/recoll_applet/admin/compile,
	src/kde/recoll_applet/admin/conf.change.pl,
	src/kde/recoll_applet/admin/config.guess,
	src/kde/recoll_applet/admin/config.pl,
	src/kde/recoll_applet/admin/config.sub,
	src/kde/recoll_applet/admin/configure.in.bot.end,
	src/kde/recoll_applet/admin/configure.in.min,
	src/kde/recoll_applet/admin/cvs.sh,
	src/kde/recoll_applet/admin/debianrules,
	src/kde/recoll_applet/admin/depcomp,
	src/kde/recoll_applet/admin/deps.am, src/kde/recoll_applet/admin
	/detect-autoconf.pl, src/kde/recoll_applet/admin/doxygen.sh,
	src/kde/recoll_applet/admin/install-sh,
	src/kde/recoll_applet/admin/libtool.m4.in,
	src/kde/recoll_applet/admin/ltmain.sh,
	src/kde/recoll_applet/admin/missing,
	src/kde/recoll_applet/admin/mkinstalldirs,
	src/kde/recoll_applet/admin/nmcheck,
	src/kde/recoll_applet/admin/oldinclude.m4.in,
	src/kde/recoll_applet/admin/pkg.m4.in,
	src/kde/recoll_applet/admin/ylwrap,
	src/kde/recoll_applet/config.h.in, src/kde/recoll_applet/configure,
	src/kde/recoll_applet/configure.files,
	src/kde/recoll_applet/configure.in,
	src/kde/recoll_applet/configure.in.in,
	src/kde/recoll_applet/doc/Makefile.am,
	src/kde/recoll_applet/doc/Makefile.in,
	src/kde/recoll_applet/doc/en/Makefile.am,
	src/kde/recoll_applet/doc/en/Makefile.in,
	src/kde/recoll_applet/doc/en/index.docbook,
	src/kde/recoll_applet/po/Makefile.am,
	src/kde/recoll_applet/po/Makefile.in,
	src/kde/recoll_applet/src/Makefile.am,
	src/kde/recoll_applet/src/Makefile.in,
	src/kde/recoll_applet/src/kpixmapcombo.cpp,
	src/kde/recoll_applet/src/kpixmapcombo.h,
	src/kde/recoll_applet/src/recoll_applet.cpp,
	src/kde/recoll_applet/src/recoll_applet.desktop,
	src/kde/recoll_applet/src/recoll_applet.h,
	src/kde/recoll_applet/src/recoll_applet.lsm,
	src/kde/recoll_applet/stamp-h.in, src/kde/recoll_applet/subdirs: new
	file.
	* src/kde/recoll_applet/0README.Recoll, src/kde/recoll_applet/AUTHORS,
	src/kde/recoll_applet/COPYING, src/kde/recoll_applet/ChangeLog,
	src/kde/recoll_applet/Doxyfile, src/kde/recoll_applet/INSTALL,
	src/kde/recoll_applet/Makefile.am,
	src/kde/recoll_applet/Makefile.cvs,
	src/kde/recoll_applet/Makefile.in, src/kde/recoll_applet/NEWS,
	src/kde/recoll_applet/README, src/kde/recoll_applet/TODO,
	src/kde/recoll_applet/acinclude.m4,
	src/kde/recoll_applet/aclocal.m4,
	src/kde/recoll_applet/admin/Doxyfile.am,
	src/kde/recoll_applet/admin/Doxyfile.global,
	src/kde/recoll_applet/admin/Makefile.common,
	src/kde/recoll_applet/admin/acinclude.m4.in,
	src/kde/recoll_applet/admin/am_edit,
	src/kde/recoll_applet/admin/bcheck.pl,
	src/kde/recoll_applet/admin/compile,
	src/kde/recoll_applet/admin/conf.change.pl,
	src/kde/recoll_applet/admin/config.guess,
	src/kde/recoll_applet/admin/config.pl,
	src/kde/recoll_applet/admin/config.sub,
	src/kde/recoll_applet/admin/configure.in.bot.end,
	src/kde/recoll_applet/admin/configure.in.min,
	src/kde/recoll_applet/admin/cvs.sh,
	src/kde/recoll_applet/admin/debianrules,
	src/kde/recoll_applet/admin/depcomp,
	src/kde/recoll_applet/admin/deps.am, src/kde/recoll_applet/admin
	/detect-autoconf.pl, src/kde/recoll_applet/admin/doxygen.sh,
	src/kde/recoll_applet/admin/install-sh,
	src/kde/recoll_applet/admin/libtool.m4.in,
	src/kde/recoll_applet/admin/ltmain.sh,
	src/kde/recoll_applet/admin/missing,
	src/kde/recoll_applet/admin/mkinstalldirs,
	src/kde/recoll_applet/admin/nmcheck,
	src/kde/recoll_applet/admin/oldinclude.m4.in,
	src/kde/recoll_applet/admin/pkg.m4.in,
	src/kde/recoll_applet/admin/ylwrap,
	src/kde/recoll_applet/config.h.in, src/kde/recoll_applet/configure,
	src/kde/recoll_applet/configure.files,
	src/kde/recoll_applet/configure.in,
	src/kde/recoll_applet/configure.in.in,
	src/kde/recoll_applet/doc/Makefile.am,
	src/kde/recoll_applet/doc/Makefile.in,
	src/kde/recoll_applet/doc/en/Makefile.am,
	src/kde/recoll_applet/doc/en/Makefile.in,
	src/kde/recoll_applet/doc/en/index.docbook,
	src/kde/recoll_applet/po/Makefile.am,
	src/kde/recoll_applet/po/Makefile.in,
	src/kde/recoll_applet/src/Makefile.am,
	src/kde/recoll_applet/src/Makefile.in,
	src/kde/recoll_applet/src/kpixmapcombo.cpp,
	src/kde/recoll_applet/src/kpixmapcombo.h,
	src/kde/recoll_applet/src/recoll_applet.cpp,
	src/kde/recoll_applet/src/recoll_applet.desktop,
	src/kde/recoll_applet/src/recoll_applet.h,
	src/kde/recoll_applet/src/recoll_applet.lsm,
	src/kde/recoll_applet/stamp-h.in, src/kde/recoll_applet/subdirs:
	*** empty log message ***

2007-10-14 16:07 +0000  dockes    (aea3ceac265d)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-10-09 14:08 +0000  dockes    (008fb8da2cfe)

	* src/qt4gui/recoll.pro.in, src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h, src/qtgui/idxthread.cpp,
	src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h, src/qtgui/recoll.pro.in:
	indexing confgui seems to sort of work

2007-10-09 11:08 +0000  dockes    (8e165638db48)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h, src/qtgui/confgui/conflinkrcl.h,
	src/qtgui/confgui/main.cpp:
	*** empty log message ***

2007-10-09 09:43 +0000  dockes    (bda697547b28)

	* src/common/rclconfig.cpp, src/common/rclconfig.h:
	modified mechanism for confgui updates

2007-10-09 09:40 +0000  dockes    (314568630e50)

	* src/utils/conftree.h:
	*** empty log message ***

2007-10-07 20:22 +0000  dockes    (a4407de529dc)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confguiindex.cpp:
	*** empty log message ***

2007-10-06 07:44 +0000  dockes    (e12dcaba9422)

	* src/sampleconf/mimeconf:
	*** empty log message ***

2007-10-06 07:26 +0000  dockes    (8c03c83a6353)

	* src/ChangeLog, src/INSTALL, src/README, src/VERSION:
	*** empty log message ***

2007-10-06 07:13 +0000  dockes    (80c8e77d75e3)

	* src/qtgui/i18n/recoll_xx.ts: new file.
	* src/doc/user/usermanual.sgml, src/qtgui/i18n/recoll_xx.ts:
	*** empty log message ***

2007-10-05 14:00 +0000  dockes    (3f47738c7b7f)

	* src/query/wasatorcl.cpp:
	add rclcat prefix to query languages + adapt find_applet to use it

2007-10-05 08:03 +0000  dockes    (eb9ae456f872)

	* src/qtgui/main.cpp, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h:
	add cmd line option to run query when starting

2007-10-04 12:26 +0000  dockes    (479712bd069b)

	* src/rcldb/searchdata.cpp:
	when search includes composite spans + other terms, increase slack
	instead of switching to word split

2007-10-04 12:21 +0000  dockes    (67c23cd41df2)

	* src/common/rclconfig.cpp, src/common/textsplit.cpp,
	src/common/textsplit.h:
	make cjk ngramlen configurable

2007-10-04 12:20 +0000  dockes    (e9e128bf43ab)

	* src/index/indexer.cpp:
	trace

2007-10-03 14:53 +0000  dockes    (b8852ea7a80c)

	* src/internfile/Makefile, src/internfile/mh_mbox.cpp,
	src/internfile/mh_mbox.h:
	Improve From_ line detection

2007-10-02 14:25 +0000  dockes    (3379ab8d9013)

	* src/doc/user/docbook.css, src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-10-02 14:22 +0000  dockes    (29a402a23d12)

	* src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	a few more image files

2007-10-02 14:00 +0000  dockes    (d0e7241eeb0e)

	* src/filters/rclflac, src/filters/rclogg: new file.
	* src/filters/rcljpeg: deleted file.
	* src/filters/rclflac, src/filters/rcljpeg, src/filters/rclogg:
	*** empty log message ***

2007-10-02 13:56 +0000  dockes    (e180ca729bea)

	* src/filters/rclimg:
	comments,GPL

2007-10-02 11:39 +0000  dockes    (7777fdc5d30a)

	* src/common/rclconfig.cpp, src/common/textsplit.cpp,
	src/common/textsplit.h:
	add flag to disable cjk processing

2007-10-01 17:56 +0000  dockes    (29b1aeb75d23)

	* src/filters/rclimg: new file.
	* src/filters/rclimg:
	initial version from Cedric Scott

2007-10-01 15:57 +0000  dockes    (b3aeb47d6a43)

	* src/utils/conftree.cpp:
	added updates/erase tests

2007-10-01 06:35 +0000  dockes    (b29617933c16)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp, src/qtgui/confgui/main.cpp,
	src/qtgui/confgui/trconf.pro:
	qt4 port

2007-10-01 06:19 +0000  dockes    (78068b236681)

	* src/VERSION, src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/qtgui/confgui/confgui.cpp, src/utils/conftree.cpp,
	src/utils/conftree.h:
	config update enabling functions

2007-09-29 09:06 +0000  dockes    (e38c26097ece)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h, src/qtgui/confgui/conflinkrcl.h,
	src/qtgui/confgui/main.cpp, src/qtgui/confgui/trconf.pro:
	*** empty log message ***

2007-09-27 15:47 +0000  dockes    (9ac07bf91591)

	* src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h, src/qtgui/confgui/conflinkrcl.h:
	new file.
	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/confguiindex.cpp,
	src/qtgui/confgui/confguiindex.h, src/qtgui/confgui/conflinkrcl.h,
	src/qtgui/confgui/main.cpp, src/qtgui/confgui/trconf.pro:
	*** empty log message ***

2007-09-27 11:03 +0000  dockes    (436530279a09)

	* src/utils/conftree.h:
	comment

2007-09-27 11:02 +0000  dockes    (a466c387c485)

	* src/utils/conftree.cpp, src/utils/conftree.h:
	avoid adding unneeded entries in confstack. fix erase-add resulting
	in duplicate

2007-09-26 12:16 +0000  dockes    (8e1e4edb4f4a)

	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/main.cpp, src/qtgui/confgui/trconf.pro: new file.
	* src/qtgui/confgui/confgui.cpp, src/qtgui/confgui/confgui.h,
	src/qtgui/confgui/main.cpp, src/qtgui/confgui/trconf.pro:
	*** empty log message ***

2007-09-22 08:51 +0000  dockes    (8072f3278663)

	* src/common/textsplit.cpp, src/utils/utf8iter.h:
	include assert.h when needed

2007-09-21 16:45 +0000  dockes    (d85479652341)

	* src/INSTALL, src/README, src/VERSION, src/doc/user/usermanual.sgml,
	src/qtgui/recoll.pro.in:
	*** empty log message ***

2007-09-20 12:22 +0000  dockes    (28a9c536ebba)

	* src/common/textsplit.cpp:
	logs

2007-09-20 08:45 +0000  dockes    (415256bd7508)

	* src/common/textsplit.cpp, src/common/textsplit.h,
	src/utils/utf8iter.h:
	initial cjk support

2007-09-20 08:43 +0000  dockes    (66200ff61f31)

	* src/rcldb/searchdata.cpp:
	comments,formatting

2007-09-20 08:42 +0000  dockes    (750b59dea1e9)

	* src/qtgui/rclmain_w.cpp:
	restore cursor if cant start query

2007-09-18 20:35 +0000  dockes    (1d01904f2b55)

	* src/common/textsplit.cpp, src/common/textsplit.h:
	use m_ prefix for members

2007-09-18 20:34 +0000  dockes    (49381b7f40f6)

	* src/qt4gui/recoll.pro.in:
	add recoll_xx.ts

2007-09-18 07:01 +0000  dockes    (7dea06d57ada)

	* src/qtgui/i18n/recoll_it.ts:
	changes by Giovanni Cannizzaro

2007-09-11 08:23 +0000  dockes    (615a70a64b94 [RECOLL_1_9_0])

	* src/desktop/recoll-searchgui.desktop:
	desktop file corrected as per Kartik Mistry patch

2007-09-10 05:44 +0000  dockes    (78b0c9bd47bb)

	* src/qtgui/i18n/recoll_fr.ts:
	long menu labels cause pbs at least on macosx

2007-09-08 17:26 +0000  dockes    (ef2964b2e49e)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-09-08 17:25 +0000  dockes    (000b2b01844d)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h,
	src/qtgui/preview_w.cpp, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp:
	change highlight text size limit to configurable value

2007-09-08 17:21 +0000  dockes    (c0ab1e961f0a)

	* src/qtgui/viewaction_w.cpp:
	added missing space in string

2007-09-08 17:21 +0000  dockes    (f70ce9c4c753)

	* src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp:
	renamed preferencesQuery_PrefsAction to queryPrefsAction

2007-09-08 17:19 +0000  dockes    (17eefeb77500 [RECOLL_1_9_1cjk2, RECOLL_1_9_1cjk1])

	* src/qtgui/plaintorich.cpp:
	comment

2007-09-08 09:44 +0000  dockes    (8aabe9bc2d85)

	* src/utils/readfile.cpp:
	small pb in solaris fix

2007-09-08 08:07 +0000  dockes    (4b862559adbb)

	* src/mk/SunOS, src/utils/pathut.cpp, src/utils/readfile.cpp:
	SunOS 2.8 fixes

2007-09-07 14:58 +0000  dockes    (f0b17af1f5d7)

	* src/configure, src/configure.ac:
	always add lz to lxapian

2007-09-07 12:39 +0000  dockes    (b10ac30fe130)

	* website/CHANGES.txt:
	*** empty log message ***

2007-09-07 08:05 +0000  dockes    (f031116372e8)

	* src/rcldb/rcldb.cpp:
	improve purge error message printing

2007-09-07 08:04 +0000  dockes    (276b259f9ec6)

	* src/qtgui/i18n/recoll_it.ts:
	new 1.9 translation by C. Rigamont

2007-09-07 08:04 +0000  dockes    (450e1342467c)

	* src/sampleconf/mimemap:
	fix wordperfect spurious extensions

2007-09-07 08:03 +0000  dockes    (624a100107be [RECOLL_1_9_1cjk])

	* website/BUGS.txt:
	update xapian near to 1.0.2

2007-09-07 08:03 +0000  dockes    (a0d360caf71e)

	* website/copydocs:
	to_mac

2007-09-01 19:12 +0000  dockes    (3ebdb5af664f)

	* src/qt4gui/recoll.pro.in, src/qtgui/i18n/recoll_de.ts,
	src/qtgui/recoll.pro.in:
	*** empty log message ***

2007-08-31 09:04 +0000  dockes    (32533d0d11d0)

	* src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h:
	pressing CR in advsearch would run query twice because of start
	autodefault

2007-08-31 07:23 +0000  dockes    (bb17fa4cfaca)

	* src/qtgui/images/d_firstpage.png, src/qtgui/images/firstpage.png:
	new file.
	* src/qtgui/images/d_firstpage.png, src/qtgui/images/firstpage.png:
	*** empty log message ***

2007-08-30 10:11 +0000  dockes    (c75b5f42b33d)

	* src/INSTALL, src/README, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-08-30 10:00 +0000  dockes    (7c4ccceae2a7)

	* website/BUGS.txt, website/CHANGES.txt, website/download.html,
	website/features.html:
	*** empty log message ***

2007-08-30 09:01 +0000  dockes    (687cad7b46de)

	* src/doc/user/usermanual.sgml, src/index/indexer.cpp,
	src/index/indexer.h, src/index/rclmonrcv.cpp,
	src/sampleconf/recoll.conf.in, src/utils/fstreewalk.cpp,
	src/utils/fstreewalk.h:
	add followLinks option

2007-08-30 08:39 +0000  dockes    (6af3a2216074)

	* src/doc/user/usermanual.sgml:
	add followLinks option

2007-08-28 08:12 +0000  dockes    (6385c6a9c88e)

	* src/index/indexer.cpp:
	allow symlinks in topdirs

2007-08-28 08:08 +0000  dockes    (a3df89087437)

	* src/utils/fstreewalk.cpp, src/utils/fstreewalk.h:
	follow top (entry) symlinks even if nofollow is set

2007-08-28 08:07 +0000  dockes    (19ac4f90b7e7)

	* src/internfile/internfile.cpp:
	error msg

2007-08-26 13:52 +0000  dockes    (fa08f95a4d95)

	* src/doc/user/usermanual.sgml:
	add wordperfect ext app info

2007-08-26 13:34 +0000  dockes    (ac877cc2e3ad)

	* src/filters/rclwpd: new file.
	* src/filters/rclwpd, src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	added wordperfect support

2007-08-26 13:34 +0000  dockes    (7472abcdbc4a)

	* src/sampleconf/recoll.conf.in:
	add commented entries for daem*

2007-08-07 08:45 +0000  dockes    (ad6dad566902)

	* src/qtgui/rclmain_w.cpp:
	*** empty log message ***

2007-08-07 08:42 +0000  dockes    (2040417c73e4)

	* src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp:
	qt3 adjustments

2007-08-07 08:26 +0000  dockes    (55c7dc79c190)

	* src/aspell/rclaspell.cpp, src/sampleconf/recoll.conf.in:
	*** empty log message ***

2007-08-05 05:55 +0000  dockes    (3acd192c01d1)

	* src/utils/conftree.h:
	comments

2007-08-05 05:49 +0000  dockes    (afee970ae166)

	* src/utils/conftree.h:
	*** empty log message ***

2007-08-04 07:22 +0000  dockes    (9afb2050f462)

	* src/utils/conftree.cpp, src/utils/conftree.h:
	Allow updates in confstacks

2007-08-03 07:50 +0000  dockes    (28ae2e572dcf)

	* src/utils/Makefile, src/utils/conftree.cpp, src/utils/conftree.h:
	have conftree preserve comments and ordering

2007-08-02 06:33 +0000  dockes    (4da8b2dbcaa6)

	* src/qt4gui/recoll.qrc, src/qtgui/rclmain.ui,
	src/qtgui/rclmain_w.cpp, src/qtgui/recoll.pro.in,
	src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	added gotofirstpage action

2007-08-01 10:04 +0000  dockes    (c91831fab8a0)

	* src/qtgui/guiutils.h, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h, src/qtgui/ssearch_w.cpp,
	src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h,
	src/rcldb/stemdb.cpp, src/rcldb/stemdb.h:
	Allow stem expansion for several (all) stemming languages at a time

2007-08-01 07:55 +0000  dockes    (5d13d87e6e14)

	* src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp,
	src/qtgui/rclmain_w.h, src/qtgui/uiprefs_w.cpp,
	src/qtgui/uiprefs_w.h:
	allow setting stemlang from prefs menu

2007-07-20 14:50 +0000  dockes    (573069870fd4)

	* src/configure, src/configure.ac:
	check for uic3 during qt4 configure

2007-07-20 14:43 +0000  dockes    (32ae47904cca)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	preview: dont search for anchors if we have none

2007-07-20 14:32 +0000  dockes    (eac614c9a725)

	* src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	*** empty log message ***

2007-07-20 11:44 +0000  dockes    (6133f68f886f)

	* src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h:
	factorize previewNext/Prev

2007-07-20 11:38 +0000  dockes    (4dfc3942351a)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h:
	more preview window interface cleanup

2007-07-20 10:55 +0000  dockes    (d57bd5e6cb2d)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h,
	src/qtgui/rclmain_w.cpp:
	cleaned up preview window interface

2007-07-14 16:53 +0000  dockes    (35087158d61f)

	* src/common/autoconfig.h.in, src/configure, src/configure.ac,
	src/mk/AIX, src/mk/Darwin, src/mk/Linux, src/mk/SunOS,
	src/utils/execmd.cpp:
	handle putenv arg constness in configure

2007-07-13 10:24 +0000  dockes    (98774298901d)

	* src/INSTALL, src/README, src/doc/man/recoll.conf.5,
	src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts,
	website/BUGS.txt, website/CHANGES.txt, website/download.html:
	*** empty log message ***

2007-07-13 07:17 +0000  dockes    (d2c5a6098bbd)

	* src/doc/user/docbook.css, src/doc/user/usermanual.sgml:
	1.9 changes

2007-07-13 07:10 +0000  dockes    (2569115962c0)

	* src/qtgui/uiprefs.ui:
	msg

2007-07-13 07:00 +0000  dockes    (2bd0371b8e12)

	* src/qtgui/plaintorich.cpp:
	adjust term beacon for better finding ?

2007-07-13 06:31 +0000  dockes    (f7d41e95166c)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h,
	src/qtgui/rclmain_w.cpp:
	better handle preview close during load

2007-07-12 17:28 +0000  dockes    (5b6f1204d077)

	* src/rcldb/rcldb.cpp:
	*** empty log message ***

2007-07-12 17:13 +0000  dockes    (9345d3db5ff2)

	* src/filters/rclpdf:
	dont use anchored regexps for stripping whitespace, ubuntu mawk
	ignores the anchor

2007-07-12 13:41 +0000  dockes    (1fb4e582fe5b)

	* src/utils/cancelcheck.h:
	*** empty log message ***

2007-07-12 10:53 +0000  dockes    (eb352f6c17ae)

	* src/index/rclmonrcv.cpp, src/utils/fstreewalk.cpp,
	src/utils/fstreewalk.h:
	monitor: dont add watch on created dir if in skippedXXX

2007-07-12 10:13 +0000  dockes    (d55862505674)

	* src/ChangeLog, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h,
	src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp:
	fix v1.8 default format string if we find it

2007-07-12 08:34 +0000  dockes    (b69b14b67cd2)

	* src/rcldb/rcldb.cpp:
	use uniform code for Xapian exception catching + catch a few more,
	esp. databaseModified cases

2007-07-12 08:23 +0000  dockes    (ffe9a12f9237)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/reslist.cpp,
	src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp:
	icon now part of paragraph format

2007-07-11 10:05 +0000  dockes    (a8d4da32f304)

	* src/qtgui/reslist.cpp:
	dont create popup in irrelevant areas

2007-07-10 09:24 +0000  dockes    (993776d69bab)

	* src/sampleconf/recoll.conf.in:
	idxflushnb default 10

2007-07-10 09:23 +0000  dockes    (e800d4e4d1de)

	* src/doc/man/recollindex.1, src/index/indexer.cpp,
	src/index/indexer.h, src/index/recollindex.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/rcldb.h:
	recollindex -l

2007-07-10 05:44 +0000  dockes    (7247df0336ab)

	* src/sampleconf/recoll.conf.in:
	*** empty log message ***

2007-07-09 17:21 +0000  dockes    (ef8eddb1b94a)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-07-01 06:52 +0000  dockes    (03cb707d9122)

	* src/filters/rclid3: new file.
	* src/filters/rclid3, src/sampleconf/mimeconf, src/sampleconf/mimemap,
	src/sampleconf/mimeview:
	audio tags support improvement: flac+ogg. use FORPREVIEW

2007-06-26 17:07 +0000  dockes    (ec5b66db8aea)

	* src/qtgui/i18n/recoll_de.ts, src/qtgui/i18n/recoll_fr.ts,
	src/qtgui/i18n/recoll_it.ts, src/qtgui/i18n/recoll_ru.ts,
	src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-06-26 16:58 +0000  dockes    (34658791397a)

	* *** empty log message ***

2007-06-26 16:58 +0000  dockes    (26a811724423)

	* packaging/rpm/recollCooker.spec, website/fr/features.html,
	website/mario.png, website/perfs.html, website/smile.png: new file.
	* packaging/rpm/recollCooker.spec, src/doc/user/usermanual.sgml,
	website/BUGS.txt, website/CHANGES.txt, website/credits.html,
	website/doc.html, website/download.html, website/features.html,
	website/fr/features.html, website/index.html.en,
	website/index.html.fr, website/mario.png, website/perfs.html,
	website/rclidxfmt.html, website/smile.png, website/styles/style.css:
	*** empty log message ***

2007-06-26 16:09 +0000  dockes    (d4a3058d613e)

	* src/internfile/internfile.cpp, src/internfile/internfile.h:
	comments

2007-06-26 16:08 +0000  dockes    (7115d37ab33d)

	* src/configure, src/configure.ac, src/mk/Darwin,
	src/qtgui/reslist.cpp, src/recollinstall.in:
	get things to sort of compile / install on macosx

2007-06-26 15:38 +0000  dockes    (02621fd62ca0)

	* src/ChangeLog: new file.
	* src/ChangeLog:
	*** empty log message ***

2007-06-26 11:59 +0000  dockes    (51061217635d)

	* src/excludefile, src/makesrcdist.sh:
	small mkdist fixes

2007-06-25 18:31 +0000  dockes    (5f173fcd227f)

	* src/INSTALL, src/README:
	*** empty log message ***

2007-06-25 10:25 +0000  dockes    (048658cd678b)

	* src/rcldb/rcldb.cpp:
	simplified and hopefully improved abstract generation

2007-06-25 10:13 +0000  dockes    (14ecb9d719e7)

	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/reslist.cpp:
	plaintorich: only setup beacons if needed

2007-06-22 06:14 +0000  dockes    (0584daa67e7c)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/query/wasatorcl.cpp, src/rcldb/rcldb.cpp,
	src/rcldb/searchdata.cpp, src/sampleconf/mimeconf:
	handle mime: and ext: in qlang

2007-06-21 11:56 +0000  dockes    (e5102468f77e)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	slightly reorganized Db::close/~Db code

2007-06-21 11:14 +0000  dockes    (e360a50fdaa5)

	* src/common/rclconfig.cpp:
	beware of unsigneds diffs when comparing to 0 !

2007-06-20 13:16 +0000  dockes    (e515c5541bd4)

	* src/qtgui/preview_w.cpp, src/qtgui/rclmain.ui,
	src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h,
	src/query/history.cpp, src/query/history.h:
	menu entry to reset document history

2007-06-19 16:19 +0000  dockes    (675d2fed7a32)

	* src/qtgui/rclmain_w.cpp, src/qtgui/sort_w.cpp:
	fix sort state restoration which didnt work

2007-06-19 15:48 +0000  dockes    (36fa1c12d616)

	* src/rcldb/rcldb.cpp:
	try to better print delete exception messages

2007-06-19 15:47 +0000  dockes    (304862edc545)

	* src/query/xadump.cpp:
	option X

2007-06-19 15:47 +0000  dockes    (23a728d3cdd7)

	* src/query/recollq.cpp:
	compile

2007-06-19 12:27 +0000  dockes    (5ee1b5e9168e)

	* src/internfile/internfile.cpp, src/internfile/internfile.h:
	get test driver to compile

2007-06-19 12:17 +0000  dockes    (8974a52d2baa)

	* src/internfile/htmlparse.h, src/internfile/mh_html.cpp,
	src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h:
	renamed the html charset values to stick to omega usage

2007-06-19 10:28 +0000  dockes    (e66870aeadb6)

	* src/internfile/htmlparse.cpp, src/internfile/htmlparse.h,
	src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h:
	updated html parser to omega 1.0.1 + moved entity decoder to
	myhtmlparse to minimize amount of diffs

2007-06-19 08:36 +0000  dockes    (e2533617731d)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/internfile/internfile.cpp, src/internfile/mh_html.cpp,
	src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h,
	src/qtgui/preview_w.cpp, src/qtgui/reslist.cpp, src/query/docseq.h,
	src/query/docseqdb.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h,
	src/rcldb/rcldoc.h, src/rcldb/searchdata.cpp,
	src/sampleconf/mimeconf:
	added open-ended field name handling

2007-06-19 07:52 +0000  dockes    (73ccb629ad66)

	* src/common/autoconfig.h.in, src/configure, src/configure.ac,
	src/index/csguess.cpp, src/utils/transcode.cpp:
	added test for iconv parm 2 constness

2007-06-18 13:04 +0000  dockes    (bb1262134776)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.cpp,
	src/sampleconf/mimeconf:
	implement dynamic field name to prefix translation, query side

2007-06-15 11:41 +0000  dockes    (5eccc05a2ae7)

	* src/filters/rclabw, src/filters/rclsoff, src/sampleconf/mimeconf,
	src/sampleconf/mimemap, src/sampleconf/mimeview:
	added abiword + some oofice cleanup

2007-06-15 09:25 +0000  dockes    (f5b1666a10e6)

	* src/filters/rclabw: new file.
	* src/filters/rclabw:
	*** empty log message ***

2007-06-14 08:20 +0000  dockes    (dc698e7b3c84)

	* src/rcldb/rcldb.cpp:
	removed the "weak" date, not used and not in omega anymore

2007-06-13 17:03 +0000  dockes    (3d509dbc275c)

	* src/qtgui/reslist.cpp:
	textedit autext sometimes switched to plain at eol?

2007-06-12 13:31 +0000  dockes    (793abec1cee4)

	* src/qtgui/plaintorich.cpp, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h, src/qtgui/rclmain_w.cpp,
	src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	somewhat fixed qt4 selection problems

2007-06-12 10:33 +0000  dockes    (261ca6c11087)

	* src/qtgui/ssearch_w.cpp:
	adjust event handling for qt4, get esc-spc to work

2007-06-12 10:32 +0000  dockes    (97c9f158e297)

	* src/qtgui/plaintorich.cpp:
	comments

2007-06-12 08:50 +0000  dockes    (28d503078074)

	* src/qtgui/rclmain_w.cpp:
	*** empty log message ***

2007-06-12 08:46 +0000  dockes    (d3f305e57522)

	* src/query/recollq.cpp:
	getMainConfig

2007-06-11 08:33 +0000  dockes    (5542196b466a)

	* src/qtgui/rclmain_w.cpp:
	set busy cursor while search runs

2007-06-11 05:51 +0000  dockes    (bf5090aed2fd)

	* src/Makefile.in:
	*** empty log message ***

2007-06-11 05:49 +0000  dockes    (9327b736d7ff)

	* src/Makefile.in, src/qt4gui/uifrom3:
	*** empty log message ***

2007-06-11 05:45 +0000  dockes    (cbb602782461)

	* src/desktop/recoll.png, src/desktop/recoll.xcf: new file.
	* src/desktop/recoll-searchgui.png, src/desktop/recoll-searchgui.xcf:
	deleted file.
	* src/desktop/recoll-searchgui.desktop, src/desktop/recoll-
	searchgui.png, src/desktop/recoll-searchgui.xcf,
	src/desktop/recoll.png, src/desktop/recoll.xcf,
	src/makestaticdist.sh, src/recollinstall.in:
	icon named recoll.png

2007-06-11 05:38 +0000  dockes    (9268fba2c65c)

	* src/index/indexer.cpp:
	changed level of missing helpers message

2007-06-10 12:26 +0000  dockes    (f5b6dcd36de0)

	* src/mk/OpenBSD: new file.
	* src/mk/OpenBSD:
	*** empty log message ***

2007-06-08 16:47 +0000  dockes    (96f2807957dd)

	* src/common/rclconfig.h, src/index/indexer.cpp,
	src/index/recollindex.cpp, src/mk/FreeBSD, src/qtgui/main.cpp,
	src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	added file system usage check

2007-06-08 16:46 +0000  dockes    (0c11deb1a678)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-06-08 16:46 +0000  dockes    (4f2c0d45e15b)

	* src/desktop/recoll-searchgui.desktop, src/desktop/recoll-
	searchgui.png, src/desktop/recoll-searchgui.xcf:
	new icon

2007-06-08 16:05 +0000  dockes    (6835d2fbb56c)

	* src/rcldb/rcldb.h:
	comments and ordering

2007-06-08 15:30 +0000  dockes    (aeffac1f3f2d)

	* src/utils/pathut.cpp, src/utils/pathut.h:
	fsocc

2007-06-08 14:01 +0000  dockes    (7c47d8aae3cc)

	* src/filters/rclkwd: new file.
	* src/filters/rclkwd, src/sampleconf/mimeview:
	kword support

2007-06-08 13:51 +0000  dockes    (53a1012a564f)

	* src/filters/rcldjvu, src/filters/rcldoc, src/filters/rcldvi,
	src/filters/rclgaim, src/filters/rcljpeg, src/filters/rcllyx,
	src/filters/rclman, src/filters/rclmedia, src/filters/rclpdf,
	src/filters/rclppt, src/filters/rclps, src/filters/rclrtf,
	src/filters/rclscribus, src/filters/rclsoff, src/filters/rclxls,
	src/filters/recfiltcommon, src/sampleconf/mimeconf,
	src/sampleconf/mimemap:
	kword support

2007-06-08 12:33 +0000  dockes    (a56bc180327b)

	* src/query/recollq.cpp:
	added stopfile parameter

2007-06-08 12:32 +0000  dockes    (7b3710f69cd0)

	* src/filters/rcljpeg: new file.
	* src/filters/rcljpeg, src/sampleconf/mimeconf:
	rcljpeg

2007-06-08 12:31 +0000  dockes    (0b20447d105e)

	* src/common/rclconfig.cpp:
	improve message about bad config

2007-06-02 08:30 +0000  dockes    (dfa3e5682035)

	* src/rcldb/Makefile, src/rcldb/stoplist.cpp, src/rcldb/stoplist.h:
	new file.
	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/index/indexer.cpp, src/lib/Makefile, src/lib/mkMake,
	src/qtgui/main.cpp, src/rcldb/Makefile, src/rcldb/rcldb.cpp,
	src/rcldb/rcldb.h, src/rcldb/searchdata.cpp, src/rcldb/stoplist.cpp,
	src/rcldb/stoplist.h, src/utils/readfile.cpp, src/utils/readfile.h:
	minimal experimental stopword functionality

2007-06-01 05:44 +0000  dockes    (b9f3d4b61852)

	* src/qtgui/preview_w.cpp:
	preview: space and backspace bound to pgdown/pgup

2007-05-30 12:31 +0000  dockes    (105744d9f609)

	* src/index/indexer.cpp, src/internfile/mh_html.cpp,
	src/internfile/mh_html.h, src/qtgui/plaintorich.cpp,
	src/query/xadump.cpp, src/utils/transcode.cpp:
	improve transcode error printing

2007-05-30 12:30 +0000  dockes    (d4fa167018eb)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-05-30 12:30 +0000  dockes    (6027fd8afb12)

	* src/rcldb/rcldb.cpp:
	improve add_document error message printing

2007-05-30 12:29 +0000  dockes    (234dc300c26b)

	* src/qtgui/reslist.cpp, src/qtgui/reslist.h:
	escape possibly not html-safe text

2007-05-24 09:35 +0000  dockes    (ec684a070c43)

	* src/rcldb/stemdb.cpp:
	comment

2007-05-24 07:48 +0000  dockes    (deedeff93a6e)

	* src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/sort_w.cpp,
	src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp:
	optionally remember sorting state between invocations

2007-05-24 07:47 +0000  dockes    (e6bb3bced970)

	* src/configure, src/configure.ac, src/qt4gui/uifrom3:
	make uifrom3 a makefile

2007-05-23 09:19 +0000  dockes    (4f9ab7436818)

	* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h,
	src/qtgui/preview.ui, src/qtgui/preview_w.cpp,
	src/qtgui/preview_w.h:
	in preview window if search line empty look for search terms

2007-05-23 08:29 +0000  dockes    (644c4e20106b)

	* src/internfile/internfile.cpp:
	*** empty log message ***

2007-05-23 08:28 +0000  dockes    (1927522b5826)

	* src/common/rclinit.cpp, src/utils/execmd.cpp:
	cant block sigcld globally cause qt needs it

2007-05-22 08:33 +0000  dockes    (2c0d94ae674a)

	* src/internfile/internfile.cpp, src/internfile/mh_html.cpp:
	let email attachments inherit date and author from parent message

2007-05-22 07:40 +0000  dockes    (fc644359e793)

	* src/index/indexer.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	implemented adjustable indexing flush threshold

2007-05-21 14:26 +0000  dockes    (d70d7b6988f0)

	* src/qtgui/rclmain_w.cpp:
	reopen db for each search during query

2007-05-21 13:30 +0000  dockes    (7f65a405e028)

	* src/common/rclinit.cpp, src/common/rclinit.h,
	src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp,
	src/index/recollindex.cpp, src/qtgui/idxthread.cpp,
	src/qtgui/main.cpp, src/rcldb/rcldb.cpp, src/utils/execmd.cpp,
	src/utils/execmd.h:
	make sure signals are only handled by the main thread. Fix bus error
	on rclmon exit (double delete)

2007-05-21 12:03 +0000  dockes    (7af2d0c361be)

	* src/utils/smallut.h:
	*** empty log message ***

2007-05-21 09:00 +0000  dockes    (9ee50650bd6f)

	* src/index/indexer.cpp, src/index/rclmonprc.cpp:
	better handle aspell errors: dont exit from monitor on aux db
	creation failure, and dont retry forever

2007-05-21 07:24 +0000  dockes    (53f18ed9c2f8)

	* src/VERSION, src/configure, src/configure.ac,
	src/doc/user/usermanual.sgml, src/qt4gui/uifrom3,
	src/sampleconf/recoll.conf.in:
	removed --enable-qt4, rely on qmake output instead

2007-05-21 06:46 +0000  dockes    (d6267bb0e30f)

	* website/BUGS.txt, website/CHANGES.txt, website/devel.html,
	website/doc.html, website/download.html, website/index.html.en,
	website/index.html.fr:
	*** empty log message ***

2007-05-19 07:32 +0000  dockes    (cbbd4158e0a8)

	* website/doc.html: new file.
	* website/doc.html:
	*** empty log message ***

2007-05-18 12:05 +0000  dockes    (75610b300ee1 [RECOLL_1_8_2])

	* src/recollinstall.in:
	qt4 install glitches

2007-05-18 11:16 +0000  dockes    (c9a0be6210be)

	* src/README:
	*** empty log message ***

2007-05-18 07:49 +0000  dockes    (eaf500145dd5)

	* src/VERSION:
	1.8.2

2007-05-18 07:49 +0000  dockes    (fc64434e87c0)

	* packaging/debian/changelog, tests/runtests.sh, website/BUGS.txt,
	website/CHANGES.txt, website/download.html:
	*** empty log message ***

2007-05-18 07:41 +0000  dockes    (6bec0784b8fd)

	* src/doc/user/usermanual.sgml:
	doc fix

2007-05-18 07:41 +0000  dockes    (022d354a0a2f)

	* src/sampleconf/recoll.conf.in:
	add .beagle to stops

2007-05-18 07:41 +0000  dockes    (451a13663a00)

	* src/rcldb/rcldb.cpp, src/rcldb/stemdb.cpp:
	change method name deprecated in xap 1.0

2007-05-18 07:40 +0000  dockes    (54bfc83a6186)

	* src/query/Makefile:
	*** empty log message ***

2007-05-18 07:40 +0000  dockes    (ef599af3e2e7)

	* src/configure.ac:
	use $libdir instead of /usr/lib (64bits machs)

2007-05-16 11:28 +0000  dockes    (2cced3d0aa32)

	* src/qtgui/i18n/recoll_it.ts:
	*** empty log message ***

2007-04-22 07:36 +0000  dockes    (8628fca949e7)

	* src/qtgui/i18n/recoll_de.ts: new file.
	* src/qtgui/i18n/recoll_de.ts:
	*** empty log message ***

2007-03-28 19:30 +0000  dockes    (51c5bdb227cd)

	* website/BUGS.txt, website/download.html, website/index.html.en,
	website/index.html.fr:
	*** empty log message ***

2007-03-08 12:24 +0000  dockes    (0efcbb1564f2)

	* packaging/FreeBSD/recoll/Makefile,
	packaging/FreeBSD/recoll/distinfo:
	1.8.1

2007-03-08 12:04 +0000  dockes    (813c82bcc951 [RECOLL_1_8_1])

	* packaging/FreeBSD/recoll/Makefile,
	packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-
	plist, packaging/debian/changelog, packaging/rpm/recoll.spec,
	packaging/rpm/recollfedora.spec, packaging/rpm/recollmdk.spec,
	src/VERSION, src/makestaticdist.sh, website/BUGS.txt,
	website/CHANGES.txt, website/download.html:
	version 1.8.1 ?

2007-02-20 09:30 +0000  dockes    (817cdab71c1c [RECOLL_1_8_0])

	* src/recollinstall.in:
	go back to not using xdg

2007-02-20 07:57 +0000  dockes    (d1f1b31e4a58)

	* website/index.html.en, website/index.html.fr: new file.
	* website/index.html: deleted file.
	* packaging/debian/changelog, website/BUGS.txt, website/CHANGES.txt,
	website/credits.html, website/download.html, website/features.html,
	website/index.html, website/index.html.en, website/index.html.fr:
	*** empty log message ***

2007-02-20 07:43 +0000  dockes    (1f4b07f4cb62)

	* src/qtgui/recoll.pro.in:
	*** empty log message ***

2007-02-20 07:33 +0000  dockes    (dc922603c639)

	* src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_it.ts,
	src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts:
	*** empty log message ***

2007-02-20 07:19 +0000  dockes    (d6b63dc759cd)

	* src/INSTALL, src/README:
	*** empty log message ***

2007-02-19 18:15 +0000  dockes    (a1331ff143f7)

	* src/qtgui/preview_w.cpp, src/qtgui/preview_w.h:
	make shift-arrow in preview work with qt4 and avoid reentrancy while
	loading a file

2007-02-19 18:14 +0000  dockes    (66c79bcff30e)

	* src/utils/execmd.cpp:
	block sigcld, it sometimes causes eintrs during the select() call

2007-02-19 18:05 +0000  dockes    (db9e1830a040)

	* src/internfile/internfile.cpp:
	check file name not empty on return from uncomp exec

2007-02-19 16:28 +0000  dockes    (19982a948347)

	* src/qtgui/spell_w.cpp:
	stemming language choice was not observed in term explorer

2007-02-19 16:10 +0000  dockes    (26815f6c7ce0)

	* src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h:
	cleanup ign file types handling/saving

2007-02-14 15:02 +0000  dockes    (714300cb7780)

	* tests/empty/empty.sh, tests/empty/empty.txt, tests/html/html.sh,
	tests/html/html.txt, tests/images/images.sh,
	tests/images/images.txt, tests/koi8r/koi8r.sh,
	tests/koi8r/koi8r.txt, tests/mail/mail.sh, tests/mail/mail.txt,
	tests/notypes/notypes.sh, tests/notypes/notypes.txt,
	tests/rfc2231/rfc2231.sh, tests/rfc2231/rfc2231.txt,
	tests/special/special.sh, tests/special/special.txt,
	tests/txt/txt.sh, tests/txt/txt.txt, tests/utf8/utf8.sh,
	tests/utf8/utf8.txt: new file.
	* tests/empty/empty.sh, tests/empty/empty.txt, tests/html/html.sh,
	tests/html/html.txt, tests/images/images.sh,
	tests/images/images.txt, tests/koi8r/koi8r.sh,
	tests/koi8r/koi8r.txt, tests/lyx/lyx.txt, tests/mail/mail.sh,
	tests/mail/mail.txt, tests/notypes/notypes.sh,
	tests/notypes/notypes.txt, tests/rfc2231/rfc2231.sh,
	tests/rfc2231/rfc2231.txt, tests/special/special.sh,
	tests/special/special.txt, tests/txt/txt.sh, tests/txt/txt.txt,
	tests/utf8/utf8.sh, tests/utf8/utf8.txt:
	*** empty log message ***

2007-02-14 11:52 +0000  dockes    (b3f3859ce5e5)

	* tests/boolean/boolean.sh, tests/boolean/boolean.txt,
	tests/delete/delete.sh, tests/delete/delete.txt,
	tests/dirwithblanks/dirwithblanks.sh,
	tests/dirwithblanks/dirwithblanks.txt, tests/djvu/djvu.sh,
	tests/djvu/djvu.txt, tests/dvi/dvi.sh, tests/dvi/dvi.txt,
	tests/lyx/lyx.sh, tests/lyx/lyx.txt, tests/media/media.sh,
	tests/media/media.txt, tests/msword/msword.sh,
	tests/msword/msword.txt, tests/ooff/ooff.sh, tests/ooff/ooff.txt,
	tests/pdf/pdf.sh, tests/pdf/pdf.txt, tests/postscript/postscript.sh,
	tests/postscript/postscript.txt, tests/ppt/ppt.sh,
	tests/ppt/ppt.txt, tests/rtf/rtf.sh, tests/rtf/rtf.txt,
	tests/scribus/scribus.sh, tests/scribus/scribus.txt,
	tests/xls/xls.sh, tests/xls/xls.txt: new file.
	* tests/Maildir1/Maildir1.txt, tests/andor/andor.txt,
	tests/badsuffs1/badsuffs1.txt, tests/boolean/boolean.sh,
	tests/boolean/boolean.txt, tests/delete/delete.sh,
	tests/delete/delete.txt, tests/dirwithblanks/dirwithblanks.sh,
	tests/dirwithblanks/dirwithblanks.txt, tests/djvu/djvu.sh,
	tests/djvu/djvu.txt, tests/dvi/dvi.sh, tests/dvi/dvi.txt,
	tests/lyx/lyx.sh, tests/lyx/lyx.txt, tests/media/media.sh,
	tests/media/media.txt, tests/msword/msword.sh,
	tests/msword/msword.txt, tests/ooff/ooff.sh, tests/ooff/ooff.txt,
	tests/pdf/pdf.sh, tests/pdf/pdf.txt, tests/postscript/postscript.sh,
	tests/postscript/postscript.txt, tests/ppt/ppt.sh,
	tests/ppt/ppt.txt, tests/rtf/rtf.sh, tests/rtf/rtf.txt,
	tests/scribus/scribus.sh, tests/scribus/scribus.txt,
	tests/shared.sh, tests/skipped/skipped.sh,
	tests/skipped/skipped.txt, tests/xls/xls.sh, tests/xls/xls.txt:
	*** empty log message ***

2007-02-14 10:10 +0000  dockes    (04c3156fd4dd)

	* src/doc/user/usermanual.sgml, src/makestaticdist.sh,
	src/qtgui/guiutils.cpp, src/qtgui/guiutils.h,
	src/qtgui/rclmain_w.cpp, src/qtgui/uiprefs.ui,
	src/qtgui/uiprefs_w.cpp, src/recollinstall.in,
	src/sampleconf/mimeview:
	add user pref to use xdg-open for all document edits

2007-02-14 10:09 +0000  dockes    (7886dd99d419)

	* src/rcldb/rcldb.cpp:
	during indexing use simple file name as title if this is empty. This
	allows storing the sfn for subdocs for which the url sfn doesnt make
	sense as title

2007-02-14 10:08 +0000  dockes    (fb42e10e5a7b)

	* src/query/recollq.cpp:
	adjust format to help the test set scripts

2007-02-14 08:54 +0000  dockes    (5e02666b38db)

	* src/desktop/xdg-utils-1.0.1/scripts/xdg-open: new file.
	* src/desktop/xdg-utils-1.0.1/scripts/xdg-open:
	*** empty log message ***

2007-02-14 08:16 +0000  dockes    (eb0fd52ef15a)

	* tests/Maildir/Maildir.sh, tests/Maildir/Maildir.txt,
	tests/Maildir1/Maildir1.sh, tests/Maildir1/Maildir1.txt,
	tests/andor/andor.sh, tests/andor/andor.txt,
	tests/badsuffs/badsuffs.sh, tests/badsuffs/badsuffs.txt,
	tests/badsuffs1/badsuffs1.sh, tests/badsuffs1/badsuffs1.txt,
	tests/runtests.sh, tests/shared.sh, tests/skipped/skipped.sh,
	tests/skipped/skipped.txt: new file.
	* tests/Maildir/Maildir.sh, tests/Maildir/Maildir.txt,
	tests/Maildir1/Maildir1.sh, tests/Maildir1/Maildir1.txt,
	tests/andor/andor.sh, tests/andor/andor.txt,
	tests/badsuffs/badsuffs.sh, tests/badsuffs/badsuffs.txt,
	tests/badsuffs1/badsuffs1.sh, tests/badsuffs1/badsuffs1.txt,
	tests/runtests.sh, tests/shared.sh, tests/skipped/skipped.sh,
	tests/skipped/skipped.txt:
	*** empty log message ***

2007-02-13 10:58 +0000  dockes    (19c29e100995)

	* src/query/wasatorcl.cpp, src/rcldb/searchdata.cpp,
	src/rcldb/searchdata.h:
	propagate wasa nostem modifier

2007-02-12 18:16 +0000  dockes    (bf3060f2e259)

	* src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h:
	add wasabi modifiers

2007-02-12 18:14 +0000  dockes    (6ae625065d64)

	* src/qtgui/guiutils.cpp:
	dont set Helvetica as default font

2007-02-08 17:05 +0000  dockes    (f23e18da0362)

	* src/index/indexer.cpp, src/index/indexer.h,
	src/internfile/internfile.cpp, src/internfile/internfile.h,
	src/qtgui/preview_w.cpp, src/utils/smallut.cpp, src/utils/smallut.h:
	improve handling of missing helpers messages

2007-02-08 17:03 +0000  dockes    (f53e952b71cd)

	* src/filters/rcldvi:
	typos

2007-02-08 12:25 +0000  dockes    (ba982598a66f)

	* src/internfile/internfile.h, src/query/recollq.cpp:
	clarify temp dir usage in internfile

2007-02-08 09:03 +0000  dockes    (876ec27bd9c0)

	* src/qtgui/reslist.cpp, src/qtgui/uiprefs_w.cpp,
	src/qtgui/uiprefs_w.h:
	qt4 compilation glitches

2007-02-07 17:18 +0000  dockes    (2f05854b010a)

	* src/filters/injectcommon.sh, src/filters/recfiltcommon: new file.
	* src/filters/injectcommon.sh, src/filters/recfiltcommon:
	*** empty log message ***

2007-02-07 17:17 +0000  dockes    (39e4d9e07461)

	* src/recoll.desktop, src/recoll.png, src/recoll.xcf: deleted file.
	* src/recoll.desktop, src/recoll.png, src/recoll.xcf,
	src/recollinstall.in:
	use xdg scripts to install desktop file and icon

2007-02-07 17:17 +0000  dockes    (3161a2dabc0a)

	* src/common/rclconfig.cpp, src/doc/user/usermanual.sgml:
	dont autocreate config specified with -c or RECOLL_CONFDIR

2007-02-07 16:31 +0000  dockes    (f89cbedba93f)

	* src/desktop/recoll-searchgui.desktop, src/desktop/recoll-
	searchgui.png, src/desktop/recoll-searchgui.xcf, src/desktop/xdg-
	utils-1.0.1/LICENSE, src/desktop/xdg-utils-1.0.1/scripts/xdg-
	desktop-menu, src/desktop/xdg-utils-1.0.1/scripts/xdg-icon-resource:
	new file.
	* src/desktop/recoll-searchgui.desktop, src/desktop/recoll-
	searchgui.png, src/desktop/recoll-searchgui.xcf, src/desktop/xdg-
	utils-1.0.1/LICENSE, src/desktop/xdg-utils-1.0.1/scripts/xdg-
	desktop-menu, src/desktop/xdg-utils-1.0.1/scripts/xdg-icon-resource,
	src/query/recollq.cpp:
	*** empty log message ***

2007-02-07 16:31 +0000  dockes    (2494c5157c22)

	* src/aspell/rclaspell.cpp:
	improve db creation error message

2007-02-07 12:00 +0000  dockes    (3c02ca709886)

	* src/query/recollq.cpp: new file.
	* src/query/Makefile, src/query/recollq.cpp, src/query/wasatorcl.cpp,
	src/query/wasatorcl.h:
	recollq

2007-02-06 18:01 +0000  dockes    (1992c71741c0)

	* src/index/indexer.cpp, src/internfile/internfile.cpp,
	src/internfile/internfile.h, src/internfile/mh_exec.cpp,
	src/qtgui/preview_w.cpp:
	arrange for error info about missing helpers to trickle up to the
	user

2007-02-06 18:01 +0000  dockes    (d5e12cec5aeb)

	* src/sampleconf/mimeconf, src/sampleconf/mimemap:
	added config+filter for man pages

2007-02-06 15:08 +0000  dockes    (ef2eef3c33e9)

	* src/filters/rclman: new file.
	* src/filters/rcldjvu, src/filters/rcldoc, src/filters/rcldvi,
	src/filters/rclgaim, src/filters/rcllyx, src/filters/rclman,
	src/filters/rclmedia, src/filters/rclpdf, src/filters/rclppt,
	src/filters/rclps, src/filters/rclrtf, src/filters/rclscribus,
	src/filters/rclsoff, src/filters/rclxls:
	factored out filter script common code

2007-02-06 14:18 +0000  dockes    (7812fc3157a4)

	* src/common/rclconfig.cpp, src/utils/pathut.cpp, src/utils/pathut.h:
	make sure the -c argument is turned absolute before use

2007-02-06 10:19 +0000  dockes    (243d1fffdfb9)

	* src/qtgui/ssearch_w.cpp:
	no space in query -> phrase

2007-02-06 10:18 +0000  dockes    (d1b8dd6a7182)

	* src/qtgui/reslist.cpp:
	try to make sure that the old reslist is cleared while searching

2007-02-06 10:18 +0000  dockes    (24163d1804e5)

	* src/qt4gui/uifrom3:
	link images/ from qtgui to qt4gui

2007-02-03 16:46 +0000  dockes    (d27849ad572f)

	* website/styles/style.css:
	*** empty log message ***

2007-02-02 10:27 +0000  dockes    (3c82d463b36c)

	* src/doc/user/usermanual.sgml:
	add skippedPaths and daemSkippedPaths config variables

2007-02-02 10:12 +0000  dockes    (0232602ba055)

	* src/common/rclconfig.cpp, src/common/rclconfig.h,
	src/index/indexer.cpp, src/index/rclmonrcv.cpp,
	src/utils/fstreewalk.cpp:
	add skippedPaths and daemSkippedPaths config variables

2007-02-02 10:10 +0000  dockes    (12a2a255dedc)

	* src/rcldb/rcldb.cpp, src/rcldb/rcldb.h:
	sort and uniquify termMatch results out of stem expansion

2007-02-02 10:09 +0000  dockes    (344b11ebced1)

	* src/index/recollindex.cpp:
	do x11 check between sleeping and starting in recollindex -m

2007-02-02 10:06 +0000  dockes    (3a9bb20130c8)

	* src/doc/user/usermanual-italian.html: new file.
	* src/doc/user/usermanual-italian.html, src/qtgui/i18n/recoll_it.ts:
	*** empty log message ***

2007-02-02 10:06 +0000  dockes    (9a6092dbecea)

	* src/lib/Makefile, src/lib/mkMake:
	fix $(depth) usage for easier kio compilation

2007-02-02 10:05 +0000  dockes    (a645eeae729a)

	* src/doc/user/usermanual.sgml:
	added config examples

2007-02-02 10:01 +0000  dockes    (a0640e49ab3a)

	* src/recollinstall.in:
	removed old filter in examples cleanup

2007-02-01 15:01 +0000  dockes    (db53657c868d)

	* src/aspell/rclaspell.cpp, src/mk/localdefs.in:
	use configure libdir to search for aspell lib (mainly for 64 bits
	machines)

2007-02-01 12:43 +0000  dockes    (7f3d33405e53)

	* src/kde/kioslave/recoll/Makefile,
	src/kde/kioslave/recoll/kio_recoll.cpp:
	fixed kio compilation. Dont know if it works

2007-01-30 11:39 +0000  dockes    (1aa8e8c3d93a)

	* src/filters/rcldjvu, src/filters/rcldoc, src/filters/rcldvi,
	src/filters/rclpdf, src/filters/rclps, src/filters/rclrtf,
	src/filters/rclsoff:
	hide awk BEGIN statements - make debian linda happy

2007-01-29 13:51 +0000  dockes    (f207a83f0617)

	* src/rcldb/searchdata.cpp:
	more field name synonyms

2007-01-25 15:50 +0000  dockes    (ba53fd450dc5)

	* src/rcldb/searchdata.cpp, src/rcldb/searchdata.h:
	better wildcards handling. Tuning of user term boosting

2007-01-25 15:47 +0000  dockes    (026e24e9aafc)

	* src/doc/user/usermanual.sgml:
	*** empty log message ***

2007-01-25 15:47 +0000  dockes    (31cd60d81a3a)

	* src/rcldb/rcldb.cpp:
	dont explicitely anchor regexp in termMatch

2007-01-25 15:46 +0000  dockes    (8c7afe9df6fb)

	* src/qtgui/ssearch_w.cpp:
	Dont add auto phrase if there are wildcards

2007-01-25 15:45 +0000  dockes    (d35369f54699)

	* src/query/wasatorcl.cpp:
	comment

2007-01-25 15:40 +0000  dockes    (2d7b13ebd2c8)

	* src/common/textsplit.cpp:
	[] are also wildcard chars

2007-01-25 12:04 +0000  dockes    (27310036f46c)

	* src/qtgui/i18n/recoll_it.ts:
	*** empty log message ***

2007-01-25 08:27 +0000  dockes    (876d5192bdde)

	* src/qtgui/i18n/recoll_it.ts: new file.
	* src/qtgui/i18n/recoll_it.ts:
	*** empty log message ***

2007-01-24 12:40 +0000  dockes    (dd470677dbf2)

	* src/qtgui/guiutils.cpp:
	make AND the initial default for ssearch

2007-01-24 11:20 +0000  dockes    (623c6533e0f0)

	* src/qtgui/uiprefs.ui, src/qtgui/viewaction.ui:
	change MyDialog and Form1 dialog captions

2007-01-24 11:15 +0000  dockes    (9dc93d749ea8)

	* src/filters/rclscribus:
	transate \r to 
(for older scribus files) 2007-01-24 11:00 +0000 dockes (7ea73b206760) * src/sampleconf/mimeconf, src/sampleconf/mimemap: scribus scd files 2007-01-23 07:23 +0000 dockes (0f9e96c72d1c) * src/filters/rcllyx: *** empty log message *** 2007-01-23 07:22 +0000 dockes (5fc9550be90c) * src/filters/rcllyx: *** empty log message *** 2007-01-23 07:16 +0000 dockes (55734c5d16c2) * src/filters/rcllyx: *** empty log message *** 2007-01-23 07:14 +0000 dockes (dafabbcdaf1a) * src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/mimeview: lyx filter 2007-01-23 07:14 +0000 dockes (d5ac3c0cf64f) * src/filters/rcllyx: new file. * src/filters/rcllyx: lyx filter 2007-01-22 16:34 +0000 dockes (e76e39a890d0) * src/sampleconf/mimeconf, src/sampleconf/mimemap: added scribus support 2007-01-22 16:32 +0000 dockes (0b142f40e0c7) * src/filters/rclscribus: new file. * src/filters/rclscribus: added scribus support 2007-01-21 16:41 +0000 dockes (8e06e0f7914e) * src/filters/rclsoff: fix shell syntax for debian 2007-01-19 15:22 +0000 dockes (084098d57a50) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/query/docseq.h, src/query/docseqdb.cpp, src/query/docseqdb.h, src/query/docseqhist.h, src/query/sortseq.cpp, src/query/sortseq.h: try to limit the places which use Rcl:: stuff 2007-01-19 15:19 +0000 dockes (1d1bdf98f176) * src/rcldb/stemdb.cpp: make sure that both the user term and the stem are in the expanded list 2007-01-19 10:32 +0000 dockes (757f49c23d93) * src/query/docseqdb.cpp, src/query/docseqdb.h, src/query/docseqhist.cpp, src/query/docseqhist.h: new file. * src/lib/Makefile, src/lib/mkMake, src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/ssearch_w.cpp, src/query/docseq.cpp, src/query/docseq.h, src/query/docseqdb.cpp, src/query/docseqdb.h, src/query/docseqhist.cpp, src/query/docseqhist.h, src/query/sortseq.cpp, src/query/sortseq.h: cleanup docseq, arrange things so that we can page reslist past the initial result count estimate if there are more 2007-01-19 10:23 +0000 dockes (d4ecd356406a) * src/rcldb/searchdata.cpp: the relevance-boosted original term needs a prefix too 2007-01-19 10:23 +0000 dockes (cacb9b50f1cf) * src/rcldb/rcldb.cpp: adjust makeAbstract for prefixed terms 2007-01-19 10:22 +0000 dockes (95d569102c37) * src/query/wasatorcl.cpp, src/query/wasatorcl.h: add direct qstring to rcl function 2007-01-18 14:23 +0000 dockes (157d8676b256) * src/utils/mimeparse.cpp: debug msg 2007-01-18 12:09 +0000 dockes (b0647b310dec) * src/common/textsplit.cpp, src/common/textsplit.h, src/rcldb/searchdata.cpp: handle wildcards in search terms 2007-01-17 14:06 +0000 dockes (65d2617d690c) * src/query/Makefile, src/query/wasatorcl.cpp: *** empty log message *** 2007-01-17 13:53 +0000 dockes (82c00cf9d054) * src/internfile/internfile.cpp, src/internfile/mh_html.cpp, src/internfile/mh_mail.cpp, src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h, src/lib/Makefile, src/lib/mkMake, src/query/Makefile, src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h, src/query/wasatorcl.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldoc.h, src/rcldb/searchdata.cpp, src/rcldb/searchdata.h: added field/prefixes for author and title + command line query language 2007-01-16 10:58 +0000 dockes (f56d8a303798) * src/sampleconf/recoll.conf.in: add recollrc to skipped 2007-01-16 10:58 +0000 dockes (a28d7ea5359b) * website/BUGS.txt, website/CHANGES.txt, website/download.html, website/index.html: 
1.7.5 2007-01-16 10:58 +0000 dockes (83b10dc2e5ea) * src/bincimapmime/trbinc.cc, src/utils/debuglog.cpp: wrong copyrights 2007-01-16 10:56 +0000 dockes (e51d7ee21ffd) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/rpm/recoll.spec, packaging/rpm/recollfedora.spec, packaging/rpm/recollmdk.spec: 1.7.5 packaging 2007-01-16 09:22 +0000 dockes (7f8fea3bed13) * packaging/debian/manpages: deleted file. * packaging/debian/changelog, packaging/debian/control, packaging/debian/copyright, packaging/debian/manpages, packaging/debian/rules: 2007-01-12 comments 2007-01-15 19:16 +0000 dockes (740528a1cd7d) * packaging/debian/dirs: deleted file. * packaging/debian/dirs: *** empty log message *** 2007-01-15 13:06 +0000 dockes (12e31e690f9e) * src/internfile/internfile.cpp, src/internfile/internfile.h: dont stop processing a complex document on the first next_document error: pop level and go on 2007-01-15 13:03 +0000 dockes (6d3f8a71e602) * src/doc/user/usermanual.sgml: *** empty log message *** 2007-01-13 15:21 +0000 dockes (b04adc5188d5) * src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h: improved external index dialog with listview 2007-01-13 14:41 +0000 dockes (f9567f0fed32) * src/query/xadump.cpp: add option to dump raw terms 2007-01-13 10:28 +0000 dockes (fc890008108f) * src/internfile/mh_mail.cpp: handle multipart/signed 2007-01-12 09:01 +0000 dockes (1782d39f9d4d) * src/qtgui/reslist.cpp: Use sample from Rcl::Doc if makeAbstract() fails 2007-01-12 06:42 +0000 dockes (8223a4aa9ad4) * packaging/debian/copyright: include gpl statement 2007-01-10 16:03 +0000 dockes (66247acdb470) * packaging/debian/changelog, packaging/debian/compat, packaging/debian/control, packaging/debian/copyright, packaging/debian/dirs, packaging/debian/docs, packaging/debian/manpages, packaging/debian/menu, packaging/debian/rules, packaging/debian/watch: new file. * packaging/debian/changelog, packaging/debian/compat, packaging/debian/control, packaging/debian/copyright, packaging/debian/dirs, packaging/debian/docs, packaging/debian/manpages, packaging/debian/menu, packaging/debian/rules, packaging/debian/watch, src/Makefile.in, src/VERSION: *** empty log message *** 2007-01-10 12:27 +0000 dockes (733bc11b5526) * packaging/FreeBSD/recoll/Makefile: *** empty log message *** 2007-01-09 15:34 +0000 dockes (9583ff723edf [RECOLL_1_7_3]) * packaging/rpm/recollfedora.spec: new file. * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg- descr, packaging/FreeBSD/recoll/pkg-plist, packaging/rpm/recoll.spec, packaging/rpm/recollfedora.spec, packaging/rpm/recollmdk.spec: 1.7.3 2007-01-09 14:39 +0000 dockes (f108471cd099) * website/BUGS.txt, website/CHANGES.txt, website/download.html: 1.7.3 2007-01-09 07:25 +0000 dockes (e0c1d14a73c5) * src/VERSION: *** empty log message *** 2007-01-09 07:25 +0000 dockes (f06dbc019ff4) * src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts: french messages 2007-01-08 15:21 +0000 dockes (906e56e99e81) * src/VERSION, src/qtgui/main.cpp: initial indexation with gui would not work 2007-01-08 13:00 +0000 dockes (44d2b5d58ac6 [RECOLL_1_7_1]) * src/VERSION: 1.7.1 2007-01-08 12:43 +0000 dockes (2cb748432b10) * src/qtgui/advsearch.ui: lost sizers? 
2007-01-08 10:11 +0000 dockes (67c4375292e5) * src/qtgui/rclmain_w.cpp: fix the previous icon fix 2007-01-08 10:01 +0000 dockes (8eb24fe9db4f) * src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h: reslist menu openParent opens containing folder if not a subdoc 2007-01-08 09:40 +0000 dockes (b38401a650c3) * src/qtgui/uiprefs.ui: fix resizing of prefs dialog 2007-01-08 07:02 +0000 dockes (5a2fb87a2c55) * src/qtgui/rclmain_w.cpp: get rid of messages about missing d_xxx images 2007-01-08 07:01 +0000 dockes (8a2c6d2cba46) * src/qtgui/reslist.cpp: synthetic abstracts not displayed 2006-12-24 08:07 +0000 dockes (e42dca990bea [RECOLL_1_7_0]) * src/Makefile.in: better cleanup -> 1.7.0 NOW 2006-12-24 08:02 +0000 dockes (916d6e831996 [RECOLL_1_7_2]) * packaging/FreeBSD/recoll/pkg-plist, src/excludefile, website/BUGS.txt, website/CHANGES.txt, website/download.html, website/features.html, website/index.html, website/pics/index.html, website/rclidxfmt.html: 1.7.0 2006-12-24 07:53 +0000 dockes (b37a6e3566b9) * src/INSTALL, src/README, src/doc/user/usermanual.sgml: *** empty log message *** 2006-12-24 07:40 +0000 dockes (69573fe97b89) * src/configure, src/configure.ac, src/doc/man/recollindex.1, src/doc/user/usermanual.sgml, src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/recollindex.cpp: option -x to disable x11 session monitoring 2006-12-23 13:07 +0000 dockes (fb731b7d3ab1) * src/configure, src/configure.ac, src/index/Makefile, src/index/rclmonprc.cpp, src/lib/Makefile, src/lib/mkMake, src/mk/localdefs.in, src/utils/pathut.cpp: x11 session end detection 2006-12-23 12:23 +0000 dockes (00532204c17f) * src/utils/x11mon.cpp, src/utils/x11mon.h: new file. * src/utils/Makefile, src/utils/x11mon.cpp, src/utils/x11mon.h: *** empty log message *** 2006-12-22 16:48 +0000 dockes (ee878b9d311e) * src/qt4gui/recoll.qrc, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/spell_w.cpp: get all icons out of .ui files to avoid qt4 startup messages 2006-12-22 11:01 +0000 dockes (078acb3ab4fd) * src/doc/man/recollindex.1, src/doc/user/usermanual.sgml, src/qtgui/spell_w.h: *** empty log message *** 2006-12-21 10:08 +0000 dockes (d36d26d5b5d5) * src/index/rclmonprc.cpp: try to be more responsive to interruptions 2006-12-21 09:22 +0000 dockes (818387de5d92) * src/index/indexer.cpp, src/index/rclmonrcv.cpp, src/index/recollindex.cpp, src/sampleconf/mimemap, src/utils/fstreewalk.cpp: always skip indexing of confdir and dbdir. 
start index monitor with normal indexing pass 2006-12-21 09:21 +0000 dockes (13c7229ee6dc) * src/qtgui/advsearch.ui, src/qtgui/ssearchb.ui: tooltips 2006-12-21 08:22 +0000 dockes (c1e9892c3ba1) * src/utils/fstreewalk.cpp, src/utils/fstreewalk.h: add skipped paths 2006-12-20 14:28 +0000 dockes (f580e9aa026a) * src/internfile/internfile.cpp: msg 2006-12-20 14:09 +0000 dockes (e3d7f975546f) * src/qtgui/preview_w.cpp: try to improve error message for internfile failure 2006-12-20 13:55 +0000 dockes (0a07075dd464) * src/qtgui/preview_w.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h: reslist: added menu entry to see parent doc of attachment 2006-12-20 13:12 +0000 dockes (733a59947cfb) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/sampleconf/mimeconf: mime categories 2006-12-20 10:47 +0000 dockes (591625eb1d38) * src/INSTALL, src/README: *** empty log message *** 2006-12-20 09:54 +0000 dockes (c563fb138893) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/doc/user/usermanual.sgml, src/index/mimetype.cpp: changed stopsuffixes processing 2006-12-20 09:41 +0000 dockes (64488c2687be) * src/index/recollindex.cpp: opt -e 2006-12-19 12:38 +0000 dockes (6d4a0c0f8cc3) * src/qtgui/spell_w.cpp: qt4 2006-12-19 12:11 +0000 dockes (a3e7c86f79d7) * src/qtgui/spell.ui, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h, src/qtgui/ssearch_w.cpp, src/qtgui/viewaction.ui, src/qtgui/viewaction_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.cpp, src/rcldb/stemdb.cpp, src/rcldb/stemdb.h, src/utils/smallut.cpp, src/utils/smallut.h: merge stemExpand into termExpand. return term frequencies from there and display in spellW 2006-12-19 08:40 +0000 dockes (3bbff3062a89) * src/common/rclconfig.cpp, src/index/indexer.cpp, src/index/mimetype.cpp, src/index/mimetype.h, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mimehandler.cpp, src/qtgui/preview_w.cpp, src/sampleconf/mimeconf, src/sampleconf/mimeview: index directory names 2006-12-19 07:48 +0000 dockes (7301c237649a) * src/qtgui/mtpics/folder.png: new file. 
* src/qtgui/mtpics/folder.png: *** empty log message *** 2006-12-18 16:45 +0000 dockes (0a640477a752) * src/qt4gui/recoll.pro.in, src/qtgui/viewaction.ui, src/qtgui/viewaction_w.cpp, src/qtgui/viewaction_w.h: qt4 2006-12-18 12:06 +0000 dockes (5f17ab347621) * src/doc/user/usermanual.sgml, src/internfile/mh_mail.cpp, src/utils/smallut.cpp, src/utils/smallut.h: mh_mail needs to lowercase contentypes 2006-12-18 12:05 +0000 dockes (03363b562546) * src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/viewaction_w.cpp, src/qtgui/viewaction_w.h: dblclick to edit in viewAction 2006-12-16 15:39 +0000 dockes (a3027dd4b920) * src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/mh_text.h, src/internfile/mimehandler.h, src/qtgui/main.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h: mail attachments sort of ok 2006-12-16 15:31 +0000 dockes (7d335e595c2b) * src/utils/pathut.cpp, src/utils/pathut.h: added TempFile class 2006-12-16 15:30 +0000 dockes (89fed05a6ace) * src/internfile/Filter.h: *** empty log message *** 2006-12-16 15:30 +0000 dockes (5f74c84fa800) * src/index/indexer.cpp: dont clobber utf8fn from filter 2006-12-16 15:30 +0000 dockes (b5f77fb6530b) * src/common/rclconfig.cpp, src/common/rclconfig.h: added getSuffixFromMimeType() 2006-12-16 07:15 +0000 dockes (ef72575e285c) * src/internfile/Filter.h: *** empty log message *** 2006-12-15 16:33 +0000 dockes (df6232340341) * src/index/indexer.cpp, src/internfile/Filter.h, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mh_html.cpp, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/myhtmlparse.h: test data indexing result same terms as 1.6.3 2006-12-15 12:40 +0000 dockes (5156a319f219) * src/internfile/Filter.h, src/internfile/mh_mbox.cpp, src/internfile/mh_mbox.h: new file. * src/internfile/Filter.h, src/internfile/Makefile, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mh_exec.cpp, src/internfile/mh_exec.h, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/mh_mbox.cpp, src/internfile/mh_mbox.h, src/internfile/mh_text.cpp, src/internfile/mh_text.h, src/internfile/mh_unknown.h, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/internfile/myhtmlparse.h, src/lib/Makefile, src/lib/mkMake, src/utils/smallut.cpp, src/utils/smallut.h: Dijon filters 1st step: mostly working needs check and optim 2006-12-14 14:54 +0000 dockes (2f7d4fb90b31) * src/rcldb/rcldoc.h: new file. * src/rcldb/rcldb.h, src/rcldb/rcldoc.h: split rcldb.h -> rcldoc.h 2006-12-14 13:53 +0000 dockes (839454238284) * src/qtgui/viewaction.ui, src/qtgui/viewaction_w.cpp, src/qtgui/viewaction_w.h, src/sampleconf/mimeview: new file. 
* src/common/rclconfig.cpp, src/common/rclconfig.h, src/doc/user/usermanual.sgml, src/index/indexer.cpp, src/qtgui/preview_w.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/recoll.pro.in, src/qtgui/ssearch_w.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h, src/qtgui/viewaction.ui, src/qtgui/viewaction_w.cpp, src/qtgui/viewaction_w.h, src/recollinstall.in, src/sampleconf/mimeconf, src/sampleconf/mimeview, src/utils/Makefile, src/utils/conftree.cpp, src/utils/conftree.h, src/utils/execmd.cpp, src/utils/execmd.h, src/utils/pathut.cpp, src/utils/pathut.h, src/utils/smallut.cpp, src/utils/smallut.h: created mimeview and the viewer conf edit dialog 2006-12-13 09:13 +0000 dockes (ca4c21f5ad44) * src/common/rclconfig.cpp, src/internfile/internfile.cpp, src/internfile/mh_exec.cpp, src/internfile/mimehandler.cpp: move findFilter usage out of mh_exec 2006-12-11 14:56 +0000 dockes (dd4f283c9753 [BEFORE_Dijon20061215]) * src/qtgui/plaintorich.cpp: not calling textsplit with onlyspans improves highlighting 2006-12-11 14:50 +0000 dockes (c6d552528f6c) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/mimetype.cpp, src/utils/smallut.cpp, src/utils/smallut.h: rationalize stopsuffix list usage 2006-12-11 09:05 +0000 dockes (87037320fddf) * packaging/FreeBSD/recoll/pkg-plist: try to cleanup the share/icons tree 2006-12-10 17:03 +0000 dockes (0d0fec69b4e4 [MAPSTRMAPSTRSTR]) * src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h, src/query/wasatorcl.cpp, src/query/wasatorcl.h: added sort and type specs parsing 2006-12-08 17:18 +0000 dockes (9d443b2ad416) * src/query/wasatorcl.h: 1st query 2006-12-08 10:54 +0000 dockes (dc4914858b42) * src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h: *** empty log message *** 2006-12-08 07:11 +0000 dockes (df1ce4c7c9bf) * src/common/textsplit.cpp, src/common/textsplit.h, src/qtgui/ssearch_w.cpp: only autophrase if query has several terms 2006-12-08 06:45 +0000 dockes (6b96cd852343) * src/qtgui/ssearch_w.cpp: make autophrase do the right thing: add a subclause, not modify the query string 2006-12-07 16:38 +0000 dockes (e0b7c11d4054) * src/query/qtry.cpp, src/query/qxtry.cpp: deleted file. * src/query/Makefile, src/query/qtry.cpp, src/query/qxtry.cpp, src/query/xadump.cpp: removed qtry and merged qxtry into xadump 2006-12-07 13:24 +0000 dockes (11f50dc2ced9) * src/rcldb/rcldb.cpp: comment 2006-12-07 13:14 +0000 dockes (0137bc80c8a5) * website/rclidxfmt.html: new file. * website/rclidxfmt.html: *** empty log message *** 2006-12-07 13:02 +0000 dockes (e36e165c1055) * src/rcldb/rcldb.cpp: comments 2006-12-07 08:23 +0000 dockes (6bdb3421d1ca) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-plist: 1.6.3 2006-12-07 08:06 +0000 dockes (2ca80dafce2a) * src/internfile/mh_mail.cpp: fix bug with bad message "From " delimiter detection 2006-12-07 07:07 +0000 dockes (92354b8e641a) * src/qtgui/rclmain_w.cpp, src/utils/mimeparse.h, src/utils/smallut.cpp, src/utils/smallut.h: fix pb with executing viewer for files with single-quotes in pathnames 2006-12-07 07:06 +0000 dockes (b415958c3148) * src/internfile/mh_mail.cpp: fix bug with bad message "From " delimiter detection 2006-12-05 15:25 +0000 dockes (451489717e47) * src/internfile/mh_mail.cpp: use regexp to better discriminate From delimiter lines in mbox.
Avoid reading mboxes twice 2006-12-05 15:23 +0000 dockes (282880e83069) * src/qtgui/advsearch.ui, src/qtgui/main.cpp, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/qtgui/sort.ui, src/qtgui/sort_w.cpp, src/qtgui/sort_w.h: avoid generating abstracts before theyre needed (ie: not during sort). have the sort tools redisplay the results when sort criteria are applied 2006-12-05 15:18 +0000 dockes (069f87c83682) * src/query/sortseq.cpp, src/query/sortseq.h: use refcntr to access docsequence 2006-12-05 15:17 +0000 dockes (f7bad3e61904) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: expose abstract synthesis to let users decide when they want it done 2006-12-05 15:17 +0000 dockes (57148c851c44) * src/rcldb/searchdata.h: clauseCount 2006-12-05 15:16 +0000 dockes (d6d5ee7b750b) * src/utils/refcntr.h: fix pbs with empty object 2006-12-04 09:56 +0000 dockes (1173f38c9de4) * src/configure, src/configure.ac, src/qtgui/advsearch_w.h, src/qtgui/main.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/sort_w.h, src/qtgui/spell_w.h, src/qtgui/ssearch_w.h, src/qtgui/uiprefs_w.h, src/utils/refcntr.h: qt4 compiles and sort of works 2006-12-04 09:49 +0000 dockes (00bc69d47f20) * src/qt4gui/recoll.qrc, src/qt4gui/uifrom3: new file. * src/qt4gui/recoll.pro.in, src/qt4gui/recoll.qrc, src/qt4gui/uifrom3: *** empty log message *** 2006-12-04 08:17 +0000 dockes (c92f84765756) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/sort_w.cpp, src/qtgui/ssearch_w.cpp, src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h: compiles (doesnt work) on qt4 2006-12-04 06:19 +0000 dockes (5a7d6794967e) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/guiutils.cpp, src/qtgui/main.cpp, src/qtgui/preview.ui, src/qtgui/preview_w.h, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/qtgui/searchclause_w.cpp, src/qtgui/searchclause_w.h, src/qtgui/sort_w.h, src/qtgui/spell.ui, src/qtgui/spell_w.h, src/qtgui/ssearch_w.h, src/qtgui/ssearchb.ui, src/qtgui/uiprefs_w.h: qt4 ckpt 2006-12-02 07:32 +0000 dockes (45564d318a93) * src/utils/idfile.cpp: improved tests to check for mail 2006-12-01 10:05 +0000 dockes (8c3b51bc117f) * src/query/xadump.cpp: *** empty log message *** 2006-11-30 18:12 +0000 dockes (e41c0db701ae) * src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h, src/query/wasatorcl.cpp, src/query/wasatorcl.h: new file. * src/query/wasastringtoquery.cpp, src/query/wasastringtoquery.h, src/query/wasatorcl.cpp, src/query/wasatorcl.h: *** empty log message *** 2006-11-30 13:44 +0000 dockes (5ef831ae4659) * website/download.html: *** empty log message *** 2006-11-30 13:38 +0000 dockes (6e49658236c6) * src/qtgui/images/cancel.png, src/qtgui/images/close.png: new file. 
* packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-plist, packaging/rpm/recoll.spec, packaging/rpm/recollmdk.spec, src/README, src/aspell/rclaspell.cpp, src/doc/user/usermanual.sgml, src/index/indexer.cpp, src/index/indexer.h, src/makesrcdist.sh, src/makestaticdist.sh, src/mk/SunOS, src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts, src/qtgui/images/cancel.png, src/qtgui/images/close.png, src/qtgui/main.cpp, src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview.ui, src/qtgui/preview_w.cpp, src/qtgui/rclmain.ui, src/qtgui/recoll.pro.in, src/qtgui/reslist.cpp, src/qtgui/searchclause_w.cpp, src/qtgui/spell_w.cpp, src/qtgui/ssearch_w.cpp, src/qtgui/ssearchb.ui, src/rcldb/searchdata.cpp, src/recoll.desktop, src/recollinstall.in, src/sampleconf/recoll.conf.in, src/utils/execmd.cpp, src/utils/mimeparse.cpp, src/utils/smallut.cpp, website/BUGS.txt, website/CHANGES.txt, website/download.html, website/features.html, website/index.html: merged 1.6 maint branch modifs up to MERGED_TO_TRUNK_20061130 2006-11-22 09:29 +0000 dockes (568c34cf75e9) * src/VERSION, website/BUGS.txt, website/CHANGES.txt, website/credits.html, website/download.html: *** empty log message *** 2006-11-21 14:00 +0000 dockes (f247e019bf08 [RECOLL_1_6_0]) * src/INSTALL, src/README: *** empty log message *** 2006-11-21 13:05 +0000 dockes (23604b23773d) * src/aspell/rclaspell.cpp, src/configure, src/configure.ac: mdk 2006 aspell quirks 2006-11-21 09:18 +0000 dockes (17597459707c) * packaging/FreeBSD/recoll/pkg-plist, packaging/rpm/recoll.spec, packaging/rpm/recollmdk.spec, src/doc/user/usermanual.sgml, src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts, website/BUGS.txt, website/CHANGES.txt, website/copydocs, website/download.html, website/index.html: *** empty log message *** 2006-11-21 08:47 +0000 dockes (f434e776fec8) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/spell.ui, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h, src/qtgui/uiprefs_w.cpp: added stem expansion mode to term explorer 2006-11-20 18:07 +0000 dockes (bc85af9f678c) * src/doc/man/recoll.conf.5, src/doc/man/recollindex.1, src/doc/user/usermanual.sgml: doc 2006-11-20 17:46 +0000 dockes (28cb0d8c325a) * src/doc/user/usermanual.sgml: *** empty log message *** 2006-11-20 17:06 +0000 dockes (9252428377e4) * src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts: *** empty log message *** 2006-11-20 15:35 +0000 dockes (192c101b8b7c) * src/qtgui/advsearch.ui: tooltip 2006-11-20 15:29 +0000 dockes (73ca6e78a1dd) * src/filters/rclxls, src/utils/transcode.cpp: *** empty log message *** 2006-11-20 15:28 +0000 dockes (9bb875d3bfcf) * src/rcldb/rcldb.cpp: clear abstract if its only ...
2006-11-20 15:28 +0000 dockes (5ef1b603c3be) * src/common/rclconfig.cpp: test driver 2006-11-20 15:28 +0000 dockes (1c4807a363f9) * src/common/rclconfig.h: fix defaultcharset reset 2006-11-20 11:17 +0000 dockes (ef95275586d1) * src/common/Makefile, src/common/textsplit.cpp, src/common/textsplit.h: improved textsplit speed (needs utf8iter modifs too 2006-11-20 11:16 +0000 dockes (e05653621eb4) * src/utils/Makefile, src/utils/utf8iter.cpp, src/utils/utf8iter.h: cleaned and speeded up utf8iter 2006-11-19 18:37 +0000 dockes (756bc7569b34) * src/common/textsplit.cpp, src/common/textsplit.h: optim ckpt 2006-11-18 12:56 +0000 dockes (bf6e4de3a902) * src/qtgui/plaintorich.cpp: firsttermocc init was not always done 2006-11-18 12:31 +0000 dockes (1703e5a7b03e) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview_w.cpp, src/qtgui/reslist.cpp: improve positionning on term groups by storing/passing an occurrence index 2006-11-18 12:30 +0000 dockes (f065c8063ff3) * src/rcldb/searchdata.cpp: correctly generate highlighting term groups when stem-expanding NEAR queries 2006-11-17 15:26 +0000 dockes (ee4a13877b24) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h: Save adv search clause list + add delete button 2006-11-17 12:55 +0000 dockes (679a2cb3d3e7) * src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h: get shift+clicklink to open new preview window instead of tab 2006-11-17 12:32 +0000 dockes (51f7db5eff83) * src/qtgui/plaintorich.cpp: small opts + fixed near region detection code 2006-11-17 12:31 +0000 dockes (c0ba08efc3dd) * src/qtgui/plaintorich.h: comments 2006-11-17 12:31 +0000 dockes (e54183706237) * src/utils/utf8iter.h: removed not strictly needed error checking code 2006-11-17 10:09 +0000 dockes (7e44d4280e2d) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/reslist.h: Remember searchData and use it in plaintorich for phrase/group highlighting 2006-11-17 10:08 +0000 dockes (c175799e9e72) * src/qtgui/advsearch_w.cpp: better data encap in searchdata 2006-11-17 10:06 +0000 dockes (0ea302968170) * src/rcldb/rcldb.cpp, src/rcldb/searchdata.cpp, src/rcldb/searchdata.h: added code to remember search terms and term groups in searchdata 2006-11-15 14:57 +0000 dockes (188b5b28427d) * src/common/Makefile, src/lib/Makefile, src/lib/mkMake, src/mk/commondefs, src/qtgui/recoll.pro.in, src/rcldb/pathhash.cpp, src/rcldb/pathhash.h, src/rcldb/rcldb.cpp, src/rcldb/searchdata.h, src/rcldb/stemdb.h: distributed files from common/ into rcld, internfile, common 2006-11-15 07:27 +0000 dockes (5bfc0444c072) * src/internfile/Makefile: new file. 
* src/internfile/Makefile: *** empty log message *** 2006-11-14 18:29 +0000 dockes (c45d7a2b1c63) * src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h: got rid of the static clause names 2006-11-14 18:17 +0000 dockes (e4789b229585) * src/qtgui/advsearch.ui: *** empty log message *** 2006-11-14 17:56 +0000 dockes (ae916c13c591) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp: added conjunction choice in advsearch 2006-11-14 17:41 +0000 dockes (dfc71f06c1ce) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/searchclause_w.cpp, src/qtgui/searchclause_w.h, src/rcldb/searchdata.cpp: use SearchClauseW for all advsearch fields 2006-11-14 15:13 +0000 dockes (300f3705d6cf) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp: *** empty log message *** 2006-11-14 14:58 +0000 dockes (c5f65c6f8fb9) * src/qtgui/recoll.pro.in: *** empty log message *** 2006-11-14 13:55 +0000 dockes (9e98c3d86016) * src/qtgui/searchclause_w.cpp, src/qtgui/searchclause_w.h: new file. * src/doc/user/usermanual.sgml, src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/rclmain_w.cpp, src/qtgui/searchclause_w.cpp, src/qtgui/searchclause_w.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.cpp, src/rcldb/searchdata.h: added dynamic clauses to adv search. Still needs work 2006-11-13 14:51 +0000 dockes (5c9db8d08690) * src/rcldb/rcldb.cpp: *** empty log message *** 2006-11-13 14:48 +0000 dockes (edec86240778) * src/rcldb/rcldb.cpp: use wdfs for better selection of doc extracts in makeAbstract 2006-11-13 11:59 +0000 dockes (fdf0f43cd03e) * src/utils/smallut.h: *** empty log message *** 2006-11-13 08:58 +0000 dockes (c48e54f96603) * src/utils/refcntr.h: new file. * src/utils/refcntr.h: *** empty log message *** 2006-11-13 08:58 +0000 dockes (40853ad94507) * src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/reslist.cpp, src/qtgui/reslist.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h: make searchdata a more flexible struct 2006-11-13 08:50 +0000 dockes (e585bfd6e725) * src/rcldb/searchdata.cpp: new file. 
* src/lib/Makefile, src/lib/mkMake, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.cpp, src/rcldb/searchdata.h: make searchdata a more flexible struct 2006-11-13 08:49 +0000 dockes (db3490f9b522) * src/kde/kioslave/recoll/kio_recoll.cpp: *** empty log message *** 2006-11-13 08:15 +0000 dockes (7240ec62ffac) * src/qtgui/plaintorich.cpp: new splitter interface 2006-11-12 08:35 +0000 dockes (ff9f3aed6a5b) * src/common/textsplit.cpp, src/common/textsplit.h, src/rcldb/rcldb.cpp: phrase queries with bot spans and words must be splitted as words only 2006-11-11 15:30 +0000 dockes (25647c7c5aac) * src/qtgui/reslist.cpp, src/qtgui/uiprefs.ui: have more compact list header + %N 2006-11-10 17:53 +0000 dockes (d423490bea37) * src/qtgui/reslist.cpp: Really use the rich abstracts 2006-11-10 17:18 +0000 dockes (9fc1a2d1b7af) * src/rcldb/rcldb.cpp: optimized abstract building: bybye big vector 2006-11-10 13:32 +0000 dockes (8cfbbddd355a) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain_w.cpp, src/qtgui/reslist.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp: make result list paragraph format user-adjustable 2006-11-10 13:30 +0000 dockes (933509968125) * src/utils/smallut.cpp, src/utils/smallut.h: pcSubst() 2006-11-10 13:29 +0000 dockes (17e0ecfb5834) * src/internfile/internfile.cpp: errlog 2006-11-09 19:04 +0000 dockes (814af75ba7a8) * src/qtgui/preview_w.cpp: better handle the situation of mbox file name matching search 2006-11-09 17:38 +0000 dockes (3c996d97497a) * src/Makefile.in: *** empty log message *** 2006-11-09 17:37 +0000 dockes (c13aab8ac186) * src/qtgui/reslist.cpp: nbsp to prevent line date wrap before tz 2006-11-09 17:37 +0000 dockes (573934250b27) * src/rcldb/rcldb.cpp: dont continue adding ellipsis into the abstract when its maxlen! 
2006-11-09 08:59 +0000 dockes (baafe52b9d1b) * src/utils/mimeparse.cpp: test driver modifs 2006-11-08 15:34 +0000 dockes (025fa484738a) * src/common/rclinit.cpp, src/utils/debuglog.cpp, src/utils/debuglog.h: fix pb with special log file names 2006-11-08 15:32 +0000 dockes (a32333c18e9f) * src/configure, src/configure.ac: aspell help string 2006-11-08 13:04 +0000 dockes (f5f0e953f42e) * src/qtgui/plaintorich.cpp: use vector instead of list for positions 2006-11-08 07:22 +0000 dockes (de7777528655) * src/common/rclinit.cpp, src/common/rclinit.h, src/index/recollindex.cpp: allow daemon-specific log parameters 2006-11-08 06:56 +0000 dockes (451b31555bc2) * src/utils/conftree.cpp, src/utils/conftree.h: volatile conf 2006-11-08 06:49 +0000 dockes (ba1e8fb12e39) * src/recollinstall.in: install rclmon.sh 2006-11-07 18:28 +0000 dockes (fb26679a6cec) * src/qtgui/reslist.cpp, src/utils/mimeparse.cpp: 1.5.9: fix bad tz correction in email dates + display tz in reslist 2006-11-07 16:51 +0000 dockes (affd0b42e8ae) * src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp: traces 2006-11-07 12:02 +0000 dockes (70ed645d27f3) * src/rcldb/rcldb.cpp: use both size and mtime changes as updateneeding indicator 2006-11-07 09:11 +0000 dockes (2491b468f55d) * src/qtgui/uiprefs.ui: improved autophrase tooltip 2006-11-07 09:04 +0000 dockes (ba7c28e1a205) * src/qtgui/uiprefs_w.cpp: Cancel did not reset uiprefs dialog to stored state 2006-11-07 08:57 +0000 dockes (c9f2b8c02171) * src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h: Cancel did not reset uiprefs dialog to stored state 2006-11-07 06:41 +0000 dockes (fea8781e4829) * src/index/indexer.cpp: record/show mtime instead of ctime 2006-11-06 17:37 +0000 dockes (a82b3932ac69) * src/doc/user/usermanual.sgml, src/qtgui/rclmain.ui, src/qtgui/spell_w.cpp, src/rcldb/rcldb.cpp: wrote manual for term explorer and fixed a few problems 2006-11-05 21:10 +0000 dockes (f4fc6544cb74) * src/bincimapmime/mime-parsefull.cc: fix binc imap infinite loop on multipart with null boundary 2006-11-05 18:02 +0000 dockes (9306096cb34f) * src/bincimapmime/depot.h, src/bincimapmime/session.h: deleted file. 
* src/bincimapmime/address.cc, src/bincimapmime/address.h, src/bincimapmime/convert.cc, src/bincimapmime/convert.h, src/bincimapmime/depot.h, src/bincimapmime/iodevice.cc, src/bincimapmime/iodevice.h, src/bincimapmime/iofactory.h, src/bincimapmime/mime-getpart.cc, src/bincimapmime/mime-inputsource.h, src/bincimapmime/mime-parsefull.cc, src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime-printdoc.cc, src/bincimapmime/mime-printheader.cc, src/bincimapmime/mime-utils.h, src/bincimapmime/mime.cc, src/bincimapmime/mime.h, src/bincimapmime/session.h: included bincimap 1.3.3 to 1.3.4 diffs (mostly cosmetic) 2006-11-04 17:09 +0000 dockes (3e0e0d4b152f) * src/qtgui/spell.ui, src/qtgui/spell_w.cpp: fix aspell version of term explorer 2006-11-04 14:49 +0000 dockes (7f914235875b) * src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h: change ctrl-tab to esc-spc 2006-10-30 12:59 +0000 dockes (2454d0c418a2) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain_w.cpp, src/qtgui/spell.ui, src/qtgui/spell_w.cpp, src/qtgui/ssearch_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: Turn spell tool into multimode spell/wild/regexp 2006-10-25 11:50 +0000 dockes (66843e6e167c) * src/index/indexer.cpp: make tmpdir only once 2006-10-25 10:52 +0000 dockes (10c2b1b74822) * src/index/indexer.cpp, src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp, src/rcldb/rcldb.cpp: added some debugging msgs (too much) 2006-10-24 15:16 +0000 dockes (1fc3f90d5ee3) * src/mk/Darwin: *** empty log message *** 2006-10-24 14:28 +0000 dockes (33512e5ceddb) * src/index/indexer.cpp, src/index/indexer.h, src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/recollindex.cpp: create stemming db on queue timeout if needed 2006-10-24 13:22 +0000 dockes (21df8a0f4856) * src/index/rclmon.sh: new file.
* src/index/rclmon.sh: *** empty log message *** 2006-10-24 12:48 +0000 dockes (4af32c44f8ea) * src/index/rclmon.h, src/index/rclmonprc.cpp: setup lockfile for monitor 2006-10-24 11:42 +0000 dockes (f922b4dda121) * src/query/Makefile: *** empty log message *** 2006-10-24 11:42 +0000 dockes (f1da6521f1ff) * src/qtgui/ssearch_w.cpp: explain error for C-TAB too many expansions 2006-10-24 09:28 +0000 dockes (3228b6b8093a) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: fix slowness in needUpdate by using Database instead of WritableDatabase 2006-10-24 09:09 +0000 dockes (0d72c341e2eb) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/indexer.cpp, src/index/rclmonrcv.cpp: centralize skippedNames computation to add dbdir always 2006-10-23 15:01 +0000 dockes (6a07dc59db99) * src/index/rclmon.h, src/index/rclmonrcv.cpp: handle directory creation 2006-10-23 15:00 +0000 dockes (672e4b4bfe51) * src/utils/pathut.cpp, src/utils/pathut.h: add path_isdir() 2006-10-23 14:29 +0000 dockes (d395ca679c7a) * src/common/autoconfig.h.in, src/configure, src/configure.ac, src/index/rclmonrcv.cpp: raw inotify support 2006-10-22 15:55 +0000 dockes (de0702a6c5e2) * src/mk/Linux: *** empty log message *** 2006-10-22 15:54 +0000 dockes (35832011eaf9) * src/rcldb/rcldb.cpp: simplify needUpdate test 2006-10-22 14:47 +0000 dockes (733c7646ca29) * src/configure, src/configure.ac, src/index/Makefile, src/index/indexer.cpp, src/index/indexer.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp, src/index/recollindex.cpp, src/mk/localdefs.in, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: monitor: purge docs for deleted files from db 2006-10-20 08:31 +0000 dockes (6d54039efe79) * src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts: new Ukrainian+Russian messages from Michael 2006-10-20 08:29 +0000 dockes (ebcb12870038) * src/qtgui/advsearch.ui, src/qtgui/rclmain.ui, src/qtgui/uiprefs.ui: small fixes on label strings 2006-10-17 14:41 +0000 dockes (ea77c15d81a6) * src/common/autoconfig.h.in, src/configure, src/configure.ac, src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp, src/index/recollindex.cpp: fam autoconfig 2006-10-16 15:33 +0000 dockes (aa570fc97bf9) * src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp: new file. * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/Makefile, src/index/indexer.cpp, src/index/indexer.h, src/index/rclmon.h, src/index/rclmonprc.cpp, src/index/rclmonrcv.cpp, src/index/recollindex.cpp: 1st version of real time monitor 2006-10-15 13:07 +0000 dockes (aa97f764d4a6) * src/qtgui/rclmain_w.cpp, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h: dbl click in spell win to add to ssearch 2006-10-12 14:46 +0000 dockes (78a3a37209ae) * src/configure.ac, src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp: recollindex -i now checks that the files are descendants of topdirs 2006-10-12 08:39 +0000 dockes (ab66430f3f7d) * src/doc/user/usermanual.sgml: *** empty log message *** 2006-10-11 16:09 +0000 dockes (1cf66e2b486f) * src/aspell/rclaspell.cpp, src/utils/execmd.cpp, src/utils/execmd.h: improve execcmd to avoid allocating an allterms buffer when creating dico 2006-10-11 14:16 +0000 dockes (26e08a8fc135) * src/qtgui/images/d_spell.png, src/qtgui/images/spell.png, src/qtgui/spell.ui, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h: new file. 
* src/aspell/rclaspell.cpp, src/aspell/rclaspell.h, src/common/autoconfig.h.in, src/common/rclconfig.h, src/configure, src/configure.ac, src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/mk/commondefs, src/mk/localdefs.in, src/qtgui/images/d_spell.png, src/qtgui/images/spell.png, src/qtgui/main.cpp, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/recoll.h, src/qtgui/recoll.pro.in, src/qtgui/spell.ui, src/qtgui/spell_w.cpp, src/qtgui/spell_w.h, src/sampleconf/recoll.conf.in, src/utils/smallut.cpp, src/utils/smallut.h: 1st full version of aspell support 2006-10-10 10:58 +0000 dockes (ed60d657e8e9) * src/aspell/aspell-local.h, src/common/autoconfig.h.in: new file. * src/aspell/aspell-local.h, src/common/autoconfig.h.in: *** empty log message *** 2006-10-09 16:37 +0000 dockes (93d9009c4d51) * src/VERSION, src/aspell/rclaspell.cpp, src/aspell/rclaspell.h, src/index/Makefile, src/lib/Makefile, src/lib/mkMake, src/makesrcdist.sh, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/stemdb.cpp, src/utils/execmd.cpp, src/utils/execmd.h: aspell checkpoint 2006-10-09 14:05 +0000 dockes (711360b10738) * src/aspell/Makefile, src/aspell/rclaspell.cpp, src/aspell/rclaspell.h: new file. * src/aspell/Makefile, src/aspell/rclaspell.cpp, src/aspell/rclaspell.h: *** empty log message *** 2006-10-03 08:34 +0000 dockes (4033d57b83da) * packaging/rpm/recoll.spec, packaging/rpm/recollmdk.spec: 1.5 2006-10-02 13:30 +0000 dockes (562f54fc8029) * src/VERSION, src/makestaticdist.sh: small glitches in makestaticdist 2006-10-02 12:33 +0000 dockes (7be5b4b7d6c0) * src/VERSION, src/configure, src/configure.ac, src/query/history.h: small glitches detected on suse / gcc 4.1 2006-10-02 11:25 +0000 dockes (8731b62606fe [RECOLL-1_5_8, RECOLL-1_5_3, RECOLL-1_5_6, RECOLL-1_5_7, RECOLL-1_5_4, RECOLL-1_5_5]) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-descr, packaging/FreeBSD/recoll/pkg-plist: 1.5.2 2006-10-02 08:38 +0000 dockes (ee82de281263) * src/VERSION: *** empty log message *** 2006-10-02 08:26 +0000 dockes (bafb3e762d82) * src/sampleconf/mimeconf: added 2 icons 2006-10-02 08:25 +0000 dockes (c3d47772ea99) * src/qtgui/mtpics/image.png, src/qtgui/mtpics/source.png: new file. * src/qtgui/mtpics/image.png, src/qtgui/mtpics/source.png: *** empty log message *** 2006-10-02 07:50 +0000 dockes (fe9700b5a6fe [RECOLL-1_5_2, RECOLL-1_5_1]) * src/INSTALL, src/README, website/BUGS.txt, website/CHANGES.txt, website/download.html, website/index.html: *** empty log message *** 2006-10-02 07:45 +0000 dockes (bc3c93581184) * src/VERSION, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts: 1.5.1 2006-09-29 11:43 +0000 dockes (bd51dd85dc0f) * src/doc/user/usermanual.sgml: aspell pass 2006-09-29 08:26 +0000 dockes (026d0b177533) * src/rcldb/rcldb.cpp: syntabs: remove size limit. Handle overlapping chunks.
Make sure we use only one term per position 2006-09-29 08:24 +0000 dockes (49342357f800) * src/qtgui/reslist.cpp: reset curPvDoc on setDocSource 2006-09-29 08:23 +0000 dockes (a9cef42dd219) * src/qtgui/uiprefs.ui: bump up limits on max abstract size parameters 2006-09-29 07:13 +0000 dockes (7973023d7c1b) * src/qtgui/ssearch_w.cpp: bad/unneeded conversion to utf8 while saving ssearch history would cause some string sizes in history to double at each program invocation 2006-09-28 14:32 +0000 dockes (a5bba26b0ac0) * src/qtgui/i18n/recoll_fr.ts: 1.5 2006-09-28 14:31 +0000 dockes (71d5895b7848) * src/qtgui/ssearchb.ui: improved tip 2006-09-28 14:30 +0000 dockes (903f443a7150) * src/doc/user/usermanual.sgml: reordered the tips 2006-09-28 11:55 +0000 dockes (3bf818bd1e39) * src/qtgui/main.cpp: debug messages 2006-09-23 13:32 +0000 dockes (3cf269bf18f0) * website/BUGS.txt, website/CHANGES.txt, website/download.html, website/features.html, website/index.html: *** empty log message *** 2006-09-23 13:13 +0000 dockes (06ed627f182d [RECOLL-1_5_0]) * src/INSTALL, src/README, src/makesrcdist.sh: *** empty log message *** 2006-09-23 13:11 +0000 dockes (0533f47f1c34) * src/INSTALL, src/doc/user/usermanual.sgml: *** empty log message *** 2006-09-23 13:09 +0000 dockes (9b8fdf62ad07) * src/doc/user/usermanual.sgml: *** empty log message *** 2006-09-23 07:39 +0000 dockes (33e469ad3a2e) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/preview_w.cpp, src/qtgui/reslist.cpp: fix file name display in tooltips 2006-09-23 07:39 +0000 dockes (e47ebb4e22ce) * src/internfile/mh_mail.cpp: fix newlines 2006-09-23 07:21 +0000 dockes (b5b530ea2ec9) * src/rcldb/rcldb.cpp: message 2006-09-22 14:11 +0000 dockes (3ef29a8417c7) * src/qtgui/i18n/recoll_fr.ts, src/rcldb/rcldb.cpp: msg 2006-09-22 10:46 +0000 dockes (9ade76cc0df5) * src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts, src/qtgui/uiprefs_w.cpp: messages 2006-09-22 08:19 +0000 dockes (c228a1515468) * src/qtgui/recoll.pro.in: *** empty log message *** 2006-09-22 07:51 +0000 dockes (df858f3508f4) * src/qtgui/recoll.pro.in: names cleanup 2006-09-22 07:42 +0000 dockes (de54384ab321) * src/utils/mimeparse.h: comment 2006-09-22 07:41 +0000 dockes (f37052248b5b) * src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h: new file. * src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll_fr.ts, src/qtgui/recoll_ru.ts, src/qtgui/recoll_uk.ts, src/qtgui/recollmain.ui: deleted file. * src/qtgui/i18n/recoll_fr.ts, src/qtgui/i18n/recoll_ru.ts, src/qtgui/i18n/recoll_uk.ts, src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclmain.ui, src/qtgui/rclmain_w.cpp, src/qtgui/rclmain_w.h, src/qtgui/recoll_fr.ts, src/qtgui/recoll_ru.ts, src/qtgui/recoll_uk.ts, src/qtgui/recollmain.ui: names cleanup: rclmain, translations 2006-09-22 07:38 +0000 dockes (c17bf757689b) * src/recollinstall.in: names cleanup: translations 2006-09-22 07:29 +0000 dockes (2d749704a22b) * src/qtgui/reslist.cpp, src/qtgui/reslist.h: new file. * src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: deleted file. 
* src/qtgui/rclmain.cpp, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/recollmain.ui, src/qtgui/reslist.cpp, src/qtgui/reslist.h: names cleanup: reslist 2006-09-22 07:22 +0000 dockes (cd9f046bf5e3) * src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: clarified preview paragraph coloring in reslist 2006-09-22 07:19 +0000 dockes (c700f9f95168) * src/internfile/mh_mail.cpp: clarified depth processing and increased limit 2006-09-21 12:56 +0000 dockes (334ef2914129) * src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: synchronize preview tab and colored paragraph in result list 2006-09-21 09:37 +0000 dockes (43c279d4d112) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/sort_w.cpp, src/qtgui/sort_w.h, src/query/sortseq.cpp, src/query/sortseq.h: remember sort criteria 2006-09-21 05:59 +0000 dockes (e5e9c1ffa44c) * src/internfile/myhtmlparse.cpp: dont throw away text even if html is weird 2006-09-21 05:59 +0000 dockes (4e99ebec009f) * src/common/textsplit.cpp: 132.jpg was not split 2006-09-21 05:57 +0000 dockes (3bc572456a49) * src/common/Makefile, src/utils/Makefile: *** empty log message *** 2006-09-20 06:21 +0000 dockes (5829221e8612) * src/rcldb/stemdb.cpp: comments 2006-09-19 14:30 +0000 dockes (598f2c534c4c) * src/rcldb/stemdb.cpp: Stems with unique parent must be in db too so that one can search on stem (which is not a term) 2006-09-19 14:30 +0000 dockes (98cd92c958bd) * src/internfile/mh_mail.cpp, src/internfile/mh_mail.h: walk the full mime tree instead of staying at level 1 2006-09-19 14:19 +0000 dockes (12fcb57186c2) * src/configure, src/configure.ac: *** empty log message *** 2006-09-19 14:19 +0000 dockes (88bbc8f18b9e) * src/utils/mimeparse.cpp: disable date debug msgs 2006-09-19 14:18 +0000 dockes (a0016b0e9969) * src/query/xadump.cpp: add option to dump a recoll stemdb 2006-09-18 12:17 +0000 dockes (e662e0bbe85e) * src/README: *** empty log message *** 2006-09-15 16:50 +0000 dockes (315f1c1d3dd3) * src/VERSION, src/doc/user/usermanual.sgml, src/internfile/mh_mail.cpp, src/utils/mimeparse.cpp, src/utils/mimeparse.h: Use own code to parse rfc822 dates, strptime() cant do 2006-09-15 16:49 +0000 dockes (ca133771bc5b) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/recoll.pro.in, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp: small typo fixes 2006-09-15 12:36 +0000 dockes (47354227e577) * src/INSTALL, src/README: *** empty log message *** 2006-09-14 07:13 +0000 dockes (b717321f9de4 [RECOLL-1_4_4]) * *** empty log message *** 2006-09-14 07:13 +0000 dockes (919e6e0dfc56) * website/BUGS.txt, website/CHANGES.txt, website/copydocs, website/credits.html, website/devel.html, website/download.html, website/features.html, website/index.html, website/pics/index.html, website/styles/style.css: new file. 
* website/BUGS.txt, website/CHANGES.txt, website/copydocs, website/credits.html, website/devel.html, website/download.html, website/features.html, website/index.html, website/pics/index.html, website/styles/style.css: *** empty log message *** 2006-09-13 15:31 +0000 dockes (9bd2431eaa66) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/ssearch_w.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp: autophrase parameter 2006-09-13 14:57 +0000 dockes (0d952f522055) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/rclreslist.cpp, src/query/docseq.cpp, src/query/docseq.h: colorize search terms in abstracts 2006-09-13 13:53 +0000 dockes (5980807171a8) * src/index/indexer.cpp, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/ssearch_w.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/sampleconf/recoll.conf.in: make constant lengths for abstracts config params 2006-09-13 08:13 +0000 dockes (6e43869ceb61) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs_w.cpp: add feature to save asearch ignored file types as startup default 2006-09-12 10:11 +0000 dockes (9b323c436beb) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearchb.ui: allow paging through results inside a preview window with shift-up shift-down 2006-09-11 14:22 +0000 dockes (f0dd93428e23) * src/doc/user/usermanual.sgml, src/qtgui/advsearch.ui: try to make clearer that adv search fields will accept phrases as well as single words 2006-09-11 12:05 +0000 dockes (f455fbc6a42a) * src/qtgui/advsearch.ui, src/qtgui/advsearch_w.cpp, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearchb.ui, src/query/history.cpp, src/query/history.h: remember history of restrict subdirs in adv search 2006-09-11 09:08 +0000 dockes (ad274e633ffb) * src/qtgui/guiutils.cpp, src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll.h, src/qtgui/uiprefs.ui, src/query/docseq.h, src/query/history.cpp, src/query/history.h: use the (should be renamed) history file to store external databases lists 2006-09-11 07:10 +0000 dockes (6cb09384f54a) * src/qtgui/ssearch_w.cpp, src/qtgui/ssearchb.ui: maintain ssearches listbox in mru order 2006-09-11 06:58 +0000 dockes (b62d0be5650e) * src/qtgui/advsearch.ui, src/qtgui/sort.ui, src/qtgui/uiprefs.ui: ensure dialogs are sized according to font size 2006-09-08 09:02 +0000 dockes (a5a31c9b0a37) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/rclinit.cpp, src/common/rclinit.h, src/doc/man/recoll.1, src/doc/man/recoll.conf.5, src/doc/man/recollindex.1, src/doc/user/usermanual.sgml, src/index/recollindex.cpp, src/qtgui/guiutils.cpp, src/qtgui/main.cpp: Add -c option to recoll and recollindex 2006-09-08 08:51 +0000 dockes (315e0865ec26) * src/sampleconf/recoll.conf.in: The dbdir default value is now relative to the cnf dir 2006-09-06 09:50 +0000 dockes (e696d98fe7fe) * src/qtgui/uiprefs_w.cpp: Used to reset the buildAbstract replaceAbstract options because of setDown instead of setChecked 2006-09-06 09:14 +0000 dockes (0dedd735c86e) * src/utils/mimeparse.cpp, src/utils/mimeparse.h: implement rfc2231 decoding for mime 
parameter values 2006-09-05 17:09 +0000 dockes (95fd6b3a5b9a) * src/internfile/mh_mail.cpp: let mimeparse handle decoding or param values 2006-09-05 09:52 +0000 dockes (44182523e711) * src/filters/rclppt, src/filters/rclxls: new file. * src/filters/rclppt, src/filters/rclxls, src/sampleconf/mimeconf, src/sampleconf/mimemap: added support for ppt and xls via catdoc 2006-09-05 08:05 +0000 dockes (587719349228) * src/internfile/mh_mail.cpp, src/internfile/mh_mail.h: index and display attachment file names 2006-09-05 08:04 +0000 dockes (6f8b09a74d14) * src/utils/mimeparse.cpp, src/utils/mimeparse.h: comments only 2006-09-04 15:13 +0000 dockes (0f11e18480b2) * src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/preview.ui, src/qtgui/preview.ui.h, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/sort_w.cpp, src/qtgui/sort_w.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h, src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h: new file. * src/qtgui/preview/preview.pro, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp: deleted file. * src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/advsearch_w.cpp, src/qtgui/advsearch_w.h, src/qtgui/guiutils.cpp, src/qtgui/main.cpp, src/qtgui/preview.ui, src/qtgui/preview.ui.h, src/qtgui/preview/preview.pro, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp, src/qtgui/preview_w.cpp, src/qtgui/preview_w.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll.pro.in, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/qtgui/sort.ui, src/qtgui/sort.ui.h, src/qtgui/sort_w.cpp, src/qtgui/sort_w.h, src/qtgui/ssearch_w.cpp, src/qtgui/ssearch_w.h, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h, src/qtgui/uiprefs_w.cpp, src/qtgui/uiprefs_w.h: mostly cosmetic changes to prepare for a future qt4 port: better separate form design from code 2006-06-29 11:05 +0000 dockes (8f28af2cb548) * src/qt4gui/recollmain.ui: *** empty log message *** 2006-06-24 09:56 +0000 dockes (fb2180e4d577) * src/qt4gui/recollmain.ui: qt4 cleanup: merged back rclmainbase and rclmain 2006-06-24 07:40 +0000 dockes (e1b5ffd88b25) * src/Makefile.in, src/VERSION, src/configure, src/configure.ac, src/doc/user/usermanual.sgml, src/qt4gui/recoll.pro.in, src/qt4gui/recollmain.ui, src/recollinstall.in: more qt4, unfinished 2006-06-23 08:07 +0000 dockes (46a46e406504) * src/qt4gui/recoll.pro.in, src/qt4gui/recollmain.ui: new file. * src/qt4gui/recoll.pro.in, src/qt4gui/recollmain.ui: added qt4gui code from Gennadi Sushko 2006-05-22 07:04 +0000 dockes (2663a50d4760) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo: 1.4.3 2006-05-09 10:15 +0000 dockes (c9a62f0cb289) * src/rcldb/rcldb.cpp: perform stem expansion using all active dbs 2006-05-09 07:56 +0000 dockes (29feec461985) * src/qtgui/preview/preview.ui.h, src/qtgui/rclreslist.cpp: esc quits preview + prev/next links 2006-05-08 07:08 +0000 dockes (185da0be6900) * src/recollinstall.in: install icon 2006-05-08 07:08 +0000 dockes (7dbebc260389) * src/qtgui/rclreslist.cpp: *** empty log message *** 2006-05-07 14:18 +0000 dockes (2f273f645a91) * packaging/rpm/recoll.spec: 1.4.3 2006-05-07 14:18 +0000 dockes (16b38a704d8e) * packaging/rpm/recoll.spec, packaging/rpm/recollmdk.spec: 1.3.3 2006-05-07 14:10 +0000 dockes (4ab20caea142 [RECOLL-1_4_3]) * src/VERSION: Release 1.4.3 2006-05-06 17:25 +0000 dockes (e7b4fd0f97fa) * src/recoll.png, src/recoll.xcf: new file. 
* src/qtgui/recoll.pro.in, src/recoll.png, src/recoll.xcf: *** empty log message *** 2006-05-06 17:24 +0000 dockes (aae37ad598a9) * src/qtgui/recoll_ru.ts, src/qtgui/recoll_uk.ts: new from michael 2006-05-02 09:49 +0000 dockes (fb5bb4665925 [RECOLL-1_4_2]) * src/qtgui/guiutils.cpp, src/rcldb/stemdb.cpp, src/unac/unac.c: more fbsd4 tweaks: Release 1.4.2 2006-04-30 07:44 +0000 dockes (d686e45d4b5e) * src/rcldb/rcldb.cpp: fbsd4 tweaks 2006-04-30 07:39 +0000 dockes (b889e57b87d6) * src/VERSION, src/index/indexer.cpp, src/index/indexer.h, src/lib/Makefile, src/lib/mkMake: fbsd4 tweaks 2006-04-30 07:26 +0000 dockes (72f2881955d1 [RECOLL-1_4_1]) * src/Makefile.in, src/README: *** empty log message *** 2006-04-30 07:23 +0000 dockes (172a9e09b77c) * src/Makefile.in: *** empty log message *** 2006-04-30 07:20 +0000 dockes (7be76a62e017) * src/qtgui/recoll_fr.ts, src/qtgui/recoll_ru.ts, src/qtgui/recoll_uk.ts: lupdate+french 2006-04-28 07:54 +0000 dockes (5b44017502c3) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/indexer.cpp, src/index/recollindex.cpp, src/kde/kioslave/recoll/kio_recoll.cpp, src/qtgui/main.cpp, src/query/qtry.cpp: centralize dbdir computation in rclconfig+cat with conffdir if not absolute 2006-04-28 07:23 +0000 dockes (436f58f83459) * src/utils/transcode.cpp: change debug log trace 2006-04-27 09:23 +0000 dockes (3df68e37cdd9) * src/doc/user/usermanual.sgml, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain.cpp, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h: make ssearch a combobox 2006-04-27 06:12 +0000 dockes (e7c0f6cd73f0) * src/configure, src/configure.ac, src/lib/Makefile, src/lib/mkMake: fix pb with .deps not existing 2006-04-27 06:12 +0000 dockes (83e1c6a16ca6) * src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: use getmatchingterms instead of getqueryterms for highlighting etc. in preview 2006-04-26 11:51 +0000 dockes (fa1cc55f05e9) * src/INSTALL, src/README: *** empty log message *** 2006-04-26 11:29 +0000 dockes (d92273eb3274) * src/qtgui/rclmain.cpp, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: dblclck in reslist adds to search lineedit 2006-04-25 09:59 +0000 dockes (16f32a4eda4c) * src/index/indexer.cpp, src/rcldb/rcldb.cpp: new way for doc unique terms: only path for monodoc, only path+ipath for doc inside multidoc, add pseudo-doc for file itself 2006-04-25 08:17 +0000 dockes (4c947b29c23c) * src/common/textsplit.cpp, src/rcldb/rcldb.cpp: fixed small glitch in abstract text splitting 2006-04-23 13:37 +0000 dockes (ea8caddeb344) * src/lib/mkMake: new file. * src/lib/mkMake: *** empty log message *** 2006-04-22 06:27 +0000 dockes (1b0dd24cad31) * src/qtgui/main.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.h: turn-off abst. build for fname search (no terms) + prototype query expansion (xapian e-set on chosen doc) + dbl-click in preview adds term to ssearch 2006-04-20 09:20 +0000 dockes (4b9c3c7bcb49) * src/common/rclconfig.cpp, src/lib/Makefile, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/qtgui/sort.ui, src/qtgui/sort.ui.h, src/qtgui/ssearchb.ui.h, src/query/sortseq.cpp, src/query/sortseq.h: mode 700 on .recoll. 
move showquerydetails to rclreslist 2006-04-19 08:26 +0000 dockes (9ec7ff1d0d53) * src/rcldb/searchdata.h: new file. * src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/main.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/rcldb/searchdata.h: compacted res list + completions in ssearch + additional or field 2006-04-18 08:53 +0000 dockes (7c4352949f19) * src/index/recollindex.cpp, src/lib/Makefile, src/qtgui/advsearch.ui, src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/rclmain.cpp, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/recollmain.ui, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h: new libs Makefile+autoSearchOnWS 2006-04-15 17:15 +0000 dockes (cc178f316e64) * src/qtgui/main.cpp, src/query/Makefile: small aix tweaks 2006-04-15 16:51 +0000 dockes (356148054ef1) * src/mk/AIX: new file. * src/mk/AIX: *** empty log message *** 2006-04-13 09:50 +0000 dockes (fe982a2684e4) * src/rcldb/stemdb.cpp, src/rcldb/stemdb.h: new file. * src/lib/Makefile, src/rcldb/rcldb.cpp, src/rcldb/stemdb.cpp, src/rcldb/stemdb.h: extracted stem database from rcldb to make it smaller 2006-04-12 10:41 +0000 dockes (6892025a5c8e) * src/index/indexer.cpp, src/index/indexer.h, src/qtgui/idxthread.cpp, src/qtgui/idxthread.h, src/qtgui/rclmain.cpp, src/qtgui/recollmain.ui, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: improve indexing status reporting 2006-04-12 07:26 +0000 dockes (44ac63815611) * src/qtgui/rclmain.cpp, src/rcldb/rcldb.cpp: Fix history not working after thread index run 2006-04-11 07:14 +0000 dockes (bd453ed96f6a) * src/rcldb/rcldb.cpp: fix rare case where stem itself was forgotten in list of possible derivatives 2006-04-11 06:49 +0000 dockes (dd7f793fdf8e) * src/common/textsplit.cpp, src/rcldb/rcldb.cpp, src/utils/smallut.cpp, src/utils/smallut.h: comments and moving some util routines out of rcldb.cpp 2006-04-08 14:00 +0000 dockes (d60e656c348f) * src/VERSION, src/doc/user/usermanual.sgml: *** empty log message *** 2006-04-07 13:10 +0000 dockes (dc4ff4178b85) * src/qtgui/advsearch.ui.h, src/rcldb/rcldb.cpp: check for and forbid pure negative query 2006-04-07 13:08 +0000 dockes (7da00eb0c7aa) * src/qtgui/guiutils.cpp: RECOLL_EXTRA_DBS environment variable 2006-04-07 13:07 +0000 dockes (f040ff3bf0aa) * src/doc/user/usermanual.sgml: use indexing instead of indexation 2006-04-07 08:51 +0000 dockes (52451e342e49) * src/internfile/mh_mail.cpp, src/internfile/mh_mail.h: comments+conventions 2006-04-06 17:39 +0000 dockes (7d2906a0371d) * packaging/FreeBSD/recoll/pkg-plist: merge modif from committer 2006-04-06 14:28 +0000 dockes (52d4a2c2a341) * src/sampleconf/recoll.conf.in: stem only for english by default 2006-04-06 13:09 +0000 dockes (fa565da09aa7 [RECOLL-1_4_0]) * src/VERSION: 1.4.0 2006-04-06 13:08 +0000 dockes (1436c843e74e) * src/VERSION, src/rcldb/rcldb.h, src/recollinstall.in: rpmlint wants 755 for execs 2006-04-06 12:34 +0000 dockes (687369f6736c) * src/INSTALL, src/README: *** empty log message *** 2006-04-05 15:41 +0000 dockes (359711d5fbe3) * src/recollinstall.in: fix the installed file perms 2006-04-05 13:39 +0000 dockes (fdd0d33d5c2e) * src/qtgui/guiutils.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: small fixes for extra db selection 2006-04-05 13:30 +0000 dockes (3277935457e9) * src/doc/user/usermanual.sgml: small fixes 2006-04-05 12:50 +0000 dockes (98d8d7d74aee) * 
src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/main.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: additional search databases 2006-04-05 06:26 +0000 dockes (e8f1cc7c2bbf) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: simplified class structure 2006-04-04 16:03 +0000 dockes (ddb66052e3e8 [RECOLL-1_4_0pre1]) * src/filters/rcldjvu, src/filters/rclmedia, src/filters/rclps, src/filters/rclsoff: handle paths with embedded spaces 2006-04-04 15:44 +0000 dockes (933f89f10033) * src/filters/rcluncomp: handle paths with embedded spaces 2006-04-04 13:49 +0000 dockes (dec4932652ae) * src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/qtgui/idxthread.cpp, src/qtgui/idxthread.h, src/qtgui/rclmain.cpp: make indexation more easily cancellable 2006-04-04 12:37 +0000 dockes (17342a5b330b) * src/doc/user/usermanual.sgml, src/index/indexer.cpp, src/index/indexer.h: check for symlinks in the topdirs list. Generate diags in confIndexer 2006-04-04 10:38 +0000 dockes (ec632eb29364) * src/qtgui/guiutils.cpp, src/qtgui/rclreslist.cpp: setup initial default window size smaller 2006-04-04 09:36 +0000 dockes (d175998d9270) * src/common/unacpp.cpp, src/unac/unac.c: clarify/clean up mem buffer handling 2006-04-04 09:35 +0000 dockes (b14f9df37817) * src/utils/conftree.h: fix potential minor memory leak when copying conftrees 2006-04-04 09:34 +0000 dockes (aaddcbb06c7a) * src/index/indexer.cpp, src/index/indexer.h: cosmetic: add m_ prefix to private vars 2006-04-04 07:55 +0000 dockes (1bda0dcafd17) * src/qtgui/main.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.cpp: Get things to compile with QT_NO_STL 2006-04-03 14:16 +0000 dockes (4957b439000e) * packaging/FreeBSD/recoll/Makefile: add mods from port tree 2006-04-03 12:59 +0000 dockes (62a51f626e46) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo: *** empty log message *** 2006-04-03 11:43 +0000 dockes (0449d2ba4099 [RECOLL-1_3_3]) * src/VERSION: 1.3.3 2006-04-03 11:43 +0000 dockes (6b54e368d2d1) * src/common/rclconfig.cpp: small port fixes for fbsd4 and solaris 2006-04-03 09:42 +0000 dockes (cd9fe4976fae) * src/utils/execmd.cpp: warning 2006-04-01 21:02 +0000 dockes (eef792b97ce8 [RECOLL-1_3_2]) * src/VERSION, src/qtgui/rclmain.cpp: limit max length of displayed query details. 1.3.2 2006-04-01 09:15 +0000 dockes (315da01fb1a5 [RECOLL-1_3_1]) * src/INSTALL, src/README: *** empty log message *** 2006-04-01 09:15 +0000 dockes (e9f0a85fc18e) * src/INSTALL, src/README, src/qtgui/rclreslist.cpp, src/qtgui/ssearchb.ui: updated INSTALL+README. Fix tab focus in main window 2006-04-01 08:07 +0000 dockes (916faf93fb66) * src/qtgui/rclmain.cpp: urlencode file name before executing ext app with url param 2006-04-01 07:48 +0000 dockes (adbde9cd60b9) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-plist: 1.3.1 2006-04-01 07:34 +0000 dockes (4cd3e4e4074c) * src/sampleconf/mimeconf, src/sampleconf/mimemap: Allow ext edit for c/c++ files. 1.3.1 2? 2006-03-31 17:19 +0000 dockes (35db6f17bdd8) * src/VERSION, src/qtgui/rclmain.cpp, src/qtgui/rclreslist.cpp, src/qtgui/recoll_ru.ts, src/qtgui/recoll_uk.ts: fixed reslist header charset issues. 1.3.1 first 2006-03-31 09:02 +0000 dockes (ae3d9c9deb6d) * src/INSTALL, src/README: *** empty log message *** 2006-03-31 07:51 +0000 dockes (0fccf51c6905 [RECOLL-1_3_1pre3]) * src/qtgui/recoll_uk.ts: new file.
* src/qtgui/recoll_uk.ts: *** empty log message *** 2006-03-30 13:00 +0000 dockes (b41828dda0ac) * src/common/Makefile: cleanup rclconfig 2006-03-30 10:31 +0000 dockes (afbdbc31ff1c) * src/sampleconf/recoll.conf.in: dont set defaultcharset to 8859-1: will let nls info be used 2006-03-30 10:31 +0000 dockes (582fa2a09db3) * src/doc/user/usermanual.sgml: *** empty log message *** 2006-03-30 08:19 +0000 dockes (89efa1c78c3c [RECOLL-1_3_1pre2]) * src/qtgui/recoll_fr.ts, src/qtgui/recoll_ru.ts: lupdate 2006-03-30 07:54 +0000 dockes (0b236faa0b9d) * src/qtgui/advsearch.ui: cleaned up layout 2006-03-29 17:31 +0000 dockes (7cb115f5789c) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h, src/qtgui/main.cpp, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h: gui: replaced checkboxes for all/filename in simple search with droplist 2006-03-29 13:08 +0000 dockes (ce199bb02759) * src/VERSION, src/common/Makefile, src/common/rclconfig.cpp, src/common/rclconfig.h, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/qtgui/rclreslist.cpp, src/sampleconf/mimeconf, src/sampleconf/mimemap: result list: show preview and edit links only when they can be used 2006-03-29 11:18 +0000 dockes (5f22b93705b4) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/rclinit.cpp, src/index/indexer.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/pathut.cpp, src/utils/pathut.h, src/utils/transcode.cpp, src/utils/transcode.h: try to better handle non-ascii file names 2006-03-28 12:49 +0000 dockes (a081a1b65de1) * src/doc/man/recoll.conf.5, src/doc/user/usermanual.sgml, src/qtgui/recoll.pro.in, src/recollinstall.in, src/sampleconf/recoll.conf.in: 1.3.1pre1 2006-03-28 12:18 +0000 dockes (7429c22d162b) * src/INSTALL, src/README: *** empty log message *** 2006-03-28 09:38 +0000 dockes (25e1ed25acc5) * src/filters/rclmedia, src/qtgui/mtpics/sownd.png: new file. * src/filters/rclmedia, src/qtgui/mtpics/sownd.png, src/sampleconf/mimeconf, src/sampleconf/mimemap: filter for indexing mp3 tags 2006-03-28 09:36 +0000 dockes (fb852147db29) * src/internfile/mh_unknown.h: new file. * src/internfile/mh_unknown.h: added code to specifically index/search file names 2006-03-22 16:24 +0000 dockes (4467274ce405) * src/index/indexer.cpp, src/index/indexer.h, src/qtgui/idxthread.cpp, src/qtgui/idxthread.h, src/qtgui/rclmain.cpp, src/qtgui/recollmain.ui: show current filename as feedback during indexation 2006-03-22 14:25 +0000 dockes (5dae5f8a140d) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/utils/conftree.cpp, src/utils/conftree.h: Replace user config with central values + override 2006-03-22 11:17 +0000 dockes (1f04e3bfeb4a) * src/qtgui/rclreslist.cpp: fix size display 2006-03-21 15:11 +0000 dockes (88d6359d2739) * src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: implement right click menu in result list 2006-03-21 13:46 +0000 dockes (56610f5d03b3) * src/qtgui/rclmain.cpp, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: replaced (double)clicks in the result list with links 2006-03-21 13:27 +0000 dockes (cc41e73a4f5a) * src/qtgui/rclreslist.cpp: ckpt 2006-03-21 11:04 +0000 dockes (b1dc67961a45) * src/index/mimetype.cpp: sanity check on file -i return 2006-03-21 09:15 +0000 dockes (8589c7c01f25) * src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h: new file. * src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h: deleted file. 
* src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/rclreslist.cpp, src/qtgui/rclreslist.h, src/qtgui/recollmain.ui, src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h: reslistb form replaced by object derived from QTextBrowser 2006-03-20 16:05 +0000 dockes (70c0ec0275a9) * src/VERSION, src/index/indexer.cpp, src/internfile/internfile.cpp, src/internfile/mimehandler.cpp, src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/ssearchb.ui, src/qtgui/ssearchb.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: added code to specifically index/search file names 2006-03-20 15:14 +0000 dockes (86bb2d64fdd9) * src/internfile/mh_text.cpp: get rid of unused temp 2006-03-20 09:54 +0000 dockes (fea74448199d) * src/utils/pathut.h: comments 2006-03-20 09:54 +0000 dockes (bf4772fd96ff) * src/sampleconf/mimemap: add # to ignd suffixes 2006-03-20 09:51 +0000 dockes (218c67bcb769) * src/common/rclconfig.cpp, src/common/rclconfig.h: try to get default charset from LANG if not in config 2006-03-20 09:50 +0000 dockes (2d633e45c451) * src/makestaticdist.sh: desktop file 2006-03-16 14:00 +0000 dockes (b45dd89bb177) * src/recoll.desktop: new file. * src/recoll.desktop: initial version from Michael Shigorin 2006-03-16 13:49 +0000 dockes (e3e216dfacb6) * src/qtgui/recoll_ru.ts: new file. * src/qtgui/recoll_ru.ts: initial version from Michael Shigorin 2006-03-04 10:09 +0000 dockes (983d0984e972 [RECOLL-1_2_3]) * src/VERSION, src/doc/user/usermanual.sgml: 1.2.3 2006-02-21 12:57 +0000 dockes (29500b27662b) * src/INSTALL, src/README: *** empty log message *** 2006-02-21 12:56 +0000 dockes (0bc6bf836dfe) * src/Makefile.in, src/configure, src/configure.ac: ensure Makefile uses same qmake as configure 2006-02-21 12:52 +0000 dockes (9a69d49b1448) * src/query/docseq.h, src/query/sortseq.h: sorted sequence title would never show 2006-02-07 10:26 +0000 dockes (8881db16fe21) * src/qtgui/reslistb.ui.h, src/query/docseq.cpp, src/query/docseq.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: fix problems with doc fetch sequence (have to know where to stop) 2006-02-07 09:44 +0000 dockes (fbfb30458fc2) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h: replace computation of term positions in editor text with search for 1st query term 2006-02-03 11:47 +0000 dockes (1dbf9bcedfc0) * src/filters/rcldvi: option to use catdvi 2006-02-03 10:53 +0000 dockes (f219261a580b) * src/filters/rcldjvu, src/filters/rcldvi: new file. 
* src/filters/rcldjvu, src/filters/rcldvi, src/filters/rclps, src/sampleconf/mimeconf, src/sampleconf/mimemap: added dvi and djvu support 2006-02-02 09:45 +0000 dockes (71a4b9e391e0) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo: 1.2.2 2006-02-02 08:58 +0000 dockes (c2f3b36a7169 [RECOLL-1_2_2]) * src/rcldb/rcldb.cpp, src/utils/pathut.cpp: suppress 2 compilation warnings (one was actual 64bits bug but inocuous 2006-02-02 08:35 +0000 dockes (4d473bd0d9a8) * src/Makefile.in, src/VERSION, src/configure, src/configure.ac, src/qtgui/main.cpp, src/qtgui/preview/preview.ui.h: fix small cc glitches: qt3.1, xapian-config 2006-02-01 14:34 +0000 dockes (a4deac6ede77 [RECOLL-1_2_1]) * src/qtgui/guiutils.cpp, src/qtgui/reslistb.ui.h: fbsd4 cc 2006-02-01 14:27 +0000 dockes (b005945089dc) * src/VERSION: *** empty log message *** 2006-02-01 14:18 +0000 dockes (1f6da4b2f946) * src/common/textsplit.cpp: use string::erase() not clear() 2006-02-01 09:00 +0000 dockes (09b3a24a6173 [RECOLL-1_2_0]) * src/recollinstall.in: *** empty log message *** 2006-02-01 08:19 +0000 dockes (bef8d87339d0) * packaging/rpm/recoll.spec: *** empty log message *** 2006-02-01 07:14 +0000 dockes (5c4deca7b177) * src/excludefile, src/utils/base64.cpp: *** empty log message *** 2006-02-01 07:12 +0000 dockes (77e021af3fa0) * src/INSTALL, src/README, src/doc/user/usermanual.sgml, src/excludefile, src/makesrcdist.sh: *** empty log message *** 2006-01-31 11:39 +0000 dockes (73f22e91d844) * src/qtgui/reslistb.ui.h: Clicking on "No results found" will also display the expanded query 2006-01-31 11:39 +0000 dockes (c225bd05e9c1) * src/qtgui/recoll.h: close/reopen db by default: let us see results of recollindex -i 2006-01-30 12:51 +0000 dockes (cd40d5627d38) * src/qtgui/recoll_fr.ts: *** empty log message *** 2006-01-30 11:15 +0000 dockes (962649c706ef) * src/common/rclconfig.h, src/common/rclinit.h, src/common/textsplit.h, src/common/unacpp.h, src/common/uproplist.h, src/index/csguess.h, src/index/indexer.h, src/index/mimetype.h, src/internfile/htmlparse.cpp, src/internfile/htmlparse.h, src/internfile/indextext.h, src/internfile/internfile.h, src/internfile/mh_exec.h, src/internfile/mh_html.h, src/internfile/mh_mail.h, src/internfile/mh_text.h, src/internfile/mimehandler.h, src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h, src/qtgui/advsearch.ui.h, src/qtgui/guiutils.h, src/qtgui/idxthread.h, src/qtgui/plaintorich.h, src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.h, src/qtgui/recoll.h, src/qtgui/recollmain.ui.h, src/qtgui/reslistb.ui.h, src/qtgui/sort.ui.h, src/qtgui/ssearchb.ui.h, src/qtgui/uiprefs.ui.h, src/query/docseq.h, src/query/history.h, src/query/sortseq.h, src/rcldb/pathhash.h, src/rcldb/rcldb.h, src/utils/base64.h, src/utils/cancelcheck.h, src/utils/conftree.h, src/utils/copyfile.h, src/utils/debuglog.h, src/utils/execmd.h, src/utils/fstreewalk.h, src/utils/idfile.h, src/utils/mimeparse.h, src/utils/pathut.h, src/utils/readfile.h, src/utils/smallut.h, src/utils/transcode.h, src/utils/utf8iter.h, src/utils/wipedir.h: *** empty log message *** 2006-01-30 10:01 +0000 dockes (f683194d38a4) * src/qtgui/preview/preview.ui.h: dont highlight terms in very big docs: too slow 2006-01-30 09:32 +0000 dockes (dc8cbf051f54) * src/mk/localdefs.in: -O2 2006-01-30 09:28 +0000 dockes (af56f00261eb) * src/qtgui/guiutils.cpp, src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h: help browser selection in prefs 2006-01-30 09:28 +0000 dockes (df275d18bee6) * src/utils/execmd.cpp: 
*** empty log message *** 2006-01-30 09:28 +0000 dockes (6d7b08c3bba0) * src/common/textsplit.cpp: moved span cleanup where it belonged 2006-01-28 15:36 +0000 dockes (b65e6344a9e4) * src/common/textsplit.cpp, src/common/textsplit.h: *** empty log message *** 2006-01-28 10:23 +0000 dockes (507b05e72779) * src/common/textsplit.cpp, src/common/textsplit.h, src/configure, src/configure.ac, src/query/xadump.cpp, src/utils/utf8iter.h: more textsplit tweaking 2006-01-27 13:43 +0000 dockes (8ed38cba7965) * src/utils/cancelcheck.h: new file. * src/utils/cancelcheck.h: *** empty log message *** 2006-01-27 13:43 +0000 dockes (fa13d8fe2fc9) * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h: new file. * src/qtgui/guiutils.cpp, src/qtgui/guiutils.h: extracted code from main and others 2006-01-27 13:42 +0000 dockes (96572eee9528) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.cpp: implement cancellation in preview loading 2006-01-27 13:38 +0000 dockes (3ad2458e654a) * src/internfile/myhtmlparse.cpp: strip whitespace and newlines (as the original version), except in pre tags 2006-01-27 13:37 +0000 dockes (80dbbac5b981) * src/filters/rcldoc, src/filters/rclpdf, src/filters/rclps, src/filters/rclsoff: fix to output
when needed + other misc pbs 2006-01-27 13:34 +0000 dockes (538235c10cd7) * src/rcldb/rcldb.cpp: define some constants and increase abstract context width 2006-01-27 11:25 +0000 dockes (1d381cea9ec3) * src/internfile/htmlparse.cpp: missing amp entity translation 2006-01-26 17:59 +0000 dockes (15b82e0f9689) * src/internfile/mh_exec.cpp: check for cancellation 2006-01-26 17:59 +0000 dockes (81f5d1264b7d) * src/utils/execmd.cpp, src/utils/execmd.h: also test cancel on select timeout 2006-01-26 17:44 +0000 dockes (77efdf7b7e93) * src/utils/execmd.cpp, src/utils/execmd.h: make execCmd exception-safe 2006-01-26 14:02 +0000 dockes (ffd1ec38fb9f) * src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/recoll.h, src/qtgui/uiprefs.ui, src/qtgui/uiprefs.ui.h: abstract params 2006-01-26 14:01 +0000 dockes (c34965eaaa05) * src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h: abstracts + doc sizes 2006-01-26 12:30 +0000 dockes (c3718d2ceeae) * src/query/docseq.cpp: let the db do whats needed to get a result count 2006-01-26 12:29 +0000 dockes (bc0a233de310) * src/utils/smallut.cpp, src/utils/smallut.h: chrono 2006-01-26 12:28 +0000 dockes (69be9a0edd98) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: abstract building from position data 2006-01-26 07:03 +0000 dockes (2c5403cbdbc1) * src/qtgui/recoll.pro: deleted file. * src/qtgui/recoll.pro: replaced by recoll.pro.in 2006-01-26 07:03 +0000 dockes (7a03d26ad54d) * src/qtgui/recoll.pro, src/qtgui/recoll.pro.in, src/utils/smallut.h: *** empty log message *** 2006-01-26 07:02 +0000 dockes (de94ebf3cb51) * src/index/indexer.cpp: pass size info to db.add 2006-01-25 08:39 +0000 dockes (fc5ab7249caa) * src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h: reenable stripping newlines 2006-01-25 08:09 +0000 dockes (1ce613930379) * src/query/Makefile, src/query/xadump.cpp: xadump improvements 2006-01-24 12:22 +0000 dockes (6a16d14c076e) * src/qtgui/rclmain.cpp: fix signal type 2006-01-24 12:22 +0000 dockes (322a0f010b59) * src/utils/execmd.cpp, src/utils/execmd.h: add feedback and possible cancellation 2006-01-23 17:21 +0000 dockes (d16bcca9bc1e) * src/qtgui/images/d_nextpage.png, src/qtgui/images/d_prevpage.png: new file. * src/qtgui/images/d_nextpage.png, src/qtgui/images/d_prevpage.png, src/qtgui/rclmain.cpp, src/qtgui/recoll.pro, src/qtgui/recollmain.ui: slightly improved the icon situation 2006-01-23 16:52 +0000 dockes (a51e0cfa77db) * src/qtgui/images/asearch.png, src/qtgui/images/history.png, src/qtgui/images/nextpage.png, src/qtgui/images/prevpage.png, src/qtgui/images/sortparms.png: *** empty log message *** 2006-01-23 15:43 +0000 dockes (907a44f71ddc) * src/qtgui/images/editcopy, src/qtgui/images/editcut, src/qtgui/images/editpaste, src/qtgui/images/filenew, src/qtgui/images/fileopen, src/qtgui/images/filesave, src/qtgui/images/print, src/qtgui/images/redo, src/qtgui/images/searchfind, src/qtgui/images/undo: deleted file. 
* src/qtgui/images/editcopy, src/qtgui/images/editcut, src/qtgui/images/editpaste, src/qtgui/images/filenew, src/qtgui/images/fileopen, src/qtgui/images/filesave, src/qtgui/images/print, src/qtgui/images/redo, src/qtgui/images/searchfind, src/qtgui/images/undo: *** empty log message *** 2006-01-23 13:32 +0000 dockes (b27df12a0147) * src/common/rclconfig.cpp, src/common/rclinit.cpp, src/common/textsplit.cpp, src/common/unacpp.cpp, src/index/csguess.cpp, src/index/indexer.cpp, src/index/mimetype.cpp, src/index/recollindex.cpp, src/internfile/internfile.cpp, src/internfile/mh_exec.cpp, src/internfile/mh_mail.cpp, src/internfile/mh_text.cpp, src/internfile/mimehandler.cpp, src/query/docseq.cpp, src/query/history.cpp, src/query/qtry.cpp, src/query/qxtry.cpp, src/query/sortseq.cpp, src/query/xadump.cpp, src/rcldb/pathhash.cpp, src/rcldb/rcldb.cpp, src/utils/base64.cpp, src/utils/conftree.cpp, src/utils/copyfile.cpp, src/utils/debuglog.cpp, src/utils/execmd.cpp, src/utils/fstreewalk.cpp, src/utils/idfile.cpp, src/utils/mimeparse.cpp, src/utils/pathut.cpp, src/utils/readfile.cpp, src/utils/smallut.cpp, src/utils/transcode.cpp, src/utils/utf8iter.cpp, src/utils/wipedir.cpp: reference to GPL in all .cpp files 2006-01-23 13:32 +0000 dockes (c2c52e3c568f) * src/qtgui/idxthread.cpp, src/qtgui/main.cpp, src/qtgui/plaintorich.cpp, src/qtgui/rclmain.cpp, src/qtgui/recoll.h, src/qtgui/recoll.pro, src/qtgui/reslistb.ui.h, src/qtgui/uiprefs.ui.h: more refactoring 2006-01-23 07:15 +0000 dockes (639d2208e231) * src/qtgui/rclmain.cpp: *** empty log message *** 2006-01-23 07:07 +0000 dockes (29cad268f7ba) * src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h, src/qtgui/ssearchb.ui.h: more modularization 2006-01-22 18:46 +0000 dockes (c329a0d633e1) * src/qtgui/recoll.pro.in: new file. * src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll.pro, src/qtgui/recoll.pro.in, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h, src/qtgui/ssearchb.ui: extract functionality from main window 2006-01-22 15:16 +0000 dockes (f8f81a690e3d) * src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h: *** empty log message *** 2006-01-22 13:56 +0000 dockes (b62fca0983d3) * src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h, src/qtgui/ssearchb.ui.h: new file. * src/qtgui/reslistb.ui, src/qtgui/reslistb.ui.h, src/qtgui/ssearchb.ui.h: *** empty log message *** 2006-01-22 07:41 +0000 dockes (50553b4f8d29 [T1_2]) * src/qtgui/ssearchb.ui: *** empty log message *** 2006-01-22 07:25 +0000 dockes (f5ecee171cca) * src/qtgui/ssearchb.ui: new file. * src/qtgui/ssearchb.ui: *** empty log message *** 2006-01-21 15:36 +0000 dockes (283be80e303b) * src/configure: *** empty log message *** 2006-01-21 15:36 +0000 dockes (57061cf4c252) * src/Makefile.in, src/configure, src/configure.ac, src/makesrcdist.sh, src/qtgui/recoll.pro: enable building from inside qtgui/ 2006-01-21 15:25 +0000 dockes (ce790ab8e905) * packaging/rpm/recollmdk.spec: new file. * packaging/rpm/recollmdk.spec: *** empty log message *** 2006-01-21 10:47 +0000 dockes (47b92b35b369) * src/INSTALL, src/README: *** empty log message *** 2006-01-20 14:58 +0000 dockes (9dfcca9b0073) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/qtgui/main.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll.h: qt main program cleanup 2006-01-20 12:46 +0000 dockes (04782d3c08bb) * src/qtgui/rclmain.cpp, src/qtgui/rclmain.h: new file. 
* src/qtgui/main.cpp, src/qtgui/rclmain.cpp, src/qtgui/rclmain.h, src/qtgui/recoll.pro, src/qtgui/recoll_fr.ts, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: separated code from design by subclassing recollmain 2006-01-20 10:01 +0000 dockes (f1c90fc5dd19) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/kde/kioslave/recoll/00README.txt, src/mk/commondefs, src/mk/localdefs.in, src/qtgui/idxthread.cpp, src/qtgui/idxthread.h, src/qtgui/main.cpp, src/utils/conftree.h, src/utils/debuglog.h: cleanup 2006-01-19 17:11 +0000 dockes (b6465d0ee08c) * src/Makefile.in: new file. * src/Makefile: deleted file. * src/Makefile, src/Makefile.in, src/common/rclconfig.cpp, src/common/rclconfig.h, src/configure, src/configure.ac, src/internfile/internfile.cpp, src/internfile/mh_exec.cpp, src/mk/localdefs.in, src/qtgui/main.cpp, src/qtgui/recoll.pro, src/recollinstall.in: slight config cleanup 2006-01-19 15:08 +0000 dockes (211c1066ac8f) * src/kde/kioslave/recoll/00README.txt: new file. * src/kde/kioslave/recoll/00README.txt: end of test, doesnt look very useful 2006-01-19 14:57 +0000 dockes (302ee688e96a) * src/kde/kioslave/recoll/kio_recoll.la: new file. * src/kde/kioslave/recoll/Makefile, src/kde/kioslave/recoll/kio_recoll.cpp, src/kde/kioslave/recoll/kio_recoll.h, src/kde/kioslave/recoll/kio_recoll.la: end of initial experimentation 2006-01-19 12:03 +0000 dockes (ffb549062074) * src/utils/Makefile: *** empty log message *** 2006-01-19 12:01 +0000 dockes (0e6b7d796f28) * packaging/FreeBSD/recoll/Makefile, src/Makefile, src/VERSION, src/bincimapmime/Makefile, src/common/Makefile, src/doc/user/usermanual.sgml, src/index/Makefile, src/kde/kioslave/recoll/Makefile, src/kde/kioslave/recoll/kio_recoll.cpp, src/kde/kioslave/recoll/kio_recoll.h, src/lib/Makefile, src/makestaticdist.sh, src/mk/Darwin, src/mk/FreeBSD, src/mk/Linux, src/mk/SunOS, src/mk/commondefs, src/mk/localdefs.in, src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/recollmain.ui.h, src/query/Makefile, src/utils/Makefile, src/utils/smallut.cpp, src/utils/smallut.h: misc small mods to help with building kio_recoll 2006-01-18 13:41 +0000 dockes (ebf94c8fc21c) * src/kde/kioslave/recoll/Makefile, src/kde/kioslave/recoll/kio_recoll.cpp, src/kde/kioslave/recoll/kio_recoll.h, src/kde/kioslave/recoll/recoll.protocol: new file. * src/kde/kioslave/recoll/Makefile, src/kde/kioslave/recoll/kio_recoll.cpp, src/kde/kioslave/recoll/kio_recoll.h, src/kde/kioslave/recoll/recoll.protocol: *** empty log message *** 2006-01-17 10:08 +0000 dockes (9784891fd0a7) * src/utils/mimeparse.h: comments and clarification 2006-01-17 09:31 +0000 dockes (08549e5e4a9e) * src/index/indexer.cpp, src/utils/fstreewalk.cpp, src/utils/fstreewalk.h: cleanup and comments 2006-01-14 13:09 +0000 dockes (d7ac146b7dd5) * src/configure, src/configure.ac: do a better search for qt configuration 2006-01-14 11:48 +0000 dockes (d073ecc93317) * src/Makefile, src/configure, src/configure.ac: do a better search for qt configuration 2006-01-12 09:29 +0000 dockes (2dfd16f6a9a4 [RECOLL-1_1_0]) * src/qtgui/recoll_fr.ts: *** empty log message *** 2006-01-12 09:16 +0000 dockes (deb6607d43bf) * src/README: *** empty log message *** 2006-01-12 09:13 +0000 dockes (7635781b18c5) * src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp: handle removed docs in history 2006-01-11 17:41 +0000 dockes (bd54a740def9) * src/qtgui/recollmain.ui.h: *** empty log message *** 2006-01-11 15:09 +0000 dockes (108917b10bf3) * src/qtgui/uiprefs.ui.h: new file. 
* src/qtgui/uiprefs.ui.h: *** empty log message *** 2006-01-11 15:08 +0000 dockes (a03b6696412a) * src/doc/user/usermanual.sgml, src/index/Makefile, src/qtgui/recoll_fr.ts, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/docseq.h, src/query/sortseq.cpp, src/query/sortseq.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: translation of result list title. Show query details when clicking on header 2006-01-10 17:46 +0000 dockes (d4cc3428e381) * src/recollinstall.in: *** empty log message *** 2006-01-10 14:53 +0000 dockes (c873b3133cdd) * packaging/rpm/recoll.spec: *** empty log message *** 2006-01-10 13:52 +0000 dockes (ab4934e066f9) * src/recollinstall.in: *** empty log message *** 2006-01-10 13:41 +0000 dockes (23d6e8ae7155) * src/recollinstall.in: *** empty log message *** 2006-01-10 13:32 +0000 dockes (526cfe52f2e1) * src/recollinstall.in: *** empty log message *** 2006-01-10 13:27 +0000 dockes (a2f47b62ca03) * src/recollinstall.in: *** empty log message *** 2006-01-10 13:16 +0000 dockes (72d6ccffea15) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg- plist, src/recollinstall.in: install man pages 2006-01-10 12:58 +0000 dockes (3a7d0fd4ceb7) * src/Makefile, src/common/rclconfig.cpp: warning 2006-01-10 12:55 +0000 dockes (aaeb49f89a98) * src/rcldb/rcldb.cpp: include unistd 2006-01-10 12:06 +0000 dockes (9b804748017f) * src/INSTALL, src/README: *** empty log message *** 2006-01-10 11:07 +0000 dockes (01e4fe9772b0) * src/common/rclconfig.h, src/doc/user/usermanual.sgml, src/index/recollindex.cpp, src/sampleconf/recoll.conf.in: doc + got rid of unused defaultlanguage config param 2006-01-10 09:10 +0000 dockes (34638d9bd009) * src/doc/man/recoll.conf.5: new file. * src/doc/man/recoll.conf.5: *** empty log message *** 2006-01-10 08:14 +0000 dockes (a9b485ada811) * src/doc/man/recoll.1, src/doc/man/recollindex.1: new file. * src/doc/man/recoll.1, src/doc/man/recollindex.1: *** empty log message *** 2006-01-09 16:53 +0000 dockes (29f37b7888d3) * src/excludefile, src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/Makefile, src/utils/pathut.cpp, src/utils/pathut.h: allow independant creation / deletion of stem dbs 2006-01-06 13:55 +0000 dockes (8831260252d9) * src/rcldb/rcldb.cpp: do a better test for a capitalized query term (no stem expand) 2006-01-06 13:20 +0000 dockes (82e02042773f) * src/qtgui/uiprefs.ui: new file. * src/qtgui/uiprefs.ui: *** empty log message *** 2006-01-06 13:19 +0000 dockes (29cdbe2390e4) * src/utils/CaseFolding.txt, src/utils/caseconvert.cpp, src/utils/caseconvert.h, src/utils/gencasefold.sh: deleted file. * src/lib/Makefile, src/rcldb/rcldb.cpp, src/unac/unac.c, src/unac/unac.h, src/utils/CaseFolding.txt, src/utils/Makefile, src/utils/caseconvert.cpp, src/utils/caseconvert.h, src/utils/gencasefold.sh: integrated case-folding into unac for better performance 2006-01-06 13:18 +0000 dockes (7840fc0ec48b) * src/common/Makefile, src/common/unacpp.cpp, src/common/unacpp.h, src/rcldb/rcldb.cpp: integrated case-folding into unac for better performance 2006-01-06 13:10 +0000 dockes (15e715082e40) * unac/CaseFolding-3.2.0.txt: new file. 
* unac/CaseFolding-3.2.0.txt, unac/builder.in, unac/unac.c, unac/unac.h: implemented additional case-folding 2006-01-06 13:08 +0000 dockes (f27aa43e32ef [UNAC_1_7_0]) * unac/.version, unac/AUTHORS, unac/COPYING, unac/ChangeLog, unac/INSTALL, unac/Makefile.am, unac/Makefile.in, unac/NEWS, unac/README, unac/THANKS, unac/UnicodeData-3.2.0.txt, unac/acinclude.m4, unac/aclocal.m4, unac/builder.in, unac/config.guess, unac/config.h.in, unac/config.sub, unac/configure, unac/configure.ac, unac/depcomp, unac/getopt.c, unac/getopt.h, unac/install-sh, unac/ltconfig, unac/ltmain.sh, unac/missing, unac/mkinstalldirs, unac/stamp-h.in, unac/t_unac.in, unac/unac.3, unac/unac.c, unac/unac.h, unac/unac.pc.in, unac/unac.spec.in, unac/unaccent.1, unac/unaccent.c, unac/unactest.c, unac/unactest1.c: new file. * unac/.version, unac/AUTHORS, unac/COPYING, unac/ChangeLog, unac/INSTALL, unac/Makefile.am, unac/Makefile.in, unac/NEWS, unac/README, unac/THANKS, unac/UnicodeData-3.2.0.txt, unac/acinclude.m4, unac/aclocal.m4, unac/builder.in, unac/config.guess, unac/config.h.in, unac/config.sub, unac/configure, unac/configure.ac, unac/depcomp, unac/getopt.c, unac/getopt.h, unac/install-sh, unac/ltconfig, unac/ltmain.sh, unac/missing, unac/mkinstalldirs, unac/stamp-h.in, unac/t_unac.in, unac/unac.3, unac/unac.c, unac/unac.h, unac/unac.pc.in, unac/unac.spec.in, unac/unaccent.1, unac/unaccent.c, unac/unactest.c, unac/unactest1.c: initial import 2006-01-06 07:59 +0000 dockes (52c86ee701fd) * src/index/Makefile, src/qtgui/recoll.pro: ensure relink for changed lib 2006-01-05 16:37 +0000 dockes (a2ef019b6308) * src/common/unacpp.cpp, src/common/unacpp.h, src/lib/Makefile, src/rcldb/rcldb.cpp, src/utils/Makefile: Use proper unicode lowercasing 2006-01-05 16:16 +0000 dockes (158267ddbcb6) * src/utils/CaseFolding.txt, src/utils/caseconvert.cpp, src/utils/caseconvert.h, src/utils/gencasefold.sh: new file. * src/utils/CaseFolding.txt, src/utils/caseconvert.cpp, src/utils/caseconvert.h, src/utils/gencasefold.sh: *** empty log message *** 2006-01-05 10:27 +0000 dockes (f1af15efef34) * packaging/rpm/recoll.spec: new file. * packaging/rpm/recoll.spec: *** empty log message *** 2006-01-05 10:24 +0000 dockes (55284d2ed66e) * src/Makefile, src/recollinstall.in: install tweaks for rpm compatibility 2006-01-04 11:33 +0000 dockes (236c587eb180) * src/VERSION, src/common/rclconfig.h, src/makesrcdist.sh, src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recoll_fr.ts, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/recollinstall.in, src/utils/smallut.cpp, src/utils/smallut.h: add menu entry to start browser on html doc 2006-01-04 11:16 +0000 dockes (2075c2a6d71e) * src/INSTALL, src/README: *** empty log message *** 2006-01-04 11:09 +0000 dockes (9a4cceb219aa) * src/doc/user/Makefile, src/doc/user/docbook.css, src/doc/user/usermanual.sgml: new file. * src/doc/user/Makefile, src/doc/user/docbook.css, src/doc/user/usermanual.sgml: *** empty log message *** 2006-01-03 11:35 +0000 dockes (188ffc87b7d3) * src/INSTALL, src/README: *** empty log message *** 2005-12-16 10:08 +0000 dockes (789da9d2380c) * src/query/Makefile, src/query/xadump.cpp, src/unac/unac.c, src/utils/mimeparse.cpp: 64 bits fixes 2005-12-16 10:06 +0000 dockes (cf18fa6d2a7b) * src/Makefile, src/mk/localdefs.in, src/qtgui/main.cpp, src/qtgui/recoll.pro, src/recollinstall.in: get prefix to really work 2005-12-16 08:00 +0000 dockes (cca6b156e460) * src/excludefile: dont copy localdefs 2005-12-16 07:58 +0000 dockes (7b20df9408ce) * src/mk/localdefs: deleted file. 
* src/mk/localdefs: *** empty log message *** 2005-12-15 14:39 +0000 dockes (959564d835fd) * src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/sampleconf/recoll.conf.in: user prefs tweaks. Allow switching stemlang from ui 2005-12-15 13:41 +0000 dockes (bf3c45bf931d) * src/qtgui/main.cpp: *** empty log message *** 2005-12-14 16:15 +0000 dockes (229d1902798e) * src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: user interface preferences settable from ui 2005-12-14 11:00 +0000 dockes (3e5f6f1c000d) * src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/internfile/mh_html.cpp, src/internfile/mh_text.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/Makefile: allow indexing individual files. Fix pb with preview and charsets (local defcharset ignored) 2005-12-13 17:20 +0000 dockes (0895be2b8196) * src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: add allTerms checkbutton, save state in settings 2005-12-13 17:20 +0000 dockes (b522d74e613c) * src/qtgui/advsearch.ui: avoid activating random buttons when typing CR... 2005-12-13 12:43 +0000 dockes (0448daf8c23e) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-descr, packaging/FreeBSD/recoll/pkg-plist, src/common/rclconfig.cpp, src/doc/prog/Doxyfile, src/doc/prog/Makefile, src/doc/prog/filters.txt, src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/utils/fstreewalk.cpp, src/utils/pathut.cpp, src/utils/pathut.h, src/utils/smallut.cpp, src/utils/wipedir.cpp: pgup/down in result list 2005-12-08 08:44 +0000 dockes (ec006d171797) * src/common/Makefile, src/internfile/htmlparse.cpp, src/internfile/htmlparse.h, src/internfile/internfile.cpp, src/internfile/mh_html.cpp, src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h: process text from html files without a tag 2005-12-07 15:41 +0000 dockes (a44bf0c6a081 [RECOLL-1_0_14]) * src/VERSION, src/internfile/mh_mail.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui.h, src/query/docseq.cpp, src/utils/utf8iter.h: freebsd 4 port 2005-12-06 15:59 +0000 dockes (812bc8f9232b) * packaging/FreeBSD/recoll/distinfo: 0.13 really now 2005-12-06 15:41 +0000 dockes (3a7b74624ff4) * src/recollinstall.in: strip execs 2005-12-06 15:20 +0000 dockes (fa8c19799a41) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-plist: recoll-0.13 2005-12-06 15:10 +0000 dockes (b6df28b0d0e3) * src/README: *** empty log message *** 2005-12-06 15:10 +0000 dockes (e66dba4d628c [RECOLL-1_0_13]) * src/Makefile, src/VERSION, src/recollinstall.in: no recollinstall install 2005-12-06 12:55 +0000 dockes (cbfcc5627111) * packaging/FreeBSD/recoll/pkg-descr: *** empty log message *** 2005-12-06 10:30 +0000 dockes (d132e05e40ac) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-descr, packaging/FreeBSD/recoll/pkg-plist: *** empty log message *** 2005-12-06 09:40 +0000 dockes (f93d6a9b2336) * src/internfile/mh_html.cpp, src/internfile/myhtmlparse.cpp: previous html fix didnt work 2005-12-06 08:35 +0000 dockes (a3eec94f6861) * src/VERSION, src/internfile/internfile.cpp, src/internfile/mh_html.cpp, src/internfile/myhtmlparse.cpp: fix nasty html parse bug introduced in 1.0.9 2005-12-06 07:16 +0000 dockes (c1ccf42bf359 [RECOLL-1_0_12,
RECOLL-1_0_11]) * src/qtgui/recollmain.ui: move search/clear buttons to the left side 2005-12-05 17:47 +0000 dockes (37952b251aee) * src/VERSION: 1.0.11? 2005-12-05 16:45 +0000 dockes (eecd7a311e8f) * src/qtgui/recollmain.ui.h: no %F on solaris8 2005-12-05 16:13 +0000 dockes (cd9899dcdec1) * src/utils/copyfile.cpp: *** empty log message *** 2005-12-05 16:13 +0000 dockes (7e7e675138b2) * src/query/docseq.cpp, src/query/docseq.h: avoid unneeded getDoc(0) + normalize private var names 2005-12-05 15:00 +0000 dockes (6aa562bb0180) * src/INSTALL, src/Makefile, src/README, src/qtgui/main.cpp: *** empty log message *** 2005-12-05 14:09 +0000 dockes (d3954ac2c5ec) * src/utils/copyfile.cpp, src/utils/copyfile.h: new file. * src/common/rclconfig.cpp, src/lib/Makefile, src/mk/localdefs, src/mk/localdefs.in, src/utils/copyfile.cpp, src/utils/copyfile.h: create personal config if it does not exist 2005-12-05 12:02 +0000 dockes (6d38fb24e3b1) * src/qtgui/images/asearch.png, src/qtgui/images/history.png, src/qtgui/images/nextpage.png, src/qtgui/images/prevpage.png, src/qtgui/images/sortparms.png: new file. * src/qtgui/images/asearch.png, src/qtgui/images/history.png, src/qtgui/images/nextpage.png, src/qtgui/images/prevpage.png, src/qtgui/images/sortparms.png, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/qtgui/sort.ui.h, src/query/docseq.cpp, src/query/sortseq.cpp: use toolbar buttons for prev/next + misc cleanups 2005-12-05 10:39 +0000 dockes (55a212b17808) * src/rcldb/rcldb.cpp: also index file path as terms 2005-12-04 17:10 +0000 dockes (a4005adeece9) * src/qtgui/recoll.pro: more 2005-12-04 17:10 +0000 dockes (15ce414ea700) * src/common/textsplit.cpp: split stdin 2005-12-04 14:58 +0000 dockes (369372321681) * src/qtgui/recollmain.ui: *** empty log message *** 2005-12-02 16:18 +0000 dockes (b8ea8500fe26) * src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/qtgui/sort.ui, src/qtgui/sort.ui.h, src/query/sortseq.cpp, src/query/sortseq.h, src/rcldb/rcldb.h: 1st version of sorting interface. Needs polishing 2005-12-02 16:17 +0000 dockes (fba2b55c4ba7) * src/filters/rclpdf: new version of pdftotext broke us 2005-12-02 16:17 +0000 dockes (a31234c89a73) * src/common/rclconfig.cpp, src/recollinstall.in, src/sampleconf/recoll.conf.in: install filters to /usr/local 2005-12-02 14:18 +0000 dockes (7b585689ce4a) * src/query/sortseq.cpp: *** empty log message *** 2005-12-01 16:23 +0000 dockes (c7393c3bc8b5) * src/qtgui/sort.ui, src/qtgui/sort.ui.h, src/query/sortseq.cpp, src/query/sortseq.h: new file. * src/lib/Makefile, src/qtgui/recoll.pro, src/qtgui/recollmain.ui.h, src/qtgui/sort.ui, src/qtgui/sort.ui.h, src/query/sortseq.cpp, src/query/sortseq.h: sorting 1st steps 2005-11-30 18:37 +0000 dockes (ddba9ec4f65f) * src/configure: make recollinstall executable 2005-11-30 18:28 +0000 dockes (35f236d5ad1f) * src/configure.ac: make recollinstall executable 2005-11-30 18:26 +0000 dockes (580ae261b629) * src/sampleconf/recoll.conf.in: keep log level at 4 for index feedback 2005-11-30 18:20 +0000 dockes (2fb51c4552fb) * src/sampleconf/recoll.conf.in: decrease log level 2005-11-30 18:10 +0000 dockes (0ad46d9bcaa5) * src/query/history.cpp: *** empty log message *** 2005-11-30 18:05 +0000 dockes (653e0a145731) * src/qtgui/form1.ui.h: deleted file. * src/excludefile, src/qtgui/form1.ui.h: *** empty log message *** 2005-11-30 18:04 +0000 dockes (dfff4ecb1918) * src/qtgui/form1.ui: deleted file. 
* src/excludefile, src/qtgui/form1.ui: *** empty log message *** 2005-11-30 18:01 +0000 dockes (a63a8d7c49f3) * src/excludefile: *** empty log message *** 2005-11-30 17:58 +0000 dockes (7676c325de57) * src/excludefile: *** empty log message *** 2005-11-30 17:58 +0000 dockes (6ddc4c210c87) * src/utils/transcode.cpp: try harder to convert bad encodings 2005-11-30 10:36 +0000 dockes (7e0aab848f91 [RECOLL-1_0_10]) * src/README, src/qtgui/recollmain.ui: *** empty log message *** 2005-11-30 10:35 +0000 dockes (1f97b79ea735) * src/VERSION: v1.0.10 2005-11-30 10:25 +0000 dockes (3c2bcb1ec527) * src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: disable/enable buttons dep. on state 2005-11-30 09:46 +0000 dockes (a75b091acbae) * src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/sampleconf/mimeconf: *** empty log message *** 2005-11-30 09:46 +0000 dockes (30a527e5014f) * src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/rcldb/rcldb.cpp: add option to rezero db before index 2005-11-28 15:31 +0000 dockes (d9e31422258b) * src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui.h, src/query/docseq.cpp, src/query/docseq.h, src/query/history.cpp, src/query/history.h: store and display dates in history. Needs more work 2005-11-25 14:36 +0000 dockes (18bc54d4e426) * src/qtgui/recollmain.ui.h, src/query/history.cpp, src/utils/conftree.cpp, src/utils/smallut.cpp, src/utils/smallut.h: show history newest first + prune duplicate entries 2005-11-25 10:26 +0000 dockes (3ad346d3f29e) * src/qtgui/main.cpp, src/qtgui/recoll_fr.ts, src/recollinstall.in: install translations to share/recoll/translations 2005-11-25 10:02 +0000 dockes (6ed5669a337b) * src/query/docseq.cpp, src/query/docseq.h: new file. * src/lib/Makefile, src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/docseq.cpp, src/query/docseq.h: 1st version of doc history 2005-11-25 09:14 +0000 dockes (69bab5c09012) * src/index/indexer.cpp, src/index/mimetype.cpp, src/sampleconf/mimeconf: *** empty log message *** 2005-11-25 09:13 +0000 dockes (55e99bcc0a46) * src/common/rclconfig.cpp, src/common/rclconfig.h: get all mime list from mimeconf, not mimemap 2005-11-25 09:12 +0000 dockes (87febfb9c3be) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: added method to retrieve doc from path/ipath 2005-11-25 08:53 +0000 dockes (756168516697) * src/Makefile, src/internfile/mimehandler.cpp, src/rcldb/rcldb.h: *** empty log message *** 2005-11-25 08:50 +0000 dockes (6fda25d19678) * src/utils/conftree.cpp, src/utils/conftree.h, src/utils/smallut.cpp, src/utils/smallut.h: add methods useful for history. move stuff to smallut 2005-11-25 08:49 +0000 dockes (bd6b75c162a5) * src/utils/base64.cpp: Strip extra null byte that we were appending 2005-11-24 18:21 +0000 dockes (ba604719481c) * src/query/history.cpp, src/query/history.h: new file. * src/query/Makefile, src/query/history.cpp, src/query/history.h, src/query/qtry.cpp, src/query/xadump.cpp: *** empty log message *** 2005-11-24 07:16 +0000 dockes (1fc7382994a5) * src/recollinstall.in, src/sampleconf/recoll.conf.in: new file. 
* src/bincimapmime/address.cc, src/bincimapmime/convert.cc, src/bincimapmime/iodevice.cc, src/bincimapmime/iofactory.cc, src/bincimapmime/mime-getpart.cc, src/bincimapmime/mime-parsefull.cc, src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime-printdoc.cc, src/bincimapmime/mime-printheader.cc, src/bincimapmime/mime-utils.h, src/bincimapmime/mime.cc, src/bincimapmime/trbinc.cc, src/common/rclconfig.cpp, src/common/textsplit.cpp, src/common/unacpp.cpp, src/configure, src/configure.ac, src/index/csguess.cpp, src/index/indexer.cpp, src/index/mimetype.cpp, src/internfile/htmlparse.cpp, src/internfile/htmlparse.h, src/internfile/internfile.cpp, src/internfile/mh_exec.cpp, src/internfile/mh_html.cpp, src/internfile/mh_mail.cpp, src/internfile/mh_text.cpp, src/internfile/mimehandler.cpp, src/makestaticdist.sh, src/qtgui/advsearch.ui.h, src/qtgui/main.cpp, src/qtgui/plaintorich.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp, src/qtgui/recollmain.ui.h, src/query/qtry.cpp, src/query/qxtry.cpp, src/query/xadump.cpp, src/rcldb/pathhash.cpp, src/rcldb/rcldb.cpp, src/recollinstall.in, src/sampleconf/recoll.conf.in, src/utils/base64.cpp, src/utils/execmd.cpp, src/utils/fstreewalk.cpp, src/utils/idfile.cpp, src/utils/mimeparse.cpp, src/utils/pathut.cpp, src/utils/readfile.cpp, src/utils/smallut.cpp, src/utils/smallut.h, src/utils/transcode.cpp, src/utils/utf8iter.cpp, src/utils/wipedir.cpp: *** empty log message *** 2005-11-23 13:12 +0000 dockes (a8ff464ec720) * src/recollinstall, src/sampleconf/recoll.conf: deleted file. * src/recollinstall, src/sampleconf/recoll.conf: *** empty log message *** 2005-11-23 11:11 +0000 dockes (4ba2ad248537) * src/utils/execmd.cpp: *** empty log message *** 2005-11-23 11:00 +0000 dockes (66cac25635e1) * src/INSTALL, src/README, src/rcldb/rcldb.cpp: *** empty log message *** 2005-11-23 10:57 +0000 dockes (45f106d04652 [RECOLL-1_0_9]) * src/Makefile: use prefix instead of PREFIX 2005-11-23 10:19 +0000 dockes (e7a6edd38c56) * src/Makefile, src/VERSION, src/filters/rclrtf, src/index/Makefile, src/index/mimetype.cpp, src/utils/debuglog.cpp, src/utils/mimeparse.cpp: *** empty log message *** 2005-11-23 10:18 +0000 dockes (4e530d6556d2) * src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui.h: document already shown test was wrong, wouldnt show more docs from same file 2005-11-23 10:17 +0000 dockes (9944ac86338d) * src/utils/execmd.cpp: need to do _exit not exit after exec failure 2005-11-23 10:16 +0000 dockes (085c66533884) * src/internfile/mh_html.cpp, src/utils/smallut.cpp, src/utils/smallut.h: improve charset name comparison 2005-11-21 17:18 +0000 dockes (9c398b7ee69e [RECOLL-1_0_8]) * src/configure, src/configure.ac, src/index/mimetype.cpp, src/internfile/myhtmlparse.cpp, src/mk/Linux, src/mk/SunOS, src/recollinstall, src/utils/execmd.cpp: glitches in linux/solaris compil. + install 2005-11-21 16:16 +0000 dockes (7594b3dd0dc5) * src/README: *** empty log message *** 2005-11-21 16:06 +0000 dockes (8a82b3826a4a) * src/VERSION: *** empty log message *** 2005-11-21 16:05 +0000 dockes (9cc42706006d) * src/filters/rclrtf: new file.
* src/filters/rclrtf, src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/recoll.conf: add support for rtf 2005-11-21 16:04 +0000 dockes (8169ca3ae210) * src/Makefile, src/makestaticdist.sh, src/recollinstall: install pics and samples to $PREFIX/local/recoll 2005-11-21 14:32 +0000 dockes (f0aaac1df843) * src/filters/rclgaim: just needs awk 2005-11-21 14:31 +0000 dockes (88649af9a0ac [RECOLL-1_0_7]) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/indexer.cpp, src/index/mimetype.cpp, src/index/mimetype.h, src/internfile/internfile.cpp, src/internfile/mh_html.cpp, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h, src/lib/Makefile, src/qtgui/recollmain.ui.h, src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/recoll.conf: mimemap processing recentered in rclconfig. Handle directory-local suffix to mime-type definitions. Implement gaim log handling 2005-11-18 17:03 +0000 dockes (ae7d483398d2) * src/filters/rclgaim: new file. * src/filters/rclgaim: *** empty log message *** 2005-11-18 15:19 +0000 dockes (9c8cb27e5749) * src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mh_exec.cpp, src/internfile/mimehandler.h, src/utils/execmd.h: misc cleanup + tell filters if working for preview or index 2005-11-18 13:52 +0000 dockes (9d83fd6a7d8c) * src/utils/execmd.cpp, src/utils/execmd.h: add putenv interface 2005-11-18 13:23 +0000 dockes (c3d0cfc77a9f) * src/internfile/mh_exec.cpp, src/internfile/mh_exec.h, src/internfile/mh_text.cpp, src/internfile/mh_text.h: new file. * src/internfile/mh_exec.cpp, src/internfile/mh_exec.h, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/mh_text.cpp, src/internfile/mh_text.h, src/internfile/mimehandler.cpp, src/lib/Makefile: restructuring on mimehandler files 2005-11-17 17:39 +0000 dockes (e530dcacaf42) * src/VERSION: *** empty log message *** 2005-11-17 17:36 +0000 dockes (64437283f61f) * src/rcldb/rcldb.cpp: use OP_FILTER instead of OP_AND to filter on file types 2005-11-17 12:47 +0000 dockes (e9efe66d79c0) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/sampleconf/recoll.conf, src/utils/conftree.cpp, src/utils/conftree.h: allow tilde expansion for section names in config file 2005-11-16 18:31 +0000 dockes (e319e6fa047d) * src/qtgui/recollmain.ui.h: *** empty log message *** 2005-11-16 17:30 +0000 dockes (70dbf29f84e0) * src/excludefile: *** empty log message *** 2005-11-16 17:29 +0000 dockes (4c957598f6fd [RECOLL-1_0_6]) * src/mk/localdefs, src/rcldb/rcldb.cpp: use and_maybe in adv search 2005-11-16 15:07 +0000 dockes (a19870cd6761) * src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/qtgui/main.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recoll.h, src/qtgui/recollmain.ui.h, src/sampleconf/mimeconf, src/sampleconf/recoll.conf: Optionnally show mime type icons in result list 2005-11-16 15:05 +0000 dockes (6464421540ca) * src/qtgui/mtpics/README, src/qtgui/mtpics/document.png, src/qtgui/mtpics/drawing.png, src/qtgui/mtpics/html.png, src/qtgui/mtpics/message.png, src/qtgui/mtpics/mozilla_doc.png, src/qtgui/mtpics/pdf.png, src/qtgui/mtpics/postscript.png, src/qtgui/mtpics/presentation.png, src/qtgui/mtpics/soffice.png, src/qtgui/mtpics/spreadsheet.png, src/qtgui/mtpics/txt.png, src/qtgui/mtpics/wordprocessing.png: new file. 
* src/qtgui/mtpics/README, src/qtgui/mtpics/document.png, src/qtgui/mtpics/drawing.png, src/qtgui/mtpics/html.png, src/qtgui/mtpics/message.png, src/qtgui/mtpics/mozilla_doc.png, src/qtgui/mtpics/pdf.png, src/qtgui/mtpics/postscript.png, src/qtgui/mtpics/presentation.png, src/qtgui/mtpics/soffice.png, src/qtgui/mtpics/spreadsheet.png, src/qtgui/mtpics/txt.png, src/qtgui/mtpics/wordprocessing.png: *** empty log message *** 2005-11-16 11:22 +0000 dockes (f29236269564) * src/qtgui/plaintorich.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui.h: Implemented better feedback during preview loading 2005-11-16 08:17 +0000 dockes (44b8c2233623) * src/Makefile, src/VERSION, src/qtgui/main.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recoll_fr.ts, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: about dialog, remember previous mainwin geometry 2005-11-14 09:59 +0000 dockes (f196f00bd521) * src/internfile/internfile.cpp, src/internfile/internfile.h, src/qtgui/recollmain.ui.h: fix rare case where indexed file could not be previewed because of change in file identification config param 2005-11-14 09:57 +0000 dockes (5610887cf602) * src/index/indexer.cpp: comment 2005-11-14 09:56 +0000 dockes (b6c7dd9504b9) * src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: stem expansion was never done for adv search 2005-11-12 14:36 +0000 dockes (87b02b667eef) * src/README, src/mk/Linux: *** empty log message *** 2005-11-12 14:33 +0000 dockes (5743f1558790) * src/mk/localdefs.in: typo 2005-11-12 14:31 +0000 dockes (0dd4948b5c2f) * src/Makefile, src/configure, src/configure.ac: more config tweaks 2005-11-12 14:24 +0000 dockes (6d47a227c1b2) * src/utils/conftree.cpp, src/utils/conftree.h, src/utils/debuglog.cpp, src/utils/debuglog.h: new file. * src/utils/conftree.cpp, src/utils/conftree.h, src/utils/debuglog.cpp, src/utils/debuglog.h: local versions of utility files 2005-11-12 14:23 +0000 dockes (c77e47fdc6fb) * src/Makefile: *** empty log message *** 2005-11-12 14:19 +0000 dockes (49499e32e341) * src/configure.ac, src/mk/localdefs, src/mk/localdefs.in: new file. * src/Makefile, src/configure, src/configure.ac, src/mk/Darwin, src/mk/FreeBSD, src/mk/Linux, src/mk/SunOS, src/mk/commondefs, src/mk/localdefs, src/mk/localdefs.in: introduced some autoconf 2005-11-12 11:26 +0000 dockes (b13e733c2796) * src/Makefile, src/bincimapmime/Makefile, src/common/Makefile, src/index/Makefile, src/lib/Makefile, src/mk/commondefs, src/qtgui/recoll.pro, src/query/Makefile, src/sampleconf/mimemap, src/utils/Makefile: cleaned-up makes 2005-11-10 08:47 +0000 dockes (06490e6e7dc1) * src/index/Makefile, src/index/indexer.cpp, src/index/indexer.h, src/index/mimetype.cpp, src/index/mimetype.h, src/internfile/internfile.cpp, src/sampleconf/recoll.conf: add config parameter to decide if we use the file command as a final step of mimetype identification 2005-11-10 08:46 +0000 dockes (d9a64999d22d) * src/sampleconf/mimeconf, src/sampleconf/mimemap: add .Z compressed files 2005-11-09 21:40 +0000 dockes (1dd753a59d1c) * src/sampleconf/mimemap: add .odt -> openoffice. 
Add .php and others to ignored types 2005-11-09 21:39 +0000 dockes (a8b54cf24c83) * src/common/rclinit.cpp: test cleanup and sigcleanup not zero for small uts that dont need this 2005-11-08 21:02 +0000 dockes (344fc56239c8) * src/internfile/internfile.cpp, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h: renamed MimeHandler::worker to mkDoc + comments for doxygen 2005-11-08 21:02 +0000 dockes (1ac76bfea47d) * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-descr, packaging/FreeBSD/recoll/pkg-plist, src/doc/prog/filters.txt, src/doc/prog/top.txt: new file. * packaging/FreeBSD/recoll/Makefile, packaging/FreeBSD/recoll/distinfo, packaging/FreeBSD/recoll/pkg-descr, packaging/FreeBSD/recoll/pkg-plist, src/doc/prog/Doxyfile, src/doc/prog/Makefile, src/doc/prog/filters.txt, src/doc/prog/top.txt: *** empty log message *** 2005-11-08 21:00 +0000 dockes (54bcdfd186f1) * src/doc/prog/Doxyfile, src/doc/prog/Makefile: new file. * src/doc/prog/Doxyfile, src/doc/prog/Makefile: *** empty log message *** 2005-11-07 15:52 +0000 dockes (a0bde5fbc55b [RECOLL-1_0_5]) * src/INSTALL, src/Makefile, src/README, src/excludefile, src/makesrcdist.sh, src/makestaticdist.sh: *** empty log message *** 2005-11-07 15:37 +0000 dockes (c6a8f5375981) * src/README: *** empty log message *** 2005-11-07 15:36 +0000 dockes (5ca00f4db306) * src/INSTALL, src/README: *** empty log message *** 2005-11-07 15:11 +0000 dockes (8ae633ae4194) * src/VERSION: *** empty log message *** 2005-11-07 15:06 +0000 dockes (6be191f54656) * src/Makefile, src/mk/commondefs, src/recollinstall: fixed installation script 2005-11-07 11:21 +0000 dockes (e48ddf065716) * src/VERSION: *** empty log message *** 2005-11-06 15:07 +0000 dockes (fef6e5d66e29 [RECOLL-1_05]) * src/qtgui/idxthread.cpp, src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/utils/base64.cpp: slightly better status printing while loading preview 2005-11-06 11:16 +0000 dockes (0fa0ac2c3e5b) * src/rcldb/pathhash.cpp, src/rcldb/pathhash.h, src/utils/base64.cpp, src/utils/base64.h, src/utils/md5.cpp, src/utils/md5.h: new file. * src/lib/Makefile, src/rcldb/pathhash.cpp, src/rcldb/pathhash.h, src/rcldb/rcldb.cpp, src/utils/base64.cpp, src/utils/base64.h, src/utils/md5.cpp, src/utils/md5.h, src/utils/mimeparse.cpp: limit path term length through hashing 2005-11-05 15:30 +0000 dockes (eea6ede9ce9a) * src/INSTALL, src/README, src/VERSION: *** empty log message *** 2005-11-05 15:29 +0000 dockes (c99e6c9d50df) * src/rcldb/rcldb.cpp: debug message 2005-11-05 15:17 +0000 dockes (a3463f8f8c63) * src/mk/commondefs: unused def 2005-11-05 14:40 +0000 dockes (47c04f4507d0 [RECOLL-1_04]) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/rclinit.cpp, src/common/rclinit.h, src/index/indexer.cpp, src/index/recollindex.cpp, src/internfile/mh_mail.cpp, src/mk/SunOS, src/qtgui/main.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: separate file and document dates (mainly for email folders). Better check configuration at startup 2005-11-02 12:36 +0000 dockes (e0d52b43cd5c) * src/lib/Makefile, src/mk/commondefs: add def for RANLIB 2005-11-01 10:55 +0000 dockes (2b858432af00) * src/mk/Darwin: new file.
* src/mk/Darwin: *** empty log message *** 2005-10-31 08:59 +0000 dockes (65fd4f89de80) * src/internfile/mh_mail.cpp, src/utils/mimeparse.cpp: fixed base64 decoding of email parts: str[x] = ch does not adjust length! and be more lenient with encoding errors 2005-10-22 13:10 +0000 dockes (9a5b142d31f3) * src/README: *** empty log message *** 2005-10-22 13:10 +0000 dockes (d5ccf5480db1 [RECOLL-1_03]) * src/VERSION, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: update status line when starting lengthy operations 2005-10-22 07:29 +0000 dockes (df8ad947685b) * src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: get clicks in res list to behave: drag->no click. dblclick->no single click 2005-10-22 05:35 +0000 dockes (c566d157cfd3) * src/qtgui/recoll_fr.ts: new file. * src/qtgui/main.cpp, src/qtgui/recoll.pro, src/qtgui/recoll_fr.ts: i8n 2005-10-21 15:45 +0000 dockes (34b797e01868) * src/mk/commondefs: new file. * src/mk/commondefs: *** empty log message *** 2005-10-21 15:41 +0000 dockes (08f9ad818cb3 [RECOLL-1_02]) * src/makestaticdist.sh, src/recollinstall: more verbosity in install 2005-10-21 15:22 +0000 dockes (aa642ead5a8e) * src/INSTALL: *** empty log message *** 2005-10-21 15:11 +0000 dockes (1c74d6d926b7) * src/INSTALL: *** empty log message *** 2005-10-21 14:14 +0000 dockes (662fe9bab837) * src/excludefile: *** empty log message *** 2005-10-21 14:11 +0000 dockes (1856de4bf3f6) * src/makestaticdist.sh: new file. * src/Makefile, src/makestaticdist.sh: static bin dists 2005-10-21 13:34 +0000 dockes (0c861c8b6029) * src/INSTALL, src/README: *** empty log message *** 2005-10-21 13:33 +0000 dockes (7256b6e4e2ff) * src/Makefile, src/excludefile, src/index/Makefile, src/makesrcdist.sh, src/mk/FreeBSD, src/mk/Linux, src/mk/SunOS, src/qtgui/recoll.pro: rearrange make includes+prepare bin static distrib 2005-10-21 12:15 +0000 dockes (a9773a1a4715) * src/unac/unac.c: fix args to iconv to get rid of warnings 2005-10-21 08:14 +0000 dockes (f50d252ec29b) * src/Makefile, src/VERSION, src/excludefile, src/mk/FreeBSD, src/mk/Linux, src/qtgui/preview/pvmain.cpp, src/utils/smallut.cpp: more small build tweaks. use mkdtemp if available 2005-10-20 16:20 +0000 dockes (b5fe53035720 [RECOLL-1_01]) * src/qtgui/advsearch.ui, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h: CR->search in advanced dialog. ^W close tab in preview 2005-10-20 15:42 +0000 dockes (a9e9ecfba2d2) * src/filters/rcldoc, src/filters/rclpdf, src/filters/rclps, src/filters/rclsoff, src/mk/SunOS: small fixes for SunOS 2005-10-20 12:17 +0000 dockes (bc70bba2564c) * src/README, src/makesrcdist.sh: *** empty log message *** 2005-10-20 12:17 +0000 dockes (4e8de2aee40d) * src/INSTALL, src/README: *** empty log message *** 2005-10-20 12:17 +0000 dockes (39b33b1f4e36) * src/INSTALL, src/README: *** empty log message *** 2005-10-20 12:16 +0000 dockes (45a324ad4baa) * src/INSTALL, src/README: *** empty log message *** 2005-10-20 12:16 +0000 dockes (73b1f99aef21) * src/INSTALL, src/README: *** empty log message *** 2005-10-20 12:12 +0000 dockes (b3a8d1bceb51) * src/INSTALL, src/README: *** empty log message *** 2005-10-20 11:38 +0000 dockes (5966cd48c62c) * src/sampleconf/recoll.conf: defaultlanguage->english 2005-10-20 11:33 +0000 dockes (4ba3bd42973e) * src/recollinstall: new file. 
* src/bincimapmime/Makefile, src/filters/rcldoc, src/filters/rclpdf, src/filters/rclps, src/filters/rclsoff, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/recollinstall: small installation tweaks 2005-10-20 08:34 +0000 dockes (8ce6cff4ca9c) * src/Makefile, src/VERSION, src/bincimapmime/Makefile, src/excludefile, src/lib/Makefile, src/mk/FreeBSD, src/qtgui/plaintorich.cpp, src/qtgui/preview/preview.ui.h, src/qtgui/recoll.pro: small warning and compilation adjustments 2005-10-20 07:51 +0000 dockes (b6f58b26d846 [RECOLL-1_0]) * src/configure: new file. * src/README, src/configure: *** empty log message *** 2005-10-19 16:29 +0000 dockes (46a91fdb7a8e) * src/INSTALL, src/README: *** empty log message *** 2005-10-19 16:27 +0000 dockes (92e16891b11d) * src/INSTALL: *** empty log message *** 2005-10-19 16:09 +0000 dockes (0dda1bd16921) * src/README, src/VERSION: *** empty log message *** 2005-10-19 15:22 +0000 dockes (88cadb2e703e) * src/qtgui/recollmain.ui: *** empty log message *** 2005-10-19 14:14 +0000 dockes (61cd7c267dec) * src/common/rclconfig.cpp, src/qtgui/advsearch.ui.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/idfile.cpp, src/utils/idfile.h: implemented filtering on file subtree 2005-10-19 10:21 +0000 dockes (598116a30bfb) * src/common/textsplit.cpp, src/common/textsplit.h, src/filters/rcldoc, src/filters/rclpdf, src/filters/rclps, src/filters/rclsoff, src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/main.cpp, src/qtgui/plaintorich.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: most of adv search working. Still need subtree/filename filters 2005-10-17 13:36 +0000 dockes (6ce40ecb81f6) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/utils/Makefile: implemented dialog/glue for advanced search 2005-10-15 12:18 +0000 dockes (b57626e188f9) * src/index/indexer.cpp, src/internfile/mh_mail.cpp, src/utils/mimeparse.cpp, src/utils/mimeparse.h: decode encoded mail headers, plus use message date instead of file mtime 2005-10-10 13:25 +0000 dockes (3797f12a0832) * src/common/textsplit.h: comments 2005-10-10 13:24 +0000 dockes (a339c123dcb9) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: ckpt 2005-10-10 12:29 +0000 dockes (e88bad1f996b) * src/qtgui/advsearch.ui, src/qtgui/main.cpp, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: ckpt 2005-09-27 06:20 +0000 dockes (8b147a42b660) * src/qtgui/preview/preview.pro, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp: new file. * src/qtgui/preview/preview.pro, src/qtgui/preview/preview.ui, src/qtgui/preview/preview.ui.h, src/qtgui/preview/pvmain.cpp: *** empty log message *** 2005-09-26 16:17 +0000 dockes (783900fcd3e7) * src/qtgui/recoll.pro: *** empty log message *** 2005-09-22 16:22 +0000 dockes (1e6ccf2c2fdc) * src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h: new file. 
* src/qtgui/plaintorich.cpp, src/qtgui/plaintorich.h: *** empty log message *** 2005-09-22 15:00 +0000 dockes (db2d876f2a2b) * src/qtgui/recollmain.ui.h: *** empty log message *** 2005-09-22 14:09 +0000 dockes (4455c0eeffd4) * src/common/textsplit.cpp: adjust start/end of word when trimming 2005-09-22 11:10 +0000 dockes (3b9d4fc5b507) * src/common/textsplit.cpp: fix problems with word followed by . 2005-05-18 08:42 +0000 dockes (03bc1f1290cd) * src/qtgui/recoll.pro: *** empty log message *** 2005-05-17 11:46 +0000 dockes (cff6e901fde8) * src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h: new file. * src/qtgui/advsearch.ui, src/qtgui/advsearch.ui.h, src/qtgui/recoll.pro: *** empty log message *** 2005-05-17 06:30 +0000 dockes (9a44703bd049 [RECOLL-0_7]) * src/README: *** empty log message *** 2005-05-17 06:30 +0000 dockes (d2265051082d) * src/qtgui/recollmain.ui.h: escape < to &lt; in rich text 2005-04-08 07:32 +0000 dockes (3917ab1cc937) * src/README: *** empty log message *** 2005-04-08 07:32 +0000 dockes (2f2439c9590a) * src/mk/SunOS: new file. * src/Makefile, src/mk/SunOS, src/utils/Makefile: works on solaris8 2005-04-07 09:05 +0000 dockes (0264f1839b92) * src/utils/idfile.cpp, src/utils/idfile.h: new file. * src/index/mimetype.cpp, src/lib/Makefile, src/sampleconf/mimemap, src/utils/Makefile, src/utils/idfile.cpp, src/utils/idfile.h: replaced /usr/bin/file exec with internal code 2005-04-06 10:20 +0000 dockes (ba9162debe5a) * src/bincimapmime/AUTHORS, src/bincimapmime/COPYING: new file. * src/INSTALL, src/VERSION, src/bincimapmime/AUTHORS, src/bincimapmime/COPYING, src/bincimapmime/mime-inputsource.h, src/index/indexer.cpp, src/internfile/mh_mail.cpp, src/makesrcdist.sh, src/mk/FreeBSD, src/mk/Linux, src/qtgui/main.cpp, src/rcldb/rcldb.cpp, src/sampleconf/recoll.conf, src/utils/smallut.h, src/utils/wipedir.cpp: re-port to linux 2005-04-06 09:18 +0000 dockes (d8add828aa6b) * src/README: *** empty log message *** 2005-04-06 09:13 +0000 dockes (7d5759a43255) * src/README: *** empty log message *** 2005-04-05 09:35 +0000 dockes (6232ca052972) * src/common/rclinit.cpp, src/common/rclinit.h: new file. * src/common/rclinit.cpp, src/common/rclinit.h, src/index/mimetype.cpp, src/index/recollindex.cpp, src/internfile/internfile.cpp, src/internfile/mh_mail.cpp, src/lib/Makefile, src/qtgui/main.cpp, src/rcldb/rcldb.cpp, src/sampleconf/mimemap: *** empty log message *** 2005-04-04 13:18 +0000 dockes (e69c810eb5b1) * src/index/indexer.cpp, src/index/mimetype.cpp, src/internfile/mh_html.cpp, src/internfile/mh_mail.cpp, src/rcldb/rcldb.cpp, src/sampleconf/mimeconf, src/utils/fstreewalk.cpp, src/utils/fstreewalk.h: *** empty log message *** 2005-03-31 10:04 +0000 dockes (9428bb11ff77) * src/bincimapmime/mime-inputsource.h, src/bincimapmime/mime-parsefull.cc, src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime.h, src/bincimapmime/trbinc.cc, src/common/rclconfig.cpp, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/rcldb/rcldb.cpp: mail handling 1st working version 2005-03-25 09:40 +0000 dockes (408a2650e963 [RECOLL-0_6]) * src/bincimapmime/00README.recoll, src/bincimapmime/trbinc.cc, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h: new file.
* src/bincimapmime/00README.recoll, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime.h, src/bincimapmime/trbinc.cc, src/common/Makefile, src/index/Makefile, src/index/indexer.cpp, src/index/mimetype.cpp, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mh_mail.cpp, src/internfile/mh_mail.h, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/internfile/myhtmlparse.cpp, src/lib/Makefile, src/mk/FreeBSD, src/qtgui/recoll.pro, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.h, src/sampleconf/mimeconf, src/utils/mimeparse.cpp, src/utils/mimeparse.h: mail ckpt 2005-03-17 15:35 +0000 dockes (55a0c15039bf) * src/index/indexer.cpp, src/index/indexer.h, src/internfile/internfile.cpp, src/internfile/mh_html.h: only comments. Before multidoc files 2005-03-17 14:02 +0000 dockes (b1f57902f3c1) * src/bincimapmime/Makefile, src/bincimapmime/iodevice.cc, src/index/indexer.cpp, src/qtgui/recollmain.ui.h, src/sampleconf/mimeconf, src/utils/execmd.cpp, src/utils/execmd.h, src/utils/mimeparse.cpp, src/utils/smallut.cpp, src/utils/smallut.h, src/utils/utf8iter.h: checkpoint after long pause 2005-03-16 07:35 +0000 dockes (4d4d71cd89ea) * src/bincimapmime/Makefile, src/bincimapmime/address.cc, src/bincimapmime/address.h, src/bincimapmime/config.h, src/bincimapmime/convert.cc, src/bincimapmime/convert.h, src/bincimapmime/depot.h, src/bincimapmime/iodevice.cc, src/bincimapmime/iodevice.h, src/bincimapmime/iofactory.cc, src/bincimapmime/iofactory.h, src/bincimapmime/mime-getpart.cc, src/bincimapmime/mime-inputsource.h, src/bincimapmime/mime-parsefull.cc, src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime-printdoc.cc, src/bincimapmime/mime-printheader.cc, src/bincimapmime/mime-utils.h, src/bincimapmime/mime.cc, src/bincimapmime/mime.h, src/bincimapmime/session.h: new file. * src/bincimapmime/Makefile, src/bincimapmime/address.cc, src/bincimapmime/address.h, src/bincimapmime/config.h, src/bincimapmime/convert.cc, src/bincimapmime/convert.h, src/bincimapmime/depot.h, src/bincimapmime/iodevice.cc, src/bincimapmime/iodevice.h, src/bincimapmime/iofactory.cc, src/bincimapmime/iofactory.h, src/bincimapmime/mime-getpart.cc, src/bincimapmime/mime-inputsource.h, src/bincimapmime/mime-parsefull.cc, src/bincimapmime/mime-parseonlyheader.cc, src/bincimapmime/mime-printbody.cc, src/bincimapmime/mime-printdoc.cc, src/bincimapmime/mime-printheader.cc, src/bincimapmime/mime-utils.h, src/bincimapmime/mime.cc, src/bincimapmime/mime.h, src/bincimapmime/session.h: initial import from bincimap-1.3.3 2005-02-11 11:48 +0000 dockes (7b2bdc5c6ed9) * src/README, src/makesrcdist.sh: *** empty log message *** 2005-02-11 11:48 +0000 dockes (ffca521040c2) * src/README: *** empty log message *** 2005-02-11 11:20 +0000 dockes (7c54c58f0fd1) * src/common/uproplist.h, src/utils/utf8testin.txt: new file. * src/common/textsplit.cpp, src/common/uproplist.h, src/utils/Makefile, src/utils/utf8iter.cpp, src/utils/utf8iter.h, src/utils/utf8testin.txt: improved word extraction a bit (unicode punctuation) 2005-02-10 19:52 +0000 dockes (ba4dd19f41c4) * src/utils/utf8iter.cpp, src/utils/utf8iter.h: new file.
* src/common/textsplit.cpp, src/utils/Makefile, src/utils/utf8iter.cpp, src/utils/utf8iter.h: *** empty log message *** 2005-02-10 15:21 +0000 dockes (44892bfc8d49) * src/index/indexer.cpp, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/execmd.cpp, src/utils/fstreewalk.cpp, src/utils/fstreewalk.h, src/utils/smallut.cpp, src/utils/smallut.h: implemented stem databases 2005-02-09 13:34 +0000 dockes (7517469a76b5) * src/index/Makefile, src/index/mimetype.cpp: *** empty log message *** 2005-02-09 12:07 +0000 dockes (e5d0612227af) * src/filters/rcldoc, src/filters/rclsoff, src/utils/wipedir.cpp, src/utils/wipedir.h: new file. * src/VERSION, src/filters/rcldoc, src/filters/rclsoff, src/index/indexer.cpp, src/index/mimetype.cpp, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/myhtmlparse.cpp, src/lib/Makefile, src/qtgui/main.cpp, src/qtgui/recoll.h, src/qtgui/recollmain.ui.h, src/sampleconf/mimeconf, src/sampleconf/mimemap, src/utils/Makefile, src/utils/smallut.cpp, src/utils/smallut.h, src/utils/wipedir.cpp, src/utils/wipedir.h: added support for openoffice and word + optimized decomp temp dir usage 2005-02-08 17:35 +0000 dockes (e5a6d4a27e1f) * src/INSTALL, src/README: *** empty log message *** 2005-02-08 15:08 +0000 dockes (f89783d5d828) * src/excludefile, src/makesrcdist.sh: new file. * src/README, src/excludefile, src/makesrcdist.sh: *** empty log message *** 2005-02-08 15:03 +0000 dockes (4ec10decb898) * src/README: *** empty log message *** 2005-02-08 14:55 +0000 dockes (07541712859f) * src/README: *** empty log message *** 2005-02-08 14:54 +0000 dockes (fa3ad0590138 [RECOLL-0_5]) * src/Makefile, src/README, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/sampleconf/recoll.conf: *** empty log message *** 2005-02-08 14:45 +0000 dockes (d04d78bb1af4) * src/INSTALL, src/internfile/myhtmlparse.cpp, src/qtgui/recoll.pro: *** empty log message *** 2005-02-08 11:59 +0000 dockes (b5f33d8a83cb) * src/common/textsplit.cpp, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: fixed next/prev screen pb + pb with accents when matching in preview 2005-02-08 10:56 +0000 dockes (ea8c32a3b71e) * src/common/textsplit.cpp, src/common/textsplit.h, src/rcldb/rcldb.cpp: phrases ok except for preview position 2005-02-08 09:34 +0000 dockes (8f72bd8ca147) * src/common/textsplit.cpp, src/common/textsplit.h, src/qtgui/recoll.pro, src/qtgui/recollmain.ui.h, src/query/xadump.cpp, src/rcldb/rcldb.cpp, src/utils/execmd.cpp: fixes in textsplit 2005-02-07 13:17 +0000 dockes (3e10d31a55a9) * src/common/textsplit.cpp, src/common/textsplit.h, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: simple term highlighting in query preview 2005-02-04 14:21 +0000 dockes (77a59732f8aa) * src/COPYING, src/INSTALL, src/README, src/VERSION, src/filters/rcluncomp, src/internfile/internfile.cpp, src/internfile/internfile.h, src/mk/Linux, src/qtgui/images/editcopy, src/qtgui/images/editcut, src/qtgui/images/editpaste, src/qtgui/images/filenew, src/qtgui/images/fileopen, src/qtgui/images/filesave, src/qtgui/images/print, src/qtgui/images/redo, src/qtgui/images/searchfind, src/qtgui/images/undo: new file. 
* src/COPYING, src/INSTALL, src/Makefile, src/README, src/VERSION, src/common/Makefile, src/common/unacpp.cpp, src/filters/rcluncomp, src/index/Makefile, src/index/csguess.cpp, src/index/indexer.cpp, src/internfile/internfile.cpp, src/internfile/internfile.h, src/internfile/mimehandler.cpp, src/lib/Makefile, src/mk/FreeBSD, src/mk/Linux, src/qtgui/idxthread.cpp, src/qtgui/images/editcopy, src/qtgui/images/editcut, src/qtgui/images/editpaste, src/qtgui/images/filenew, src/qtgui/images/fileopen, src/qtgui/images/filesave, src/qtgui/images/print, src/qtgui/images/redo, src/qtgui/images/searchfind, src/qtgui/images/undo, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/Makefile, src/rcldb/rcldb.cpp, src/utils/Makefile, src/utils/pathut.cpp, src/utils/transcode.cpp: uncompression+linux port 2005-02-04 09:39 +0000 dockes (482687ce34da) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/index/indexer.cpp, src/internfile/mh_html.cpp, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/lib/Makefile, src/qtgui/recollmain.ui.h, src/utils/smallut.cpp, src/utils/smallut.h: *** empty log message *** 2005-02-04 09:30 +0000 dockes (5fac5dd8a1c4) * src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/recoll.conf: *** empty log message *** 2005-02-04 09:21 +0000 dockes (2ad004ec5fd7) * src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/recoll.conf: new file. * src/sampleconf/mimeconf, src/sampleconf/mimemap, src/sampleconf/recoll.conf: *** empty log message *** 2005-02-02 17:57 +0000 dockes (4819b0b410e7) * src/filters/rclps: new file. * src/filters/rclps: *** empty log message *** 2005-02-01 17:52 +0000 dockes (023ac2c1c87f) * src/internfile/mimehandler.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp: *** empty log message *** 2005-02-01 17:20 +0000 dockes (4eb8337baa03) * src/filters/rclpdf, src/internfile/mh_html.h, src/mk/FreeBSD, src/qtgui/idxthread.h, src/qtgui/recoll.h: new file. * src/filters/rclpdf, src/index/indexer.cpp, src/internfile/mh_html.cpp, src/internfile/mh_html.h, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/lib/Makefile, src/mk/FreeBSD, src/qtgui/idxthread.h, src/qtgui/recoll.h, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/utils/Makefile, src/utils/execmd.cpp: added external filters and pdf handling 2005-02-01 08:42 +0000 dockes (b82908e25c6b) * src/common/Makefile, src/index/Makefile, src/index/recollindex.cpp, src/lib/Makefile, src/qtgui/idxthread.cpp, src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/Makefile, src/rcldb/rcldb.cpp, src/utils/Makefile: *** empty log message *** 2005-01-31 14:31 +0000 dockes (c8a32d0e0056) * src/index/indexer.cpp, src/qtgui/idxthread.cpp, src/utils/smallut.cpp, src/utils/smallut.h: new file. 
* src/common/rclconfig.cpp, src/index/indexer.cpp, src/index/indexer.h, src/index/recollindex.cpp, src/lib/Makefile, src/qtgui/idxthread.cpp, src/qtgui/main.cpp, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/qtry.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/Makefile, src/utils/pathut.cpp, src/utils/pathut.h, src/utils/smallut.cpp, src/utils/smallut.h: first incarnation of indexing thread 2005-01-29 15:41 +0000 dockes (3dd05c65d8ed) * src/index/recollindex.cpp, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/lib/Makefile, src/qtgui/main.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/Makefile: external viewer+ deleted doc purging 2005-01-28 15:25 +0000 dockes (8c6b04552a34) * src/Makefile, src/internfile/indextext.h: new file. * src/Makefile, src/internfile/indextext.h, src/internfile/myhtmlparse.cpp, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: ckpt 2005-01-28 09:37 +0000 dockes (bf2c00ad72d0) * src/internfile/mh_html.cpp, src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h, src/lib/Makefile, src/rcldb/rcldb.cpp: merged modifs from xapian/omega 0.8.5 2005-01-28 08:56 +0000 dockes (a5e2a08ce1b8) * src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h: import from xapian 0.8.5 2005-01-28 08:50 +0000 dockes (73f5b0ed50d8) * src/internfile/htmlparse.cpp, src/internfile/htmlparse.h: Initial recoll modifs for utf8 2005-01-28 08:46 +0000 dockes (04f0053d01e4) * src/internfile/mh_html.cpp, src/rcldb/rcldb.cpp: xapian 0.8.3 2005-01-28 08:45 +0000 dockes (ec7863976555) * src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h: new file. * src/internfile/myhtmlparse.cpp, src/internfile/myhtmlparse.h: *** empty log message *** 2005-01-28 08:41 +0000 dockes (c5c570040571) * src/internfile/htmlparse.cpp, src/internfile/htmlparse.h: new file. * src/internfile/htmlparse.cpp, src/internfile/htmlparse.h: xapian 0.8.3 2005-01-26 13:03 +0000 dockes (5a37e2aa9a53) * src/index/recollindex.cpp, src/internfile/mh_html.cpp, src/internfile/mimehandler.cpp, src/rcldb/rcldb.cpp: sort of indexes html 2005-01-26 11:47 +0000 dockes (eec829a74f2d) * src/internfile/mh_html.cpp: new file. * src/internfile/mh_html.cpp, src/internfile/mimehandler.cpp, src/internfile/mimehandler.h, src/lib/Makefile, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/qtry.cpp, src/rcldb/rcldb.cpp, src/utils/Makefile: ckpt 2005-01-26 11:45 +0000 dockes (1c17d5d56a6b) * src/utils/mimeparse.cpp, src/utils/mimeparse.h: new file. * src/utils/mimeparse.cpp, src/utils/mimeparse.h: mime header parsing embryo 2005-01-25 14:37 +0000 dockes (1d5b47c225bf) * src/internfile/mimehandler.cpp, src/internfile/mimehandler.h: new file. * src/internfile/mimehandler.cpp, src/internfile/mimehandler.h: *** empty log message *** 2005-01-25 14:37 +0000 dockes (46d42849ee3a) * src/lib/Makefile, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h: new file. 
* src/common/Makefile, src/common/rclconfig.cpp, src/index/Makefile, src/index/indexer.h, src/index/recollindex.cpp, src/lib/Makefile, src/qtgui/main.cpp, src/qtgui/recoll.pro, src/qtgui/recollmain.ui, src/qtgui/recollmain.ui.h, src/query/Makefile, src/query/qtry.cpp, src/query/xadump.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/Makefile, src/utils/transcode.h: gui connected to rcldb (init) 2005-01-24 13:17 +0000 dockes (e0104075bdd3) * src/common/Makefile, src/index/Makefile, src/qtgui/form1.ui, src/qtgui/form1.ui.h, src/qtgui/main.cpp, src/query/qtry.cpp, src/query/qxtry.cpp, src/utils/Makefile: new file. * src/common/Makefile, src/common/rclconfig.cpp, src/common/textsplit.h, src/common/unacpp.cpp, src/index/Makefile, src/qtgui/form1.ui, src/qtgui/form1.ui.h, src/qtgui/main.cpp, src/query/Makefile, src/query/qtry.cpp, src/query/qxtry.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/Makefile: *** empty log message *** 2004-12-17 15:50 +0000 dockes (4f7a0a26f6d7) * src/query/xadump.cpp, src/rcldb/rcldb.cpp: very basic indexing working 2004-12-17 15:36 +0000 dockes (325aea11f893) * src/common/unacpp.cpp, src/common/unacpp.h: new file. * src/common/unacpp.cpp, src/common/unacpp.h, src/rcldb/rcldb.cpp: *** empty log message *** 2004-12-17 15:04 +0000 dockes (930a5f50b45e) * src/unac/AUTHORS, src/unac/COPYING, src/unac/README, src/unac/README.recoll, src/unac/unac.c, src/unac/unac.h: new file. * src/unac/AUTHORS, src/unac/COPYING, src/unac/README, src/unac/README.recoll, src/unac/unac.c, src/unac/unac.h: unac 1.7.0 2004-12-17 13:01 +0000 dockes (70ded59ba246) * src/query/Makefile, src/query/xadump.cpp: new file. * src/common/rclconfig.h, src/common/textsplit.cpp, src/common/textsplit.h, src/index/recollindex.cpp, src/query/Makefile, src/query/xadump.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h: *** empty log message *** 2004-12-15 15:00 +0000 dockes (1e3483587b45) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/textsplit.cpp, src/index/csguess.cpp, src/index/csguess.h, src/index/indexer.h, src/index/mimetype.cpp, src/index/recollindex.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/transcode.cpp: warnings cleanup 2004-12-15 09:43 +0000 dockes (502752de59d5) * src/utils/transcode.cpp, src/utils/transcode.h: new file. * src/utils/transcode.cpp, src/utils/transcode.h: *** empty log message *** 2004-12-15 08:21 +0000 dockes (520f5e294f10) * src/index/csguess.cpp, src/index/csguess.h: new file. * src/index/csguess.cpp, src/index/csguess.h: just converted (indent+comments) from estraier 2004-12-14 17:54 +0000 dockes (12a23501eee8) * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/textsplit.h, src/index/indexer.h, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/readfile.cpp, src/utils/readfile.h: new file. * src/common/rclconfig.cpp, src/common/rclconfig.h, src/common/textsplit.cpp, src/common/textsplit.h, src/index/indexer.h, src/index/mimetype.cpp, src/index/mimetype.h, src/index/recollindex.cpp, src/rcldb/rcldb.cpp, src/rcldb/rcldb.h, src/utils/execmd.cpp, src/utils/pathut.cpp, src/utils/pathut.h, src/utils/readfile.cpp, src/utils/readfile.h: *** empty log message *** 2004-12-13 15:42 +0000 dockes (8c1fce132f19) * src/common/textsplit.cpp, src/index/mimetype.cpp, src/index/mimetype.h, src/index/recollindex.cpp: new file. * src/common/textsplit.cpp, src/index/mimetype.cpp, src/index/mimetype.h, src/index/recollindex.cpp: *** empty log message *** 2004-12-12 08:58 +0000 dockes (17a132340425) * src/utils/execmd.cpp, src/utils/execmd.h: new file. 
* src/utils/execmd.cpp, src/utils/execmd.h, src/utils/fstreewalk.cpp: *** empty log message *** 2004-12-10 18:13 +0000 dockes (1de12131e4a4) * src/utils/fstreewalk.cpp, src/utils/fstreewalk.h, src/utils/pathut.cpp, src/utils/pathut.h: new file. * src/utils/fstreewalk.cpp, src/utils/fstreewalk.h, src/utils/pathut.cpp, src/utils/pathut.h: *** empty log message *** 2004-12-10 18:13 +0000 unknown (318176766db7) * Standard project directories initialized by cvs2svn. recoll-1.26.3/ylwrap0000755000175000017500000000301313570165162011253 00000000000000#! /bin/sh # ylwrap - wrapper for lex/yacc invocations. Local version, the # autotools scriptversion=2015-08-06.06; # UTC doesnt work for us # because it does not move location.hh position.hh stack.hh into the # appropriate directory (which is a bug, but it's simpler to rewrite a # simple version for our needs than to fix the original). fatal() { echo $* 1>&2 exit 1 } usage() { fatal "Usage: ylwrap query/wasaparse.y" } test $# -ge 1 || usage toptmpdir=/tmp/rclylwrap$$ tmpdir=${toptmpdir}/tmp mkdir -p "${tmpdir}" cleanup() { rm -rf "${toptmpdir}"/tmp/* rmdir "${tmpdir}" rmdir "${toptmpdir}" } trap cleanup 0 2 15 # First arg is the input file input=$1 inputdir=`dirname $1` curdir=`pwd` || exit 1 absinput="${curdir}/${input}" (cd "${tmpdir}"; bison -d -y $absinput) ls $tmpdir for f in location.hh position.hh stack.hh; do cmp -s "${tmpdir}"/$f "${inputdir}"/$f || cp -p "${tmpdir}"/$f "${inputdir}" done # Note that we'd prefer to use wasaparse.h instead of wasaparse.hpp, # but automake generates a dist list with wasaparse.hpp, so no choice. # Fix the include line in y.tab.c (it wants to include y.tab.h, but we already # include it as wasaparse.hpp (cd "${tmpdir}"; \ sed -e 's/#include "y.tab.h"//' < y.tab.c > toto; \ mv -f toto y.tab.c) cmp -s "${tmpdir}"/y.tab.c "${inputdir}"/wasaparse.cpp || \ cp -p "${tmpdir}"/y.tab.c "${inputdir}"/wasaparse.cpp cmp -s "${tmpdir}"/y.tab.h "${inputdir}"/wasaparse.hpp || \ cp -p "${tmpdir}"/y.tab.h "${inputdir}"/wasaparse.hpp recoll-1.26.3/INSTALL0000644000175000017500000016160413303776060011052 00000000000000 More documentation can be found in the doc/ directory or at http://www.recoll.org Link: home: Recoll user manual Link: up: Recoll user manual Link: prev: 4.3. API Link: next: 5.2. Supporting packages Chapter 5. Installation and configuration Prev Next ---------------------------------------------------------------------- Chapter 5. Installation and configuration 5.1. Installing a binary copy Recoll binary copies are always distributed as regular packages for your system. They can be obtained either through the system's normal software distribution framework (e.g. Debian/Ubuntu apt, FreeBSD ports, etc.), or from some type of "backports" repository providing versions newer than the standard ones, or found on the Recoll WEB site in some cases. There used to exist another form of binary install, as pre-compiled source trees, but these are just less convenient than the packages and don't exist any more. The package management tools will usually automatically deal with hard dependancies for packages obtained from a proper package repository. You will have to deal with them by hand for downloaded packages (for example, when dpkg complains about missing dependancies). In all cases, you will have to check or install supporting applications for the file types that you want to index beyond those that are natively processed by Recoll (text, HTML, email files, and a few others). 
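As an illustration (the package file name is hypothetical and the helper packages are only examples of supporting applications; exact names depend on your distribution and release), installing a manually downloaded package on a Debian-like system and then pulling in missing dependencies and a few external helpers might look like:

    sudo dpkg -i recoll_1.26.3-1_amd64.deb
    sudo apt-get -f install
    sudo apt-get install poppler-utils antiword python3-mutagen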
You should also maybe have a look at the configuration section (but this may not be necessary for a quick test with default parameters). Most parameters can be more conveniently set from the GUI interface. ---------------------------------------------------------------------- Prev Next 4.3. API Home 5.2. Supporting packages Link: home: Recoll user manual Link: up: Chapter 5. Installation and configuration Link: prev: Chapter 5. Installation and configuration Link: next: 5.3. Building from source 5.2. Supporting packages Prev Chapter 5. Installation and configuration Next ---------------------------------------------------------------------- 5.2. Supporting packages Recoll uses external applications to index some file types. You need to install them for the file types that you wish to have indexed (these are run-time optional dependencies. None is needed for building or running Recoll except for indexing their specific file type). After an indexing pass, the commands that were found missing can be displayed from the recoll File menu. The list is stored in the missing text file inside the configuration directory. A list of common file types which need external commands follows. Many of the handlers need the iconv command, which is not always listed as a dependancy. Please note that, due to the relatively dynamic nature of this information, the most up to date version is now kept on http://www.recoll.org/features.html along with links to the home pages or best source/patches pages, and misc tips. The list below is not updated often and may be quite stale. For many Linux distributions, most of the commands listed can be installed from the package repositories. However, the packages are sometimes outdated, or not the best version for Recoll, so you should take a look at http://www.recoll.org/features.html if a file type is important to you. As of Recoll release 1.14, a number of XML-based formats that were handled by ad hoc handler code now use the xsltproc command, which usually comes with libxslt. These are: abiword, fb2 (ebooks), kword, openoffice, svg. Now for the list: o Openoffice files need unzip and xsltproc. o PDF files need pdftotext which is part of Poppler (usually comes with the poppler-utils package). Avoid the original one from Xpdf. o Postscript files need pstotext. The original version has an issue with shell character in file names, which is corrected in recent packages. See http://www.recoll.org/features.html for more detail. o MS Word needs antiword. It is also useful to have wvWare installed as it may be be used as a fallback for some files which antiword does not handle. o MS Excel and PowerPoint are processed by internal Python handlers. o MS Open XML (docx) needs xsltproc. o Wordperfect files need wpd2html from the libwpd (or libwpd-tools on Ubuntu) package. o RTF files need unrtf, which, in its older versions, has much trouble with non-western character sets. Many Linux distributions carry outdated unrtf versions. Check http://www.recoll.org/features.html for details. o TeX files need untex or detex. Check http://www.recoll.org/features.html for sources if it's not packaged for your distribution. o dvi files need dvips. o djvu files need djvutxt and djvused from the DjVuLibre package. o Audio files: Recoll releases 1.14 and later use a single Python handler based on mutagen for all audio file types. o Pictures: Recoll uses the Exiftool Perl package to extract tag information. Most image file formats are supported. 
Note that there may not be much interest in indexing the technical tags (image size, aperture, etc.). This is only of interest if you store personal tags or textual descriptions inside the image files. o chm: files in Microsoft help format need Python and the pychm module (which needs chmlib). o ICS: up to Recoll 1.13, iCalendar files need Python and the icalendar module. icalendar is not needed for newer versions, which use internal code. o Zip archives need Python (and the standard zipfile module). o Rar archives need Python, the rarfile Python module and the unrar utility. o Midi karaoke files need Python and the Midi module o Konqueror webarchive format with Python (uses the Tarfile module). o Mimehtml web archive format (support based on the email handler, which introduces some mild weirdness, but still usable). Text, HTML, email folders, and Scribus files are processed internally. Lyx is used to index Lyx files. Many handlers need iconv and the standard sed and awk. ---------------------------------------------------------------------- Prev Up Next Chapter 5. Installation and configuration Home 5.3. Building from source Link: home: Recoll user manual Link: up: Chapter 5. Installation and configuration Link: prev: 5.2. Supporting packages Link: next: 5.4. Configuration overview 5.3. Building from source Prev Chapter 5. Installation and configuration Next ---------------------------------------------------------------------- 5.3. Building from source 5.3.1. Prerequisites If you can install any or all of the following through the package manager for your system, all the better. Especially Qt is a very big piece of software, but you will most probably be able to find a binary package. You may have to compile Xapian but this is easy. The shopping list: o C++ compiler. Up to Recoll version 1.13.04, its absence can manifest itself by strange messages about a missing iconv_open. o Development files for Xapian core. Important If you are building Xapian for an older CPU (before Pentium 4 or Athlon 64), you need to add the --disable-sse flag to the configure command. Else all Xapian application will crash with an illegal instruction error. o Development files for Qt 4 . Recoll has not been tested with Qt 5 yet. Recoll 1.15.9 was the last version to support Qt 3. If you do not want to install or build the Qt Webkit module, Recoll has a configuration option to disable its use (see further). o Development files for X11 and zlib. o You may also need libiconv. On Linux systems, the iconv interface is part of libc and you should not need to do anything special. Check the Recoll download page for up to date version information. 5.3.2. Building Recoll has been built on Linux, FreeBSD, Mac OS X, and Solaris, most versions after 2005 should be ok, maybe some older ones too (Solaris 8 is ok). If you build on another system, and need to modify things, I would very much welcome patches. Configure options: o --without-aspell will disable the code for phonetic matching of search terms. o --with-fam or --with-inotify will enable the code for real time indexing. Inotify support is enabled by default on recent Linux systems. o --with-qzeitgeist will enable sending Zeitgeist events about the visited search results, and needs the qzeitgeist package. o --disable-webkit is available from version 1.17 to implement the result list with a Qt QTextBrowser instead of a WebKit widget if you do not or can't depend on the latter. 
o --disable-idxthreads is available from version 1.19 to suppress multithreading inside the indexing process. You can also use the run-time configuration to restrict recollindex to using a single thread, but the compile-time option may disable a few more unused locks. This only applies to the use of multithreading for the core index processing (data input). The Recoll monitor mode always uses at least two threads of execution. o --disable-python-module will avoid building the Python module. o --disable-xattr will prevent fetching data from file extended attributes. Beyond a few standard attributes, fetching extended attributes data can only be useful is some application stores data in there, and also needs some simple configuration (see comments in the fields configuration file). o --enable-camelcase will enable splitting camelCase words. This is not enabled by default as it has the unfortunate side-effect of making some phrase searches quite confusing: ie, "MySQL manual" would be matched by "MySQL manual" and "my sql manual" but not "mysql manual" (only inside phrase searches). o --with-file-command Specify the version of the 'file' command to use (ie: --with-file-command=/usr/local/bin/file). Can be useful to enable the gnu version on systems where the native one is bad. o --disable-qtgui Disable the Qt interface. Will allow building the indexer and the command line search program in absence of a Qt environment. o --disable-x11mon Disable X11 connection monitoring inside recollindex. Together with --disable-qtgui, this allows building recoll without Qt and X11. o --disable-pic will compile Recoll with position-dependant code. This is incompatible with building the KIO or the Python or PHP extensions, but might yield very marginally faster code. o Of course the usual autoconf configure options, like --prefix apply. Normal procedure: cd recoll-xxx ./configure make (practices usual hardship-repelling invocations) There is little auto-configuration. The configure script will mainly link one of the system-specific files in the mk directory to mk/sysconf. If your system is not known yet, it will tell you as much, and you may want to manually copy and modify one of the existing files (the new file name should be the output of uname -s). 5.3.2.1. Building on Solaris We did not test building the GUI on Solaris for recent versions. You will need at least Qt 4.4. There are some hints on an old web site page, they may still be valid. Someone did test the 1.19 indexer and Python module build, they do work, with a few minor glitches. Be sure to use GNU make and install. 5.3.3. Installation Either type make install or execute recollinstall prefix, in the root of the source tree. This will copy the commands to prefix/bin and the sample configuration files, scripts and other shared data to prefix/share/recoll. If the installation prefix given to recollinstall is different from either the system default or the value which was specified when executing configure (as in configure --prefix /some/path), you will have to set the RECOLL_DATADIR environment variable to indicate where the shared data is to be found (ie for (ba)sh: export RECOLL_DATADIR=/some/path/share/recoll). You can then proceed to configuration. ---------------------------------------------------------------------- Prev Up Next 5.2. Supporting packages Home 5.4. Configuration overview Link: home: Recoll user manual Link: up: Chapter 5. Installation and configuration Link: prev: 5.3. Building from source 5.4. 
Configuration overview Prev Chapter 5. Installation and configuration ---------------------------------------------------------------------- 5.4. Configuration overview Most of the parameters specific to the recoll GUI are set through the Preferences menu and stored in the standard Qt place ($HOME/.config/Recoll.org/recoll.conf). You probably do not want to edit this by hand. Recoll indexing options are set inside text configuration files located in a configuration directory. There can be several such directories, each of which defines the parameters for one index. The configuration files can be edited by hand or through the Index configuration dialog (Preferences menu). The GUI tool will try to respect your formatting and comments as much as possible, so it is quite possible to use both ways. The most accurate documentation for the configuration parameters is given by comments inside the default files, and we will just give a general overview here. By default, for each index, there are two sets of configuration files. System-wide configuration files are kept in a directory named like /usr/[local/]share/recoll/examples, and define default values, shared by all indexes. For each index, a parallel set of files defines the customized parameters. In addition (as of Recoll version 1.19.7), it is possible to specify two additional configuration directories which will be stacked before and after the user configuration directory. These are defined by the RECOLL_CONFTOP and RECOLL_CONFMID environment variables. Values from configuration files inside the top directory will override user ones, values from configuration files inside the middle directory will override system ones and be overriden by user ones. These two variables may be of use to applications which augment Recoll functionality, and need to add configuration data without disturbing the user's files. Please note that the two, currently single, values will probably be interpreted as colon-separated lists in the future: do not use colon characters inside the directory paths. The default location of the configuration is the .recoll directory in your home. Most people will only use this directory. This location can be changed, or others can be added with the RECOLL_CONFDIR environment variable or the -c option parameter to recoll and recollindex. If the .recoll directory does not exist when recoll or recollindex are started, it will be created with a set of empty configuration files. recoll will give you a chance to edit the configuration file before starting indexing. recollindex will proceed immediately. To avoid mistakes, the automatic directory creation will only occur for the default location, not if -c or RECOLL_CONFDIR were used (in the latter cases, you will have to create the directory). All configuration files share the same format. For example, a short extract of the main configuration file might look as follows: # Space-separated list of directories to index. topdirs = ~/docs /usr/share/doc [~/somedirectory-with-utf8-txt-files] defaultcharset = utf-8 There are three kinds of lines: o Comment (starts with #) or empty. o Parameter affectation (name = value). o Section definition ([somedirname]). Depending on the type of configuration file, section definitions either separate groups of parameters or allow redefining some parameters for a directory sub-tree. They stay in effect until another section definition, or the end of file, is encountered. 
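As an additional illustration of section scope (the directory names are made up for the example), a fragment like the following sets a global value, then overrides it for specific sub-trees only:

    # Global value, used everywhere by default
    defaultcharset = iso-8859-1

    [~/docs/japanese]
    # In effect from here until the next section definition or the end of file
    defaultcharset = euc-jp

    [~/docs/other]
    # A new section: the euc-jp value above no longer applies here
    defaultcharset = utf-8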
Some of the parameters used for indexing are looked up hierarchically from the current directory location upwards. Not all parameters can be meaningfully redefined; this is specified for each in the next section. When found at the beginning of a file path, the tilde character (~) is expanded to the name of the user's home directory, as a shell would do. White space is used for separation inside lists. List elements with embedded spaces can be quoted using double-quotes. Encoding issues. Most of the configuration parameters are plain ASCII. Two particular sets of values may cause encoding issues: o File path parameters may contain non-ascii characters and should use the exact same byte values as found in the file system directory. Usually, this means that the configuration file should use the system default locale encoding. o The unac_except_trans parameter should be encoded in UTF-8. If your system locale is not UTF-8, and you need to also specify non-ascii file paths, this poses a difficulty because common text editors cannot handle multiple encodings in a single file. In this relatively unlikely case, you can edit the configuration file as two separate text files with appropriate encodings, and concatenate them to create the complete configuration. 5.4.1. Environment variables RECOLL_CONFDIR Defines the main configuration directory. RECOLL_TMPDIR, TMPDIR Locations for temporary files, in this order of priority. The default if none of these is set is to use /tmp. Big temporary files may be created during indexing, mostly for decompressing, and also for processing, e.g. email attachments. RECOLL_CONFTOP, RECOLL_CONFMID Allow adding configuration directories with priorities below and above the user directory (see above the Configuration overview section for details). RECOLL_EXTRA_DBS, RECOLL_ACTIVE_EXTRA_DBS Help for setting up external indexes. See this paragraph for explanations. RECOLL_DATADIR Defines a replacement for the default location of Recoll data files, normally found in, e.g., /usr/share/recoll. RECOLL_FILTERSDIR Defines a replacement for the default location of Recoll filters, normally found in, e.g., /usr/share/recoll/filters. ASPELL_PROG The aspell program to use for creating the spelling dictionary. The result has to be compatible with the libaspell which Recoll is using. 5.4.2. The main configuration file, recoll.conf recoll.conf is the main configuration file. It defines things like what to index (top directories and things to ignore), and the default character set to use for document types which do not specify it internally. The default configuration will index your home directory. If this is not appropriate, start recoll to create a blank configuration, click Cancel, and edit the configuration file before restarting the command. This will start the initial indexing, which may take some time. Most of the following parameters can be changed from the Index Configuration menu in the recoll interface. Some can only be set by editing the configuration file. 5.4.2.1. Parameters affecting what documents we index: topdirs Specifies the list of directories or files to index (recursively for directories). You can use symbolic links as elements of this list. See the followLinks option about following symbolic links found under the top elements (not followed by default). skippedNames A space-separated list of wildcard patterns for names of files or directories that should be completely ignored.
The list defined in the default file is: skippedNames = #* bin CVS Cache cache* caughtspam tmp .thumbnails .svn \ *~ .beagle .git .hg .bzr loop.ps .xsession-errors \ .recoll* xapiandb recollrc recoll.conf The list can be redefined at any sub-directory in the indexed area. The top-level directories are not affected by this list (that is, a directory in topdirs might match and would still be indexed). The list in the default configuration does not exclude hidden directories (names beginning with a dot), which means that it may index quite a few things that you do not want. On the other hand, email user agents like thunderbird usually store messages in hidden directories, and you probably want this indexed. One possible solution is to have .* in skippedNames, and add things like ~/.thunderbird or ~/.evolution in topdirs. Not even the file names are indexed for patterns in this list. See the noContentSuffixes variable for an alternative approach which indexes the file names. noContentSuffixes This is a list of file name endings (not wildcard expressions, nor dot-delimited suffixes). Only the names of matching files will be indexed (no attempt at MIME type identification, no decompression, no content indexing). This can be redefined for subdirectories, and edited from the GUI. The default value is: noContentSuffixes = .md5 .map \ .o .lib .dll .a .sys .exe .com \ .mpp .mpt .vsd \ .img .img.gz .img.bz2 .img.xz .image .image.gz .image.bz2 .image.xz \ .dat .bak .rdf .log.gz .log .db .msf .pid \ ,v ~ # skippedPaths and daemSkippedPaths A space-separated list of patterns for paths of files or directories that should be skipped. There is no default in the sample configuration file, but the code always adds the configuration and database directories in there. skippedPaths is used both by batch and real time indexing. daemSkippedPaths can be used to specify things that should be indexed at startup, but not monitored. Example of use for skipping text files only in a specific directory: skippedPaths = ~/somedir/*.txt skippedPathsFnmPathname The values in the *skippedPaths variables are matched by default with fnmatch(3), with the FNM_PATHNAME flag. This means that '/' characters must be matched explicitely. You can set skippedPathsFnmPathname to 0 to disable the use of FNM_PATHNAME (meaning that /*/dir3 will match /dir1/dir2/dir3). zipSkippedNames A space-separated list of patterns for names of files or directories that should be ignored inside zip archives. This is used directly by the zip handler, and has a function similar to skippedNames, but works independantly. Can be redefined for filesystem subdirectories. For versions up to 1.19, you will need to update the Zip handler and install a supplementary Python module. The details are described on the Recoll wiki. followLinks Specifies if the indexer should follow symbolic links while walking the file tree. The default is to ignore symbolic links to avoid multiple indexing of linked files. No effort is made to avoid duplication when this option is set to true. This option can be set individually for each of the topdirs members by using sections. It can not be changed below the topdirs level. indexedmimetypes Recoll normally indexes any file which it knows how to read. This list lets you restrict the indexed MIME types to what you specify. If the variable is unspecified or the list empty (the default), all supported types are processed. Can be redefined for subdirectories. excludedmimetypes This list lets you exclude some MIME types from indexing. 
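For instance (a sketch; the exact MIME type names to exclude depend on your data), keeping some common audio and video types out of the index could look like:

    excludedmimetypes = audio/mpeg audio/x-flac video/mp4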
Can be redefined for subdirectories. compressedfilemaxkbs Size limit for compressed (.gz or .bz2) files. These need to be decompressed in a temporary directory for identification, which can be very wasteful if 'uninteresting' big compressed files are present. Negative means no limit, 0 means no processing of any compressed file. Defaults to -1. textfilemaxmbs Maximum size for text files. Very big text files are often uninteresting logs. Set to -1 to disable (default 20MB). textfilepagekbs If set to other than -1, text files will be indexed as multiple documents of the given page size. This may be useful if you do want to index very big text files as it will both reduce memory usage at index time and help with loading data to the preview window. A size of a few megabytes would seem reasonable (default: 1MB). membermaxkbs This defines the maximum size in kilobytes for an archive member (zip, tar or rar at the moment). Bigger entries will be skipped. indexallfilenames Recoll indexes file names in a special section of the database to allow specific file name searches using wild cards. This parameter decides if file name indexing is performed only for files with MIME types that would qualify them for full text indexing, or for all files inside the selected subtrees, independently of MIME type. usesystemfilecommand Decide if we execute a system command (file -i by default) as a final step for determining the MIME type for a file (the main procedure uses suffix associations as defined in the mimemap file). This can be useful for files with suffix-less names, but it will also cause the indexing of many bogus "text" files. systemfilecommand Command to use for MIME type determination if usesystemfilecommand is set. Recent versions of xdg-mime sometimes work better than file. processwebqueue If this is set, process the directory where Web browser plugins copy visited pages for indexing. webqueuedir The path to the web indexing queue. This is hard-coded in the Firefox plugin as ~/.recollweb/ToIndex so there should be no need to change it. 5.4.2.2. Parameters affecting how we generate terms: Changing some of these parameters will imply a full reindex. Also, when using multiple indexes, it may not make sense to search indexes that don't share the values for these parameters, because they usually affect both search and index operations. indexStripChars Decide if we strip characters of diacritics and convert them to lower-case before terms are indexed. If we don't, searches sensitive to case and diacritics can be performed, but the index will be bigger, and some marginal weirdness may sometimes occur. The default is a stripped index (indexStripChars = 1) for now. When using multiple indexes for a search, this parameter must be defined identically for all. Changing the value implies an index reset. maxTermExpand Maximum expansion count for a single term (e.g.: when using wildcards). The default of 10000 is reasonable and will avoid queries that appear frozen while the engine is walking the term list. maxXapianClauses Maximum number of elementary clauses we can add to a single Xapian query. In some cases, the result of term expansion can be multiplicative, and we want to avoid using excessive memory. The default of 100 000 should be both high enough in most cases and compatible with current typical hardware configurations. nonumbers If this is set to true, no terms will be generated for numbers. For example "123", "1.5e6", 192.168.1.4, would not be indexed ("value123" would still be).
Numbers are often quite interesting to search for, and this should probably not be set except for special situations, ie, scientific documents with huge amounts of numbers in them. This can only be set for a whole index, not for a subtree. nocjk If this set to true, specific east asian (Chinese Korean Japanese) characters/word splitting is turned off. This will save a small amount of cpu if you have no CJK documents. If your document base does include such text but you are not interested in searching it, setting nocjk may be a significant time and space saver. cjkngramlen This lets you adjust the size of n-grams used for indexing CJK text. The default value of 2 is probably appropriate in most cases. A value of 3 would allow more precision and efficiency on longer words, but the index will be approximately twice as large. indexstemminglanguages A list of languages for which the stem expansion databases will be built. See recollindex(1) or use the recollindex -l command for possible values. You can add a stem expansion database for a different language by using recollindex -s, but it will be deleted during the next indexing. Only languages listed in the configuration file are permanent. defaultcharset The name of the character set used for files that do not contain a character set definition (ie: plain text files). This can be redefined for any sub-directory. If it is not set at all, the character set used is the one defined by the nls environment ( LC_ALL, LC_CTYPE, LANG), or iso8859-1 if nothing is set. unac_except_trans This is a list of characters, encoded in UTF-8, which should be handled specially when converting text to unaccented lowercase. For example, in Swedish, the letter a with diaeresis has full alphabet citizenship and should not be turned into an a. Each element in the space-separated list has the special character as first element and the translation following. The handling of both the lowercase and upper-case versions of a character should be specified, as appartenance to the list will turn-off both standard accent and case processing. Example for Swedish: unac_except_trans = aaaa AAaa a:a: A:a: o:o: O:o: Note that the translation is not limited to a single character, you could very well have something like u:ue in the list. The default value set for unac_except_trans can't be listed here because I have trouble with SGML and UTF-8, but it only contains ligature decompositions: german ss, oe, ae, fi, fl. This parameter can't be defined for subdirectories, it is global, because there is no way to do otherwise when querying. If you have document sets which would need different values, you will have to index and query them separately. maildefcharset This can be used to define the default character set specifically for email messages which don't specify it. This is mainly useful for readpst (libpst) dumps, which are utf-8 but do not say so. localfields This allows setting fields for all documents under a given directory. Typical usage would be to set an "rclaptg" field, to be used in mimeview to select a specific viewer. If several fields are to be set, they should be separated with a semi-colon (';') character, which there is currently no way to escape. Also note the initial semi-colon. Example: localfields= ;rclaptg=gnus;other = val, then select specifier viewer with mimetype|tag=... in mimeview. testmodifusemtime If true, use mtime instead of default ctime to determine if a file has been modified (in addition to size, which is always used). 
Setting this can reduce re-indexing on systems where extended attributes are modified (by some other application), but not indexed (changing extended attributes only affects ctime). Notes: o This may prevent detection of change in some marginal file rename cases (the target would need to have the same size and mtime). o You should probably also set noxattrfields to 1 in this case, except if you still prefer to perform xattr indexing, for example if the local file update pattern makes it of value (as in general, there is a risk for pure extended attributes updates without file modification to go undetected). Perform a full index reset after changing the value of this parameter. noxattrfields Recoll versions 1.19 and later automatically translate file extended attributes into document fields (to be processed according to the parameters from the fields file). Setting this variable to 1 will disable the behaviour. metadatacmds This allows executing external commands for each file and storing the output in Recoll document fields. This could be used for example to index external tag data. The value is a list of field names and commands, don't forget an initial semi-colon. Example: [/some/area/of/the/fs] metadatacmds = ; tags = tmsu tags %f; otherfield = somecmd -xx %f As a specially disgusting hack brought by Recoll 1.19.7, if a "field name" begins with rclmulti, the data returned by the command is expected to contain multiple field values, in configuration file format. This allows setting several fields by executing a single command. Example: metadatacmds = ; rclmulti1 = somecmd %f If somecmd returns data in the form of: field1 = value1 field2 = value for field2 field1 and field2 will be set inside the document metadata. 5.4.2.3. Parameters affecting where and how we store things: dbdir The name of the Xapian data directory. It will be created if needed when the index is initialized. If this is not an absolute path, it will be interpreted relative to the configuration directory. The value can have embedded spaces but starting or trailing spaces will be trimmed. You cannot use quotes here. idxstatusfile The name of the scratch file where the indexer process updates its status. Default: idxstatus.txt inside the configuration directory. maxfsoccuppc Maximum file system occupation before we stop indexing. The value is a percentage, corresponding to what the "Capacity" df output column shows. The default value is 0, meaning no checking. mboxcachedir The directory where mbox message offsets cache files are held. This is normally $RECOLL_CONFDIR/mboxcache, but it may be useful to share a directory between different configurations. mboxcacheminmbs The minimum mbox file size over which we cache the offsets. There is really no sense in caching offsets for small files. The default is 5 MB. webcachedir This is only used by the web browser plugin indexing code, and defines where the cache for visited pages will live. Default: $RECOLL_CONFDIR/webcache webcachemaxmbs This is only used by the web browser plugin indexing code, and defines the maximum size for the web page cache. Default: 40 MB. Quite unfortunately, this is only taken into account when creating the cache file. You need to delete the file for a change to be taken into account. idxflushmb Threshold (megabytes of new text data) where we flush from memory to disk index. Setting this can help control memory usage. 
A value of 0 means no explicit flushing, letting Xapian use its own default, which is flushing every 10000 (or XAPIAN_FLUSH_THRESHOLD) documents, which gives little memory usage control, as memory usage also depends on average document size. The default value is 10, and it is probably a bit low. If your system usually has free memory, you can try higher values between 20 and 80. In my experience, values beyond 100 are always counterproductive. 5.4.2.4. Parameters affecting multithread processing The Recoll indexing process recollindex can use multiple threads to speed up indexing on multiprocessor systems. The work done to index files is divided in several stages and some of the stages can be executed by multiple threads. The stages are: 1. File system walking: this is always performed by the main thread. 2. File conversion and data extraction. 3. Text processing (splitting, stemming, etc.) 4. Xapian index update. You can also read a longer document about the transformation of Recoll indexing to multithreading. The threads configuration is controlled by two configuration file parameters. thrQSizes This variable defines the job input queues configuration. There are three possible queues for stages 2, 3 and 4, and this parameter should give the queue depth for each stage (three integer values). If a value of -1 is used for a given stage, no queue is used, and the thread will go on performing the next stage. In practise, deep queues have not been shown to increase performance. A value of 0 for the first queue tells Recoll to perform autoconfiguration (no need for the two other values in this case) - this is the default configuration. thrTCounts This defines the number of threads used for each stage. If a value of -1 is used for one of the queue depths, the corresponding thread count is ignored. It makes no sense to use a value other than 1 for the last stage because updating the Xapian index is necessarily single-threaded (and protected by a mutex). The following example would use three queues (of depth 2), and 4 threads for converting source documents, 2 for processing their text, and one to update the index. This was tested to be the best configuration on the test system (quadri-processor with multiple disks). thrQSizes = 2 2 2 thrTCounts = 4 2 1 The following example would use a single queue, and the complete processing for each document would be performed by a single thread (several documents will still be processed in parallel in most cases). The threads will use mutual exclusion when entering the index update stage. In practise the performance would be close to the precedent case in general, but worse in certain cases (e.g. a Zip archive would be performed purely sequentially), so the previous approach is preferred. YMMV... The 2 last values for thrTCounts are ignored. thrQSizes = 2 -1 -1 thrTCounts = 6 1 1 The following example would disable multithreading. Indexing will be performed by a single thread. thrQSizes = -1 -1 -1 5.4.2.5. Miscellaneous parameters: autodiacsens IF the index is not stripped, decide if we automatically trigger diacritics sensitivity if the search term has accented characters (not in unac_except_trans). Else you need to use the query language and the D modifier to specify diacritics sensitivity. Default is no. autocasesens IF the index is not stripped, decide if we automatically trigger character case sensitivity if the search term has upper-case characters in any but the first position. 
Else you need to use the query language and the C modifier to specify character-case sensitivity. Default is yes. loglevel,daemloglevel Verbosity level for recoll and recollindex. A value of 4 lists quite a lot of debug/information messages. 2 only lists errors. The daemversion is specific to the indexing monitor daemon. logfilename, daemlogfilename Where the messages should go. 'stderr' can be used as a special value, and is the default. The daemversion is specific to the indexing monitor daemon. checkneedretryindexscript This defines the name for a command executed by recollindex when starting indexing. If the exit status of the command is 0, recollindex retries to index all files which previously could not be indexed because of data extraction errors. The default value is a script which checks if any of the common bin directories have changed (indicating that a helper program may have been installed). mondelaypatterns This allows specify wildcard path patterns (processed with fnmatch(3) with 0 flag), to match files which change too often and for which a delay should be observed before re-indexing. This is a space-separated list, each entry being a pattern and a time in seconds, separated by a colon. You can use double quotes if a path entry contains white space. Example: mondelaypatterns = *.log:20 "this one has spaces*:10" monixinterval Minimum interval (seconds) for processing the indexing queue. The real time monitor does not process each event when it comes in, but will wait this time for the queue to accumulate to diminish overhead and in order to aggregate multiple events to the same file. Default 30 S. monauxinterval Period (in seconds) at which the real time monitor will regenerate the auxiliary databases (spelling, stemming) if needed. The default is one hour. monioniceclass, monioniceclassdata These allow defining the ionice class and data used by the indexer (default class 3, no data). filtermaxseconds Maximum handler execution time, after which it is aborted. Some postscript programs just loop... filtermaxmbytes Recoll 1.20.7 and later. Maximum handler memory utilisation. This uses setrlimit(RLIMIT_AS) on most systems (total virtual memory space size limit). Some programs may start with 500 MBytes of mapped shared libraries, so take this into account when choosing a value. The default is a liberal 2000MB. filtersdir A directory to search for the external input handler scripts used to index some types of files. The value should not be changed, except if you want to modify one of the default scripts. The value can be redefined for any sub-directory. iconsdir The name of the directory where recoll result list icons are stored. You can change this if you want different images. idxabsmlen Recoll stores an abstract for each indexed file inside the database. The text can come from an actual 'abstract' section in the document or will just be the beginning of the document. It is stored in the index so that it can be displayed inside the result lists without decoding the original file. The idxabsmlen parameter defines the size of the stored abstract. The default value is 250 bytes. The search interface gives you the choice to display this stored text or a synthetic abstract built by extracting text around the search terms. If you always prefer the synthetic abstract, you can reduce this value and save a little space. idxmetastoredlen Maximum stored length for metadata fields. 
This does not affect indexing (the whole field is processed anyway), just the amount of data stored in the index for the purpose of displaying fields inside result lists or previews. The default value is 150 bytes which may be too low if you have custom fields. aspellLanguage Language definitions to use when creating the aspell dictionary. The value must match a set of aspell language definition files. You can type "aspell config" to see where these are installed (look for data-dir). The default if the variable is not set is to use your desktop national language environment to guess the value. noaspell If this is set, the aspell dictionary generation is turned off. Useful for cases where you don't need the functionality or when it is unusable because aspell crashes during dictionary generation. mhmboxquirks This allows definining location-related quirks for the mailbox handler. Currently only the tbird flag is defined, and it should be set for directories which hold Thunderbird data, as their folder format is weird. 5.4.3. The fields file This file contains information about dynamic fields handling in Recoll. Some very basic fields have hard-wired behaviour, and, mostly, you should not change the original data inside the fields file. But you can create custom fields fitting your data and handle them just like they were native ones. The fields file has several sections, which each define an aspect of fields processing. Quite often, you'll have to modify several sections to obtain the desired behaviour. We will only give a short description here, you should refer to the comments inside the default file for more detailed information. Field names should be lowercase alphabetic ASCII. [prefixes] A field becomes indexed (searchable) by having a prefix defined in this section. [stored] A field becomes stored (displayable inside results) by having its name listed in this section (typically with an empty value). [aliases] This section defines lists of synonyms for the canonical names used inside the [prefixes] and [stored] sections [queryaliases] This section also defines aliases for the canonic field names, with the difference that the substitution will only be used at query time, avoiding any possibility that the value would pick-up random metadata from documents. handler-specific sections Some input handlers may need specific configuration for handling fields. Only the email message handler currently has such a section (named [mail]). It allows indexing arbitrary email headers in addition to the ones indexed by default. Other such sections may appear in the future. Here follows a small example of a personal fields file. This would extract a specific email header and use it as a searchable field, with data displayable inside result lists. (Side note: as the email handler does no decoding on the values, only plain ascii headers can be indexed, and only the first occurrence will be used for headers that occur several times). [prefixes] # Index mailmytag contents (with the given prefix) mailmytag = XMTAG [stored] # Store mailmytag inside the document data record (so that it can be # displayed - as %(mailmytag) - in result lists). mailmytag = [queryaliases] filename = fn containerfilename = cfn [mail] # Extract the X-My-Tag mail header, and use it internally with the # mailmytag field name x-my-tag = mailmytag 5.4.3.1. Extended attributes in the fields file Recoll versions 1.19 and later process user extended file attributes as documents fields by default. 
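For example (a sketch; the attribute name and value are arbitrary, and the setfattr command comes from the Linux attr package), tagging a file with a user extended attribute could be done with:

    setfattr -n user.keywords -v "project alpha" ~/docs/report.pdf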
Attributes are processed as fields of the same name, after removing the user prefix on Linux. The [xattrtofields] section of the fields file allows specifying translations from extended attributes names to Recoll field names. An empty translation disables use of the corresponding attribute data. 5.4.4. The mimemap file mimemap specifies the file name extension to MIME type mappings. For file names without an extension, or with an unknown one, the system's file -i command will be executed to determine the MIME type (this can be switched off inside the main configuration file). The mappings can be specified on a per-subtree basis, which may be useful in some cases. Example: gaim logs have a .txt extension but should be handled specially, which is possible because they are usually all located in one place. The recoll_noindex mimemap variable has been moved to recoll.conf and renamed to noContentSuffixes, while keeping the same function, as of Recoll version 1.21. For older Recoll versions, see the documentation for noContentSuffixes but use recoll_noindex in mimemap. 5.4.5. The mimeconf file mimeconf specifies how the different MIME types are handled for indexing, and which icons are displayed in the recoll result lists. Changing the parameters in the [index] section is probably not a good idea except if you are a Recoll developer. The [icons] section allows you to change the icons which are displayed by recoll in the result lists (the values are the basenames of the png images inside the iconsdir directory (specified in recoll.conf). 5.4.6. The mimeview file mimeview specifies which programs are started when you click on an Open link in a result list. Ie: HTML is normally displayed using firefox, but you may prefer Konqueror, your openoffice.org program might be named oofice instead of openoffice etc. Changes to this file can be done by direct editing, or through the recoll GUI preferences dialog. If Use desktop preferences to choose document editor is checked in the Recoll GUI preferences, all mimeview entries will be ignored except the one labelled application/x-all (which is set to use xdg-open by default). In this case, the xallexcepts top level variable defines a list of MIME type exceptions which will be processed according to the local entries instead of being passed to the desktop. This is so that specific Recoll options such as a page number or a search string can be passed to applications that support them, such as the evince viewer. As for the other configuration files, the normal usage is to have a mimeview inside your own configuration directory, with just the non-default entries, which will override those from the central configuration file. All viewer definition entries must be placed under a [view] section. The keys in the file are normally MIME types. You can add an application tag to specialize the choice for an area of the filesystem (using a localfields specification in mimeconf). The syntax for the key is mimetype|tag The nouncompforviewmts entry, (placed at the top level, outside of the [view] section), holds a list of MIME types that should not be uncompressed before starting the viewer (if they are found compressed, ie: mydoc.doc.gz). The right side of each assignment holds a command to be executed for opening the file. The following substitutions are performed: o %D. Document date o %f. File name. This may be the name of a temporary file if it was necessary to create one (ie: to extract a subdocument from a container). o %i. 
5.4.7. The ptrans file

ptrans specifies query-time path translations. These can be useful in multiple cases. The file has a section for any index which needs translations, either the main one or additional query indexes. The sections are named with the Xapian index directory names. No slash character should exist at the end of the paths (all comparisons are textual). An example should make things sufficiently clear:

[/home/me/.recoll/xapiandb]
/this/directory/moved = /to/this/place

[/path/to/additional/xapiandb]
/server/volume1/docdir = /net/server/volume1/docdir
/server/volume2/docdir = /net/server/volume2/docdir

5.4.8. Examples of configuration adjustments

5.4.8.1. Adding an external viewer for a non-indexed type

Imagine that you have some kind of file which does not have indexable content, but for which you would like to have a functional Open link in the result list (when found by file name). The file names end in .blob and can be displayed by application blobviewer.

You need two entries in the configuration files for this to work:

o In $RECOLL_CONFDIR/mimemap (typically ~/.recoll/mimemap), add the following line:

  .blob = application/x-blobapp

  Note that the MIME type is made up here, and you could call it diesel/oil just the same.

o In $RECOLL_CONFDIR/mimeview under the [view] section, add:

  application/x-blobapp = blobviewer %f

  We are supposing that blobviewer wants a file name parameter here; you would use %u if it liked URLs better.

If you just wanted to change the application used by Recoll to display a MIME type which it already knows, you would just need to edit mimeview. The entries you add in your personal file override those in the central configuration, which you do not need to alter. mimeview can also be modified from the GUI.

5.4.8.2. Adding indexing support for a new file type

Let us now imagine that the above .blob files actually contain indexable text and that you know how to extract it with a command line program. Getting Recoll to index the files is easy. You need to perform the above alteration, and also to add data to the mimeconf file (typically in ~/.recoll/mimeconf):

o Under the [index] section, add the following line (more about the rclblob indexing script later):

  application/x-blobapp = exec rclblob

o Under the [icons] section, you should choose an icon to be displayed for the files inside the result lists. Icons are normally 64x64 pixels PNG files which live in /usr/[local/]share/recoll/images.
o Under the [categories] section, you should add the MIME type where it makes sense (you can also create a category). Categories may be used for filtering in advanced search. The rclblob handler should be an executable program or script which exists inside /usr/[local/]share/recoll/filters. It will be given a file name as argument and should output the text or html contents on the standard output. The filter programming section describes in more detail how to write an input handler. ---------------------------------------------------------------------- Prev Up 5.3. Building from source Home recoll-1.26.3/configure.ac0000644000175000017500000004454613566731615012324 00000000000000AC_INIT([Recoll], m4_esyscmd_s(cat VERSION)) AC_CONFIG_HEADERS([common/autoconfig.h]) AH_BOTTOM([#include "conf_post.h"]) AC_PREREQ(2.53) AC_CONFIG_SRCDIR(index/recollindex.cpp) AM_INIT_AUTOMAKE([1.10 no-define subdir-objects foreign]) AC_DISABLE_STATIC LT_INIT AC_CONFIG_MACRO_DIR([m4]) m4_include([m4/iconv.m4]) AM_ICONV INCICONV=$CPPFLAGS LIBICONV=$LTLIBICONV AC_PROG_CXX # AC_PROG_CXX used to set CXX to C when no compiler was found, but now it's # g++. So actually try to build a program to verify the compiler. if test C$CXX = C ; then AC_MSG_ERROR([C++ compiler needed. Please install one (ie: gnu g++)]) fi AC_LANG_PUSH([C++]) AC_TRY_LINK([],[], rcl_link_ok=yes, rcl_link_ok=no) if test "$rcl_link_ok" = "no" ; then AC_MSG_ERROR([No working C++ compiler was found]) fi AC_LANG_POP([C++]) AC_PROG_YACC AC_PROG_LIBTOOL AC_C_BIGENDIAN AC_SYS_LARGEFILE # OpenBSD needs sys/param.h for mount.h to compile AC_CHECK_HEADERS([sys/param.h, spawn.h]) AC_CHECK_FUNCS([posix_spawn setrlimit kqueue vsnprintf]) if test "x$ac_cv_func_posix_spawn" = xyes; then : AC_ARG_ENABLE(posix_spawn, AC_HELP_STRING([--enable-posix_spawn], [Enable the use of posix_spawn().]), posixSpawnEnabled=$enableval, posixSpawnEnabled=no) fi if test X$posixSpawnEnabled = Xyes ; then AC_DEFINE(USE_POSIX_SPAWN, 1, [Use posix_spawn()]) fi # Check for where to find unordered_map etc. AC_LANG_PUSH([C++]) AC_CHECK_HEADER(tr1/unordered_map,[AC_DEFINE([HAVE_TR1_UNORDERED], [],["Have tr1"])],[]) AC_CHECK_HEADER(unordered_map,[AC_DEFINE([HAVE_CXX0X_UNORDERED], [],["Have C++0x"])],[]) AC_TRY_COMPILE([ #include ],[ std::shared_ptr ptr; ], rcl_shared_ptr_std="1", rcl_shared_ptr_std="0") AC_TRY_COMPILE([ #include ],[ std::tr1::shared_ptr ptr; ], rcl_shared_ptr_tr1="1", rcl_shared_ptr_tr1="0") if test X$rcl_shared_ptr_std = X1; then AC_DEFINE(HAVE_SHARED_PTR_STD, [], [Has std::shared_ptr]) elif test X$rcl_shared_ptr_tr1 = X1; then AC_DEFINE(HAVE_SHARED_PTR_TR1, [], [Has std::tr1::shared_ptr]) fi AC_LANG_POP([C++]) AC_CHECK_HEADERS([sys/mount.h sys/statfs.h sys/statvfs.h sys/vfs.h], [], [], [#ifdef HAVE_SYS_PARAM_H # include #endif ]) # Use specific 'file' command ? (Useful on solaris to specify # /usr/local/bin/file instead of the system's which doesn't understand '-i' AC_ARG_WITH(file-command, AC_HELP_STRING([--with-file-command], [Specify version of 'file' command (ie: --with-file-command=/usr/local/bin/file)]), withFileCommand=$withval, withFileCommand=file) case $withFileCommand in file) AC_PATH_PROG(fileProg, file);; *) fileProg=$withFileCommand;; esac if test ! 
-x "$fileProg"; then AC_MSG_ERROR([$fileProg does not exist or is not executable]) fi AC_DEFINE_UNQUOTED(FILE_PROG, "$fileProg", [Path to the file program]) # Can't use Solaris standard 'file' command, it doesn't support -i AC_DEFINE(USE_SYSTEM_FILE_COMMAND, 1, [Enable using the system's 'file' command to id mime if we fail internally]) # Use aspell to provide spelling expansions ? # The default is yes. If we do find an aspell installation, we use it. Else # we do compile the aspell module using an internal copy of aspell.h # Only --with-aspell=no will completely disable aspell support AC_ARG_WITH(aspell, AC_HELP_STRING([--without-aspell], [Disable use of aspell spelling package to provide term expansion to other spellings]), withAspell=$withval, withAspell=yes) case $withAspell in no);; yes) AC_PATH_PROG(aspellProg, aspell) ;; *) # The argument should be the path to the aspell program aspellProg=$withAspell ;; esac if test X$withAspell != Xno ; then AC_DEFINE(RCL_USE_ASPELL, 1, [Compile the aspell interface]) if test X$aspellProg != X ; then aspellBase=`dirname $aspellProg` aspellBase=`dirname $aspellBase` AC_DEFINE_UNQUOTED(ASPELL_PROG, "$aspellProg", [Path to the aspell program]) if test -f $aspellBase/include/aspell.h ; then AC_DEFINE_UNQUOTED(ASPELL_INCLUDE, "$aspellBase/include/aspell.h", [Path to the aspell api include file]) else AC_MSG_NOTICE([aspell support enabled but aspell package not found. Compiling with internal aspell interface file]) AC_DEFINE(ASPELL_INCLUDE, ["aspell-local.h"]) fi else # aspell support enabled but no aspell install yet AC_MSG_NOTICE([aspell support enabled but aspell package not found. Compiling with internal aspell interface file]) AC_DEFINE(ASPELL_INCLUDE, ["aspell-local.h"]) fi fi if test -f /usr/include/sys/inotify.h -o -f /usr/include/linux/inotify.h; then inot_default=yes else inot_default=no fi # Real time monitoring with inotify AC_ARG_WITH(inotify, AC_HELP_STRING([--with-inotify], [Use inotify for almost real time indexing of modified files (the default is yes on Linux).]), withInotify=$withval, withInotify=$inot_default) if test X$withInotify != Xno ; then AC_MSG_NOTICE([enabled support for inotify monitoring]) AC_DEFINE(RCL_MONITOR, 1, [Real time monitoring option]) AC_DEFINE(RCL_USE_INOTIFY, 1, [Compile the inotify interface]) else AC_MSG_NOTICE([inotify not found, inotify monitoring disabled]) fi # Real time monitoring with FAM AC_ARG_WITH(fam, AC_HELP_STRING([--with-fam], [Use File Alteration Monitor for almost real time indexing of modified files. Give the fam/gamin library as argument (ie: /usr/lib/libfam.so) if configure does not find the right one.]), withFam=$withval, withFam=yes) if test X$withFam != Xno -a X$withInotify != Xno ; then AC_MSG_NOTICE([FAM support enabled but inotify support also enabled. Disabling FAM support and using inotify]) withFam=no fi famLib="" case $withFam in no);; yes) for dir in /usr/local/lib ${libdir};do if test -f $dir/libfam.so ; then famLib=$dir/libfam.so;break;fi done if test X$famLib = X ; then AC_MSG_NOTICE([FAM library not found, disabling FAM and real time indexing support]) withFam=no fi ;; *) # The argument should be the path to the fam library famLib=$withFam ;; esac if test X$withFam != Xno ; then AC_DEFINE(RCL_MONITOR, 1, [Real time monitoring option]) AC_DEFINE(RCL_USE_FAM, 1, [Compile the fam interface]) if test X$famLib != X ; then famLibDir=`dirname $famLib` famBase=`dirname $famLibDir` famBLib=`basename $famLib .so | sed -e s/lib//` if test ! 
-f $famBase/include/fam.h ; then AC_MSG_ERROR([fam.h not found in $famBase/include. Specify --with-fam=no to disable fam support]) fi LIBFAM="-L$famLibDir -l$famBLib" AC_MSG_NOTICE([fam library directive: $LIBFAM]) AC_DEFINE_UNQUOTED(FAM_INCLUDE, "$famBase/include/fam.h", [Path to the fam api include file]) else AC_MSG_ERROR([fam library not found]) fi fi # Enable use of threads in the indexing pipeline. # Disabled by default on OS X as this actually hurts performance. # Also disabled on Windows (which does not use configure, see autoconfig-win.h) case ${host_os} in darwin*) AC_ARG_ENABLE(idxthreads, [--enable-idxthreads Enable multithread indexing.], idxthreadsEnabled=$enableval, idxthreadsEnabled=no) ;; *) AC_ARG_ENABLE(idxthreads, [--disable-idxthreads Disable multithread indexing.], idxthreadsEnabled=$enableval, idxthreadsEnabled=yes) ;; esac AM_CONDITIONAL(NOTHREADS, [test X$idxthreadsEnabled = Xno]) if test X$idxthreadsEnabled = Xyes ; then AC_DEFINE(IDX_THREADS, 1, [Use multiple threads for indexing]) fi AC_ARG_ENABLE(testmains, AC_HELP_STRING([--enable-testmains], [Enable building small test drivers. These are not unit tests.]), buildtestmains=$enableval, buildtestmains=no) AM_CONDITIONAL([COND_TESTMAINS], [test "$buildtestmains" = yes]) # Enable CamelCase word splitting. This is optional because it causes # problems with phrases: with camelcase enabled, "MySQL manual" # will be matched by "MySQL manual" and "my sql manual" but not # "mysql manual" (which would need increased slack as manual is now at pos # 2 instead of 1 AC_ARG_ENABLE(camelcase, AC_HELP_STRING([--enable-camelcase], [Enable splitting camelCase words. This is not enabled by default as this makes phrase matches more difficult: you need to use matching case in the phrase query to get a match. Ie querying for "MySQL manual" and "my sql manual" are the same, but not the same as "mysql manual" (in phrases only and you could raise the phrase slack to get a match).]), camelcaseEnabled=$enableval, camelcaseEnabled=no) if test X$camelcaseEnabled = Xyes ; then AC_DEFINE(RCL_SPLIT_CAMELCASE, 1, [Split camelCase words]) fi # Disable building the python module. AC_ARG_ENABLE(python-module, AC_HELP_STRING([--disable-python-module], [Do not build the Python module.]), pythonEnabled=$enableval, pythonEnabled=yes) AM_CONDITIONAL(MAKEPYTHON, [test X$pythonEnabled = Xyes]) # Disable building the libchm python wrapper AC_ARG_ENABLE(python-chm, AC_HELP_STRING([--disable-python-chm], [Do not build the libchm Python wrapper.]), pythonChmEnabled=$enableval, pythonChmEnabled=yes) if test X$pythonChmEnabled = Xyes; then AC_CHECK_LIB([chm], [chm_resolve_object], [], [AC_MSG_ERROR([--enable-python-chm is set but libchm is not found])]) fi AM_CONDITIONAL(MAKEPYTHONCHM, [test X$pythonChmEnabled = Xyes]) AC_CHECK_FUNCS(mkdtemp) AC_CHECK_LIB([pthread], [pthread_create], [], []) AC_SEARCH_LIBS([dlopen], [dl], [], []) if test X$ac_cv_search_function != Xno ; then AC_DEFINE(HAVE_DLOPEN, 1, [dlopen function is available]) fi AC_CHECK_LIB([z], [zlibVersion], [], []) ############# Putenv AC_MSG_CHECKING(for type of string parameter to putenv) AC_LANG_PUSH([C++]) AC_TRY_COMPILE([ #include ],[ putenv((const char *)0); ], rcl_putenv_string_const="1", rcl_putenv_string_const="0") if test X$rcl_putenv_string_const = X1 ; then AC_DEFINE(PUTENV_ARG_CONST, 1, [putenv parameter is const]) fi AC_LANG_POP([C++]) #### Look for Xapian. 
Done in a strange way to work around autoconf # cache XAPIAN_CONFIG=${XAPIAN_CONFIG:-no} if test "$XAPIAN_CONFIG" = "no"; then AC_PATH_PROG(XAPIAN_CONFIG0, [xapian-config], no) XAPIAN_CONFIG=$XAPIAN_CONFIG0 fi if test "$XAPIAN_CONFIG" = "no"; then AC_PATH_PROG(XAPIAN_CONFIG1, [xapian-config-1.3], no) XAPIAN_CONFIG=$XAPIAN_CONFIG1 fi if test "$XAPIAN_CONFIG" = "no"; then AC_PATH_PROG(XAPIAN_CONFIG2, [xapian-config-1.1], no) XAPIAN_CONFIG=$XAPIAN_CONFIG2 fi if test "$XAPIAN_CONFIG" = "no" ; then AC_MSG_ERROR([Cannot find xapian-config command in $PATH. Is xapian-core installed ?]) exit 1 fi LIBXAPIAN=`$XAPIAN_CONFIG --libs` # The --static thing fails with older Xapians. Happily enough they don't # need it either (because there are no needed libraries (no uuid and we # deal explicitly with libz) LIBXAPIANSTATICEXTRA=`$XAPIAN_CONFIG --static --libs 2> /dev/null` # Workaround for problem in xapian-config in some versions: wrongly lists # libstdc++.la in the lib list for i in $LIBXAPIAN ; do case $i in *stdc++*|-lm|-lgcc_s|-lc);; *) tmpxaplib="$tmpxaplib $i";; esac done LIBXAPIAN=$tmpxaplib LIBXAPIANDIR=`$XAPIAN_CONFIG --libs | awk '{print $1}'` case A"$LIBXAPIANDIR" in A-L*) LIBXAPIANDIR=`echo $LIBXAPIANDIR | sed -e 's/-L//'`;; *) LIBXAPIANDIR="";; esac XAPIANCXXFLAGS=`$XAPIAN_CONFIG --cxxflags` #echo XAPIAN_CONFIG: $XAPIAN_CONFIG #echo LIBXAPIAN: $LIBXAPIAN #echo LIBXAPIANDIR: $LIBXAPIANDIR #echo LIBXAPIANSTATICEXTRA: $LIBXAPIANSTATICEXTRA #echo XAPIANCXXFLAGS: $XAPIANCXXFLAGS XSLT_CONFIG=${XSLT_CONFIG:-no} if test "$XSLT_CONFIG" = "no"; then AC_PATH_PROG(XSLT_CONFIG0, [xslt-config], no) XSLT_CONFIG=$XSLT_CONFIG0 fi if test "$XSLT_CONFIG" = "no" ; then AC_MSG_ERROR([Cannot find xslt-config command in $PATH. Is libxslt installed ?]) exit 1 fi XSLT_CFLAGS=`xslt-config --cflags` XSLT_LINKADD=`xslt-config --libs` AC_ARG_ENABLE(xadump, AC_HELP_STRING([--enable-xadump], [Enable building the xadump low level Xapian access program.]), enableXADUMP=$enableval, enableXADUMP="no") AM_CONDITIONAL(MAKEXADUMP, [test X$enableXADUMP = Xyes]) AC_ARG_ENABLE(userdoc, AC_HELP_STRING([--disable-userdoc], [Disable building the user manual. (Avoids the need for docbook xml/xsl files and TeX tools.]), enableUserdoc=$enableval, enableUserdoc="yes") AM_CONDITIONAL(MAKEUSERDOC, [test X$enableUserdoc = Xyes]) #### QT # The way qt and its tools (qmake especially) are installed is very # different between systems (and maybe qt versions) # # In general we need QTDIR to be set, because it is used inside the # qmake-generated makefiles. But there are exceptions: ie on debian3.1 (at # least on the sourceforge compile farm), QTDIR is not needed because qmake # generates hard paths (and is installed in /usr/bin). We don't want to # force the user to set QTDIR if it is not needed. # # The logic is then to first look for qmake, possibly using QTDIR if it is # set. # # If QTDIR is not set, we then generate a bogus qt project and check if # QTDIR is needed in the Makefile, in which case we complain. # # QMAKESPEC: on most Linux system, there is a 'default' link inside the # mkspecs directory, so that QMAKESPEC is not needed. # If QMAKESPEC is not set and needed, the qmake test at the previous test # will have failed, and we tell the user to check his environment. 
# AC_ARG_ENABLE(qtgui, AC_HELP_STRING([--disable-qtgui], [Disable the QT-based graphical user interface.]), enableQT=$enableval, enableQT="yes") AM_CONDITIONAL(MAKEQT, [test X$enableQT = Xyes]) AC_ARG_ENABLE(recollq, AC_HELP_STRING([--enable-recollq], [Enable building the recollq command line query tool (recoll -t without need for Qt). This is done by default if --disable-qtgui is set but this option enables forcing it.]), enableRECOLLQ=$enableval, enableRECOLLQ="no") if test X"$enableRECOLLQ" != X ; then AM_CONDITIONAL(MAKECMDLINE, [test X$enableRECOLLQ = Xyes]) else AM_CONDITIONAL(MAKECMDLINE, [test X$enableQT = Xno]) fi if test X$enableQT = Xyes ; then if test X$QTDIR != X ; then PATH=$PATH:$QTDIR/bin export PATH fi if test X$QMAKE = X ; then QMAKE=qmake fi case $QMAKE in */*) QMAKEPATH=$QMAKE;; *) AC_PATH_PROG([QMAKEPATH], $QMAKE, NOTFOUND);; esac if test X$QMAKEPATH = XNOTFOUND ; then AC_MSG_ERROR([Cannot find the qmake program. Maybe you need to install qt development files and tools and/or set the QTDIR environment variable?]) fi QMAKE=$QMAKEPATH # Check Qt version qmakevers="`${QMAKE} --version 2>&1`" #echo "qmake version: $qmakevers" v4=`expr "$qmakevers" : '.*Qt[ ][ ]*version[ ][ ]*4.*'` v5=`expr "$qmakevers" : '.*Qt[ ][ ]*version[ ][ ]*5.*'` if test X$v4 = X0 -a X$v5 = X0; then AC_MSG_ERROR([Bad qt/qmake version string (not 4 or 5?): $qmakevers]) else if test X$v4 != X0 ; then AC_MSG_NOTICE([using qt version 4 user interface]) else AC_MSG_NOTICE([using qt version 5 user interface]) fi QTGUI=qtgui fi ##### Using Qt webkit for reslist display? Else Qt textbrowser AC_ARG_ENABLE(webkit, AC_HELP_STRING([--disable-webkit], [Disable use of qt-webkit (only meaningful if qtgui is enabled).]), enableWebkit=$enableval, enableWebkit="yes") if test "$enableWebkit" = "yes" ; then QMAKE_ENABLE_WEBKIT="" QMAKE_DISABLE_WEBKIT="#" else QMAKE_ENABLE_WEBKIT="#" QMAKE_DISABLE_WEBKIT="" fi AC_ARG_ENABLE(webengine, AC_HELP_STRING([--enable-webengine], [Enable use of qt-webengine (only meaningful if qtgui is enabled), in place or qt-webkit.]), enableWebengine=$enableval, enableWebengine="no") if test "$enableWebengine" = "yes" ; then QMAKE_ENABLE_WEBENGINE="" QMAKE_DISABLE_WEBENGINE="#" QMAKE_ENABLE_WEBKIT="#" QMAKE_DISABLE_WEBKIT="" else QMAKE_ENABLE_WEBENGINE="#" QMAKE_DISABLE_WEBENGINE="" fi ##### Using QZeitGeist lib ? 
Default no for now AC_ARG_WITH(qzeitgeist, AC_HELP_STRING([--with-qzeitgeist], [Enable the use of the qzeitgeist library to send zeitgeist events.]), withQZeitgeist=$withval, withQZeitgeist="no") case "$withQZeitgeist" in no) LIBQZEITGEIST=;; yes) LIBQZEITGEIST=-lqzeitgeist;; *) LIBQZEITGEIST=$withQZeitgeist;; esac if test "$withQZeitgeist" != "no" ; then QMAKE_ENABLE_ZEITGEIST="" QMAKE_DISABLE_ZEITGEIST="#" else QMAKE_ENABLE_ZEITGEIST="#" QMAKE_DISABLE_ZEITGEIST="" fi AC_CONFIG_FILES($QTGUI/recoll.pro) ##################### End QT stuff fi ### X11: this is needed for the session monitoring code (in recollindex -m) AC_ARG_ENABLE(x11mon, AC_HELP_STRING([--disable-x11mon], [Disable recollindex support for X11 session monitoring.]), enableX11mon=$enableval, enableX11mon="yes") if test X$withInotify = Xno -a X$withFam = Xno ; then enableX11mon=no fi if test "$enableX11mon" = "yes" ; then AC_PATH_XTRA X_LIBX11=-lX11 else AC_DEFINE(DISABLE_X11MON, 1, [No X11 session monitoring support]) X_LIBX11="" fi #echo X_CFLAGS "'$X_CFLAGS'" X_PRE_LIBS "'$X_PRE_LIBS'" X_LIBS \ # "'$X_LIBS'" X_LIBX11 "'$X_LIBX11'" X_EXTRA_LIBS "'$X_EXTRA_LIBS'" # For communicating the value of RECOLL_DATADIR to non-make-based # subpackages like python-recoll, we have to expand prefix in here, because # things like "datadir = ${prefix}/share" (which is what we'd get by # expanding @datadir@) don't mean a thing in Python... I guess we could # have a piece of shell-script text to be substituted into and executed by # setup.py for getting the value of pkgdatadir, but really... m_prefix=$prefix test "X$m_prefix" = "XNONE" && m_prefix=/usr/local m_datadir=${m_prefix}/share RECOLL_DATADIR=${m_datadir}/recoll RCLVERSION=$PACKAGE_VERSION RCLLIBVERSION=$RCLVERSION AC_SUBST(RECOLL_DATADIR) AC_SUBST(X_CFLAGS) AC_SUBST(X_PRE_LIBS) AC_SUBST(X_LIBS) AC_SUBST(X_LIBX11) AC_SUBST(X_EXTRA_LIBS) AC_SUBST(INCICONV) AC_SUBST(LIBICONV) AC_SUBST(LIBXAPIAN) AC_SUBST(LIBXAPIANDIR) AC_SUBST(LIBXAPIANSTATICEXTRA) AC_SUBST(LIBFAM) AC_SUBST(QMAKE) AC_SUBST(QTGUI) AC_SUBST(XAPIANCXXFLAGS) AC_SUBST(QMAKE_ENABLE_WEBKIT) AC_SUBST(QMAKE_DISABLE_WEBKIT) AC_SUBST(QMAKE_ENABLE_WEBENGINE) AC_SUBST(QMAKE_DISABLE_WEBENGINE) AC_SUBST(QMAKE_ENABLE_ZEITGEIST) AC_SUBST(QMAKE_DISABLE_ZEITGEIST) AC_SUBST(LIBQZEITGEIST) AC_SUBST(RCLVERSION) AC_SUBST(RCLLIBVERSION) AC_SUBST(XSLT_CFLAGS) AC_SUBST(XSLT_LINKADD) AC_CONFIG_FILES([Makefile python/recoll/setup.py python/pychm/setup.py]) if test X$buildtestmains = Xyes ; then AC_CONFIG_FILES([testmains/Makefile]) fi AC_OUTPUT recoll-1.26.3/depcomp0000755000175000017500000005601613570165161011376 00000000000000#! /bin/sh # depcomp - compile a program generating dependencies as side-effects scriptversion=2013-05-30.07; # UTC # Copyright (C) 1999-2014 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2, or (at your option) # any later version. # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # You should have received a copy of the GNU General Public License # along with this program. If not, see . 
# As a special exception to the GNU General Public License, if you # distribute this file as part of a program that contains a # configuration script generated by Autoconf, you may include it under # the same distribution terms that you use for the rest of that program. # Originally written by Alexandre Oliva . case $1 in '') echo "$0: No command. Try '$0 --help' for more information." 1>&2 exit 1; ;; -h | --h*) cat <<\EOF Usage: depcomp [--help] [--version] PROGRAM [ARGS] Run PROGRAMS ARGS to compile a file, generating dependencies as side-effects. Environment variables: depmode Dependency tracking mode. source Source file read by 'PROGRAMS ARGS'. object Object file output by 'PROGRAMS ARGS'. DEPDIR directory where to store dependencies. depfile Dependency file to output. tmpdepfile Temporary file to use when outputting dependencies. libtool Whether libtool is used (yes/no). Report bugs to . EOF exit $? ;; -v | --v*) echo "depcomp $scriptversion" exit $? ;; esac # Get the directory component of the given path, and save it in the # global variables '$dir'. Note that this directory component will # be either empty or ending with a '/' character. This is deliberate. set_dir_from () { case $1 in */*) dir=`echo "$1" | sed -e 's|/[^/]*$|/|'`;; *) dir=;; esac } # Get the suffix-stripped basename of the given path, and save it the # global variable '$base'. set_base_from () { base=`echo "$1" | sed -e 's|^.*/||' -e 's/\.[^.]*$//'` } # If no dependency file was actually created by the compiler invocation, # we still have to create a dummy depfile, to avoid errors with the # Makefile "include basename.Plo" scheme. make_dummy_depfile () { echo "#dummy" > "$depfile" } # Factor out some common post-processing of the generated depfile. # Requires the auxiliary global variable '$tmpdepfile' to be set. aix_post_process_depfile () { # If the compiler actually managed to produce a dependency file, # post-process it. if test -f "$tmpdepfile"; then # Each line is of the form 'foo.o: dependency.h'. # Do two passes, one to just change these to # $object: dependency.h # and one to simply output # dependency.h: # which is needed to avoid the deleted-header problem. { sed -e "s,^.*\.[$lower]*:,$object:," < "$tmpdepfile" sed -e "s,^.*\.[$lower]*:[$tab ]*,," -e 's,$,:,' < "$tmpdepfile" } > "$depfile" rm -f "$tmpdepfile" else make_dummy_depfile fi } # A tabulation character. tab=' ' # A newline character. nl=' ' # Character ranges might be problematic outside the C locale. # These definitions help. upper=ABCDEFGHIJKLMNOPQRSTUVWXYZ lower=abcdefghijklmnopqrstuvwxyz digits=0123456789 alpha=${upper}${lower} if test -z "$depmode" || test -z "$source" || test -z "$object"; then echo "depcomp: Variables source, object and depmode must be set" 1>&2 exit 1 fi # Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po. depfile=${depfile-`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} rm -f "$tmpdepfile" # Avoid interferences from the environment. gccflag= dashmflag= # Some modes work just like other modes, but use different flags. We # parameterize here, but still list the modes in the big case below, # to make depend.m4 easier to write. Note that we *cannot* use a case # here, because this file can only contain one case statement. if test "$depmode" = hp; then # HP compiler uses -M and no extra arg. 
gccflag=-M depmode=gcc fi if test "$depmode" = dashXmstdout; then # This is just like dashmstdout with a different argument. dashmflag=-xM depmode=dashmstdout fi cygpath_u="cygpath -u -f -" if test "$depmode" = msvcmsys; then # This is just like msvisualcpp but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvisualcpp fi if test "$depmode" = msvc7msys; then # This is just like msvc7 but w/o cygpath translation. # Just convert the backslash-escaped backslashes to single forward # slashes to satisfy depend.m4 cygpath_u='sed s,\\\\,/,g' depmode=msvc7 fi if test "$depmode" = xlc; then # IBM C/C++ Compilers xlc/xlC can output gcc-like dependency information. gccflag=-qmakedep=gcc,-MF depmode=gcc fi case "$depmode" in gcc3) ## gcc 3 implements dependency tracking that does exactly what ## we want. Yay! Note: for some reason libtool 1.4 doesn't like ## it if -MD -MP comes after the -MF stuff. Hmm. ## Unfortunately, FreeBSD c89 acceptance of flags depends upon ## the command line argument order; so add the flags where they ## appear in depend2.am. Note that the slowdown incurred here ## affects only configure: in makefiles, %FASTDEP% shortcuts this. for arg do case $arg in -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; *) set fnord "$@" "$arg" ;; esac shift # fnord shift # $arg done "$@" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi mv "$tmpdepfile" "$depfile" ;; gcc) ## Note that this doesn't just cater to obsosete pre-3.x GCC compilers. ## but also to in-use compilers like IMB xlc/xlC and the HP C compiler. ## (see the conditional assignment to $gccflag above). ## There are various ways to get dependency output from gcc. Here's ## why we pick this rather obscure method: ## - Don't want to use -MD because we'd like the dependencies to end ## up in a subdir. Having to rename by hand is ugly. ## (We might end up doing this anyway to support other compilers.) ## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like ## -MM, not -M (despite what the docs say). Also, it might not be ## supported by the other compilers which use the 'gcc' depmode. ## - Using -M directly means running the compiler twice (even worse ## than renaming). if test -z "$gccflag"; then gccflag=-MD, fi "$@" -Wp,"$gccflag$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The second -e expression handles DOS-style file names with drive # letters. sed -e 's/^[^:]*: / /' \ -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" ## This next piece of magic avoids the "deleted header file" problem. ## The problem is that when a header file which appears in a .P file ## is deleted, the dependency causes make to die (because there is ## typically no way to rebuild the header). We avoid this by adding ## dummy dependencies for each header file. Too bad gcc doesn't do ## this for us directly. ## Some versions of gcc put a space before the ':'. On the theory ## that the space means something, we add a space to the output as ## well. hp depmode also adds that space, but also prefixes the VPATH ## to the object. Take care to not repeat it in the output. ## Some versions of the HPUX 10.20 sed can't process this invocation ## correctly. Breaking it into two sed invocations is a workaround. 
tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; sgi) if test "$libtool" = yes; then "$@" "-Wp,-MDupdate,$tmpdepfile" else "$@" -MDupdate "$tmpdepfile" fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files echo "$object : \\" > "$depfile" # Clip off the initial element (the dependent). Don't try to be # clever and replace this with sed code, as IRIX sed won't handle # lines with more than a fixed number of characters (4096 in # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; # the IRIX cc adds comments like '#:fec' to the end of the # dependency line. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' \ | tr "$nl" ' ' >> "$depfile" echo >> "$depfile" # The second pass generates a dummy entry for each header file. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" ;; xlc) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; aix) # The C for AIX Compiler uses -M and outputs the dependencies # in a .u file. In older versions, this file always lives in the # current directory. Also, the AIX compiler puts '$object:' at the # start of each line; $object doesn't have directory information. # Version 6 uses the directory in both cases. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.u tmpdepfile2=$base.u tmpdepfile3=$dir.libs/$base.u "$@" -Wc,-M else tmpdepfile1=$dir$base.u tmpdepfile2=$dir$base.u tmpdepfile3=$dir$base.u "$@" -M fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done aix_post_process_depfile ;; tcc) # tcc (Tiny C Compiler) understand '-MD -MF file' since version 0.9.26 # FIXME: That version still under development at the moment of writing. # Make that this statement remains true also for stable, released # versions. # It will wrap lines (doesn't matter whether long or short) with a # trailing '\', as in: # # foo.o : \ # foo.c \ # foo.h \ # # It will put a trailing '\' even on the last line, and will use leading # spaces rather than leading tabs (at least since its commit 0394caf7 # "Emit spaces for -MD"). "$@" -MD -MF "$tmpdepfile" stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each non-empty line is of the form 'foo.o : \' or ' dep.h \'. # We have to change lines of the first kind to '$object: \'. sed -e "s|.*:|$object :|" < "$tmpdepfile" > "$depfile" # And for each line of the second kind, we have to emit a 'dep.h:' # dummy dependency, to avoid the deleted-header problem. sed -n -e 's|^ *\(.*\) *\\$|\1:|p' < "$tmpdepfile" >> "$depfile" rm -f "$tmpdepfile" ;; ## The order of this option in the case statement is important, since the ## shell code in configure will try each of these formats in the order ## listed in this file. 
A plain '-MD' option would be understood by many ## compilers, so we must ensure this comes after the gcc and icc options. pgcc) # Portland's C compiler understands '-MD'. # Will always output deps to 'file.d' where file is the root name of the # source file under compilation, even if file resides in a subdirectory. # The object file name does not affect the name of the '.d' file. # pgcc 10.2 will output # foo.o: sub/foo.c sub/foo.h # and will wrap long lines using '\' : # foo.o: sub/foo.c ... \ # sub/foo.h ... \ # ... set_dir_from "$object" # Use the source, not the object, to determine the base name, since # that's sadly what pgcc will do too. set_base_from "$source" tmpdepfile=$base.d # For projects that build the same source file twice into different object # files, the pgcc approach of using the *source* file root name can cause # problems in parallel builds. Use a locking strategy to avoid stomping on # the same $tmpdepfile. lockdir=$base.d-lock trap " echo '$0: caught signal, cleaning up...' >&2 rmdir '$lockdir' exit 1 " 1 2 13 15 numtries=100 i=$numtries while test $i -gt 0; do # mkdir is a portable test-and-set. if mkdir "$lockdir" 2>/dev/null; then # This process acquired the lock. "$@" -MD stat=$? # Release the lock. rmdir "$lockdir" break else # If the lock is being held by a different process, wait # until the winning process is done or we timeout. while test -d "$lockdir" && test $i -gt 0; do sleep 1 i=`expr $i - 1` done fi i=`expr $i - 1` done trap - 1 2 13 15 if test $i -le 0; then echo "$0: failed to acquire lock after $numtries attempts" >&2 echo "$0: check lockdir '$lockdir'" >&2 exit 1 fi if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" # Each line is of the form `foo.o: dependent.h', # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. # Do two passes, one to just change these to # `$object: dependent.h' and one to simply `dependent.h:'. sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this invocation # correctly. Breaking it into two sed invocations is a workaround. sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; hp2) # The "hp" stanza above does not work with aCC (C++) and HP's ia64 # compilers, which have integrated preprocessors. The correct option # to use with these is +Maked; it writes dependencies to a file named # 'foo.d', which lands next to the object file, wherever that # happens to be. # Much of this is similar to the tru64 case; see comments there. set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then tmpdepfile1=$dir$base.d tmpdepfile2=$dir.libs/$base.d "$@" -Wc,+Maked else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d "$@" +Maked fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" do test -f "$tmpdepfile" && break done if test -f "$tmpdepfile"; then sed -e "s,^.*\.[$lower]*:,$object:," "$tmpdepfile" > "$depfile" # Add 'dependent.h:' lines. sed -ne '2,${ s/^ *// s/ \\*$// s/$/:/ p }' "$tmpdepfile" >> "$depfile" else make_dummy_depfile fi rm -f "$tmpdepfile" "$tmpdepfile2" ;; tru64) # The Tru64 compiler uses -MD to generate dependencies as a side # effect. 'cc -MD -o foo.o ...' puts the dependencies into 'foo.o.d'. # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put # dependencies in 'foo.d' instead, so we check for that too. # Subdirectories are respected. 
set_dir_from "$object" set_base_from "$object" if test "$libtool" = yes; then # Libtool generates 2 separate objects for the 2 libraries. These # two compilations output dependencies in $dir.libs/$base.o.d and # in $dir$base.o.d. We have to check for both files, because # one of the two compilations can be disabled. We should prefer # $dir$base.o.d over $dir.libs/$base.o.d because the latter is # automatically cleaned when .libs/ is deleted, while ignoring # the former would cause a distcleancheck panic. tmpdepfile1=$dir$base.o.d # libtool 1.5 tmpdepfile2=$dir.libs/$base.o.d # Likewise. tmpdepfile3=$dir.libs/$base.d # Compaq CCC V6.2-504 "$@" -Wc,-MD else tmpdepfile1=$dir$base.d tmpdepfile2=$dir$base.d tmpdepfile3=$dir$base.d "$@" -MD fi stat=$? if test $stat -ne 0; then rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" exit $stat fi for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" do test -f "$tmpdepfile" && break done # Same post-processing that is required for AIX mode. aix_post_process_depfile ;; msvc7) if test "$libtool" = yes; then showIncludes=-Wc,-showIncludes else showIncludes=-showIncludes fi "$@" $showIncludes > "$tmpdepfile" stat=$? grep -v '^Note: including file: ' "$tmpdepfile" if test $stat -ne 0; then rm -f "$tmpdepfile" exit $stat fi rm -f "$depfile" echo "$object : \\" > "$depfile" # The first sed program below extracts the file names and escapes # backslashes for cygpath. The second sed program outputs the file # name when reading, but also accumulates all include files in the # hold buffer in order to output them again at the end. This only # works with sed implementations that can handle large buffers. sed < "$tmpdepfile" -n ' /^Note: including file: *\(.*\)/ { s//\1/ s/\\/\\\\/g p }' | $cygpath_u | sort -u | sed -n ' s/ /\\ /g s/\(.*\)/'"$tab"'\1 \\/p s/.\(.*\) \\/\1:/ H $ { s/.*/'"$tab"'/ G p }' >> "$depfile" echo >> "$depfile" # make sure the fragment doesn't end with a backslash rm -f "$tmpdepfile" ;; msvc7msys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; #nosideeffect) # This comment above is used by automake to tell side-effect # dependency tracking mechanisms from slower ones. dashmstdout) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout, regardless of -o. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done test -z "$dashmflag" && dashmflag=-M # Require at least two characters before searching for ':' # in the target name. This is to cope with DOS-style filenames: # a dependency such as 'c:/foo/bar' could be seen as target 'c' otherwise. "$@" $dashmflag | sed "s|^[$tab ]*[^:$tab ][^:][^:]*:[$tab ]*|$object: |" > "$tmpdepfile" rm -f "$depfile" cat < "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process this sed invocation # correctly. Breaking it into two sed invocations is a workaround. tr ' ' "$nl" < "$tmpdepfile" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; dashXmstdout) # This case only exists to satisfy depend.m4. It is never actually # run, as this mode is specially recognized in the preamble. exit 1 ;; makedepend) "$@" || exit $? 
# Remove any Libtool call if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # X makedepend shift cleared=no eat=no for arg do case $cleared in no) set ""; shift cleared=yes ;; esac if test $eat = yes; then eat=no continue fi case "$arg" in -D*|-I*) set fnord "$@" "$arg"; shift ;; # Strip any option that makedepend may not understand. Remove # the object too, otherwise makedepend will parse it as a source file. -arch) eat=yes ;; -*|$object) ;; *) set fnord "$@" "$arg"; shift ;; esac done obj_suffix=`echo "$object" | sed 's/^.*\././'` touch "$tmpdepfile" ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" rm -f "$depfile" # makedepend may prepend the VPATH from the source file name to the object. # No need to regex-escape $object, excess matching of '.' is harmless. sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" # Some versions of the HPUX 10.20 sed can't process the last invocation # correctly. Breaking it into two sed invocations is a workaround. sed '1,2d' "$tmpdepfile" \ | tr ' ' "$nl" \ | sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' \ | sed -e 's/$/ :/' >> "$depfile" rm -f "$tmpdepfile" "$tmpdepfile".bak ;; cpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi # Remove '-o $object'. IFS=" " for arg do case $arg in -o) shift ;; $object) shift ;; *) set fnord "$@" "$arg" shift # fnord shift # $arg ;; esac done "$@" -E \ | sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ | sed '$ s: \\$::' > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" cat < "$tmpdepfile" >> "$depfile" sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" rm -f "$tmpdepfile" ;; msvisualcpp) # Important note: in order to support this mode, a compiler *must* # always write the preprocessed file to stdout. "$@" || exit $? # Remove the call to Libtool. if test "$libtool" = yes; then while test "X$1" != 'X--mode=compile'; do shift done shift fi IFS=" " for arg do case "$arg" in -o) shift ;; $object) shift ;; "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") set fnord "$@" shift shift ;; *) set fnord "$@" "$arg" shift shift ;; esac done "$@" -E 2>/dev/null | sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" rm -f "$depfile" echo "$object : \\" > "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::'"$tab"'\1 \\:p' >> "$depfile" echo "$tab" >> "$depfile" sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" rm -f "$tmpdepfile" ;; msvcmsys) # This case exists only to let depend.m4 do its work. It works by # looking at the text of this script. This case will never be run, # since it is checked for above. exit 1 ;; none) exec "$@" ;; *) echo "Unknown depmode $depmode" 1>&2 exit 1 ;; esac exit 0 # Local Variables: # mode: shell-script # sh-indentation: 2 # eval: (add-hook 'write-file-hooks 'time-stamp) # time-stamp-start: "scriptversion=" # time-stamp-format: "%:y-%02m-%02d.%02H" # time-stamp-time-zone: "UTC" # time-stamp-end: "; # UTC" # End: recoll-1.26.3/xaposix/0000755000175000017500000000000013570165410011561 500000000000000recoll-1.26.3/xaposix/safesyswait.h0000644000175000017500000000244513303776060014224 00000000000000/** @file safesyswait.h * @brief #include , with portability stuff. 
*/ /* Copyright (C) 2010 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #ifndef XAPIAN_INCLUDED_SAFESYSWAIT_H #define XAPIAN_INCLUDED_SAFESYSWAIT_H #ifndef __WIN32__ # include #else // We don't try to replace waitpid(), etc - they're only useful for us when // we can fork(). But it's handy to be able to use WIFEXITED() and // WEXITSTATUS(). # ifndef WIFEXITED # define WIFEXITED(STATUS) (STATUS != -1) # endif # ifndef WEXITSTATUS # define WEXITSTATUS(STATUS) (STATUS) # endif #endif #endif /* XAPIAN_INCLUDED_SAFESYSWAIT_H */ recoll-1.26.3/xaposix/safefcntl.h0000644000175000017500000000427313303776060013630 00000000000000/* safefcntl.h: #include , but working around broken platforms. * * Copyright (C) 2006,2007 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #ifndef XAPIAN_INCLUDED_SAFEFCNTL_H #define XAPIAN_INCLUDED_SAFEFCNTL_H #include #if defined __cplusplus && defined open // On some versions of Solaris, fcntl.h pollutes the namespace by #define-ing // "open" to "open64" when largefile support is enabled. This causes problems // if you have a method called "open" (other symbols are also #define-d // e.g. "creat" to "creat64", but only "open" is a problem for Xapian so // that's the only one we currently fix). #ifdef _MSC_VER // MSVC #define-s open but also defines a function called open, so just undef // the macro. # undef open #else inline int fcntl_open_(const char *filename, int flags, mode_t mode) { return open(filename, flags, mode); } inline int fcntl_open_(const char *filename, int flags) { return open(filename, flags); } #undef open inline int open(const char *filename, int flags, mode_t mode) { return fcntl_open_(filename, flags, mode); } inline int open(const char *filename, int flags) { return fcntl_open_(filename, flags); } #endif #endif // O_BINARY is only useful for platforms like Windows which distinguish between // text and binary files, but it's cleaner to define it to 0 here for other // platforms so we can avoid #ifdef where we need to use it in the code. 
#ifndef __WIN32__ # ifndef O_BINARY # define O_BINARY 0 # endif #endif #endif /* XAPIAN_INCLUDED_SAFEFCNTL_H */ recoll-1.26.3/xaposix/safeunistd.h0000644000175000017500000000465513303776060014034 00000000000000/* safeunistd.h: , but with compat. and large file support for MSVC. * * Copyright (C) 2007 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #ifndef XAPIAN_INCLUDED_SAFEUNISTD_H #define XAPIAN_INCLUDED_SAFEUNISTD_H #ifndef _MSC_VER # include #else // sys/types.h has a typedef for off_t so make sure we've seen that before // we hide it behind a #define. # include // MSVC doesn't even HAVE unistd.h - io.h seems the nearest equivalent. // We also need to do some renaming of functions to get versions which // work on large files. # include # ifdef lseek # undef lseek # endif # ifdef off_t # undef off_t # endif # define lseek(FD, OFF, WHENCE) _lseeki64(FD, OFF, WHENCE) # define off_t __int64 // process.h is needed for getpid(). # include #endif #ifdef __WIN32__ #ifdef _MSC_VER /* Recent MinGW versions define this */ inline unsigned int sleep(unsigned int seconds) { // Use our own little helper function to avoid pulling in . extern void xapian_sleep_milliseconds(unsigned int millisecs); // Sleep takes a time interval in milliseconds, whereas POSIX sleep takes // a time interval in seconds, so we need to multiply 'seconds' by 1000. // // But make sure the multiplication won't overflow! 4294967 seconds is // nearly 50 days, so just sleep for that long and return the number of // seconds left to sleep for. The common case of sleep(CONSTANT) should // optimise to just xapian_sleep_milliseconds(CONSTANT). if (seconds > 4294967u) { xapian_sleep_milliseconds(4294967000u); return seconds - 4294967u; } xapian_sleep_milliseconds(seconds * 1000u); return 0; } #endif /* _MSC_VER*/ #endif /* __WIN32__ */ #endif /* XAPIAN_INCLUDED_SAFEUNISTD_H */ recoll-1.26.3/xaposix/safesysstat.h0000644000175000017500000000567513303776060014243 00000000000000/* safesysstat.h: #include , but enabling large file support. * * Copyright (C) 2007,2012 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #ifndef XAPIAN_INCLUDED_SAFESYSSTAT_H #define XAPIAN_INCLUDED_SAFESYSSTAT_H #include // For most platforms, AC_SYS_LARGEFILE enables support for large files at // configure time, but MSVC doesn't use configure so we have to put the // magic somewhere else - i.e. here! #ifdef _MSC_VER // MSVC needs to call _stati64() instead of stat() and the struct which holds // the information is "struct _stati64" instead of "struct stat" so we just // use #define to replace both in one go. We also want to use _fstati64() // instead of fstat() but in this case we can use a function-like macro. // // This hack is a problem is we ever want a method called "stat", or one called // fstat which takes 2 parameters, but we can probably live with these // limitations. #ifdef stat # undef stat #endif #ifdef fstat # undef fstat #endif // NB: _stati64 not _stat64 (the latter just returns a 64 bit timestamp). #define stat _stati64 #define fstat(FD, BUF) _fstati64(FD,BUF) #endif #ifdef __WIN32__ // MSVC lacks these POSIX macros and other compilers may too: #ifndef S_ISDIR # define S_ISDIR(ST_MODE) (((ST_MODE) & _S_IFMT) == _S_IFDIR) #endif #ifndef S_ISREG # define S_ISREG(ST_MODE) (((ST_MODE) & _S_IFMT) == _S_IFREG) #endif // On UNIX, mkdir() is prototyped in but on Windows it's in // , so just include that from here to avoid build failures on // MSVC just because of some new use of mkdir(). This also reduces the // number of conditionalised #include statements we need in the sources. #include // Add overloaded version of mkdir which takes an (ignored) mode argument // to allow source code to just specify a mode argument unconditionally. // // The () around mkdir are in case it's defined as a macro. inline int (mkdir)(const char *pathname, mode_t /*mode*/) { return _mkdir(pathname); } #else // These were specified by POSIX.1-1996, so most platforms should have // these by now: #ifndef S_ISDIR # define S_ISDIR(ST_MODE) (((ST_MODE) & S_IFMT) == S_IFDIR) #endif #ifndef S_ISREG # define S_ISREG(ST_MODE) (((ST_MODE) & S_IFMT) == S_IFREG) #endif #endif #endif /* XAPIAN_INCLUDED_SAFESYSSTAT_H */ recoll-1.26.3/python/0000755000175000017500000000000013570165410011407 500000000000000recoll-1.26.3/python/pychm/0000755000175000017500000000000013570165410012527 500000000000000recoll-1.26.3/python/pychm/COPYING0000644000175000017500000003542713533651561013523 00000000000000 GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc. 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. 
Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. 
You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
END OF TERMS AND CONDITIONS recoll-1.26.3/python/pychm/MANIFEST.in0000644000175000017500000000004713533651561014214 00000000000000include COPYING include chm/swig_chm.i recoll-1.26.3/python/pychm/recollchm/0000755000175000017500000000000013533651561014505 500000000000000recoll-1.26.3/python/pychm/recollchm/chm.py0000644000175000017500000005027613533651561015560 00000000000000# Copyright (C) 2003-2006 Rubens Ramos # # Based on code by: # Copyright (C) 2003 Razvan Cojocaru # # pychm is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program; see the file COPYING. If not, # write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, # Boston, MA 02110-1301, USA ''' chm - A high-level front end for the chmlib python module. The chm module provides high level access to the functionality included in chmlib. It encapsulates functions in the CHMFile class, and provides some additional features, such as the ability to obtain the contents tree of a CHM archive. ''' from . import chmlib from . import extra import array import os.path import sys charset_table = { 0: 'iso8859_1', # ANSI_CHARSET 238: 'iso8859_2', # EASTEUROPE_CHARSET 178: 'iso8859_6', # ARABIC_CHARSET 161: 'iso8859_7', # GREEK_CHARSET 177: 'iso8859_8', # HEBREW_CHARSET 162: 'iso8859_9', # TURKISH_CHARSET 222: 'iso8859_11', # THAI_CHARSET - hmm not in python 2.2... 
186: 'iso8859_13', # BALTIC_CHARSET 204: 'cp1251', # RUSSIAN_CHARSET 255: 'cp437', # OEM_CHARSET 128: 'cp932', # SHIFTJIS_CHARSET 134: 'cp936', # GB2312_CHARSET 129: 'cp949', # HANGUL_CHARSET 136: 'cp950', # CHINESEBIG5_CHARSET 1: None, # DEFAULT_CHARSET 2: None, # SYMBOL_CHARSET 130: None, # JOHAB_CHARSET 163: None, # VIETNAMESE_CHARSET 77: None, # MAC_CHARSET } locale_table = { 0x0436: ('iso8859_1', "Afrikaans", "Western Europe & US"), 0x041c: ('iso8859_2', "Albanian", "Central Europe"), 0x0401: ('iso8859_6', "Arabic_Saudi_Arabia", "Arabic"), 0x0801: ('iso8859_6', "Arabic_Iraq", "Arabic"), 0x0c01: ('iso8859_6', "Arabic_Egypt", "Arabic"), 0x1001: ('iso8859_6', "Arabic_Libya", "Arabic"), 0x1401: ('iso8859_6', "Arabic_Algeria", "Arabic"), 0x1801: ('iso8859_6', "Arabic_Morocco", "Arabic"), 0x1c01: ('iso8859_6', "Arabic_Tunisia", "Arabic"), 0x2001: ('iso8859_6', "Arabic_Oman", "Arabic"), 0x2401: ('iso8859_6', "Arabic_Yemen", "Arabic"), 0x2801: ('iso8859_6', "Arabic_Syria", "Arabic"), 0x2c01: ('iso8859_6', "Arabic_Jordan", "Arabic"), 0x3001: ('iso8859_6', "Arabic_Lebanon", "Arabic"), 0x3401: ('iso8859_6', "Arabic_Kuwait", "Arabic"), 0x3801: ('iso8859_6', "Arabic_UAE", "Arabic"), 0x3c01: ('iso8859_6', "Arabic_Bahrain", "Arabic"), 0x4001: ('iso8859_6', "Arabic_Qatar", "Arabic"), 0x042b: (None, "Armenian", "Armenian"), 0x042c: ('iso8859_9', "Azeri_Latin", "Turkish"), 0x082c: ('cp1251', "Azeri_Cyrillic", "Cyrillic"), 0x042d: ('iso8859_1', "Basque", "Western Europe & US"), 0x0423: ('cp1251', "Belarusian", "Cyrillic"), 0x0402: ('cp1251', "Bulgarian", "Cyrillic"), 0x0403: ('iso8859_1', "Catalan", "Western Europe & US"), 0x0404: ('cp950', "Chinese_Taiwan", "Traditional Chinese"), 0x0804: ('cp936', "Chinese_PRC", "Simplified Chinese"), 0x0c04: ('cp950', "Chinese_Hong_Kong", "Traditional Chinese"), 0x1004: ('cp936', "Chinese_Singapore", "Simplified Chinese"), 0x1404: ('cp950', "Chinese_Macau", "Traditional Chinese"), 0x041a: ('iso8859_2', "Croatian", "Central Europe"), 0x0405: ('iso8859_2', "Czech", "Central Europe"), 0x0406: ('iso8859_1', "Danish", "Western Europe & US"), 0x0413: ('iso8859_1', "Dutch_Standard", "Western Europe & US"), 0x0813: ('iso8859_1', "Dutch_Belgian", "Western Europe & US"), 0x0409: ('iso8859_1', "English_United_States", "Western Europe & US"), 0x0809: ('iso8859_1', "English_United_Kingdom", "Western Europe & US"), 0x0c09: ('iso8859_1', "English_Australian", "Western Europe & US"), 0x1009: ('iso8859_1', "English_Canadian", "Western Europe & US"), 0x1409: ('iso8859_1', "English_New_Zealand", "Western Europe & US"), 0x1809: ('iso8859_1', "English_Irish", "Western Europe & US"), 0x1c09: ('iso8859_1', "English_South_Africa", "Western Europe & US"), 0x2009: ('iso8859_1', "English_Jamaica", "Western Europe & US"), 0x2409: ('iso8859_1', "English_Caribbean", "Western Europe & US"), 0x2809: ('iso8859_1', "English_Belize", "Western Europe & US"), 0x2c09: ('iso8859_1', "English_Trinidad", "Western Europe & US"), 0x3009: ('iso8859_1', "English_Zimbabwe", "Western Europe & US"), 0x3409: ('iso8859_1', "English_Philippines", "Western Europe & US"), 0x0425: ('iso8859_13', "Estonian", "Baltic",), 0x0438: ('iso8859_1', "Faeroese", "Western Europe & US"), 0x0429: ('iso8859_6', "Farsi", "Arabic"), 0x040b: ('iso8859_1', "Finnish", "Western Europe & US"), 0x040c: ('iso8859_1', "French_Standard", "Western Europe & US"), 0x080c: ('iso8859_1', "French_Belgian", "Western Europe & US"), 0x0c0c: ('iso8859_1', "French_Canadian", "Western Europe & US"), 0x100c: ('iso8859_1', "French_Swiss", "Western Europe & 
US"), 0x140c: ('iso8859_1', "French_Luxembourg", "Western Europe & US"), 0x180c: ('iso8859_1', "French_Monaco", "Western Europe & US"), 0x0437: (None, "Georgian", "Georgian"), 0x0407: ('iso8859_1', "German_Standard", "Western Europe & US"), 0x0807: ('iso8859_1', "German_Swiss", "Western Europe & US"), 0x0c07: ('iso8859_1', "German_Austrian", "Western Europe & US"), 0x1007: ('iso8859_1', "German_Luxembourg", "Western Europe & US"), 0x1407: ('iso8859_1', "German_Liechtenstein", "Western Europe & US"), 0x0408: ('iso8859_7', "Greek", "Greek"), 0x040d: ('iso8859_8', "Hebrew", "Hebrew"), 0x0439: (None, "Hindi", "Indic"), 0x040e: ('iso8859_2', "Hungarian", "Central Europe"), 0x040f: ('iso8859_1', "Icelandic", "Western Europe & US"), 0x0421: ('iso8859_1', "Indonesian", "Western Europe & US"), 0x0410: ('iso8859_1', "Italian_Standard", "Western Europe & US"), 0x0810: ('iso8859_1', "Italian_Swiss", "Western Europe & US"), 0x0411: ('cp932', "Japanese", "Japanese"), 0x043f: ('cp1251', "Kazakh", "Cyrillic"), 0x0457: (None, "Konkani", "Indic"), 0x0412: ('cp949', "Korean", "Korean"), 0x0426: ('iso8859_13', "Latvian", "Baltic",), 0x0427: ('iso8859_13', "Lithuanian", "Baltic",), 0x042f: ('cp1251', "Macedonian", "Cyrillic"), 0x043e: ('iso8859_1', "Malay_Malaysia", "Western Europe & US"), 0x083e: ('iso8859_1', "Malay_Brunei_Darussalam", "Western Europe & US"), 0x044e: (None, "Marathi", "Indic"), 0x0414: ('iso8859_1', "Norwegian_Bokmal", "Western Europe & US"), 0x0814: ('iso8859_1', "Norwegian_Nynorsk", "Western Europe & US"), 0x0415: ('iso8859_2', "Polish", "Central Europe"), 0x0416: ('iso8859_1', "Portuguese_Brazilian", "Western Europe & US"), 0x0816: ('iso8859_1', "Portuguese_Standard", "Western Europe & US"), 0x0418: ('iso8859_2', "Romanian", "Central Europe"), 0x0419: ('cp1251', "Russian", "Cyrillic"), 0x044f: (None, "Sanskrit", "Indic"), 0x081a: ('iso8859_2', "Serbian_Latin", "Central Europe"), 0x0c1a: ('cp1251', "Serbian_Cyrillic", "Cyrillic"), 0x041b: ('iso8859_2', "Slovak", "Central Europe"), 0x0424: ('iso8859_2', "Slovenian", "Central Europe"), 0x040a: ('iso8859_1', "Spanish_Trad_Sort", "Western Europe & US"), 0x080a: ('iso8859_1', "Spanish_Mexican", "Western Europe & US"), 0x0c0a: ('iso8859_1', "Spanish_Modern_Sort", "Western Europe & US"), 0x100a: ('iso8859_1', "Spanish_Guatemala", "Western Europe & US"), 0x140a: ('iso8859_1', "Spanish_Costa_Rica", "Western Europe & US"), 0x180a: ('iso8859_1', "Spanish_Panama", "Western Europe & US"), 0x1c0a: ('iso8859_1', "Spanish_Dominican_Repub", "Western Europe & US"), 0x200a: ('iso8859_1', "Spanish_Venezuela", "Western Europe & US"), 0x240a: ('iso8859_1', "Spanish_Colombia", "Western Europe & US"), 0x280a: ('iso8859_1', "Spanish_Peru", "Western Europe & US"), 0x2c0a: ('iso8859_1', "Spanish_Argentina", "Western Europe & US"), 0x300a: ('iso8859_1', "Spanish_Ecuador", "Western Europe & US"), 0x340a: ('iso8859_1', "Spanish_Chile", "Western Europe & US"), 0x380a: ('iso8859_1', "Spanish_Uruguay", "Western Europe & US"), 0x3c0a: ('iso8859_1', "Spanish_Paraguay", "Western Europe & US"), 0x400a: ('iso8859_1', "Spanish_Bolivia", "Western Europe & US"), 0x440a: ('iso8859_1', "Spanish_El_Salvador", "Western Europe & US"), 0x480a: ('iso8859_1', "Spanish_Honduras", "Western Europe & US"), 0x4c0a: ('iso8859_1', "Spanish_Nicaragua", "Western Europe & US"), 0x500a: ('iso8859_1', "Spanish_Puerto_Rico", "Western Europe & US"), 0x0441: ('iso8859_1', "Swahili", "Western Europe & US"), 0x041d: ('iso8859_1', "Swedish", "Western Europe & US"), 0x081d: ('iso8859_1', "Swedish_Finland", 
"Western Europe & US"), 0x0449: (None, "Tamil", "Indic"), 0x0444: ('cp1251', "Tatar", "Cyrillic"), 0x041e: ('iso8859_11', "Thai", "Thai"), 0x041f: ('iso8859_9', "Turkish", "Turkish"), 0x0422: ('cp1251', "Ukrainian", "Cyrillic"), 0x0420: ('iso8859_6', "Urdu", "Arabic"), 0x0443: ('iso8859_9', "Uzbek_Latin", "Turkish"), 0x0843: ('cp1251', "Uzbek_Cyrillic", "Cyrillic"), 0x042a: (None, "Vietnamese", "Vietnamese") } class CHMFile: "A class to manage access to CHM files." filename = "" file = None title = "" home = "/" index = None topics = None encoding = None lcid = None binaryindex = None def __init__(self): self.searchable = 0 def LoadCHM(self, archiveName): '''Loads a CHM archive. This function will also call GetArchiveInfo to obtain information such as the index file name and the topics file. It returns 1 on success, and 0 if it fails. ''' if self.filename is not None: self.CloseCHM() self.file = chmlib.chm_open(archiveName) if self.file is None: return 0 self.filename = archiveName self.GetArchiveInfo() return 1 def CloseCHM(self): '''Closes the CHM archive. This function will close the CHM file, if it is open. All variables are also reset. ''' if self.filename is not None: chmlib.chm_close(self.file) self.file = None self.filename = '' self.title = "" self.home = "/" self.index = None self.topics = None self.encoding = None def GetArchiveInfo(self): '''Obtains information on CHM archive. This function checks the /#SYSTEM file inside the CHM archive to obtain the index, home page, topics, encoding and title. It is called from LoadCHM. ''' self.searchable = extra.is_searchable(self.file) self.lcid = None result, ui = chmlib.chm_resolve_object(self.file, b'/#SYSTEM') if (result != chmlib.CHM_RESOLVE_SUCCESS): sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n') return 0 size, text = chmlib.chm_retrieve_object(self.file, ui, 4, ui.length) if (size == 0): sys.stderr.write('GetArchiveInfo: file size = 0\n') return 0 buff = array.array('B', text) index = 0 while (index < size): cursor = buff[index] + (buff[index+1] * 256) if (cursor == 0): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.topics = b'/' + text[index:index+cursor-1] elif (cursor == 1): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.index = b'/' + text[index:index+cursor-1] elif (cursor == 2): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.home = b'/' + text[index:index+cursor-1] elif (cursor == 3): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.title = text[index:index+cursor-1] elif (cursor == 4): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.lcid = buff[index] + (buff[index+1] * 256) elif (cursor == 6): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 tmp = text[index:index+cursor-1] if not self.topics: tmp1 = b'/' + tmp + b'.hhc' tmp2 = b'/' + tmp + b'.hhk' res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1) res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2) if not self.topics and res1 == chmlib.CHM_RESOLVE_SUCCESS: self.topics = b'/' + tmp + b'.hhc' if not self.index and res2 == chmlib.CHM_RESOLVE_SUCCESS: self.index = b'/' + tmp + b'.hhk' elif (cursor == 16): index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 self.encoding = text[index:index+cursor-1] else: index += 2 cursor = buff[index] + (buff[index+1] * 256) index += 2 index += cursor self.GetWindowsInfo() if not self.lcid: self.lcid = extra.get_lcid(self.file) return 1 def GetTopicsTree(self): '''Reads 
and returns the topics tree. This auxiliary function reads and returns the topics tree file contents for the CHM archive. ''' if self.topics is None: return None if self.topics: res, ui = chmlib.chm_resolve_object(self.file, self.topics) if (res != chmlib.CHM_RESOLVE_SUCCESS): return None size, text = chmlib.chm_retrieve_object(self.file, ui, 0, ui.length) if (size == 0): sys.stderr.write('GetTopicsTree: file size = 0\n') return None return text def GetIndex(self): '''Reads and returns the index tree. This auxiliary function reads and returns the index tree file contents for the CHM archive. ''' if self.index is None: return None if self.index: res, ui = chmlib.chm_resolve_object(self.file, self.index) if (res != chmlib.CHM_RESOLVE_SUCCESS): return None size, text = chmlib.chm_retrieve_object(self.file, ui, 0, ui.length) if (size == 0): sys.stderr.write('GetIndex: file size = 0\n') return None return text def ResolveObject(self, document): '''Tries to locate a document in the archive. This function tries to locate the document inside the archive. It returns a tuple where the first element is zero if the function was successful, and the second is the UnitInfo for that document. The UnitInfo is used to retrieve the document contents ''' if self.file: # path = os.path.abspath(document) # wtf?? the index contents # are independant of the os ! path = document return chmlib.chm_resolve_object(self.file, path) else: return (1, None) def RetrieveObject(self, ui, start=-1, length=-1): '''Retrieves the contents of a document. This function takes a UnitInfo and two optional arguments, the first being the start address and the second is the length. These define the amount of data to be read from the archive. ''' if self.file and ui: if length == -1: len = ui.length else: len = length if start == -1: st = 0 else: st = long(start) return chmlib.chm_retrieve_object(self.file, ui, st, len) else: return (0, '') def Search(self, text, wholewords=0, titleonly=0): '''Performs full-text search on the archive. The first parameter is the word to look for, the second indicates if the search should be for whole words only, and the third parameter indicates if the search should be restricted to page titles. This method will return a tuple, the first item indicating if the search results were partial, and the second item being a dictionary containing the results.''' if text and text != '' and self.file: return extra.search(self.file, text, wholewords, titleonly) else: return None def IsSearchable(self): '''Indicates if the full-text search is available for this archive - this flag is updated when GetArchiveInfo is called''' return self.searchable def GetEncoding(self): '''Returns a string that can be used with the codecs python package to encode or decode the files in the chm archive. If an error is found, or if it is not possible to find the encoding, None is returned.''' if self.encoding: vals = self.encoding.split(b',') if len(vals) > 2: try: return charset_table[int(vals[2])] except KeyError: pass return None def GetLCID(self): '''Returns the archive Locale ID''' if self.lcid in locale_table: return locale_table[self.lcid] else: return None def GetDWORD(self, buff, idx=0): '''Internal method. Reads a double word (4 bytes) from a buffer. ''' result = buff[idx] + (buff[idx+1] << 8) + (buff[idx+2] << 16) + \ (buff[idx+3] << 24) if result == 0xFFFFFFFF: result = 0 return result def GetString(self, text, idx): '''Internal method. Retrieves a string from the #STRINGS buffer. 
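For illustration only (the byte string below is a made-up stand-in for a
        real #STRINGS buffer; offset 4 points just past the first NUL):

            CHMFile().GetString(b'abc\x00def\x00', 4)   # -> b'def'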
''' next = text.find(b'\x00', idx) chunk = text[idx:next] return chunk def GetWindowsInfo(self): '''Gets information from the #WINDOWS file. Checks the #WINDOWS file to see if it has any info that was not found in #SYSTEM (topics, index or default page. ''' result, ui = chmlib.chm_resolve_object(self.file, b'/#WINDOWS') if (result != chmlib.CHM_RESOLVE_SUCCESS): return -1 size, text = chmlib.chm_retrieve_object(self.file, ui, 0, 8) if (size < 8): return -2 buff = array.array('B', text) num_entries = self.GetDWORD(buff, 0) entry_size = self.GetDWORD(buff, 4) if num_entries < 1: return -3 size, text = chmlib.chm_retrieve_object(self.file, ui, 8, entry_size) if (size < entry_size): return -4 buff = array.array('B', text) toc_index = self.GetDWORD(buff, 0x60) idx_index = self.GetDWORD(buff, 0x64) dft_index = self.GetDWORD(buff, 0x68) result, ui = chmlib.chm_resolve_object(self.file, b'/#STRINGS') if (result != chmlib.CHM_RESOLVE_SUCCESS): return -5 size, text = chmlib.chm_retrieve_object(self.file, ui, 0, ui.length) if (size == 0): return -6 if (not self.topics): self.topics = self.GetString(text, toc_index) if not self.topics.startswith(b"/"): self.topics = b"/" + self.topics if (not self.index): self.index = self.GetString(text, idx_index) if not self.index.startswith(b"/"): self.index = b"/" + self.index if (dft_index != 0): self.home = self.GetString(text, dft_index) if not self.home.startswith(b"/"): self.home = b"/" + self.home recoll-1.26.3/python/pychm/recollchm/swig_chm.i0000644000175000017500000001474213533651561016407 00000000000000%module chmlib %begin %{ #define SWIG_PYTHON_STRICT_BYTE_CHAR %} %include "typemaps.i" %include "cstring.i" %{ /* Copyright (C) 2003 Rubens Ramos Based on code by: Copyright (C) 2003 Razvan Cojocaru pychm is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA $Id$ */ #include "chm_lib.h" #include static PyObject *my_callback = NULL; static PyObject * my_set_callback(PyObject *dummy, PyObject *arg) { PyObject *result = NULL; if (!PyCallable_Check(arg)) { PyErr_SetString(PyExc_TypeError, "parameter must be callable"); return NULL; } Py_XINCREF(arg); /* Add a reference to new callback */ Py_XDECREF(my_callback); /* Dispose of previous callback */ my_callback = arg; /* Remember new callback */ /* Boilerplate to return "None" */ Py_INCREF(Py_None); result = Py_None; return result; } int dummy_enumerator (struct chmFile *h, struct chmUnitInfo *ui, void *context) { PyObject *arglist; PyObject *result; PyObject *py_h; PyObject *py_ui; PyObject *py_c; py_h = SWIG_NewPointerObj((void *) h, SWIGTYPE_p_chmFile, 0); py_ui = SWIG_NewPointerObj((void *) ui, SWIGTYPE_p_chmUnitInfo, 0); /* The following was: py_c = PyCObject_AsVoidPtr(context); which did not make sense because the function takes a PyObject * and returns a void *, not the reverse. This was probably never used?? In doubt, replace with a call which makes sense and hope for the best... 
*/ py_c = PyCapsule_New(context, "context", NULL); /* Time to call the callback */ arglist = Py_BuildValue("(OOO)", py_h, py_ui, py_c); if (arglist) { result = PyEval_CallObject(my_callback, arglist); Py_DECREF(arglist); Py_DECREF(result); Py_DECREF(py_h); Py_DECREF(py_ui); Py_DECREF(py_c); if (result == NULL) { return 0; /* Pass error back */ } else { return 1; } } else return 0; } %} %typemap(in) CHM_ENUMERATOR { if (!my_set_callback(self, $input)) goto fail; $1 = dummy_enumerator; } %typemap(in) void *context { if (!($1 = PyCapsule_New($input, "context", NULL))) goto fail; } %typemap(in, numinputs=0) struct chmUnitInfo *OutValue (struct chmUnitInfo *temp = (struct chmUnitInfo *) calloc(1, sizeof(struct chmUnitInfo))) { $1 = temp; } %typemap(argout) struct chmUnitInfo *OutValue { PyObject *o, *o2, *o3; o = SWIG_NewPointerObj((void *) $1, SWIGTYPE_p_chmUnitInfo, 1); if ((!$result) || ($result == Py_None)) { $result = o; } else { if (!PyTuple_Check($result)) { PyObject *o2 = $result; $result = PyTuple_New(1); PyTuple_SetItem($result,0,o2); } o3 = PyTuple_New(1); PyTuple_SetItem(o3,0,o); o2 = $result; $result = PySequence_Concat(o2,o3); Py_DECREF(o2); Py_DECREF(o3); } } %typemap(check) unsigned char *OUTPUT { /* nasty hack */ #ifdef __cplusplus $1 = ($1_ltype) new char[arg5]; #else $1 = ($1_ltype) malloc(arg5); #endif if ($1 == NULL) SWIG_fail; } %typemap(argout,fragment="t_output_helper") unsigned char *OUTPUT { PyObject *o; o = SWIG_FromCharPtrAndSize((const char*)$1, arg5); /* o = PyString_FromStringAndSize($1, arg5);*/ $result = t_output_helper($result,o); #ifdef __cplusplus delete [] $1; #else free($1); #endif } #ifdef WIN32 typedef unsigned __int64 LONGUINT64; typedef __int64 LONGINT64; #else typedef unsigned long long LONGUINT64; typedef long long LONGINT64; #endif /* the two available spaces in a CHM file */ /* N.B.: The format supports arbitrarily many spaces, but only */ /* two appear to be used at present. 
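   On the Python side, the OutValue and OUTPUT typemaps above turn these
   out-parameters into extra return values, so the wrapped calls are used as
   tuple-returning functions. A rough sketch (h is a handle obtained from
   chmlib.chm_open(); the object path is just an example):

       res, ui = chmlib.chm_resolve_object(h, b'/#SYSTEM')
       if res == chmlib.CHM_RESOLVE_SUCCESS:
           size, data = chmlib.chm_retrieve_object(h, ui, 0, ui.length)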
*/ #define CHM_UNCOMPRESSED (0) #define CHM_COMPRESSED (1) /* structure representing an ITS (CHM) file stream */ struct chmFile; /* structure representing an element from an ITS file stream */ #define CHM_MAX_PATHLEN 256 struct chmUnitInfo { LONGUINT64 start; LONGUINT64 length; int space; char path[CHM_MAX_PATHLEN+1]; }; /* open an ITS archive */ struct chmFile* chm_open(const char *filename); /* close an ITS archive */ void chm_close(struct chmFile *h); /* methods for ssetting tuning parameters for particular file */ #define CHM_PARAM_MAX_BLOCKS_CACHED 0 void chm_set_param(struct chmFile *h, int paramType, int paramVal); /* resolve a particular object from the archive */ #define CHM_RESOLVE_SUCCESS (0) #define CHM_RESOLVE_FAILURE (1) int chm_resolve_object(struct chmFile *h, const char *objPath, struct chmUnitInfo *OutValue); /* retrieve part of an object from the archive */ LONGINT64 chm_retrieve_object(struct chmFile *h, struct chmUnitInfo *ui, unsigned char *OUTPUT, LONGUINT64 addr, LONGINT64 len); /* enumerate the objects in the .chm archive */ typedef int (*CHM_ENUMERATOR)(struct chmFile *h, struct chmUnitInfo *ui, void *context); #define CHM_ENUMERATE_NORMAL (1) #define CHM_ENUMERATE_META (2) #define CHM_ENUMERATE_SPECIAL (4) #define CHM_ENUMERATE_FILES (8) #define CHM_ENUMERATE_DIRS (16) #define CHM_ENUMERATE_ALL (31) #define CHM_ENUMERATOR_FAILURE (0) #define CHM_ENUMERATOR_CONTINUE (1) #define CHM_ENUMERATOR_SUCCESS (2) int chm_enumerate(struct chmFile *h, int what, CHM_ENUMERATOR e, void *context); int chm_enumerate_dir(struct chmFile *h, const char *prefix, int what, CHM_ENUMERATOR e, void *context); recoll-1.26.3/python/pychm/recollchm/extra.c0000644000175000017500000004421113533651561015716 00000000000000/* * extra.c - full-text search support for pychm * * Copyright (C) 2004 Rubens Ramos * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, * Boston, MA 02110-1301, USA. 
* * Author: Rubens Ramos * * Heavily based on work done by: * Pabs - chmdeco * Razvan Cojocaru - xCHM * */ #include "chm_lib.h" #ifdef __PYTHON__ #include "Python.h" #else #include #define PyObject void #endif typedef struct { PyObject_HEAD void *ptr; void *ty; int own; PyObject *next; #ifdef SWIGPYTHON_BUILTIN PyObject *dict; #endif } SwigPyObject; #include #if defined(_WIN32) || defined(__WIN32__) # if defined(_MSC_VER) # if defined(STATIC_LINKED) # define MODEXPORT(a) a # define MODIMPORT(a) extern a # else # define MODEXPORT(a) __declspec(dllexport) a # define MODIMPORT(a) extern a # endif #define uint64_t unsigned long long #define uint32_t unsigned int #define uint16_t unsigned short #define uint8_t unsigned char #define size_t int #define strcasecmp _stricmp #define strncasecmp _strnicmp # else # if defined(__BORLANDC__) # define MODEXPORT(a) a _export # define MODIMPORT(a) a _export # else # define MODEXPORT(a) a # define MODIMPORT(a) a # endif # endif #else # define MODEXPORT(a) a # define MODIMPORT(a) a #include #include #endif #define false 0 #define true 1 #define FTS_HEADER_LEN 0x32 #define TOPICS_ENTRY_LEN 16 #define COMMON_BUF_LEN 1025 #define FREE(x) free (x); x = NULL static uint16_t get_uint16 (uint8_t* b) { return b[0] | b[1]<<8; } static uint32_t get_uint32 (uint8_t* b) { return b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24; } static uint64_t be_encint (unsigned char *buffer, size_t *length) { uint64_t result = 0; int shift=0; *length = 0; do { result |= ((*buffer) & 0x7f) << shift; shift += 7; *length = *length + 1; } while (*(buffer++) & 0x80); return result; } /* Finds the first unset bit in memory. Returns the number of set bits found. Returns -1 if the buffer runs out before we find an unset bit. */ static int ffus (unsigned char* byte, int* bit, size_t *length) { int bits = 0; *length = 0; while(*byte & (1 << *bit)){ if(*bit) --(*bit); else { ++byte; ++(*length); *bit = 7; } ++bits; } if(*bit) --(*bit); else { ++(*length); *bit = 7; } return bits; } static uint64_t sr_int(unsigned char* byte, int* bit, unsigned char s, unsigned char r, size_t *length) { uint64_t ret; unsigned char mask; int n, n_bits, num_bits, base, count; size_t fflen; *length = 0; if(!bit || *bit > 7 || s != 2) return ~(uint64_t)0; ret = 0; count = ffus(byte, bit, &fflen); *length += fflen; byte += *length; n_bits = n = r + (count ? count-1 : 0) ; while (n > 0) { num_bits = n > *bit ? *bit : n-1; base = n > *bit ? 
0 : *bit - (n-1); switch (num_bits){ case 0: mask = 1; break; case 1: mask = 3; break; case 2: mask = 7; break; case 3: mask = 0xf; break; case 4: mask = 0x1f; break; case 5: mask = 0x3f; break; case 6: mask = 0x7f; break; case 7: mask = 0xff; break; default: mask = 0xff; break; } mask <<= base; ret = (ret << (num_bits+1)) | (uint64_t)((*byte & mask) >> base); if( n > *bit ){ ++byte; ++(*length); n -= *bit+1; *bit = 7; } else { *bit -= n; n = 0; } } if(count) ret |= (uint64_t)1 << n_bits; return ret; } static uint32_t get_leaf_node_offset(struct chmFile *chmfile, const char *text, uint32_t initial_offset, uint32_t buff_size, uint16_t tree_depth, struct chmUnitInfo *ui) { unsigned char word_len; unsigned char pos; uint16_t free_space; char *wrd_buf; char *word = NULL; uint32_t test_offset = 0; uint32_t i = sizeof(uint16_t); unsigned char *buffer = malloc (buff_size); if (NULL == buffer) return 0; while (--tree_depth) { if (initial_offset == test_offset) { FREE(buffer); return 0; } test_offset = initial_offset; if (chm_retrieve_object (chmfile, ui, buffer, initial_offset, buff_size) == 0) { FREE(buffer); return 0; } free_space = get_uint16 (buffer); while (i < buff_size - free_space) { word_len = *(buffer + i); pos = *(buffer + i + 1); wrd_buf = malloc (word_len); memcpy (wrd_buf, buffer + i + 2, word_len - 1); wrd_buf[word_len - 1] = 0; if (pos == 0) { FREE (word); word = (char *) strdup (wrd_buf); } else { word = realloc (word, word_len + pos + 1); strcpy (word + pos, wrd_buf); } FREE(wrd_buf); if (strcasecmp (text, word) <= 0) { initial_offset = get_uint32 (buffer + i + word_len + 1); break; } i += word_len + sizeof (unsigned char) + sizeof(uint32_t) + sizeof(uint16_t); } } if(initial_offset == test_offset) initial_offset = 0; FREE(word); FREE(buffer); return initial_offset; } static int pychm_process_wlc (struct chmFile *chmfile, uint64_t wlc_count, uint64_t wlc_size, uint32_t wlc_offset, unsigned char ds, unsigned char dr, unsigned char cs, unsigned char cr, unsigned char ls, unsigned char lr, struct chmUnitInfo *uimain, struct chmUnitInfo* uitbl, struct chmUnitInfo *uistrings, struct chmUnitInfo* topics, struct chmUnitInfo *urlstr, PyObject *dict) { uint32_t stroff, urloff; uint64_t i, j, count; size_t length; int wlc_bit = 7; size_t off = 0; uint64_t index = 0; unsigned char entry[TOPICS_ENTRY_LEN]; unsigned char combuf[COMMON_BUF_LEN]; unsigned char *buffer = malloc (wlc_size); char *url = NULL; char *topic = NULL; if (chm_retrieve_object(chmfile, uimain, buffer, wlc_offset, wlc_size) == 0) { FREE(buffer); return false; } for (i = 0; i < wlc_count; ++i) { if(wlc_bit != 7) { ++off; wlc_bit = 7; } index += sr_int(buffer + off, &wlc_bit, ds, dr, &length); off += length; if(chm_retrieve_object(chmfile, topics, entry, index * 16, TOPICS_ENTRY_LEN) == 0) { FREE(topic); FREE(url); FREE(buffer); return false; } combuf[COMMON_BUF_LEN - 1] = 0; stroff = get_uint32 (entry + 4); FREE (topic); if (chm_retrieve_object (chmfile, uistrings, combuf, stroff, COMMON_BUF_LEN - 1) == 0) { topic = strdup ("Untitled in index"); } else { combuf[COMMON_BUF_LEN - 1] = 0; topic = strdup ((char *)combuf); } urloff = get_uint32 (entry + 8); if(chm_retrieve_object (chmfile, uitbl, combuf, urloff, 12) == 0) { FREE(buffer); return false; } urloff = get_uint32 (combuf + 8); if (chm_retrieve_object (chmfile, urlstr, combuf, urloff + 8, COMMON_BUF_LEN - 1) == 0) { FREE(topic); FREE(url); FREE(buffer); return false; } combuf[COMMON_BUF_LEN - 1] = 0; FREE (url); url = strdup ((char *)combuf); if (url && topic) { #ifdef 
__PYTHON__ PyDict_SetItem(dict, #if PY_MAJOR_VERSION >= 3 PyBytes_FromStringAndSize(topic, strlen(topic)), PyBytes_FromStringAndSize(url, strlen(url)) #else PyString_FromString (topic), PyString_FromString (url) #endif ); #else printf ("%s ==> %s\n", url, topic); #endif } count = sr_int (buffer + off, &wlc_bit, cs, cr, &length); off += length; for (j = 0; j < count; ++j) { sr_int (buffer + off, &wlc_bit, ls, lr, &length); off += length; } } FREE(topic); FREE(url); FREE(buffer); return true; } static int chm_search (struct chmFile *chmfile, const char *text, int whole_words, int titles_only, PyObject *dict) { unsigned char header[FTS_HEADER_LEN]; unsigned char doc_index_s; unsigned char doc_index_r; unsigned char code_count_s; unsigned char code_count_r; unsigned char loc_codes_s; unsigned char loc_codes_r; unsigned char word_len, pos; unsigned char *buffer; char *word = NULL; uint32_t node_offset; uint32_t node_len; uint16_t tree_depth; uint32_t i; uint16_t free_space; uint64_t wlc_count, wlc_size; uint32_t wlc_offset; char *wrd_buf; unsigned char title; size_t encsz; struct chmUnitInfo ui, uitopics, uiurltbl, uistrings, uiurlstr; int partial = false; if (NULL == text) return -1; if (chm_resolve_object (chmfile, "/$FIftiMain", &ui) != CHM_RESOLVE_SUCCESS || chm_resolve_object (chmfile, "/#TOPICS", &uitopics) != CHM_RESOLVE_SUCCESS || chm_resolve_object (chmfile, "/#STRINGS", &uistrings) != CHM_RESOLVE_SUCCESS || chm_resolve_object (chmfile, "/#URLTBL", &uiurltbl) != CHM_RESOLVE_SUCCESS || chm_resolve_object (chmfile, "/#URLSTR", &uiurlstr) != CHM_RESOLVE_SUCCESS) return false; if(chm_retrieve_object(chmfile, &ui, header, 0, FTS_HEADER_LEN) == 0) return false; doc_index_s = header[0x1E]; doc_index_r = header[0x1F]; code_count_s = header[0x20]; code_count_r = header[0x21]; loc_codes_s = header[0x22]; loc_codes_r = header[0x23]; if(doc_index_s != 2 || code_count_s != 2 || loc_codes_s != 2) { return false; } node_offset = get_uint32 (header + 0x14); node_len = get_uint32 (header + 0x2e); tree_depth = get_uint16 (header + 0x18); i = sizeof(uint16_t); buffer = malloc (node_len); node_offset = get_leaf_node_offset (chmfile, text, node_offset, node_len, tree_depth, &ui); if (!node_offset) { FREE(buffer); return false; } do { if (chm_retrieve_object (chmfile, &ui, buffer, node_offset, node_len) == 0) { FREE(word); FREE(buffer); return false; } free_space = get_uint16 (buffer + 6); i = sizeof(uint32_t) + sizeof(uint16_t) + sizeof(uint16_t); encsz = 0; while (i < node_len - free_space) { word_len = *(buffer + i); pos = *(buffer + i + 1); wrd_buf = malloc (word_len); memcpy (wrd_buf, buffer + i + 2, word_len - 1); wrd_buf[word_len - 1] = 0; if (pos == 0) { FREE(word); word = (char *) strdup (wrd_buf); } else { word = realloc (word, word_len + pos + 1); strcpy (word + pos, wrd_buf); } FREE(wrd_buf); i += 2 + word_len; title = *(buffer + i - 1); wlc_count = be_encint (buffer + i, &encsz); i += encsz; wlc_offset = get_uint32 (buffer + i); i += sizeof(uint32_t) + sizeof(uint16_t); wlc_size = be_encint (buffer + i, &encsz); i += encsz; node_offset = get_uint32 (buffer); if (!title && titles_only) continue; if (whole_words && !strcasecmp(text, word)) { partial = pychm_process_wlc (chmfile, wlc_count, wlc_size, wlc_offset, doc_index_s, doc_index_r,code_count_s, code_count_r, loc_codes_s, loc_codes_r, &ui, &uiurltbl, &uistrings, &uitopics, &uiurlstr, dict); FREE(word); FREE(buffer); return partial; } if (!whole_words) { if (!strncasecmp (word, text, strlen(text))) { partial = true; pychm_process_wlc (chmfile, 
wlc_count, wlc_size, wlc_offset, doc_index_s, doc_index_r,code_count_s, code_count_r, loc_codes_s, loc_codes_r, &ui, &uiurltbl, &uistrings, &uitopics, &uiurlstr, dict); } else if (strncasecmp (text, word, strlen(text)) < -1) break; } } } while (!whole_words && !strncmp (word, text, strlen(text)) && node_offset); FREE(word); FREE(buffer); return partial; } typedef struct { const char *file; int offset; } Langrec; static Langrec lang_files[] = { {"/$FIftiMain", 0x7E}, {"$WWKeywordLinks/BTree", 0x34}, {"$WWAssociativeLinks/BTree", 0x34} }; #define LANG_FILES_SIZE (sizeof(lang_files)/sizeof(Langrec)) static int chm_get_lcid (struct chmFile *chmfile) { struct chmUnitInfo ui; uint32_t lang; int i; for (i=0; iptr; if (chm_resolve_object (file, "/$FIftiMain", &ui) != CHM_RESOLVE_SUCCESS || chm_resolve_object (file, "/#TOPICS", &ui) != CHM_RESOLVE_SUCCESS || chm_resolve_object (file, "/#STRINGS", &ui) != CHM_RESOLVE_SUCCESS || chm_resolve_object (file, "/#URLTBL", &ui) != CHM_RESOLVE_SUCCESS || chm_resolve_object (file, "/#URLSTR", &ui) != CHM_RESOLVE_SUCCESS) return Py_BuildValue ("i", 0); else return Py_BuildValue ("i", 1); } else { PyErr_SetString(PyExc_TypeError, "Expected chmfile (not CHMFile!)"); return NULL; } } static PyObject * search (PyObject *self, PyObject *args) { char *text; int whole_words = 0; int titles_only = 0; int partial; struct chmFile *file; PyObject *obj0; PyObject *dict; #if PY_MAJOR_VERSION >= 3 PyObject *obj1; if (PyArg_ParseTuple (args, "OSii:search", &obj0, &obj1, #else if (PyArg_ParseTuple (args, "Osii:search", &obj0, &text, #endif &whole_words, &titles_only)) { #if PY_MAJOR_VERSION >= 3 text = PyBytes_AsString(obj1); #endif dict = PyDict_New(); if (dict) { file = (struct chmFile *) ((SwigPyObject*)(obj0))->ptr; partial = chm_search (file, text, whole_words, titles_only, dict); return Py_BuildValue ("(iO)", partial, dict); } else { PyErr_NoMemory(); return NULL; } } else { PyErr_SetString(PyExc_TypeError, "Expected chmfile (not CHMFile!), string, int, int"); return NULL; } } static PyObject * get_lcid (PyObject *self, PyObject *args) { int code; struct chmFile *file; PyObject *obj0; if (PyArg_ParseTuple (args, "O:get_lcid", &obj0)) { file = (struct chmFile *) ((SwigPyObject*)(obj0))->ptr; code = chm_get_lcid (file); if (code != -1) return Py_BuildValue ("i", code); else Py_INCREF(Py_None); return Py_None; } else { PyErr_SetString(PyExc_TypeError,"Expected a chmfile (not a CHMFile!)"); return NULL; } } static PyMethodDef IndexMethods[] = { {"get_lcid", get_lcid, METH_VARARGS, "Returns LCID (Locale ID) for archive."}, {"search", search, METH_VARARGS, "Perform Full-Text search."}, {"is_searchable", is_searchable, METH_VARARGS, "Return 1 if it is possible to search the archive, 0 otherwise."}, {NULL, NULL, 0, NULL} }; #if PY_MAJOR_VERSION >= 3 static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "extra", NULL, -1, IndexMethods, NULL, NULL, NULL, NULL }; #define INITERROR return NULL #else /* python < 3 */ #define INITERROR return #endif /* python 3/2 */ #if PY_MAJOR_VERSION >= 3 PyObject* PyInit_extra(void) #else void initextra (void) #endif { PyObject *module; #if PY_MAJOR_VERSION >= 3 module = PyModule_Create(&moduledef); #else module = Py_InitModule ("extra", IndexMethods); #endif if (module == NULL) INITERROR; #if PY_MAJOR_VERSION >= 3 return module; #endif } #else int main (int argc, char **argv) { struct chmFile *file; char text[255]; int whole_words, titles_only; int partial; if (argc == 2) { file = chm_open (argv[1]); if (file) { printf ("\nLCID= %d 
(%08X)\n", chm_get_lcid(file), chm_get_lcid(file)); while (1) { printf ("\n \n"); printf ("> "); if (scanf ("%d %d %s", &whole_words, &titles_only, text)) partial = chm_search (file, text, whole_words, titles_only, NULL); else break; printf ("Partial = %d\n", partial); } chm_close (file); return 0; } return -1; } else { printf ("\n%s \n", argv[0]); return 0; } } #endif recoll-1.26.3/python/pychm/recollchm/chmlib.py0000644000175000017500000001410413533651561016235 00000000000000# This file was automatically generated by SWIG (http://www.swig.org). # Version 3.0.10 # # Do not make changes to this file unless you know what you are doing--modify # the SWIG interface file instead. from sys import version_info as _swig_python_version_info if _swig_python_version_info >= (2, 7, 0): def swig_import_helper(): import importlib pkg = __name__.rpartition('.')[0] mname = '.'.join((pkg, '_chmlib')).lstrip('.') try: return importlib.import_module(mname) except ImportError: return importlib.import_module('_chmlib') _chmlib = swig_import_helper() del swig_import_helper elif _swig_python_version_info >= (2, 6, 0): def swig_import_helper(): from os.path import dirname import imp fp = None try: fp, pathname, description = imp.find_module('_chmlib', [dirname(__file__)]) except ImportError: import _chmlib return _chmlib if fp is not None: try: _mod = imp.load_module('_chmlib', fp, pathname, description) finally: fp.close() return _mod _chmlib = swig_import_helper() del swig_import_helper else: import _chmlib del _swig_python_version_info try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. try: import builtins as __builtin__ except ImportError: import __builtin__ def _swig_setattr_nondynamic(self, class_type, name, value, static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'SwigPyObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name, None) if method: return method(self, value) if (not static): if _newclass: object.__setattr__(self, name, value) else: self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self, class_type, name, value): return _swig_setattr_nondynamic(self, class_type, name, value, 0) def _swig_getattr(self, class_type, name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name, None) if method: return method(self) raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name)) def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except __builtin__.Exception: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) try: _object = object _newclass = 1 except __builtin__.Exception: class _object: pass _newclass = 0 CHM_UNCOMPRESSED = _chmlib.CHM_UNCOMPRESSED CHM_COMPRESSED = _chmlib.CHM_COMPRESSED CHM_MAX_PATHLEN = _chmlib.CHM_MAX_PATHLEN class chmUnitInfo(_object): __swig_setmethods__ = {} __setattr__ = lambda self, name, value: _swig_setattr(self, chmUnitInfo, name, value) __swig_getmethods__ = {} __getattr__ = lambda self, name: _swig_getattr(self, chmUnitInfo, name) __repr__ = _swig_repr __swig_setmethods__["start"] = _chmlib.chmUnitInfo_start_set __swig_getmethods__["start"] = _chmlib.chmUnitInfo_start_get if _newclass: start = _swig_property(_chmlib.chmUnitInfo_start_get, _chmlib.chmUnitInfo_start_set) __swig_setmethods__["length"] = _chmlib.chmUnitInfo_length_set 
__swig_getmethods__["length"] = _chmlib.chmUnitInfo_length_get if _newclass: length = _swig_property(_chmlib.chmUnitInfo_length_get, _chmlib.chmUnitInfo_length_set) __swig_setmethods__["space"] = _chmlib.chmUnitInfo_space_set __swig_getmethods__["space"] = _chmlib.chmUnitInfo_space_get if _newclass: space = _swig_property(_chmlib.chmUnitInfo_space_get, _chmlib.chmUnitInfo_space_set) __swig_setmethods__["path"] = _chmlib.chmUnitInfo_path_set __swig_getmethods__["path"] = _chmlib.chmUnitInfo_path_get if _newclass: path = _swig_property(_chmlib.chmUnitInfo_path_get, _chmlib.chmUnitInfo_path_set) def __init__(self): this = _chmlib.new_chmUnitInfo() try: self.this.append(this) except __builtin__.Exception: self.this = this __swig_destroy__ = _chmlib.delete_chmUnitInfo __del__ = lambda self: None chmUnitInfo_swigregister = _chmlib.chmUnitInfo_swigregister chmUnitInfo_swigregister(chmUnitInfo) def chm_open(filename): return _chmlib.chm_open(filename) chm_open = _chmlib.chm_open def chm_close(h): return _chmlib.chm_close(h) chm_close = _chmlib.chm_close CHM_PARAM_MAX_BLOCKS_CACHED = _chmlib.CHM_PARAM_MAX_BLOCKS_CACHED def chm_set_param(h, paramType, paramVal): return _chmlib.chm_set_param(h, paramType, paramVal) chm_set_param = _chmlib.chm_set_param CHM_RESOLVE_SUCCESS = _chmlib.CHM_RESOLVE_SUCCESS CHM_RESOLVE_FAILURE = _chmlib.CHM_RESOLVE_FAILURE def chm_resolve_object(h, objPath): return _chmlib.chm_resolve_object(h, objPath) chm_resolve_object = _chmlib.chm_resolve_object def chm_retrieve_object(h, ui, addr, len): return _chmlib.chm_retrieve_object(h, ui, addr, len) chm_retrieve_object = _chmlib.chm_retrieve_object CHM_ENUMERATE_NORMAL = _chmlib.CHM_ENUMERATE_NORMAL CHM_ENUMERATE_META = _chmlib.CHM_ENUMERATE_META CHM_ENUMERATE_SPECIAL = _chmlib.CHM_ENUMERATE_SPECIAL CHM_ENUMERATE_FILES = _chmlib.CHM_ENUMERATE_FILES CHM_ENUMERATE_DIRS = _chmlib.CHM_ENUMERATE_DIRS CHM_ENUMERATE_ALL = _chmlib.CHM_ENUMERATE_ALL CHM_ENUMERATOR_FAILURE = _chmlib.CHM_ENUMERATOR_FAILURE CHM_ENUMERATOR_CONTINUE = _chmlib.CHM_ENUMERATOR_CONTINUE CHM_ENUMERATOR_SUCCESS = _chmlib.CHM_ENUMERATOR_SUCCESS def chm_enumerate(h, what, e, context): return _chmlib.chm_enumerate(h, what, e, context) chm_enumerate = _chmlib.chm_enumerate def chm_enumerate_dir(h, prefix, what, e, context): return _chmlib.chm_enumerate_dir(h, prefix, what, e, context) chm_enumerate_dir = _chmlib.chm_enumerate_dir # This file is compatible with both classic and new-style classes. recoll-1.26.3/python/pychm/recollchm/__init__.py0000644000175000017500000000250113533651561016534 00000000000000# Copyright (C) 2003-2006 Rubens Ramos # # pychm is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License as # published by the Free Software Foundation; either version 2 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public # License along with this program; see the file COPYING. If not, # write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, # Boston, MA 02110-1301, USA # ''' chm - A package to manipulate CHM files The chm package provides four modules: chm, chmlib, extra and _chmlib. 
_chmlib and chmlib are very low level libraries generated from SWIG interface files, and are simple wrappers around the API defined by the C library chmlib. The extra module adds full-text search support. the chm module provides some higher level classes to simplify access to the CHM files information. ''' __all__ = ["chm", "chmlib", "_chmlib", "extra"] __version__ = "0.8.4.1+git" __revision__ = "$Id$" recoll-1.26.3/python/pychm/recollchm/swig_chm.c0000644000175000017500000044277113533651561016410 00000000000000/* ---------------------------------------------------------------------------- * This file was automatically generated by SWIG (http://www.swig.org). * Version 3.0.10 * * This file is not intended to be easily readable and contains a number of * coding conventions designed to improve portability and efficiency. Do not make * changes to this file unless you know what you are doing--modify the SWIG * interface file instead. * ----------------------------------------------------------------------------- */ #define SWIG_PYTHON_STRICT_BYTE_CHAR #ifndef SWIGPYTHON #define SWIGPYTHON #endif #define SWIG_PYTHON_DIRECTOR_NO_VTABLE /* ----------------------------------------------------------------------------- * This section contains generic SWIG labels for method/variable * declarations/attributes, and other compiler dependent labels. * ----------------------------------------------------------------------------- */ /* template workaround for compilers that cannot correctly implement the C++ standard */ #ifndef SWIGTEMPLATEDISAMBIGUATOR # if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560) # define SWIGTEMPLATEDISAMBIGUATOR template # elif defined(__HP_aCC) /* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */ /* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */ # define SWIGTEMPLATEDISAMBIGUATOR template # else # define SWIGTEMPLATEDISAMBIGUATOR # endif #endif /* inline attribute */ #ifndef SWIGINLINE # if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__)) # define SWIGINLINE inline # else # define SWIGINLINE # endif #endif /* attribute recognised by some compilers to avoid 'unused' warnings */ #ifndef SWIGUNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif # elif defined(__ICC) # define SWIGUNUSED __attribute__ ((__unused__)) # else # define SWIGUNUSED # endif #endif #ifndef SWIG_MSC_UNSUPPRESS_4505 # if defined(_MSC_VER) # pragma warning(disable : 4505) /* unreferenced local function has been removed */ # endif #endif #ifndef SWIGUNUSEDPARM # ifdef __cplusplus # define SWIGUNUSEDPARM(p) # else # define SWIGUNUSEDPARM(p) p SWIGUNUSED # endif #endif /* internal SWIG method */ #ifndef SWIGINTERN # define SWIGINTERN static SWIGUNUSED #endif /* internal inline SWIG method */ #ifndef SWIGINTERNINLINE # define SWIGINTERNINLINE SWIGINTERN SWIGINLINE #endif /* exporting methods */ #if defined(__GNUC__) # if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) # ifndef GCC_HASCLASSVISIBILITY # define GCC_HASCLASSVISIBILITY # endif # endif #endif #ifndef SWIGEXPORT # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # if defined(STATIC_LINKED) # define SWIGEXPORT # else # define SWIGEXPORT __declspec(dllexport) # endif # else # if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY) # define SWIGEXPORT __attribute__ 
((visibility("default"))) # else # define SWIGEXPORT # endif # endif #endif /* calling conventions for Windows */ #ifndef SWIGSTDCALL # if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # define SWIGSTDCALL __stdcall # else # define SWIGSTDCALL # endif #endif /* Deal with Microsoft's attempt at deprecating C standard runtime functions */ #if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE) # define _CRT_SECURE_NO_DEPRECATE #endif /* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */ #if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE) # define _SCL_SECURE_NO_DEPRECATE #endif /* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */ #if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES) # define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0 #endif /* Intel's compiler complains if a variable which was never initialised is * cast to void, which is a common idiom which we use to indicate that we * are aware a variable isn't used. So we just silence that warning. * See: https://github.com/swig/swig/issues/192 for more discussion. */ #ifdef __INTEL_COMPILER # pragma warning disable 592 #endif #if defined(_DEBUG) && defined(SWIG_PYTHON_INTERPRETER_NO_DEBUG) /* Use debug wrappers with the Python release dll */ # undef _DEBUG # include # define _DEBUG #else # include #endif /* ----------------------------------------------------------------------------- * swigrun.swg * * This file contains generic C API SWIG runtime support for pointer * type checking. * ----------------------------------------------------------------------------- */ /* This should only be incremented when either the layout of swig_type_info changes, or for whatever reason, the runtime changes incompatibly */ #define SWIG_RUNTIME_VERSION "4" /* define SWIG_TYPE_TABLE_NAME as "SWIG_TYPE_TABLE" */ #ifdef SWIG_TYPE_TABLE # define SWIG_QUOTE_STRING(x) #x # define SWIG_EXPAND_AND_QUOTE_STRING(x) SWIG_QUOTE_STRING(x) # define SWIG_TYPE_TABLE_NAME SWIG_EXPAND_AND_QUOTE_STRING(SWIG_TYPE_TABLE) #else # define SWIG_TYPE_TABLE_NAME #endif /* You can use the SWIGRUNTIME and SWIGRUNTIMEINLINE macros for creating a static or dynamic library from the SWIG runtime code. In 99.9% of the cases, SWIG just needs to declare them as 'static'. But only do this if strictly necessary, ie, if you have problems with your compiler or suchlike. */ #ifndef SWIGRUNTIME # define SWIGRUNTIME SWIGINTERN #endif #ifndef SWIGRUNTIMEINLINE # define SWIGRUNTIMEINLINE SWIGRUNTIME SWIGINLINE #endif /* Generic buffer size */ #ifndef SWIG_BUFFER_SIZE # define SWIG_BUFFER_SIZE 1024 #endif /* Flags for pointer conversions */ #define SWIG_POINTER_DISOWN 0x1 #define SWIG_CAST_NEW_MEMORY 0x2 /* Flags for new pointer objects */ #define SWIG_POINTER_OWN 0x1 /* Flags/methods for returning states. The SWIG conversion methods, as ConvertPtr, return an integer that tells if the conversion was successful or not. And if not, an error code can be returned (see swigerrors.swg for the codes). Use the following macros/flags to set or process the returning states. 
In old versions of SWIG, code such as the following was usually written: if (SWIG_ConvertPtr(obj,vptr,ty.flags) != -1) { // success code } else { //fail code } Now you can be more explicit: int res = SWIG_ConvertPtr(obj,vptr,ty.flags); if (SWIG_IsOK(res)) { // success code } else { // fail code } which is the same really, but now you can also do Type *ptr; int res = SWIG_ConvertPtr(obj,(void **)(&ptr),ty.flags); if (SWIG_IsOK(res)) { // success code if (SWIG_IsNewObj(res) { ... delete *ptr; } else { ... } } else { // fail code } I.e., now SWIG_ConvertPtr can return new objects and you can identify the case and take care of the deallocation. Of course that also requires SWIG_ConvertPtr to return new result values, such as int SWIG_ConvertPtr(obj, ptr,...) { if () { if () { *ptr = ; return SWIG_NEWOBJ; } else { *ptr = ; return SWIG_OLDOBJ; } } else { return SWIG_BADOBJ; } } Of course, returning the plain '0(success)/-1(fail)' still works, but you can be more explicit by returning SWIG_BADOBJ, SWIG_ERROR or any of the SWIG errors code. Finally, if the SWIG_CASTRANK_MODE is enabled, the result code allows to return the 'cast rank', for example, if you have this int food(double) int fooi(int); and you call food(1) // cast rank '1' (1 -> 1.0) fooi(1) // cast rank '0' just use the SWIG_AddCast()/SWIG_CheckState() */ #define SWIG_OK (0) #define SWIG_ERROR (-1) #define SWIG_IsOK(r) (r >= 0) #define SWIG_ArgError(r) ((r != SWIG_ERROR) ? r : SWIG_TypeError) /* The CastRankLimit says how many bits are used for the cast rank */ #define SWIG_CASTRANKLIMIT (1 << 8) /* The NewMask denotes the object was created (using new/malloc) */ #define SWIG_NEWOBJMASK (SWIG_CASTRANKLIMIT << 1) /* The TmpMask is for in/out typemaps that use temporal objects */ #define SWIG_TMPOBJMASK (SWIG_NEWOBJMASK << 1) /* Simple returning values */ #define SWIG_BADOBJ (SWIG_ERROR) #define SWIG_OLDOBJ (SWIG_OK) #define SWIG_NEWOBJ (SWIG_OK | SWIG_NEWOBJMASK) #define SWIG_TMPOBJ (SWIG_OK | SWIG_TMPOBJMASK) /* Check, add and del mask methods */ #define SWIG_AddNewMask(r) (SWIG_IsOK(r) ? (r | SWIG_NEWOBJMASK) : r) #define SWIG_DelNewMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_NEWOBJMASK) : r) #define SWIG_IsNewObj(r) (SWIG_IsOK(r) && (r & SWIG_NEWOBJMASK)) #define SWIG_AddTmpMask(r) (SWIG_IsOK(r) ? (r | SWIG_TMPOBJMASK) : r) #define SWIG_DelTmpMask(r) (SWIG_IsOK(r) ? (r & ~SWIG_TMPOBJMASK) : r) #define SWIG_IsTmpObj(r) (SWIG_IsOK(r) && (r & SWIG_TMPOBJMASK)) /* Cast-Rank Mode */ #if defined(SWIG_CASTRANK_MODE) # ifndef SWIG_TypeRank # define SWIG_TypeRank unsigned long # endif # ifndef SWIG_MAXCASTRANK /* Default cast allowed */ # define SWIG_MAXCASTRANK (2) # endif # define SWIG_CASTRANKMASK ((SWIG_CASTRANKLIMIT) -1) # define SWIG_CastRank(r) (r & SWIG_CASTRANKMASK) SWIGINTERNINLINE int SWIG_AddCast(int r) { return SWIG_IsOK(r) ? ((SWIG_CastRank(r) < SWIG_MAXCASTRANK) ? (r + 1) : SWIG_ERROR) : r; } SWIGINTERNINLINE int SWIG_CheckState(int r) { return SWIG_IsOK(r) ? SWIG_CastRank(r) + 1 : 0; } #else /* no cast-rank mode */ # define SWIG_AddCast(r) (r) # define SWIG_CheckState(r) (SWIG_IsOK(r) ? 
1 : 0) #endif #include #ifdef __cplusplus extern "C" { #endif typedef void *(*swig_converter_func)(void *, int *); typedef struct swig_type_info *(*swig_dycast_func)(void **); /* Structure to store information on one type */ typedef struct swig_type_info { const char *name; /* mangled name of this type */ const char *str; /* human readable name of this type */ swig_dycast_func dcast; /* dynamic cast function down a hierarchy */ struct swig_cast_info *cast; /* linked list of types that can cast into this type */ void *clientdata; /* language specific type data */ int owndata; /* flag if the structure owns the clientdata */ } swig_type_info; /* Structure to store a type and conversion function used for casting */ typedef struct swig_cast_info { swig_type_info *type; /* pointer to type that is equivalent to this type */ swig_converter_func converter; /* function to cast the void pointers */ struct swig_cast_info *next; /* pointer to next cast in linked list */ struct swig_cast_info *prev; /* pointer to the previous cast */ } swig_cast_info; /* Structure used to store module information * Each module generates one structure like this, and the runtime collects * all of these structures and stores them in a circularly linked list.*/ typedef struct swig_module_info { swig_type_info **types; /* Array of pointers to swig_type_info structures that are in this module */ size_t size; /* Number of types in this module */ struct swig_module_info *next; /* Pointer to next element in circularly linked list */ swig_type_info **type_initial; /* Array of initially generated type structures */ swig_cast_info **cast_initial; /* Array of initially generated casting structures */ void *clientdata; /* Language specific module data */ } swig_module_info; /* Compare two type names skipping the space characters, therefore "char*" == "char *" and "Class" == "Class", etc. Return 0 when the two name types are equivalent, as in strncmp, but skipping ' '. */ SWIGRUNTIME int SWIG_TypeNameComp(const char *f1, const char *l1, const char *f2, const char *l2) { for (;(f1 != l1) && (f2 != l2); ++f1, ++f2) { while ((*f1 == ' ') && (f1 != l1)) ++f1; while ((*f2 == ' ') && (f2 != l2)) ++f2; if (*f1 != *f2) return (*f1 > *f2) ? 1 : -1; } return (int)((l1 - f1) - (l2 - f2)); } /* Check type equivalence in a name list like ||... Return 0 if equal, -1 if nb < tb, 1 if nb > tb */ SWIGRUNTIME int SWIG_TypeCmp(const char *nb, const char *tb) { int equiv = 1; const char* te = tb + strlen(tb); const char* ne = nb; while (equiv != 0 && *ne) { for (nb = ne; *ne; ++ne) { if (*ne == '|') break; } equiv = SWIG_TypeNameComp(nb, ne, tb, te); if (*ne) ++ne; } return equiv; } /* Check type equivalence in a name list like ||... Return 0 if not equal, 1 if equal */ SWIGRUNTIME int SWIG_TypeEquiv(const char *nb, const char *tb) { return SWIG_TypeCmp(nb, tb) == 0 ? 
1 : 0; } /* Check the typename */ SWIGRUNTIME swig_cast_info * SWIG_TypeCheck(const char *c, swig_type_info *ty) { if (ty) { swig_cast_info *iter = ty->cast; while (iter) { if (strcmp(iter->type->name, c) == 0) { if (iter == ty->cast) return iter; /* Move iter to the top of the linked list */ iter->prev->next = iter->next; if (iter->next) iter->next->prev = iter->prev; iter->next = ty->cast; iter->prev = 0; if (ty->cast) ty->cast->prev = iter; ty->cast = iter; return iter; } iter = iter->next; } } return 0; } /* Identical to SWIG_TypeCheck, except strcmp is replaced with a pointer comparison */ SWIGRUNTIME swig_cast_info * SWIG_TypeCheckStruct(swig_type_info *from, swig_type_info *ty) { if (ty) { swig_cast_info *iter = ty->cast; while (iter) { if (iter->type == from) { if (iter == ty->cast) return iter; /* Move iter to the top of the linked list */ iter->prev->next = iter->next; if (iter->next) iter->next->prev = iter->prev; iter->next = ty->cast; iter->prev = 0; if (ty->cast) ty->cast->prev = iter; ty->cast = iter; return iter; } iter = iter->next; } } return 0; } /* Cast a pointer up an inheritance hierarchy */ SWIGRUNTIMEINLINE void * SWIG_TypeCast(swig_cast_info *ty, void *ptr, int *newmemory) { return ((!ty) || (!ty->converter)) ? ptr : (*ty->converter)(ptr, newmemory); } /* Dynamic pointer casting. Down an inheritance hierarchy */ SWIGRUNTIME swig_type_info * SWIG_TypeDynamicCast(swig_type_info *ty, void **ptr) { swig_type_info *lastty = ty; if (!ty || !ty->dcast) return ty; while (ty && (ty->dcast)) { ty = (*ty->dcast)(ptr); if (ty) lastty = ty; } return lastty; } /* Return the name associated with this type */ SWIGRUNTIMEINLINE const char * SWIG_TypeName(const swig_type_info *ty) { return ty->name; } /* Return the pretty name associated with this type, that is an unmangled type name in a form presentable to the user. */ SWIGRUNTIME const char * SWIG_TypePrettyName(const swig_type_info *type) { /* The "str" field contains the equivalent pretty names of the type, separated by vertical-bar characters. We choose to print the last name, as it is often (?) the most specific. */ if (!type) return NULL; if (type->str != NULL) { const char *last_name = type->str; const char *s; for (s = type->str; *s; s++) if (*s == '|') last_name = s+1; return last_name; } else return type->name; } /* Set the clientdata field for a type */ SWIGRUNTIME void SWIG_TypeClientData(swig_type_info *ti, void *clientdata) { swig_cast_info *cast = ti->cast; /* if (ti->clientdata == clientdata) return; */ ti->clientdata = clientdata; while (cast) { if (!cast->converter) { swig_type_info *tc = cast->type; if (!tc->clientdata) { SWIG_TypeClientData(tc, clientdata); } } cast = cast->next; } } SWIGRUNTIME void SWIG_TypeNewClientData(swig_type_info *ti, void *clientdata) { SWIG_TypeClientData(ti, clientdata); ti->owndata = 1; } /* Search for a swig_type_info structure only by mangled name Search is a O(log #types) We start searching at module start, and finish searching when start == end. Note: if start == end at the beginning of the function, we go all the way around the circular list. 
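   As a purely hypothetical illustration for this wrapper: if iter->types[]
   held the mangled names

       _p_chmFile   _p_chmUnitInfo   _p_unsigned_char

   sorted by strcmp(), a query for "_p_chmUnitInfo" would repeatedly halve the
   [l, r] index range until strcmp() returns 0. If the name is not found in
   this module's table, iter advances to iter->next and the same bisection is
   run against the next module in the ring, stopping once iter comes back
   around to 'end'.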
*/ SWIGRUNTIME swig_type_info * SWIG_MangledTypeQueryModule(swig_module_info *start, swig_module_info *end, const char *name) { swig_module_info *iter = start; do { if (iter->size) { size_t l = 0; size_t r = iter->size - 1; do { /* since l+r >= 0, we can (>> 1) instead (/ 2) */ size_t i = (l + r) >> 1; const char *iname = iter->types[i]->name; if (iname) { int compare = strcmp(name, iname); if (compare == 0) { return iter->types[i]; } else if (compare < 0) { if (i) { r = i - 1; } else { break; } } else if (compare > 0) { l = i + 1; } } else { break; /* should never happen */ } } while (l <= r); } iter = iter->next; } while (iter != end); return 0; } /* Search for a swig_type_info structure for either a mangled name or a human readable name. It first searches the mangled names of the types, which is a O(log #types) If a type is not found it then searches the human readable names, which is O(#types). We start searching at module start, and finish searching when start == end. Note: if start == end at the beginning of the function, we go all the way around the circular list. */ SWIGRUNTIME swig_type_info * SWIG_TypeQueryModule(swig_module_info *start, swig_module_info *end, const char *name) { /* STEP 1: Search the name field using binary search */ swig_type_info *ret = SWIG_MangledTypeQueryModule(start, end, name); if (ret) { return ret; } else { /* STEP 2: If the type hasn't been found, do a complete search of the str field (the human readable name) */ swig_module_info *iter = start; do { size_t i = 0; for (; i < iter->size; ++i) { if (iter->types[i]->str && (SWIG_TypeEquiv(iter->types[i]->str, name))) return iter->types[i]; } iter = iter->next; } while (iter != end); } /* neither found a match */ return 0; } /* Pack binary data into a string */ SWIGRUNTIME char * SWIG_PackData(char *c, void *ptr, size_t sz) { static const char hex[17] = "0123456789abcdef"; const unsigned char *u = (unsigned char *) ptr; const unsigned char *eu = u + sz; for (; u != eu; ++u) { unsigned char uu = *u; *(c++) = hex[(uu & 0xf0) >> 4]; *(c++) = hex[uu & 0xf]; } return c; } /* Unpack binary data from a string */ SWIGRUNTIME const char * SWIG_UnpackData(const char *c, void *ptr, size_t sz) { unsigned char *u = (unsigned char *) ptr; const unsigned char *eu = u + sz; for (; u != eu; ++u) { char d = *(c++); unsigned char uu; if ((d >= '0') && (d <= '9')) uu = (unsigned char)((d - '0') << 4); else if ((d >= 'a') && (d <= 'f')) uu = (unsigned char)((d - ('a'-10)) << 4); else return (char *) 0; d = *(c++); if ((d >= '0') && (d <= '9')) uu |= (unsigned char)(d - '0'); else if ((d >= 'a') && (d <= 'f')) uu |= (unsigned char)(d - ('a'-10)); else return (char *) 0; *u = uu; } return c; } /* Pack 'void *' into a string buffer. */ SWIGRUNTIME char * SWIG_PackVoidPtr(char *buff, void *ptr, const char *name, size_t bsz) { char *r = buff; if ((2*sizeof(void *) + 2) > bsz) return 0; *(r++) = '_'; r = SWIG_PackData(r,&ptr,sizeof(void *)); if (strlen(name) + 1 > (bsz - (r - buff))) return 0; strcpy(r,name); return buff; } SWIGRUNTIME const char * SWIG_UnpackVoidPtr(const char *c, void **ptr, const char *name) { if (*c != '_') { if (strcmp(c,"NULL") == 0) { *ptr = (void *) 0; return name; } else { return 0; } } return SWIG_UnpackData(++c,ptr,sizeof(void *)); } SWIGRUNTIME char * SWIG_PackDataName(char *buff, void *ptr, size_t sz, const char *name, size_t bsz) { char *r = buff; size_t lname = (name ? 
strlen(name) : 0); if ((2*sz + 2 + lname) > bsz) return 0; *(r++) = '_'; r = SWIG_PackData(r,ptr,sz); if (lname) { strncpy(r,name,lname+1); } else { *r = 0; } return buff; } SWIGRUNTIME const char * SWIG_UnpackDataName(const char *c, void *ptr, size_t sz, const char *name) { if (*c != '_') { if (strcmp(c,"NULL") == 0) { memset(ptr,0,sz); return name; } else { return 0; } } return SWIG_UnpackData(++c,ptr,sz); } #ifdef __cplusplus } #endif /* Errors in SWIG */ #define SWIG_UnknownError -1 #define SWIG_IOError -2 #define SWIG_RuntimeError -3 #define SWIG_IndexError -4 #define SWIG_TypeError -5 #define SWIG_DivisionByZero -6 #define SWIG_OverflowError -7 #define SWIG_SyntaxError -8 #define SWIG_ValueError -9 #define SWIG_SystemError -10 #define SWIG_AttributeError -11 #define SWIG_MemoryError -12 #define SWIG_NullReferenceError -13 /* Compatibility macros for Python 3 */ #if PY_VERSION_HEX >= 0x03000000 #define PyClass_Check(obj) PyObject_IsInstance(obj, (PyObject *)&PyType_Type) #define PyInt_Check(x) PyLong_Check(x) #define PyInt_AsLong(x) PyLong_AsLong(x) #define PyInt_FromLong(x) PyLong_FromLong(x) #define PyInt_FromSize_t(x) PyLong_FromSize_t(x) #define PyString_Check(name) PyBytes_Check(name) #define PyString_FromString(x) PyUnicode_FromString(x) #define PyString_Format(fmt, args) PyUnicode_Format(fmt, args) #define PyString_AsString(str) PyBytes_AsString(str) #define PyString_Size(str) PyBytes_Size(str) #define PyString_InternFromString(key) PyUnicode_InternFromString(key) #define Py_TPFLAGS_HAVE_CLASS Py_TPFLAGS_BASETYPE #define PyString_AS_STRING(x) PyUnicode_AS_STRING(x) #define _PyLong_FromSsize_t(x) PyLong_FromSsize_t(x) #endif #ifndef Py_TYPE # define Py_TYPE(op) ((op)->ob_type) #endif /* SWIG APIs for compatibility of both Python 2 & 3 */ #if PY_VERSION_HEX >= 0x03000000 # define SWIG_Python_str_FromFormat PyUnicode_FromFormat #else # define SWIG_Python_str_FromFormat PyString_FromFormat #endif /* Warning: This function will allocate a new string in Python 3, * so please call SWIG_Python_str_DelForPy3(x) to free the space. */ SWIGINTERN char* SWIG_Python_str_AsChar(PyObject *str) { #if PY_VERSION_HEX >= 0x03000000 char *cstr; char *newstr; Py_ssize_t len; str = PyUnicode_AsUTF8String(str); PyBytes_AsStringAndSize(str, &cstr, &len); newstr = (char *) malloc(len+1); memcpy(newstr, cstr, len+1); Py_XDECREF(str); return newstr; #else return PyString_AsString(str); #endif } #if PY_VERSION_HEX >= 0x03000000 # define SWIG_Python_str_DelForPy3(x) free( (void*) (x) ) #else # define SWIG_Python_str_DelForPy3(x) #endif SWIGINTERN PyObject* SWIG_Python_str_FromChar(const char *c) { #if PY_VERSION_HEX >= 0x03000000 return PyUnicode_FromString(c); #else return PyString_FromString(c); #endif } /* Add PyOS_snprintf for old Pythons */ #if PY_VERSION_HEX < 0x02020000 # if defined(_MSC_VER) || defined(__BORLANDC__) || defined(_WATCOM) # define PyOS_snprintf _snprintf # else # define PyOS_snprintf snprintf # endif #endif /* A crude PyString_FromFormat implementation for old Pythons */ #if PY_VERSION_HEX < 0x02020000 #ifndef SWIG_PYBUFFER_SIZE # define SWIG_PYBUFFER_SIZE 1024 #endif static PyObject * PyString_FromFormat(const char *fmt, ...) { va_list ap; char buf[SWIG_PYBUFFER_SIZE * 2]; int res; va_start(ap, fmt); res = vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); return (res < 0 || res >= (int)sizeof(buf)) ? 
0 : PyString_FromString(buf); } #endif #ifndef PyObject_DEL # define PyObject_DEL PyObject_Del #endif /* A crude PyExc_StopIteration exception for old Pythons */ #if PY_VERSION_HEX < 0x02020000 # ifndef PyExc_StopIteration # define PyExc_StopIteration PyExc_RuntimeError # endif # ifndef PyObject_GenericGetAttr # define PyObject_GenericGetAttr 0 # endif #endif /* Py_NotImplemented is defined in 2.1 and up. */ #if PY_VERSION_HEX < 0x02010000 # ifndef Py_NotImplemented # define Py_NotImplemented PyExc_RuntimeError # endif #endif /* A crude PyString_AsStringAndSize implementation for old Pythons */ #if PY_VERSION_HEX < 0x02010000 # ifndef PyString_AsStringAndSize # define PyString_AsStringAndSize(obj, s, len) {*s = PyString_AsString(obj); *len = *s ? strlen(*s) : 0;} # endif #endif /* PySequence_Size for old Pythons */ #if PY_VERSION_HEX < 0x02000000 # ifndef PySequence_Size # define PySequence_Size PySequence_Length # endif #endif /* PyBool_FromLong for old Pythons */ #if PY_VERSION_HEX < 0x02030000 static PyObject *PyBool_FromLong(long ok) { PyObject *result = ok ? Py_True : Py_False; Py_INCREF(result); return result; } #endif /* Py_ssize_t for old Pythons */ /* This code is as recommended by: */ /* http://www.python.org/dev/peps/pep-0353/#conversion-guidelines */ #if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) typedef int Py_ssize_t; # define PY_SSIZE_T_MAX INT_MAX # define PY_SSIZE_T_MIN INT_MIN typedef inquiry lenfunc; typedef intargfunc ssizeargfunc; typedef intintargfunc ssizessizeargfunc; typedef intobjargproc ssizeobjargproc; typedef intintobjargproc ssizessizeobjargproc; typedef getreadbufferproc readbufferproc; typedef getwritebufferproc writebufferproc; typedef getsegcountproc segcountproc; typedef getcharbufferproc charbufferproc; static long PyNumber_AsSsize_t (PyObject *x, void *SWIGUNUSEDPARM(exc)) { long result = 0; PyObject *i = PyNumber_Int(x); if (i) { result = PyInt_AsLong(i); Py_DECREF(i); } return result; } #endif #if PY_VERSION_HEX < 0x02050000 #define PyInt_FromSize_t(x) PyInt_FromLong((long)x) #endif #if PY_VERSION_HEX < 0x02040000 #define Py_VISIT(op) \ do { \ if (op) { \ int vret = visit((op), arg); \ if (vret) \ return vret; \ } \ } while (0) #endif #if PY_VERSION_HEX < 0x02030000 typedef struct { PyTypeObject type; PyNumberMethods as_number; PyMappingMethods as_mapping; PySequenceMethods as_sequence; PyBufferProcs as_buffer; PyObject *name, *slots; } PyHeapTypeObject; #endif #if PY_VERSION_HEX < 0x02030000 typedef destructor freefunc; #endif #if ((PY_MAJOR_VERSION == 2 && PY_MINOR_VERSION > 6) || \ (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION > 0) || \ (PY_MAJOR_VERSION > 3)) # define SWIGPY_USE_CAPSULE # define SWIGPY_CAPSULE_NAME ((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION ".type_pointer_capsule" SWIG_TYPE_TABLE_NAME) #endif #if PY_VERSION_HEX < 0x03020000 #define PyDescr_TYPE(x) (((PyDescrObject *)(x))->d_type) #define PyDescr_NAME(x) (((PyDescrObject *)(x))->d_name) #endif /* ----------------------------------------------------------------------------- * error manipulation * ----------------------------------------------------------------------------- */ SWIGRUNTIME PyObject* SWIG_Python_ErrorType(int code) { PyObject* type = 0; switch(code) { case SWIG_MemoryError: type = PyExc_MemoryError; break; case SWIG_IOError: type = PyExc_IOError; break; case SWIG_RuntimeError: type = PyExc_RuntimeError; break; case SWIG_IndexError: type = PyExc_IndexError; break; case SWIG_TypeError: type = PyExc_TypeError; break; case SWIG_DivisionByZero: type = 
PyExc_ZeroDivisionError; break; case SWIG_OverflowError: type = PyExc_OverflowError; break; case SWIG_SyntaxError: type = PyExc_SyntaxError; break; case SWIG_ValueError: type = PyExc_ValueError; break; case SWIG_SystemError: type = PyExc_SystemError; break; case SWIG_AttributeError: type = PyExc_AttributeError; break; default: type = PyExc_RuntimeError; } return type; } SWIGRUNTIME void SWIG_Python_AddErrorMsg(const char* mesg) { PyObject *type = 0; PyObject *value = 0; PyObject *traceback = 0; if (PyErr_Occurred()) PyErr_Fetch(&type, &value, &traceback); if (value) { char *tmp; PyObject *old_str = PyObject_Str(value); PyErr_Clear(); Py_XINCREF(type); PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); SWIG_Python_str_DelForPy3(tmp); Py_DECREF(old_str); Py_DECREF(value); } else { PyErr_SetString(PyExc_RuntimeError, mesg); } } #if defined(SWIG_PYTHON_NO_THREADS) # if defined(SWIG_PYTHON_THREADS) # undef SWIG_PYTHON_THREADS # endif #endif #if defined(SWIG_PYTHON_THREADS) /* Threading support is enabled */ # if !defined(SWIG_PYTHON_USE_GIL) && !defined(SWIG_PYTHON_NO_USE_GIL) # if (PY_VERSION_HEX >= 0x02030000) /* For 2.3 or later, use the PyGILState calls */ # define SWIG_PYTHON_USE_GIL # endif # endif # if defined(SWIG_PYTHON_USE_GIL) /* Use PyGILState threads calls */ # ifndef SWIG_PYTHON_INITIALIZE_THREADS # define SWIG_PYTHON_INITIALIZE_THREADS PyEval_InitThreads() # endif # ifdef __cplusplus /* C++ code */ class SWIG_Python_Thread_Block { bool status; PyGILState_STATE state; public: void end() { if (status) { PyGILState_Release(state); status = false;} } SWIG_Python_Thread_Block() : status(true), state(PyGILState_Ensure()) {} ~SWIG_Python_Thread_Block() { end(); } }; class SWIG_Python_Thread_Allow { bool status; PyThreadState *save; public: void end() { if (status) { PyEval_RestoreThread(save); status = false; }} SWIG_Python_Thread_Allow() : status(true), save(PyEval_SaveThread()) {} ~SWIG_Python_Thread_Allow() { end(); } }; # define SWIG_PYTHON_THREAD_BEGIN_BLOCK SWIG_Python_Thread_Block _swig_thread_block # define SWIG_PYTHON_THREAD_END_BLOCK _swig_thread_block.end() # define SWIG_PYTHON_THREAD_BEGIN_ALLOW SWIG_Python_Thread_Allow _swig_thread_allow # define SWIG_PYTHON_THREAD_END_ALLOW _swig_thread_allow.end() # else /* C code */ # define SWIG_PYTHON_THREAD_BEGIN_BLOCK PyGILState_STATE _swig_thread_block = PyGILState_Ensure() # define SWIG_PYTHON_THREAD_END_BLOCK PyGILState_Release(_swig_thread_block) # define SWIG_PYTHON_THREAD_BEGIN_ALLOW PyThreadState *_swig_thread_allow = PyEval_SaveThread() # define SWIG_PYTHON_THREAD_END_ALLOW PyEval_RestoreThread(_swig_thread_allow) # endif # else /* Old thread way, not implemented, user must provide it */ # if !defined(SWIG_PYTHON_INITIALIZE_THREADS) # define SWIG_PYTHON_INITIALIZE_THREADS # endif # if !defined(SWIG_PYTHON_THREAD_BEGIN_BLOCK) # define SWIG_PYTHON_THREAD_BEGIN_BLOCK # endif # if !defined(SWIG_PYTHON_THREAD_END_BLOCK) # define SWIG_PYTHON_THREAD_END_BLOCK # endif # if !defined(SWIG_PYTHON_THREAD_BEGIN_ALLOW) # define SWIG_PYTHON_THREAD_BEGIN_ALLOW # endif # if !defined(SWIG_PYTHON_THREAD_END_ALLOW) # define SWIG_PYTHON_THREAD_END_ALLOW # endif # endif #else /* No thread support */ # define SWIG_PYTHON_INITIALIZE_THREADS # define SWIG_PYTHON_THREAD_BEGIN_BLOCK # define SWIG_PYTHON_THREAD_END_BLOCK # define SWIG_PYTHON_THREAD_BEGIN_ALLOW # define SWIG_PYTHON_THREAD_END_ALLOW #endif /* ----------------------------------------------------------------------------- * Python API portion that goes into the runtime 
* ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #endif /* ----------------------------------------------------------------------------- * Constant declarations * ----------------------------------------------------------------------------- */ /* Constant Types */ #define SWIG_PY_POINTER 4 #define SWIG_PY_BINARY 5 /* Constant information structure */ typedef struct swig_const_info { int type; char *name; long lvalue; double dvalue; void *pvalue; swig_type_info **ptype; } swig_const_info; /* ----------------------------------------------------------------------------- * Wrapper of PyInstanceMethod_New() used in Python 3 * It is exported to the generated module, used for -fastproxy * ----------------------------------------------------------------------------- */ #if PY_VERSION_HEX >= 0x03000000 SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *SWIGUNUSEDPARM(self), PyObject *func) { return PyInstanceMethod_New(func); } #else SWIGRUNTIME PyObject* SWIG_PyInstanceMethod_New(PyObject *SWIGUNUSEDPARM(self), PyObject *SWIGUNUSEDPARM(func)) { return NULL; } #endif #ifdef __cplusplus } #endif /* ----------------------------------------------------------------------------- * pyrun.swg * * This file contains the runtime support for Python modules * and includes code for managing global variables and pointer * type checking. * * ----------------------------------------------------------------------------- */ /* Common SWIG API */ /* for raw pointers */ #define SWIG_Python_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, 0) #define SWIG_ConvertPtr(obj, pptr, type, flags) SWIG_Python_ConvertPtr(obj, pptr, type, flags) #define SWIG_ConvertPtrAndOwn(obj,pptr,type,flags,own) SWIG_Python_ConvertPtrAndOwn(obj, pptr, type, flags, own) #ifdef SWIGPYTHON_BUILTIN #define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(self, ptr, type, flags) #else #define SWIG_NewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(NULL, ptr, type, flags) #endif #define SWIG_InternalNewPointerObj(ptr, type, flags) SWIG_Python_NewPointerObj(NULL, ptr, type, flags) #define SWIG_CheckImplicit(ty) SWIG_Python_CheckImplicit(ty) #define SWIG_AcquirePtr(ptr, src) SWIG_Python_AcquirePtr(ptr, src) #define swig_owntype int /* for raw packed data */ #define SWIG_ConvertPacked(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) #define SWIG_NewPackedObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) /* for class or struct pointers */ #define SWIG_ConvertInstance(obj, pptr, type, flags) SWIG_ConvertPtr(obj, pptr, type, flags) #define SWIG_NewInstanceObj(ptr, type, flags) SWIG_NewPointerObj(ptr, type, flags) /* for C or C++ function pointers */ #define SWIG_ConvertFunctionPtr(obj, pptr, type) SWIG_Python_ConvertFunctionPtr(obj, pptr, type) #define SWIG_NewFunctionPtrObj(ptr, type) SWIG_Python_NewPointerObj(NULL, ptr, type, 0) /* for C++ member pointers, ie, member methods */ #define SWIG_ConvertMember(obj, ptr, sz, ty) SWIG_Python_ConvertPacked(obj, ptr, sz, ty) #define SWIG_NewMemberObj(ptr, sz, type) SWIG_Python_NewPackedObj(ptr, sz, type) /* Runtime API */ #define SWIG_GetModule(clientdata) SWIG_Python_GetModule(clientdata) #define SWIG_SetModule(clientdata, pointer) SWIG_Python_SetModule(pointer) #define SWIG_NewClientData(obj) SwigPyClientData_New(obj) #define SWIG_SetErrorObj SWIG_Python_SetErrorObj #define SWIG_SetErrorMsg SWIG_Python_SetErrorMsg #define SWIG_ErrorType(code) 
SWIG_Python_ErrorType(code) #define SWIG_Error(code, msg) SWIG_Python_SetErrorMsg(SWIG_ErrorType(code), msg) #define SWIG_fail goto fail /* Runtime API implementation */ /* Error manipulation */ SWIGINTERN void SWIG_Python_SetErrorObj(PyObject *errtype, PyObject *obj) { SWIG_PYTHON_THREAD_BEGIN_BLOCK; PyErr_SetObject(errtype, obj); Py_DECREF(obj); SWIG_PYTHON_THREAD_END_BLOCK; } SWIGINTERN void SWIG_Python_SetErrorMsg(PyObject *errtype, const char *msg) { SWIG_PYTHON_THREAD_BEGIN_BLOCK; PyErr_SetString(errtype, msg); SWIG_PYTHON_THREAD_END_BLOCK; } #define SWIG_Python_Raise(obj, type, desc) SWIG_Python_SetErrorObj(SWIG_Python_ExceptionType(desc), obj) /* Set a constant value */ #if defined(SWIGPYTHON_BUILTIN) SWIGINTERN void SwigPyBuiltin_AddPublicSymbol(PyObject *seq, const char *key) { PyObject *s = PyString_InternFromString(key); PyList_Append(seq, s); Py_DECREF(s); } SWIGINTERN void SWIG_Python_SetConstant(PyObject *d, PyObject *public_interface, const char *name, PyObject *obj) { #if PY_VERSION_HEX < 0x02030000 PyDict_SetItemString(d, (char *)name, obj); #else PyDict_SetItemString(d, name, obj); #endif Py_DECREF(obj); if (public_interface) SwigPyBuiltin_AddPublicSymbol(public_interface, name); } #else SWIGINTERN void SWIG_Python_SetConstant(PyObject *d, const char *name, PyObject *obj) { #if PY_VERSION_HEX < 0x02030000 PyDict_SetItemString(d, (char *)name, obj); #else PyDict_SetItemString(d, name, obj); #endif Py_DECREF(obj); } #endif /* Append a value to the result obj */ SWIGINTERN PyObject* SWIG_Python_AppendOutput(PyObject* result, PyObject* obj) { #if !defined(SWIG_PYTHON_OUTPUT_TUPLE) if (!result) { result = obj; } else if (result == Py_None) { Py_DECREF(result); result = obj; } else { if (!PyList_Check(result)) { PyObject *o2 = result; result = PyList_New(1); PyList_SetItem(result, 0, o2); } PyList_Append(result,obj); Py_DECREF(obj); } return result; #else PyObject* o2; PyObject* o3; if (!result) { result = obj; } else if (result == Py_None) { Py_DECREF(result); result = obj; } else { if (!PyTuple_Check(result)) { o2 = result; result = PyTuple_New(1); PyTuple_SET_ITEM(result, 0, o2); } o3 = PyTuple_New(1); PyTuple_SET_ITEM(o3, 0, obj); o2 = result; result = PySequence_Concat(o2, o3); Py_DECREF(o2); Py_DECREF(o3); } return result; #endif } /* Unpack the argument tuple */ SWIGINTERN Py_ssize_t SWIG_Python_UnpackTuple(PyObject *args, const char *name, Py_ssize_t min, Py_ssize_t max, PyObject **objs) { if (!args) { if (!min && !max) { return 1; } else { PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got none", name, (min == max ? "" : "at least "), (int)min); return 0; } } if (!PyTuple_Check(args)) { if (min <= 1 && max >= 1) { Py_ssize_t i; objs[0] = args; for (i = 1; i < max; ++i) { objs[i] = 0; } return 2; } PyErr_SetString(PyExc_SystemError, "UnpackTuple() argument list is not a tuple"); return 0; } else { Py_ssize_t l = PyTuple_GET_SIZE(args); if (l < min) { PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", name, (min == max ? "" : "at least "), (int)min, (int)l); return 0; } else if (l > max) { PyErr_Format(PyExc_TypeError, "%s expected %s%d arguments, got %d", name, (min == max ? 
"" : "at most "), (int)max, (int)l); return 0; } else { Py_ssize_t i; for (i = 0; i < l; ++i) { objs[i] = PyTuple_GET_ITEM(args, i); } for (; l < max; ++l) { objs[l] = 0; } return i + 1; } } } /* A functor is a function object with one single object argument */ #if PY_VERSION_HEX >= 0x02020000 #define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunctionObjArgs(functor, obj, NULL); #else #define SWIG_Python_CallFunctor(functor, obj) PyObject_CallFunction(functor, "O", obj); #endif /* Helper for static pointer initialization for both C and C++ code, for example static PyObject *SWIG_STATIC_POINTER(MyVar) = NewSomething(...); */ #ifdef __cplusplus #define SWIG_STATIC_POINTER(var) var #else #define SWIG_STATIC_POINTER(var) var = 0; if (!var) var #endif /* ----------------------------------------------------------------------------- * Pointer declarations * ----------------------------------------------------------------------------- */ /* Flags for new pointer objects */ #define SWIG_POINTER_NOSHADOW (SWIG_POINTER_OWN << 1) #define SWIG_POINTER_NEW (SWIG_POINTER_NOSHADOW | SWIG_POINTER_OWN) #define SWIG_POINTER_IMPLICIT_CONV (SWIG_POINTER_DISOWN << 1) #define SWIG_BUILTIN_TP_INIT (SWIG_POINTER_OWN << 2) #define SWIG_BUILTIN_INIT (SWIG_BUILTIN_TP_INIT | SWIG_POINTER_OWN) #ifdef __cplusplus extern "C" { #endif /* How to access Py_None */ #if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__) # ifndef SWIG_PYTHON_NO_BUILD_NONE # ifndef SWIG_PYTHON_BUILD_NONE # define SWIG_PYTHON_BUILD_NONE # endif # endif #endif #ifdef SWIG_PYTHON_BUILD_NONE # ifdef Py_None # undef Py_None # define Py_None SWIG_Py_None() # endif SWIGRUNTIMEINLINE PyObject * _SWIG_Py_None(void) { PyObject *none = Py_BuildValue((char*)""); Py_DECREF(none); return none; } SWIGRUNTIME PyObject * SWIG_Py_None(void) { static PyObject *SWIG_STATIC_POINTER(none) = _SWIG_Py_None(); return none; } #endif /* The python void return value */ SWIGRUNTIMEINLINE PyObject * SWIG_Py_Void(void) { PyObject *none = Py_None; Py_INCREF(none); return none; } /* SwigPyClientData */ typedef struct { PyObject *klass; PyObject *newraw; PyObject *newargs; PyObject *destroy; int delargs; int implicitconv; PyTypeObject *pytype; } SwigPyClientData; SWIGRUNTIMEINLINE int SWIG_Python_CheckImplicit(swig_type_info *ty) { SwigPyClientData *data = (SwigPyClientData *)ty->clientdata; return data ? data->implicitconv : 0; } SWIGRUNTIMEINLINE PyObject * SWIG_Python_ExceptionType(swig_type_info *desc) { SwigPyClientData *data = desc ? (SwigPyClientData *) desc->clientdata : 0; PyObject *klass = data ? data->klass : 0; return (klass ? 
klass : PyExc_RuntimeError); } SWIGRUNTIME SwigPyClientData * SwigPyClientData_New(PyObject* obj) { if (!obj) { return 0; } else { SwigPyClientData *data = (SwigPyClientData *)malloc(sizeof(SwigPyClientData)); /* the klass element */ data->klass = obj; Py_INCREF(data->klass); /* the newraw method and newargs arguments used to create a new raw instance */ if (PyClass_Check(obj)) { data->newraw = 0; data->newargs = obj; Py_INCREF(obj); } else { #if (PY_VERSION_HEX < 0x02020000) data->newraw = 0; #else data->newraw = PyObject_GetAttrString(data->klass, (char *)"__new__"); #endif if (data->newraw) { Py_INCREF(data->newraw); data->newargs = PyTuple_New(1); PyTuple_SetItem(data->newargs, 0, obj); } else { data->newargs = obj; } Py_INCREF(data->newargs); } /* the destroy method, aka as the C++ delete method */ data->destroy = PyObject_GetAttrString(data->klass, (char *)"__swig_destroy__"); if (PyErr_Occurred()) { PyErr_Clear(); data->destroy = 0; } if (data->destroy) { int flags; Py_INCREF(data->destroy); flags = PyCFunction_GET_FLAGS(data->destroy); #ifdef METH_O data->delargs = !(flags & (METH_O)); #else data->delargs = 0; #endif } else { data->delargs = 0; } data->implicitconv = 0; data->pytype = 0; return data; } } SWIGRUNTIME void SwigPyClientData_Del(SwigPyClientData *data) { Py_XDECREF(data->newraw); Py_XDECREF(data->newargs); Py_XDECREF(data->destroy); } /* =============== SwigPyObject =====================*/ typedef struct { PyObject_HEAD void *ptr; swig_type_info *ty; int own; PyObject *next; #ifdef SWIGPYTHON_BUILTIN PyObject *dict; #endif } SwigPyObject; #ifdef SWIGPYTHON_BUILTIN SWIGRUNTIME PyObject * SwigPyObject_get___dict__(PyObject *v, PyObject *SWIGUNUSEDPARM(args)) { SwigPyObject *sobj = (SwigPyObject *)v; if (!sobj->dict) sobj->dict = PyDict_New(); Py_INCREF(sobj->dict); return sobj->dict; } #endif SWIGRUNTIME PyObject * SwigPyObject_long(SwigPyObject *v) { return PyLong_FromVoidPtr(v->ptr); } SWIGRUNTIME PyObject * SwigPyObject_format(const char* fmt, SwigPyObject *v) { PyObject *res = NULL; PyObject *args = PyTuple_New(1); if (args) { if (PyTuple_SetItem(args, 0, SwigPyObject_long(v)) == 0) { PyObject *ofmt = SWIG_Python_str_FromChar(fmt); if (ofmt) { #if PY_VERSION_HEX >= 0x03000000 res = PyUnicode_Format(ofmt,args); #else res = PyString_Format(ofmt,args); #endif Py_DECREF(ofmt); } Py_DECREF(args); } } return res; } SWIGRUNTIME PyObject * SwigPyObject_oct(SwigPyObject *v) { return SwigPyObject_format("%o",v); } SWIGRUNTIME PyObject * SwigPyObject_hex(SwigPyObject *v) { return SwigPyObject_format("%x",v); } SWIGRUNTIME PyObject * #ifdef METH_NOARGS SwigPyObject_repr(SwigPyObject *v) #else SwigPyObject_repr(SwigPyObject *v, PyObject *args) #endif { const char *name = SWIG_TypePrettyName(v->ty); PyObject *repr = SWIG_Python_str_FromFormat("", (name ? name : "unknown"), (void *)v); if (v->next) { # ifdef METH_NOARGS PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next); # else PyObject *nrep = SwigPyObject_repr((SwigPyObject *)v->next, args); # endif # if PY_VERSION_HEX >= 0x03000000 PyObject *joined = PyUnicode_Concat(repr, nrep); Py_DecRef(repr); Py_DecRef(nrep); repr = joined; # else PyString_ConcatAndDel(&repr,nrep); # endif } return repr; } SWIGRUNTIME int SwigPyObject_compare(SwigPyObject *v, SwigPyObject *w) { void *i = v->ptr; void *j = w->ptr; return (i < j) ? -1 : ((i > j) ? 1 : 0); } /* Added for Python 3.x, would it also be useful for Python 2.x? 
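   The rich comparison below only implements Py_EQ and Py_NE: two wrappers
   compare equal when SwigPyObject_compare() reports that they hold the same
   C pointer, and every ordering operator returns Py_NotImplemented so that
   Python falls back to its default handling (which, on Python 3, typically
   raises TypeError).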
*/ SWIGRUNTIME PyObject* SwigPyObject_richcompare(SwigPyObject *v, SwigPyObject *w, int op) { PyObject* res; if( op != Py_EQ && op != Py_NE ) { Py_INCREF(Py_NotImplemented); return Py_NotImplemented; } res = PyBool_FromLong( (SwigPyObject_compare(v, w)==0) == (op == Py_EQ) ? 1 : 0); return res; } SWIGRUNTIME PyTypeObject* SwigPyObject_TypeOnce(void); #ifdef SWIGPYTHON_BUILTIN static swig_type_info *SwigPyObject_stype = 0; SWIGRUNTIME PyTypeObject* SwigPyObject_type(void) { SwigPyClientData *cd; assert(SwigPyObject_stype); cd = (SwigPyClientData*) SwigPyObject_stype->clientdata; assert(cd); assert(cd->pytype); return cd->pytype; } #else SWIGRUNTIME PyTypeObject* SwigPyObject_type(void) { static PyTypeObject *SWIG_STATIC_POINTER(type) = SwigPyObject_TypeOnce(); return type; } #endif SWIGRUNTIMEINLINE int SwigPyObject_Check(PyObject *op) { #ifdef SWIGPYTHON_BUILTIN PyTypeObject *target_tp = SwigPyObject_type(); if (PyType_IsSubtype(op->ob_type, target_tp)) return 1; return (strcmp(op->ob_type->tp_name, "SwigPyObject") == 0); #else return (Py_TYPE(op) == SwigPyObject_type()) || (strcmp(Py_TYPE(op)->tp_name,"SwigPyObject") == 0); #endif } SWIGRUNTIME PyObject * SwigPyObject_New(void *ptr, swig_type_info *ty, int own); SWIGRUNTIME void SwigPyObject_dealloc(PyObject *v) { SwigPyObject *sobj = (SwigPyObject *) v; PyObject *next = sobj->next; if (sobj->own == SWIG_POINTER_OWN) { swig_type_info *ty = sobj->ty; SwigPyClientData *data = ty ? (SwigPyClientData *) ty->clientdata : 0; PyObject *destroy = data ? data->destroy : 0; if (destroy) { /* destroy is always a VARARGS method */ PyObject *res; /* PyObject_CallFunction() has the potential to silently drop the active active exception. In cases of unnamed temporary variable or where we just finished iterating over a generator StopIteration will be active right now, and this needs to remain true upon return from SwigPyObject_dealloc. So save and restore. */ PyObject *val = NULL, *type = NULL, *tb = NULL; PyErr_Fetch(&val, &type, &tb); if (data->delargs) { /* we need to create a temporary object to carry the destroy operation */ PyObject *tmp = SwigPyObject_New(sobj->ptr, ty, 0); res = SWIG_Python_CallFunctor(destroy, tmp); Py_DECREF(tmp); } else { PyCFunction meth = PyCFunction_GET_FUNCTION(destroy); PyObject *mself = PyCFunction_GET_SELF(destroy); res = ((*meth)(mself, v)); } if (!res) PyErr_WriteUnraisable(destroy); PyErr_Restore(val, type, tb); Py_XDECREF(res); } #if !defined(SWIG_PYTHON_SILENT_MEMLEAK) else { const char *name = SWIG_TypePrettyName(ty); printf("swig/python detected a memory leak of type '%s', no destructor found.\n", (name ? 
name : "unknown")); } #endif } Py_XDECREF(next); PyObject_DEL(v); } SWIGRUNTIME PyObject* SwigPyObject_append(PyObject* v, PyObject* next) { SwigPyObject *sobj = (SwigPyObject *) v; #ifndef METH_O PyObject *tmp = 0; if (!PyArg_ParseTuple(next,(char *)"O:append", &tmp)) return NULL; next = tmp; #endif if (!SwigPyObject_Check(next)) { PyErr_SetString(PyExc_TypeError, "Attempt to append a non SwigPyObject"); return NULL; } sobj->next = next; Py_INCREF(next); return SWIG_Py_Void(); } SWIGRUNTIME PyObject* #ifdef METH_NOARGS SwigPyObject_next(PyObject* v) #else SwigPyObject_next(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) #endif { SwigPyObject *sobj = (SwigPyObject *) v; if (sobj->next) { Py_INCREF(sobj->next); return sobj->next; } else { return SWIG_Py_Void(); } } SWIGINTERN PyObject* #ifdef METH_NOARGS SwigPyObject_disown(PyObject *v) #else SwigPyObject_disown(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) #endif { SwigPyObject *sobj = (SwigPyObject *)v; sobj->own = 0; return SWIG_Py_Void(); } SWIGINTERN PyObject* #ifdef METH_NOARGS SwigPyObject_acquire(PyObject *v) #else SwigPyObject_acquire(PyObject* v, PyObject *SWIGUNUSEDPARM(args)) #endif { SwigPyObject *sobj = (SwigPyObject *)v; sobj->own = SWIG_POINTER_OWN; return SWIG_Py_Void(); } SWIGINTERN PyObject* SwigPyObject_own(PyObject *v, PyObject *args) { PyObject *val = 0; #if (PY_VERSION_HEX < 0x02020000) if (!PyArg_ParseTuple(args,(char *)"|O:own",&val)) #elif (PY_VERSION_HEX < 0x02050000) if (!PyArg_UnpackTuple(args, (char *)"own", 0, 1, &val)) #else if (!PyArg_UnpackTuple(args, "own", 0, 1, &val)) #endif { return NULL; } else { SwigPyObject *sobj = (SwigPyObject *)v; PyObject *obj = PyBool_FromLong(sobj->own); if (val) { #ifdef METH_NOARGS if (PyObject_IsTrue(val)) { SwigPyObject_acquire(v); } else { SwigPyObject_disown(v); } #else if (PyObject_IsTrue(val)) { SwigPyObject_acquire(v,args); } else { SwigPyObject_disown(v,args); } #endif } return obj; } } #ifdef METH_O static PyMethodDef swigobject_methods[] = { {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_NOARGS, (char *)"releases ownership of the pointer"}, {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_NOARGS, (char *)"acquires ownership of the pointer"}, {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, {(char *)"append", (PyCFunction)SwigPyObject_append, METH_O, (char *)"appends another 'this' object"}, {(char *)"next", (PyCFunction)SwigPyObject_next, METH_NOARGS, (char *)"returns the next 'this' object"}, {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_NOARGS, (char *)"returns object representation"}, {0, 0, 0, 0} }; #else static PyMethodDef swigobject_methods[] = { {(char *)"disown", (PyCFunction)SwigPyObject_disown, METH_VARARGS, (char *)"releases ownership of the pointer"}, {(char *)"acquire", (PyCFunction)SwigPyObject_acquire, METH_VARARGS, (char *)"acquires ownership of the pointer"}, {(char *)"own", (PyCFunction)SwigPyObject_own, METH_VARARGS, (char *)"returns/sets ownership of the pointer"}, {(char *)"append", (PyCFunction)SwigPyObject_append, METH_VARARGS, (char *)"appends another 'this' object"}, {(char *)"next", (PyCFunction)SwigPyObject_next, METH_VARARGS, (char *)"returns the next 'this' object"}, {(char *)"__repr__",(PyCFunction)SwigPyObject_repr, METH_VARARGS, (char *)"returns object representation"}, {0, 0, 0, 0} }; #endif #if PY_VERSION_HEX < 0x02020000 SWIGINTERN PyObject * SwigPyObject_getattr(SwigPyObject *sobj,char *name) { return Py_FindMethod(swigobject_methods, 
(PyObject *)sobj, name); } #endif SWIGRUNTIME PyTypeObject* SwigPyObject_TypeOnce(void) { static char swigobject_doc[] = "Swig object carries a C/C++ instance pointer"; static PyNumberMethods SwigPyObject_as_number = { (binaryfunc)0, /*nb_add*/ (binaryfunc)0, /*nb_subtract*/ (binaryfunc)0, /*nb_multiply*/ /* nb_divide removed in Python 3 */ #if PY_VERSION_HEX < 0x03000000 (binaryfunc)0, /*nb_divide*/ #endif (binaryfunc)0, /*nb_remainder*/ (binaryfunc)0, /*nb_divmod*/ (ternaryfunc)0,/*nb_power*/ (unaryfunc)0, /*nb_negative*/ (unaryfunc)0, /*nb_positive*/ (unaryfunc)0, /*nb_absolute*/ (inquiry)0, /*nb_nonzero*/ 0, /*nb_invert*/ 0, /*nb_lshift*/ 0, /*nb_rshift*/ 0, /*nb_and*/ 0, /*nb_xor*/ 0, /*nb_or*/ #if PY_VERSION_HEX < 0x03000000 0, /*nb_coerce*/ #endif (unaryfunc)SwigPyObject_long, /*nb_int*/ #if PY_VERSION_HEX < 0x03000000 (unaryfunc)SwigPyObject_long, /*nb_long*/ #else 0, /*nb_reserved*/ #endif (unaryfunc)0, /*nb_float*/ #if PY_VERSION_HEX < 0x03000000 (unaryfunc)SwigPyObject_oct, /*nb_oct*/ (unaryfunc)SwigPyObject_hex, /*nb_hex*/ #endif #if PY_VERSION_HEX >= 0x03050000 /* 3.5 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_matrix_multiply */ #elif PY_VERSION_HEX >= 0x03000000 /* 3.0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index, nb_inplace_divide removed */ #elif PY_VERSION_HEX >= 0x02050000 /* 2.5.0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_index */ #elif PY_VERSION_HEX >= 0x02020000 /* 2.2.0 */ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_true_divide */ #elif PY_VERSION_HEX >= 0x02000000 /* 2.0.0 */ 0,0,0,0,0,0,0,0,0,0,0 /* nb_inplace_add -> nb_inplace_or */ #endif }; static PyTypeObject swigpyobject_type; static int type_init = 0; if (!type_init) { const PyTypeObject tmp = { /* PyObject header changed in Python 3 */ #if PY_VERSION_HEX >= 0x03000000 PyVarObject_HEAD_INIT(NULL, 0) #else PyObject_HEAD_INIT(NULL) 0, /* ob_size */ #endif (char *)"SwigPyObject", /* tp_name */ sizeof(SwigPyObject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)SwigPyObject_dealloc, /* tp_dealloc */ 0, /* tp_print */ #if PY_VERSION_HEX < 0x02020000 (getattrfunc)SwigPyObject_getattr, /* tp_getattr */ #else (getattrfunc)0, /* tp_getattr */ #endif (setattrfunc)0, /* tp_setattr */ #if PY_VERSION_HEX >= 0x03000000 0, /* tp_reserved in 3.0.1, tp_compare in 3.0.0 but not used */ #else (cmpfunc)SwigPyObject_compare, /* tp_compare */ #endif (reprfunc)SwigPyObject_repr, /* tp_repr */ &SwigPyObject_as_number, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ (hashfunc)0, /* tp_hash */ (ternaryfunc)0, /* tp_call */ 0, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ swigobject_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ (richcmpfunc)SwigPyObject_richcompare,/* tp_richcompare */ 0, /* tp_weaklistoffset */ #if PY_VERSION_HEX >= 0x02020000 0, /* tp_iter */ 0, /* tp_iternext */ swigobject_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ 0, /* tp_free */ 0, /* tp_is_gc */ 0, /* tp_bases */ 0, /* tp_mro */ 0, /* tp_cache */ 0, /* tp_subclasses */ 0, /* tp_weaklist */ #endif #if PY_VERSION_HEX >= 0x02030000 0, /* tp_del */ #endif #if PY_VERSION_HEX >= 0x02060000 0, /* tp_version_tag */ #endif #if PY_VERSION_HEX >= 0x03040000 0, /* tp_finalize */ #endif #ifdef COUNT_ALLOCS 
0, /* tp_allocs */ 0, /* tp_frees */ 0, /* tp_maxalloc */ #if PY_VERSION_HEX >= 0x02050000 0, /* tp_prev */ #endif 0 /* tp_next */ #endif }; swigpyobject_type = tmp; type_init = 1; #if PY_VERSION_HEX < 0x02020000 swigpyobject_type.ob_type = &PyType_Type; #else if (PyType_Ready(&swigpyobject_type) < 0) return NULL; #endif } return &swigpyobject_type; } SWIGRUNTIME PyObject * SwigPyObject_New(void *ptr, swig_type_info *ty, int own) { SwigPyObject *sobj = PyObject_NEW(SwigPyObject, SwigPyObject_type()); if (sobj) { sobj->ptr = ptr; sobj->ty = ty; sobj->own = own; sobj->next = 0; } return (PyObject *)sobj; } /* ----------------------------------------------------------------------------- * Implements a simple Swig Packed type, and use it instead of string * ----------------------------------------------------------------------------- */ typedef struct { PyObject_HEAD void *pack; swig_type_info *ty; size_t size; } SwigPyPacked; SWIGRUNTIME int SwigPyPacked_print(SwigPyPacked *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { char result[SWIG_BUFFER_SIZE]; fputs("pack, v->size, 0, sizeof(result))) { fputs("at ", fp); fputs(result, fp); } fputs(v->ty->name,fp); fputs(">", fp); return 0; } SWIGRUNTIME PyObject * SwigPyPacked_repr(SwigPyPacked *v) { char result[SWIG_BUFFER_SIZE]; if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))) { return SWIG_Python_str_FromFormat("", result, v->ty->name); } else { return SWIG_Python_str_FromFormat("", v->ty->name); } } SWIGRUNTIME PyObject * SwigPyPacked_str(SwigPyPacked *v) { char result[SWIG_BUFFER_SIZE]; if (SWIG_PackDataName(result, v->pack, v->size, 0, sizeof(result))){ return SWIG_Python_str_FromFormat("%s%s", result, v->ty->name); } else { return SWIG_Python_str_FromChar(v->ty->name); } } SWIGRUNTIME int SwigPyPacked_compare(SwigPyPacked *v, SwigPyPacked *w) { size_t i = v->size; size_t j = w->size; int s = (i < j) ? -1 : ((i > j) ? 1 : 0); return s ? 
s : strncmp((char *)v->pack, (char *)w->pack, 2*v->size); } SWIGRUNTIME PyTypeObject* SwigPyPacked_TypeOnce(void); SWIGRUNTIME PyTypeObject* SwigPyPacked_type(void) { static PyTypeObject *SWIG_STATIC_POINTER(type) = SwigPyPacked_TypeOnce(); return type; } SWIGRUNTIMEINLINE int SwigPyPacked_Check(PyObject *op) { return ((op)->ob_type == SwigPyPacked_TypeOnce()) || (strcmp((op)->ob_type->tp_name,"SwigPyPacked") == 0); } SWIGRUNTIME void SwigPyPacked_dealloc(PyObject *v) { if (SwigPyPacked_Check(v)) { SwigPyPacked *sobj = (SwigPyPacked *) v; free(sobj->pack); } PyObject_DEL(v); } SWIGRUNTIME PyTypeObject* SwigPyPacked_TypeOnce(void) { static char swigpacked_doc[] = "Swig object carries a C/C++ instance pointer"; static PyTypeObject swigpypacked_type; static int type_init = 0; if (!type_init) { const PyTypeObject tmp = { /* PyObject header changed in Python 3 */ #if PY_VERSION_HEX>=0x03000000 PyVarObject_HEAD_INIT(NULL, 0) #else PyObject_HEAD_INIT(NULL) 0, /* ob_size */ #endif (char *)"SwigPyPacked", /* tp_name */ sizeof(SwigPyPacked), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor)SwigPyPacked_dealloc, /* tp_dealloc */ (printfunc)SwigPyPacked_print, /* tp_print */ (getattrfunc)0, /* tp_getattr */ (setattrfunc)0, /* tp_setattr */ #if PY_VERSION_HEX>=0x03000000 0, /* tp_reserved in 3.0.1 */ #else (cmpfunc)SwigPyPacked_compare, /* tp_compare */ #endif (reprfunc)SwigPyPacked_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ (hashfunc)0, /* tp_hash */ (ternaryfunc)0, /* tp_call */ (reprfunc)SwigPyPacked_str, /* tp_str */ PyObject_GenericGetAttr, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ Py_TPFLAGS_DEFAULT, /* tp_flags */ swigpacked_doc, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ #if PY_VERSION_HEX >= 0x02020000 0, /* tp_iter */ 0, /* tp_iternext */ 0, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ 0, /* tp_init */ 0, /* tp_alloc */ 0, /* tp_new */ 0, /* tp_free */ 0, /* tp_is_gc */ 0, /* tp_bases */ 0, /* tp_mro */ 0, /* tp_cache */ 0, /* tp_subclasses */ 0, /* tp_weaklist */ #endif #if PY_VERSION_HEX >= 0x02030000 0, /* tp_del */ #endif #if PY_VERSION_HEX >= 0x02060000 0, /* tp_version_tag */ #endif #if PY_VERSION_HEX >= 0x03040000 0, /* tp_finalize */ #endif #ifdef COUNT_ALLOCS 0, /* tp_allocs */ 0, /* tp_frees */ 0, /* tp_maxalloc */ #if PY_VERSION_HEX >= 0x02050000 0, /* tp_prev */ #endif 0 /* tp_next */ #endif }; swigpypacked_type = tmp; type_init = 1; #if PY_VERSION_HEX < 0x02020000 swigpypacked_type.ob_type = &PyType_Type; #else if (PyType_Ready(&swigpypacked_type) < 0) return NULL; #endif } return &swigpypacked_type; } SWIGRUNTIME PyObject * SwigPyPacked_New(void *ptr, size_t size, swig_type_info *ty) { SwigPyPacked *sobj = PyObject_NEW(SwigPyPacked, SwigPyPacked_type()); if (sobj) { void *pack = malloc(size); if (pack) { memcpy(pack, ptr, size); sobj->pack = pack; sobj->ty = ty; sobj->size = size; } else { PyObject_DEL((PyObject *) sobj); sobj = 0; } } return (PyObject *) sobj; } SWIGRUNTIME swig_type_info * SwigPyPacked_UnpackData(PyObject *obj, void *ptr, size_t size) { if (SwigPyPacked_Check(obj)) { SwigPyPacked *sobj = (SwigPyPacked *)obj; if (sobj->size != size) return 0; memcpy(ptr, sobj->pack, size); return sobj->ty; } else { return 0; } } /* ----------------------------------------------------------------------------- * pointers/data manipulation * 
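 *
 * The helpers that follow locate the SwigPyObject which actually carries the
 * C pointer for a proxy instance. The pointer carrier is stored under the
 * attribute name "this" (created once by SWIG_This()), and
 * SWIG_Python_GetSwigThis() finds it either directly (when the argument
 * already is a SwigPyObject), through a weak reference proxy, through the
 * instance __dict__ slot, or via a plain attribute lookup as a last resort.
 *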
----------------------------------------------------------------------------- */ SWIGRUNTIMEINLINE PyObject * _SWIG_This(void) { return SWIG_Python_str_FromChar("this"); } static PyObject *swig_this = NULL; SWIGRUNTIME PyObject * SWIG_This(void) { if (swig_this == NULL) swig_this = _SWIG_This(); return swig_this; } /* #define SWIG_PYTHON_SLOW_GETSET_THIS */ /* TODO: I don't know how to implement the fast getset in Python 3 right now */ #if PY_VERSION_HEX>=0x03000000 #define SWIG_PYTHON_SLOW_GETSET_THIS #endif SWIGRUNTIME SwigPyObject * SWIG_Python_GetSwigThis(PyObject *pyobj) { PyObject *obj; if (SwigPyObject_Check(pyobj)) return (SwigPyObject *) pyobj; #ifdef SWIGPYTHON_BUILTIN (void)obj; # ifdef PyWeakref_CheckProxy if (PyWeakref_CheckProxy(pyobj)) { pyobj = PyWeakref_GET_OBJECT(pyobj); if (pyobj && SwigPyObject_Check(pyobj)) return (SwigPyObject*) pyobj; } # endif return NULL; #else obj = 0; #if (!defined(SWIG_PYTHON_SLOW_GETSET_THIS) && (PY_VERSION_HEX >= 0x02030000)) if (PyInstance_Check(pyobj)) { obj = _PyInstance_Lookup(pyobj, SWIG_This()); } else { PyObject **dictptr = _PyObject_GetDictPtr(pyobj); if (dictptr != NULL) { PyObject *dict = *dictptr; obj = dict ? PyDict_GetItem(dict, SWIG_This()) : 0; } else { #ifdef PyWeakref_CheckProxy if (PyWeakref_CheckProxy(pyobj)) { PyObject *wobj = PyWeakref_GET_OBJECT(pyobj); return wobj ? SWIG_Python_GetSwigThis(wobj) : 0; } #endif obj = PyObject_GetAttr(pyobj,SWIG_This()); if (obj) { Py_DECREF(obj); } else { if (PyErr_Occurred()) PyErr_Clear(); return 0; } } } #else obj = PyObject_GetAttr(pyobj,SWIG_This()); if (obj) { Py_DECREF(obj); } else { if (PyErr_Occurred()) PyErr_Clear(); return 0; } #endif if (obj && !SwigPyObject_Check(obj)) { /* a PyObject is called 'this', try to get the 'real this' SwigPyObject from it */ return SWIG_Python_GetSwigThis(obj); } return (SwigPyObject *)obj; #endif } /* Acquire a pointer value */ SWIGRUNTIME int SWIG_Python_AcquirePtr(PyObject *obj, int own) { if (own == SWIG_POINTER_OWN) { SwigPyObject *sobj = SWIG_Python_GetSwigThis(obj); if (sobj) { int oldown = sobj->own; sobj->own = own; return oldown; } } return 0; } /* Convert a pointer value */ SWIGRUNTIME int SWIG_Python_ConvertPtrAndOwn(PyObject *obj, void **ptr, swig_type_info *ty, int flags, int *own) { int res; SwigPyObject *sobj; int implicit_conv = (flags & SWIG_POINTER_IMPLICIT_CONV) != 0; if (!obj) return SWIG_ERROR; if (obj == Py_None && !implicit_conv) { if (ptr) *ptr = 0; return SWIG_OK; } res = SWIG_ERROR; sobj = SWIG_Python_GetSwigThis(obj); if (own) *own = 0; while (sobj) { void *vptr = sobj->ptr; if (ty) { swig_type_info *to = sobj->ty; if (to == ty) { /* no type cast needed */ if (ptr) *ptr = vptr; break; } else { swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); if (!tc) { sobj = (SwigPyObject *)sobj->next; } else { if (ptr) { int newmemory = 0; *ptr = SWIG_TypeCast(tc,vptr,&newmemory); if (newmemory == SWIG_CAST_NEW_MEMORY) { assert(own); /* badly formed typemap which will lead to a memory leak - it must set and use own to delete *ptr */ if (own) *own = *own | SWIG_CAST_NEW_MEMORY; } } break; } } } else { if (ptr) *ptr = vptr; break; } } if (sobj) { if (own) *own = *own | sobj->own; if (flags & SWIG_POINTER_DISOWN) { sobj->own = 0; } res = SWIG_OK; } else { if (implicit_conv) { SwigPyClientData *data = ty ? 
(SwigPyClientData *) ty->clientdata : 0; if (data && !data->implicitconv) { PyObject *klass = data->klass; if (klass) { PyObject *impconv; data->implicitconv = 1; /* avoid recursion and call 'explicit' constructors*/ impconv = SWIG_Python_CallFunctor(klass, obj); data->implicitconv = 0; if (PyErr_Occurred()) { PyErr_Clear(); impconv = 0; } if (impconv) { SwigPyObject *iobj = SWIG_Python_GetSwigThis(impconv); if (iobj) { void *vptr; res = SWIG_Python_ConvertPtrAndOwn((PyObject*)iobj, &vptr, ty, 0, 0); if (SWIG_IsOK(res)) { if (ptr) { *ptr = vptr; /* transfer the ownership to 'ptr' */ iobj->own = 0; res = SWIG_AddCast(res); res = SWIG_AddNewMask(res); } else { res = SWIG_AddCast(res); } } } Py_DECREF(impconv); } } } } if (!SWIG_IsOK(res) && obj == Py_None) { if (ptr) *ptr = 0; if (PyErr_Occurred()) PyErr_Clear(); res = SWIG_OK; } } return res; } /* Convert a function ptr value */ SWIGRUNTIME int SWIG_Python_ConvertFunctionPtr(PyObject *obj, void **ptr, swig_type_info *ty) { if (!PyCFunction_Check(obj)) { return SWIG_ConvertPtr(obj, ptr, ty, 0); } else { void *vptr = 0; /* here we get the method pointer for callbacks */ const char *doc = (((PyCFunctionObject *)obj) -> m_ml -> ml_doc); const char *desc = doc ? strstr(doc, "swig_ptr: ") : 0; if (desc) desc = ty ? SWIG_UnpackVoidPtr(desc + 10, &vptr, ty->name) : 0; if (!desc) return SWIG_ERROR; if (ty) { swig_cast_info *tc = SWIG_TypeCheck(desc,ty); if (tc) { int newmemory = 0; *ptr = SWIG_TypeCast(tc,vptr,&newmemory); assert(!newmemory); /* newmemory handling not yet implemented */ } else { return SWIG_ERROR; } } else { *ptr = vptr; } return SWIG_OK; } } /* Convert a packed value value */ SWIGRUNTIME int SWIG_Python_ConvertPacked(PyObject *obj, void *ptr, size_t sz, swig_type_info *ty) { swig_type_info *to = SwigPyPacked_UnpackData(obj, ptr, sz); if (!to) return SWIG_ERROR; if (ty) { if (to != ty) { /* check type cast? */ swig_cast_info *tc = SWIG_TypeCheck(to->name,ty); if (!tc) return SWIG_ERROR; } } return SWIG_OK; } /* ----------------------------------------------------------------------------- * Create a new pointer object * ----------------------------------------------------------------------------- */ /* Create a new instance object, without calling __init__, and set the 'this' attribute. 
*/ SWIGRUNTIME PyObject* SWIG_Python_NewShadowInstance(SwigPyClientData *data, PyObject *swig_this) { #if (PY_VERSION_HEX >= 0x02020000) PyObject *inst = 0; PyObject *newraw = data->newraw; if (newraw) { inst = PyObject_Call(newraw, data->newargs, NULL); if (inst) { #if !defined(SWIG_PYTHON_SLOW_GETSET_THIS) PyObject **dictptr = _PyObject_GetDictPtr(inst); if (dictptr != NULL) { PyObject *dict = *dictptr; if (dict == NULL) { dict = PyDict_New(); *dictptr = dict; PyDict_SetItem(dict, SWIG_This(), swig_this); } } #else PyObject *key = SWIG_This(); PyObject_SetAttr(inst, key, swig_this); #endif } } else { #if PY_VERSION_HEX >= 0x03000000 inst = ((PyTypeObject*) data->newargs)->tp_new((PyTypeObject*) data->newargs, Py_None, Py_None); if (inst) { PyObject_SetAttr(inst, SWIG_This(), swig_this); Py_TYPE(inst)->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; } #else PyObject *dict = PyDict_New(); if (dict) { PyDict_SetItem(dict, SWIG_This(), swig_this); inst = PyInstance_NewRaw(data->newargs, dict); Py_DECREF(dict); } #endif } return inst; #else #if (PY_VERSION_HEX >= 0x02010000) PyObject *inst = 0; PyObject *dict = PyDict_New(); if (dict) { PyDict_SetItem(dict, SWIG_This(), swig_this); inst = PyInstance_NewRaw(data->newargs, dict); Py_DECREF(dict); } return (PyObject *) inst; #else PyInstanceObject *inst = PyObject_NEW(PyInstanceObject, &PyInstance_Type); if (inst == NULL) { return NULL; } inst->in_class = (PyClassObject *)data->newargs; Py_INCREF(inst->in_class); inst->in_dict = PyDict_New(); if (inst->in_dict == NULL) { Py_DECREF(inst); return NULL; } #ifdef Py_TPFLAGS_HAVE_WEAKREFS inst->in_weakreflist = NULL; #endif #ifdef Py_TPFLAGS_GC PyObject_GC_Init(inst); #endif PyDict_SetItem(inst->in_dict, SWIG_This(), swig_this); return (PyObject *) inst; #endif #endif } SWIGRUNTIME void SWIG_Python_SetSwigThis(PyObject *inst, PyObject *swig_this) { PyObject *dict; #if (PY_VERSION_HEX >= 0x02020000) && !defined(SWIG_PYTHON_SLOW_GETSET_THIS) PyObject **dictptr = _PyObject_GetDictPtr(inst); if (dictptr != NULL) { dict = *dictptr; if (dict == NULL) { dict = PyDict_New(); *dictptr = dict; } PyDict_SetItem(dict, SWIG_This(), swig_this); return; } #endif dict = PyObject_GetAttrString(inst, (char*)"__dict__"); PyDict_SetItem(dict, SWIG_This(), swig_this); Py_DECREF(dict); } SWIGINTERN PyObject * SWIG_Python_InitShadowInstance(PyObject *args) { PyObject *obj[2]; if (!SWIG_Python_UnpackTuple(args, "swiginit", 2, 2, obj)) { return NULL; } else { SwigPyObject *sthis = SWIG_Python_GetSwigThis(obj[0]); if (sthis) { SwigPyObject_append((PyObject*) sthis, obj[1]); } else { SWIG_Python_SetSwigThis(obj[0], obj[1]); } return SWIG_Py_Void(); } } /* Create a new pointer object */ SWIGRUNTIME PyObject * SWIG_Python_NewPointerObj(PyObject *self, void *ptr, swig_type_info *type, int flags) { SwigPyClientData *clientdata; PyObject * robj; int own; if (!ptr) return SWIG_Py_Void(); clientdata = type ? (SwigPyClientData *)(type->clientdata) : 0; own = (flags & SWIG_POINTER_OWN) ? 
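/* own is SWIG_POINTER_OWN only when the caller asked for it: the
   SwigPyObject created below records it so that deallocation knows
   whether the C pointer must be destroyed, and the proxy instance
   built further down via SWIG_Python_NewShadowInstance() merely stores
   that SwigPyObject under its 'this' attribute. */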
SWIG_POINTER_OWN : 0; if (clientdata && clientdata->pytype) { SwigPyObject *newobj; if (flags & SWIG_BUILTIN_TP_INIT) { newobj = (SwigPyObject*) self; if (newobj->ptr) { PyObject *next_self = clientdata->pytype->tp_alloc(clientdata->pytype, 0); while (newobj->next) newobj = (SwigPyObject *) newobj->next; newobj->next = next_self; newobj = (SwigPyObject *)next_self; #ifdef SWIGPYTHON_BUILTIN newobj->dict = 0; #endif } } else { newobj = PyObject_New(SwigPyObject, clientdata->pytype); #ifdef SWIGPYTHON_BUILTIN newobj->dict = 0; #endif } if (newobj) { newobj->ptr = ptr; newobj->ty = type; newobj->own = own; newobj->next = 0; return (PyObject*) newobj; } return SWIG_Py_Void(); } assert(!(flags & SWIG_BUILTIN_TP_INIT)); robj = SwigPyObject_New(ptr, type, own); if (robj && clientdata && !(flags & SWIG_POINTER_NOSHADOW)) { PyObject *inst = SWIG_Python_NewShadowInstance(clientdata, robj); Py_DECREF(robj); robj = inst; } return robj; } /* Create a new packed object */ SWIGRUNTIMEINLINE PyObject * SWIG_Python_NewPackedObj(void *ptr, size_t sz, swig_type_info *type) { return ptr ? SwigPyPacked_New((void *) ptr, sz, type) : SWIG_Py_Void(); } /* -----------------------------------------------------------------------------* * Get type list * -----------------------------------------------------------------------------*/ #ifdef SWIG_LINK_RUNTIME void *SWIG_ReturnGlobalTypeList(void *); #endif SWIGRUNTIME swig_module_info * SWIG_Python_GetModule(void *SWIGUNUSEDPARM(clientdata)) { static void *type_pointer = (void *)0; /* first check if module already created */ if (!type_pointer) { #ifdef SWIG_LINK_RUNTIME type_pointer = SWIG_ReturnGlobalTypeList((void *)0); #else # ifdef SWIGPY_USE_CAPSULE type_pointer = PyCapsule_Import(SWIGPY_CAPSULE_NAME, 0); # else type_pointer = PyCObject_Import((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME); # endif if (PyErr_Occurred()) { PyErr_Clear(); type_pointer = (void *)0; } #endif } return (swig_module_info *) type_pointer; } #if PY_MAJOR_VERSION < 2 /* PyModule_AddObject function was introduced in Python 2.0. The following function is copied out of Python/modsupport.c in python version 2.3.4 */ SWIGINTERN int PyModule_AddObject(PyObject *m, char *name, PyObject *o) { PyObject *dict; if (!PyModule_Check(m)) { PyErr_SetString(PyExc_TypeError, "PyModule_AddObject() needs module as first arg"); return SWIG_ERROR; } if (!o) { PyErr_SetString(PyExc_TypeError, "PyModule_AddObject() needs non-NULL value"); return SWIG_ERROR; } dict = PyModule_GetDict(m); if (dict == NULL) { /* Internal error -- modules must have a dict! 
*/ PyErr_Format(PyExc_SystemError, "module '%s' has no __dict__", PyModule_GetName(m)); return SWIG_ERROR; } if (PyDict_SetItemString(dict, name, o)) return SWIG_ERROR; Py_DECREF(o); return SWIG_OK; } #endif SWIGRUNTIME void #ifdef SWIGPY_USE_CAPSULE SWIG_Python_DestroyModule(PyObject *obj) #else SWIG_Python_DestroyModule(void *vptr) #endif { #ifdef SWIGPY_USE_CAPSULE swig_module_info *swig_module = (swig_module_info *) PyCapsule_GetPointer(obj, SWIGPY_CAPSULE_NAME); #else swig_module_info *swig_module = (swig_module_info *) vptr; #endif swig_type_info **types = swig_module->types; size_t i; for (i =0; i < swig_module->size; ++i) { swig_type_info *ty = types[i]; if (ty->owndata) { SwigPyClientData *data = (SwigPyClientData *) ty->clientdata; if (data) SwigPyClientData_Del(data); } } Py_DECREF(SWIG_This()); swig_this = NULL; } SWIGRUNTIME void SWIG_Python_SetModule(swig_module_info *swig_module) { #if PY_VERSION_HEX >= 0x03000000 /* Add a dummy module object into sys.modules */ PyObject *module = PyImport_AddModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION); #else static PyMethodDef swig_empty_runtime_method_table[] = { {NULL, NULL, 0, NULL} }; /* Sentinel */ PyObject *module = Py_InitModule((char*)"swig_runtime_data" SWIG_RUNTIME_VERSION, swig_empty_runtime_method_table); #endif #ifdef SWIGPY_USE_CAPSULE PyObject *pointer = PyCapsule_New((void *) swig_module, SWIGPY_CAPSULE_NAME, SWIG_Python_DestroyModule); if (pointer && module) { PyModule_AddObject(module, (char*)"type_pointer_capsule" SWIG_TYPE_TABLE_NAME, pointer); } else { Py_XDECREF(pointer); } #else PyObject *pointer = PyCObject_FromVoidPtr((void *) swig_module, SWIG_Python_DestroyModule); if (pointer && module) { PyModule_AddObject(module, (char*)"type_pointer" SWIG_TYPE_TABLE_NAME, pointer); } else { Py_XDECREF(pointer); } #endif } /* The python cached type query */ SWIGRUNTIME PyObject * SWIG_Python_TypeCache(void) { static PyObject *SWIG_STATIC_POINTER(cache) = PyDict_New(); return cache; } SWIGRUNTIME swig_type_info * SWIG_Python_TypeQuery(const char *type) { PyObject *cache = SWIG_Python_TypeCache(); PyObject *key = SWIG_Python_str_FromChar(type); PyObject *obj = PyDict_GetItem(cache, key); swig_type_info *descriptor; if (obj) { #ifdef SWIGPY_USE_CAPSULE descriptor = (swig_type_info *) PyCapsule_GetPointer(obj, NULL); #else descriptor = (swig_type_info *) PyCObject_AsVoidPtr(obj); #endif } else { swig_module_info *swig_module = SWIG_GetModule(0); descriptor = SWIG_TypeQueryModule(swig_module, swig_module, type); if (descriptor) { #ifdef SWIGPY_USE_CAPSULE obj = PyCapsule_New((void*) descriptor, NULL, NULL); #else obj = PyCObject_FromVoidPtr(descriptor, NULL); #endif PyDict_SetItem(cache, key, obj); Py_DECREF(obj); } } Py_DECREF(key); return descriptor; } /* For backward compatibility only */ #define SWIG_POINTER_EXCEPTION 0 #define SWIG_arg_fail(arg) SWIG_Python_ArgFail(arg) #define SWIG_MustGetPtr(p, type, argnum, flags) SWIG_Python_MustGetPtr(p, type, argnum, flags) SWIGRUNTIME int SWIG_Python_AddErrMesg(const char* mesg, int infront) { if (PyErr_Occurred()) { PyObject *type = 0; PyObject *value = 0; PyObject *traceback = 0; PyErr_Fetch(&type, &value, &traceback); if (value) { char *tmp; PyObject *old_str = PyObject_Str(value); Py_XINCREF(type); PyErr_Clear(); if (infront) { PyErr_Format(type, "%s %s", mesg, tmp = SWIG_Python_str_AsChar(old_str)); } else { PyErr_Format(type, "%s %s", tmp = SWIG_Python_str_AsChar(old_str), mesg); } SWIG_Python_str_DelForPy3(tmp); Py_DECREF(old_str); } return 1; } else { return 0; } } 
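/* A minimal sketch (kept out of the build) of how the cached type query
   above is typically used together with the pointer conversion helpers:
   look the descriptor up once by its mangled name, then unwrap a Python
   object into the underlying C pointer.  The mangled name "_p_chmFile"
   matches the type table emitted further down in this file; the helper
   name is illustrative only. */
#if 0
static struct chmFile *
unwrap_chmfile(PyObject *pyobj)
{
    void *vptr = 0;
    swig_type_info *ty = SWIG_Python_TypeQuery("_p_chmFile");
    if (!ty || !SWIG_IsOK(SWIG_Python_ConvertPtr(pyobj, &vptr, ty, 0)))
        return NULL;            /* not a wrapped struct chmFile */
    return (struct chmFile *) vptr;
}
#endif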
SWIGRUNTIME int SWIG_Python_ArgFail(int argnum) { if (PyErr_Occurred()) { /* add information about failing argument */ char mesg[256]; PyOS_snprintf(mesg, sizeof(mesg), "argument number %d:", argnum); return SWIG_Python_AddErrMesg(mesg, 1); } else { return 0; } } SWIGRUNTIMEINLINE const char * SwigPyObject_GetDesc(PyObject *self) { SwigPyObject *v = (SwigPyObject *)self; swig_type_info *ty = v ? v->ty : 0; return ty ? ty->str : ""; } SWIGRUNTIME void SWIG_Python_TypeError(const char *type, PyObject *obj) { if (type) { #if defined(SWIG_COBJECT_TYPES) if (obj && SwigPyObject_Check(obj)) { const char *otype = (const char *) SwigPyObject_GetDesc(obj); if (otype) { PyErr_Format(PyExc_TypeError, "a '%s' is expected, 'SwigPyObject(%s)' is received", type, otype); return; } } else #endif { const char *otype = (obj ? obj->ob_type->tp_name : 0); if (otype) { PyObject *str = PyObject_Str(obj); const char *cstr = str ? SWIG_Python_str_AsChar(str) : 0; if (cstr) { PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s(%s)' is received", type, otype, cstr); SWIG_Python_str_DelForPy3(cstr); } else { PyErr_Format(PyExc_TypeError, "a '%s' is expected, '%s' is received", type, otype); } Py_XDECREF(str); return; } } PyErr_Format(PyExc_TypeError, "a '%s' is expected", type); } else { PyErr_Format(PyExc_TypeError, "unexpected type is received"); } } /* Convert a pointer value, signal an exception on a type mismatch */ SWIGRUNTIME void * SWIG_Python_MustGetPtr(PyObject *obj, swig_type_info *ty, int SWIGUNUSEDPARM(argnum), int flags) { void *result; if (SWIG_Python_ConvertPtr(obj, &result, ty, flags) == -1) { PyErr_Clear(); #if SWIG_POINTER_EXCEPTION if (flags) { SWIG_Python_TypeError(SWIG_TypePrettyName(ty), obj); SWIG_Python_ArgFail(argnum); } #endif } return result; } #ifdef SWIGPYTHON_BUILTIN SWIGRUNTIME int SWIG_Python_NonDynamicSetAttr(PyObject *obj, PyObject *name, PyObject *value) { PyTypeObject *tp = obj->ob_type; PyObject *descr; PyObject *encoded_name; descrsetfunc f; int res = -1; # ifdef Py_USING_UNICODE if (PyString_Check(name)) { name = PyUnicode_Decode(PyString_AsString(name), PyString_Size(name), NULL, NULL); if (!name) return -1; } else if (!PyUnicode_Check(name)) # else if (!PyString_Check(name)) # endif { PyErr_Format(PyExc_TypeError, "attribute name must be string, not '%.200s'", name->ob_type->tp_name); return -1; } else { Py_INCREF(name); } if (!tp->tp_dict) { if (PyType_Ready(tp) < 0) goto done; } descr = _PyType_Lookup(tp, name); f = NULL; if (descr != NULL) f = descr->ob_type->tp_descr_set; if (!f) { if (PyString_Check(name)) { encoded_name = name; Py_INCREF(name); } else { encoded_name = PyUnicode_AsUTF8String(name); } PyErr_Format(PyExc_AttributeError, "'%.100s' object has no attribute '%.200s'", tp->tp_name, PyString_AsString(encoded_name)); Py_DECREF(encoded_name); } else { res = f(descr, obj, value); } done: Py_DECREF(name); return res; } #endif #ifdef __cplusplus } #endif #define SWIG_exception_fail(code, msg) do { SWIG_Error(code, msg); SWIG_fail; } while(0) #define SWIG_contract_assert(expr, msg) if (!(expr)) { SWIG_Error(SWIG_RuntimeError, msg); SWIG_fail; } else /* -------- TYPES TABLE (BEGIN) -------- */ #define SWIGTYPE_p_char swig_types[0] #define SWIGTYPE_p_chmFile swig_types[1] #define SWIGTYPE_p_chmUnitInfo swig_types[2] #define SWIGTYPE_p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int swig_types[3] #define SWIGTYPE_p_long_long swig_types[4] #define SWIGTYPE_p_unsigned_char swig_types[5] #define SWIGTYPE_p_unsigned_long_long swig_types[6] static swig_type_info 
*swig_types[8]; static swig_module_info swig_module = {swig_types, 7, 0, 0, 0, 0}; #define SWIG_TypeQuery(name) SWIG_TypeQueryModule(&swig_module, &swig_module, name) #define SWIG_MangledTypeQuery(name) SWIG_MangledTypeQueryModule(&swig_module, &swig_module, name) /* -------- TYPES TABLE (END) -------- */ #if (PY_VERSION_HEX <= 0x02000000) # if !defined(SWIG_PYTHON_CLASSIC) # error "This python version requires swig to be run with the '-classic' option" # endif #endif /*----------------------------------------------- @(target):= _chmlib.so ------------------------------------------------*/ #if PY_VERSION_HEX >= 0x03000000 # define SWIG_init PyInit__chmlib #else # define SWIG_init init_chmlib #endif #define SWIG_name "_chmlib" #define SWIGVERSION 0x030010 #define SWIG_VERSION SWIGVERSION #define SWIG_as_voidptr(a) (void *)((const void *)(a)) #define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),(void**)(a)) /* Copyright (C) 2003 Rubens Ramos Based on code by: Copyright (C) 2003 Razvan Cojocaru pychm is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA $Id$ */ #include "chm_lib.h" #include static PyObject *my_callback = NULL; static PyObject * my_set_callback(PyObject *dummy, PyObject *arg) { PyObject *result = NULL; if (!PyCallable_Check(arg)) { PyErr_SetString(PyExc_TypeError, "parameter must be callable"); return NULL; } Py_XINCREF(arg); /* Add a reference to new callback */ Py_XDECREF(my_callback); /* Dispose of previous callback */ my_callback = arg; /* Remember new callback */ /* Boilerplate to return "None" */ Py_INCREF(Py_None); result = Py_None; return result; } int dummy_enumerator (struct chmFile *h, struct chmUnitInfo *ui, void *context) { PyObject *arglist; PyObject *result; PyObject *py_h; PyObject *py_ui; PyObject *py_c; py_h = SWIG_NewPointerObj((void *) h, SWIGTYPE_p_chmFile, 0); py_ui = SWIG_NewPointerObj((void *) ui, SWIGTYPE_p_chmUnitInfo, 0); /* The following was: py_c = PyCObject_AsVoidPtr(context); which did not make sense because the function takes a PyObject * and returns a void *, not the reverse. This was probably never used?? In doubt, replace with a call which makes sense and hope for the best... 
*/ py_c = PyCapsule_New(context, "context", NULL); /* Time to call the callback */ arglist = Py_BuildValue("(OOO)", py_h, py_ui, py_c); if (arglist) { result = PyEval_CallObject(my_callback, arglist); Py_DECREF(arglist); Py_DECREF(result); Py_DECREF(py_h); Py_DECREF(py_ui); Py_DECREF(py_c); if (result == NULL) { return 0; /* Pass error back */ } else { return 1; } } else return 0; } SWIGINTERNINLINE PyObject* SWIG_From_int (int value) { return PyInt_FromLong((long) value); } SWIGINTERN int SWIG_AsVal_double (PyObject *obj, double *val) { int res = SWIG_TypeError; if (PyFloat_Check(obj)) { if (val) *val = PyFloat_AsDouble(obj); return SWIG_OK; #if PY_VERSION_HEX < 0x03000000 } else if (PyInt_Check(obj)) { if (val) *val = PyInt_AsLong(obj); return SWIG_OK; #endif } else if (PyLong_Check(obj)) { double v = PyLong_AsDouble(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_OK; } else { PyErr_Clear(); } } #ifdef SWIG_PYTHON_CAST_MODE { int dispatch = 0; double d = PyFloat_AsDouble(obj); if (!PyErr_Occurred()) { if (val) *val = d; return SWIG_AddCast(SWIG_OK); } else { PyErr_Clear(); } if (!dispatch) { long v = PyLong_AsLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_AddCast(SWIG_AddCast(SWIG_OK)); } else { PyErr_Clear(); } } } #endif return res; } #include #include SWIGINTERNINLINE int SWIG_CanCastAsInteger(double *d, double min, double max) { double x = *d; if ((min <= x && x <= max)) { double fx = floor(x); double cx = ceil(x); double rd = ((x - fx) < 0.5) ? fx : cx; /* simple rint */ if ((errno == EDOM) || (errno == ERANGE)) { errno = 0; } else { double summ, reps, diff; if (rd < x) { diff = x - rd; } else if (rd > x) { diff = rd - x; } else { return 1; } summ = rd + x; reps = diff/summ; if (reps < 8*DBL_EPSILON) { *d = rd; return 1; } } } return 0; } SWIGINTERN int SWIG_AsVal_unsigned_SS_long (PyObject *obj, unsigned long *val) { #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(obj)) { long v = PyInt_AsLong(obj); if (v >= 0) { if (val) *val = v; return SWIG_OK; } else { return SWIG_OverflowError; } } else #endif if (PyLong_Check(obj)) { unsigned long v = PyLong_AsUnsignedLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_OK; } else { PyErr_Clear(); return SWIG_OverflowError; } } #ifdef SWIG_PYTHON_CAST_MODE { int dispatch = 0; unsigned long v = PyLong_AsUnsignedLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_AddCast(SWIG_OK); } else { PyErr_Clear(); } if (!dispatch) { double d; int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, ULONG_MAX)) { if (val) *val = (unsigned long)(d); return res; } } } #endif return SWIG_TypeError; } #include #if !defined(SWIG_NO_LLONG_MAX) # if !defined(LLONG_MAX) && defined(__GNUC__) && defined (__LONG_LONG_MAX__) # define LLONG_MAX __LONG_LONG_MAX__ # define LLONG_MIN (-LLONG_MAX - 1LL) # define ULLONG_MAX (LLONG_MAX * 2ULL + 1ULL) # endif #endif #if defined(LLONG_MAX) && !defined(SWIG_LONG_LONG_AVAILABLE) # define SWIG_LONG_LONG_AVAILABLE #endif #ifdef SWIG_LONG_LONG_AVAILABLE SWIGINTERN int SWIG_AsVal_unsigned_SS_long_SS_long (PyObject *obj, unsigned long long *val) { int res = SWIG_TypeError; if (PyLong_Check(obj)) { unsigned long long v = PyLong_AsUnsignedLongLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_OK; } else { PyErr_Clear(); res = SWIG_OverflowError; } } else { unsigned long v; res = SWIG_AsVal_unsigned_SS_long (obj,&v); if (SWIG_IsOK(res)) { if (val) *val = v; return res; } } #ifdef SWIG_PYTHON_CAST_MODE { const 
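/* 1LL << DBL_MANT_DIG is 2^53 for IEEE-754 doubles, the bound below
   which every integer is exactly representable in a double; the
   cast-mode fallback therefore only accepts float values within
   [0, mant_max] here (and [mant_min, mant_max] in the signed variant
   further down). */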
double mant_max = 1LL << DBL_MANT_DIG; double d; res = SWIG_AsVal_double (obj,&d); if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, 0, mant_max)) { if (val) *val = (unsigned long long)(d); return SWIG_AddCast(res); } res = SWIG_TypeError; } #endif return res; } #endif #ifdef SWIG_LONG_LONG_AVAILABLE SWIGINTERNINLINE PyObject* SWIG_From_unsigned_SS_long_SS_long (unsigned long long value) { return (value > LONG_MAX) ? PyLong_FromUnsignedLongLong(value) : PyInt_FromLong((long)(value)); } #endif SWIGINTERN int SWIG_AsVal_long (PyObject *obj, long* val) { #if PY_VERSION_HEX < 0x03000000 if (PyInt_Check(obj)) { if (val) *val = PyInt_AsLong(obj); return SWIG_OK; } else #endif if (PyLong_Check(obj)) { long v = PyLong_AsLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_OK; } else { PyErr_Clear(); return SWIG_OverflowError; } } #ifdef SWIG_PYTHON_CAST_MODE { int dispatch = 0; long v = PyInt_AsLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_AddCast(SWIG_OK); } else { PyErr_Clear(); } if (!dispatch) { double d; int res = SWIG_AddCast(SWIG_AsVal_double (obj,&d)); if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, LONG_MIN, LONG_MAX)) { if (val) *val = (long)(d); return res; } } } #endif return SWIG_TypeError; } SWIGINTERN int SWIG_AsVal_int (PyObject * obj, int *val) { long v; int res = SWIG_AsVal_long (obj, &v); if (SWIG_IsOK(res)) { if ((v < INT_MIN || v > INT_MAX)) { return SWIG_OverflowError; } else { if (val) *val = (int)(v); } } return res; } SWIGINTERN swig_type_info* SWIG_pchar_descriptor(void) { static int init = 0; static swig_type_info* info = 0; if (!init) { info = SWIG_TypeQuery("_p_char"); init = 1; } return info; } SWIGINTERN int SWIG_AsCharPtrAndSize(PyObject *obj, char** cptr, size_t* psize, int *alloc) { #if PY_VERSION_HEX>=0x03000000 #if defined(SWIG_PYTHON_STRICT_BYTE_CHAR) if (PyBytes_Check(obj)) #else if (PyUnicode_Check(obj)) #endif #else if (PyString_Check(obj)) #endif { char *cstr; Py_ssize_t len; #if PY_VERSION_HEX>=0x03000000 #if !defined(SWIG_PYTHON_STRICT_BYTE_CHAR) if (!alloc && cptr) { /* We can't allow converting without allocation, since the internal representation of string in Python 3 is UCS-2/UCS-4 but we require a UTF-8 representation. TODO(bhy) More detailed explanation */ return SWIG_RuntimeError; } obj = PyUnicode_AsUTF8String(obj); if(alloc) *alloc = SWIG_NEWOBJ; #endif PyBytes_AsStringAndSize(obj, &cstr, &len); #else PyString_AsStringAndSize(obj, &cstr, &len); #endif if (cptr) { if (alloc) { /* In python the user should not be able to modify the inner string representation. To warranty that, if you define SWIG_PYTHON_SAFE_CSTRINGS, a new/copy of the python string buffer is always returned. The default behavior is just to return the pointer value, so, be careful. 
*/ #if defined(SWIG_PYTHON_SAFE_CSTRINGS) if (*alloc != SWIG_OLDOBJ) #else if (*alloc == SWIG_NEWOBJ) #endif { *cptr = (char *)memcpy((char *)malloc((len + 1)*sizeof(char)), cstr, sizeof(char)*(len + 1)); *alloc = SWIG_NEWOBJ; } else { *cptr = cstr; *alloc = SWIG_OLDOBJ; } } else { #if PY_VERSION_HEX>=0x03000000 #if defined(SWIG_PYTHON_STRICT_BYTE_CHAR) *cptr = PyBytes_AsString(obj); #else assert(0); /* Should never reach here with Unicode strings in Python 3 */ #endif #else *cptr = SWIG_Python_str_AsChar(obj); #endif } } if (psize) *psize = len + 1; #if PY_VERSION_HEX>=0x03000000 && !defined(SWIG_PYTHON_STRICT_BYTE_CHAR) Py_XDECREF(obj); #endif return SWIG_OK; } else { #if defined(SWIG_PYTHON_2_UNICODE) #if defined(SWIG_PYTHON_STRICT_BYTE_CHAR) #error "Cannot use both SWIG_PYTHON_2_UNICODE and SWIG_PYTHON_STRICT_BYTE_CHAR at once" #endif #if PY_VERSION_HEX<0x03000000 if (PyUnicode_Check(obj)) { char *cstr; Py_ssize_t len; if (!alloc && cptr) { return SWIG_RuntimeError; } obj = PyUnicode_AsUTF8String(obj); if (PyString_AsStringAndSize(obj, &cstr, &len) != -1) { if (cptr) { if (alloc) *alloc = SWIG_NEWOBJ; *cptr = (char *)memcpy((char *)malloc((len + 1)*sizeof(char)), cstr, sizeof(char)*(len + 1)); } if (psize) *psize = len + 1; Py_XDECREF(obj); return SWIG_OK; } else { Py_XDECREF(obj); } } #endif #endif swig_type_info* pchar_descriptor = SWIG_pchar_descriptor(); if (pchar_descriptor) { void* vptr = 0; if (SWIG_ConvertPtr(obj, &vptr, pchar_descriptor, 0) == SWIG_OK) { if (cptr) *cptr = (char *) vptr; if (psize) *psize = vptr ? (strlen((char *)vptr) + 1) : 0; if (alloc) *alloc = SWIG_OLDOBJ; return SWIG_OK; } } } return SWIG_TypeError; } SWIGINTERN int SWIG_AsCharArray(PyObject * obj, char *val, size_t size) { char* cptr = 0; size_t csize = 0; int alloc = SWIG_OLDOBJ; int res = SWIG_AsCharPtrAndSize(obj, &cptr, &csize, &alloc); if (SWIG_IsOK(res)) { /* special case of single char conversion when we don't need space for NUL */ if (size == 1 && csize == 2 && cptr && !cptr[1]) --csize; if (csize <= size) { if (val) { if (csize) memcpy(val, cptr, csize*sizeof(char)); if (csize < size) memset(val + csize, 0, (size - csize)*sizeof(char)); } if (alloc == SWIG_NEWOBJ) { free((char*)cptr); res = SWIG_DelNewMask(res); } return res; } if (alloc == SWIG_NEWOBJ) free((char*)cptr); } return SWIG_TypeError; } SWIGINTERNINLINE PyObject * SWIG_FromCharPtrAndSize(const char* carray, size_t size) { if (carray) { if (size > INT_MAX) { swig_type_info* pchar_descriptor = SWIG_pchar_descriptor(); return pchar_descriptor ? 
SWIG_InternalNewPointerObj((char *)(carray), pchar_descriptor, 0) : SWIG_Py_Void(); } else { #if PY_VERSION_HEX >= 0x03000000 #if defined(SWIG_PYTHON_STRICT_BYTE_CHAR) return PyBytes_FromStringAndSize(carray, (Py_ssize_t)(size)); #else #if PY_VERSION_HEX >= 0x03010000 return PyUnicode_DecodeUTF8(carray, (Py_ssize_t)(size), "surrogateescape"); #else return PyUnicode_FromStringAndSize(carray, (Py_ssize_t)(size)); #endif #endif #else return PyString_FromStringAndSize(carray, (Py_ssize_t)(size)); #endif } } else { return SWIG_Py_Void(); } } size_t SWIG_strnlen(const char* s, size_t maxlen) { const char *p; for (p = s; maxlen-- && *p; p++) ; return p - s; } #ifdef SWIG_LONG_LONG_AVAILABLE SWIGINTERN int SWIG_AsVal_long_SS_long (PyObject *obj, long long *val) { int res = SWIG_TypeError; if (PyLong_Check(obj)) { long long v = PyLong_AsLongLong(obj); if (!PyErr_Occurred()) { if (val) *val = v; return SWIG_OK; } else { PyErr_Clear(); res = SWIG_OverflowError; } } else { long v; res = SWIG_AsVal_long (obj,&v); if (SWIG_IsOK(res)) { if (val) *val = v; return res; } } #ifdef SWIG_PYTHON_CAST_MODE { const double mant_max = 1LL << DBL_MANT_DIG; const double mant_min = -mant_max; double d; res = SWIG_AsVal_double (obj,&d); if (SWIG_IsOK(res) && SWIG_CanCastAsInteger(&d, mant_min, mant_max)) { if (val) *val = (long long)(d); return SWIG_AddCast(res); } res = SWIG_TypeError; } #endif return res; } #endif #define t_output_helper SWIG_Python_AppendOutput #ifdef SWIG_LONG_LONG_AVAILABLE SWIGINTERNINLINE PyObject* SWIG_From_long_SS_long (long long value) { return ((value < LONG_MIN) || (value > LONG_MAX)) ? PyLong_FromLongLong(value) : PyInt_FromLong((long)(value)); } #endif #ifdef __cplusplus extern "C" { #endif SWIGINTERN PyObject *_wrap_chmUnitInfo_start_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; LONGUINT64 arg2 ; void *argp1 = 0 ; int res1 = 0 ; unsigned long long val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; if (!PyArg_ParseTuple(args,(char *)"OO:chmUnitInfo_start_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_start_set" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); if (!SWIG_IsOK(ecode2)) { SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "chmUnitInfo_start_set" "', argument " "2"" of type '" "LONGUINT64""'"); } arg2 = (LONGUINT64)(val2); if (arg1) (arg1)->start = arg2; resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_start_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; LONGUINT64 result; if (!PyArg_ParseTuple(args,(char *)"O:chmUnitInfo_start_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_start_get" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); result = (LONGUINT64) ((arg1)->start); resultobj = SWIG_From_unsigned_SS_long_SS_long((unsigned long long)(result)); return resultobj; fail: return NULL; } SWIGINTERN PyObject 
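/* Each chmUnitInfo field gets a _set/_get wrapper pair like the one
   above: the _set side converts the Python value with the SWIG_AsVal_*
   and SWIG_AsCharArray helpers and stores it in the struct, while the
   _get side wraps the current field value back into a Python object.
   The generated Python-side proxy class (produced alongside this file,
   not shown here) exposes them as plain attributes. */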
*_wrap_chmUnitInfo_length_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; LONGUINT64 arg2 ; void *argp1 = 0 ; int res1 = 0 ; unsigned long long val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; if (!PyArg_ParseTuple(args,(char *)"OO:chmUnitInfo_length_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_length_set" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); ecode2 = SWIG_AsVal_unsigned_SS_long_SS_long(obj1, &val2); if (!SWIG_IsOK(ecode2)) { SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "chmUnitInfo_length_set" "', argument " "2"" of type '" "LONGUINT64""'"); } arg2 = (LONGUINT64)(val2); if (arg1) (arg1)->length = arg2; resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_length_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; LONGUINT64 result; if (!PyArg_ParseTuple(args,(char *)"O:chmUnitInfo_length_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_length_get" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); result = (LONGUINT64) ((arg1)->length); resultobj = SWIG_From_unsigned_SS_long_SS_long((unsigned long long)(result)); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_space_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; int arg2 ; void *argp1 = 0 ; int res1 = 0 ; int val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; if (!PyArg_ParseTuple(args,(char *)"OO:chmUnitInfo_space_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_space_set" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); ecode2 = SWIG_AsVal_int(obj1, &val2); if (!SWIG_IsOK(ecode2)) { SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "chmUnitInfo_space_set" "', argument " "2"" of type '" "int""'"); } arg2 = (int)(val2); if (arg1) (arg1)->space = arg2; resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_space_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; int result; if (!PyArg_ParseTuple(args,(char *)"O:chmUnitInfo_space_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_space_get" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); result = (int) ((arg1)->space); resultobj = SWIG_From_int((int)(result)); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_path_set(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 
0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; char *arg2 ; void *argp1 = 0 ; int res1 = 0 ; char temp2[256+1] ; int res2 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; if (!PyArg_ParseTuple(args,(char *)"OO:chmUnitInfo_path_set",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_path_set" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); res2 = SWIG_AsCharArray(obj1, temp2, 256+1); if (!SWIG_IsOK(res2)) { SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "chmUnitInfo_path_set" "', argument " "2"" of type '" "char [256+1]""'"); } arg2 = (char *)(temp2); if (arg2) memcpy(arg1->path,arg2,256+1*sizeof(char)); else memset(arg1->path,0,256+1*sizeof(char)); resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chmUnitInfo_path_get(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; char *result = 0 ; if (!PyArg_ParseTuple(args,(char *)"O:chmUnitInfo_path_get",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chmUnitInfo_path_get" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); result = (char *)(char *) ((arg1)->path); { size_t size = SWIG_strnlen(result, 256+1); resultobj = SWIG_FromCharPtrAndSize(result, size); } return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_new_chmUnitInfo(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *result = 0 ; if (!PyArg_ParseTuple(args,(char *)":new_chmUnitInfo")) SWIG_fail; result = (struct chmUnitInfo *)calloc(1, sizeof(struct chmUnitInfo)); resultobj = SWIG_NewPointerObj(SWIG_as_voidptr(result), SWIGTYPE_p_chmUnitInfo, SWIG_POINTER_NEW | 0 ); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_delete_chmUnitInfo(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmUnitInfo *arg1 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; if (!PyArg_ParseTuple(args,(char *)"O:delete_chmUnitInfo",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmUnitInfo, SWIG_POINTER_DISOWN | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "delete_chmUnitInfo" "', argument " "1"" of type '" "struct chmUnitInfo *""'"); } arg1 = (struct chmUnitInfo *)(argp1); free((char *) arg1); resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *chmUnitInfo_swigregister(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *obj; if (!PyArg_ParseTuple(args,(char*)"O:swigregister", &obj)) return NULL; SWIG_TypeNewClientData(SWIGTYPE_p_chmUnitInfo, SWIG_NewClientData(obj)); return SWIG_Py_Void(); } SWIGINTERN PyObject *_wrap_chm_open(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; char *arg1 = (char *) 0 ; int res1 ; char *buf1 = 0 ; int alloc1 = 0 ; PyObject * obj0 = 0 ; struct chmFile *result = 0 ; if (!PyArg_ParseTuple(args,(char *)"O:chm_open",&obj0)) SWIG_fail; res1 = SWIG_AsCharPtrAndSize(obj0, &buf1, NULL, &alloc1); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_open" "', 
argument " "1"" of type '" "char const *""'"); } arg1 = (char *)(buf1); result = (struct chmFile *)chm_open((char const *)arg1); resultobj = SWIG_NewPointerObj(SWIG_as_voidptr(result), SWIGTYPE_p_chmFile, 0 | 0 ); if (alloc1 == SWIG_NEWOBJ) free((char*)buf1); return resultobj; fail: if (alloc1 == SWIG_NEWOBJ) free((char*)buf1); return NULL; } SWIGINTERN PyObject *_wrap_chm_close(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; void *argp1 = 0 ; int res1 = 0 ; PyObject * obj0 = 0 ; if (!PyArg_ParseTuple(args,(char *)"O:chm_close",&obj0)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_close" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); chm_close(arg1); resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chm_set_param(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; int arg2 ; int arg3 ; void *argp1 = 0 ; int res1 = 0 ; int val2 ; int ecode2 = 0 ; int val3 ; int ecode3 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; if (!PyArg_ParseTuple(args,(char *)"OOO:chm_set_param",&obj0,&obj1,&obj2)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_set_param" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); ecode2 = SWIG_AsVal_int(obj1, &val2); if (!SWIG_IsOK(ecode2)) { SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "chm_set_param" "', argument " "2"" of type '" "int""'"); } arg2 = (int)(val2); ecode3 = SWIG_AsVal_int(obj2, &val3); if (!SWIG_IsOK(ecode3)) { SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "chm_set_param" "', argument " "3"" of type '" "int""'"); } arg3 = (int)(val3); chm_set_param(arg1,arg2,arg3); resultobj = SWIG_Py_Void(); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chm_resolve_object(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; char *arg2 = (char *) 0 ; struct chmUnitInfo *arg3 = (struct chmUnitInfo *) 0 ; void *argp1 = 0 ; int res1 = 0 ; int res2 ; char *buf2 = 0 ; int alloc2 = 0 ; struct chmUnitInfo *temp3 = (struct chmUnitInfo *) calloc(1, sizeof(struct chmUnitInfo)) ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; int result; { arg3 = temp3; } if (!PyArg_ParseTuple(args,(char *)"OO:chm_resolve_object",&obj0,&obj1)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_resolve_object" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); res2 = SWIG_AsCharPtrAndSize(obj1, &buf2, NULL, &alloc2); if (!SWIG_IsOK(res2)) { SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "chm_resolve_object" "', argument " "2"" of type '" "char const *""'"); } arg2 = (char *)(buf2); result = (int)chm_resolve_object(arg1,(char const *)arg2,arg3); resultobj = SWIG_From_int((int)(result)); { PyObject *o, *o2, *o3; o = SWIG_NewPointerObj((void *) arg3, SWIGTYPE_p_chmUnitInfo, 1); if ((!resultobj) || (resultobj == Py_None)) { resultobj = o; } else { if (!PyTuple_Check(resultobj)) { PyObject *o2 = resultobj; resultobj = PyTuple_New(1); 
PyTuple_SetItem(resultobj,0,o2); } o3 = PyTuple_New(1); PyTuple_SetItem(o3,0,o); o2 = resultobj; resultobj = PySequence_Concat(o2,o3); Py_DECREF(o2); Py_DECREF(o3); } } if (alloc2 == SWIG_NEWOBJ) free((char*)buf2); return resultobj; fail: if (alloc2 == SWIG_NEWOBJ) free((char*)buf2); return NULL; } SWIGINTERN PyObject *_wrap_chm_retrieve_object(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; struct chmUnitInfo *arg2 = (struct chmUnitInfo *) 0 ; unsigned char *arg3 = (unsigned char *) 0 ; LONGUINT64 arg4 ; LONGINT64 arg5 ; void *argp1 = 0 ; int res1 = 0 ; void *argp2 = 0 ; int res2 = 0 ; unsigned char temp3 ; int res3 = SWIG_TMPOBJ ; unsigned long long val4 ; int ecode4 = 0 ; long long val5 ; int ecode5 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; PyObject * obj3 = 0 ; LONGINT64 result; arg3 = &temp3; if (!PyArg_ParseTuple(args,(char *)"OOOO:chm_retrieve_object",&obj0,&obj1,&obj2,&obj3)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_retrieve_object" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); res2 = SWIG_ConvertPtr(obj1, &argp2,SWIGTYPE_p_chmUnitInfo, 0 | 0 ); if (!SWIG_IsOK(res2)) { SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "chm_retrieve_object" "', argument " "2"" of type '" "struct chmUnitInfo *""'"); } arg2 = (struct chmUnitInfo *)(argp2); ecode4 = SWIG_AsVal_unsigned_SS_long_SS_long(obj2, &val4); if (!SWIG_IsOK(ecode4)) { SWIG_exception_fail(SWIG_ArgError(ecode4), "in method '" "chm_retrieve_object" "', argument " "4"" of type '" "LONGUINT64""'"); } arg4 = (LONGUINT64)(val4); ecode5 = SWIG_AsVal_long_SS_long(obj3, &val5); if (!SWIG_IsOK(ecode5)) { SWIG_exception_fail(SWIG_ArgError(ecode5), "in method '" "chm_retrieve_object" "', argument " "5"" of type '" "LONGINT64""'"); } arg5 = (LONGINT64)(val5); { /* nasty hack */ arg3 = (unsigned char *) malloc(arg5); if (arg3 == NULL) SWIG_fail; } result = (LONGINT64)chm_retrieve_object(arg1,arg2,arg3,arg4,arg5); resultobj = SWIG_From_long_SS_long((long long)(result)); { PyObject *o; o = SWIG_FromCharPtrAndSize((const char*)arg3, arg5); /* o = PyString_FromStringAndSize(arg3, arg5);*/ resultobj = t_output_helper(resultobj,o); free(arg3); } return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chm_enumerate(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; int arg2 ; CHM_ENUMERATOR arg3 = (CHM_ENUMERATOR) 0 ; void *arg4 = (void *) 0 ; void *argp1 = 0 ; int res1 = 0 ; int val2 ; int ecode2 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; PyObject * obj3 = 0 ; int result; if (!PyArg_ParseTuple(args,(char *)"OOOO:chm_enumerate",&obj0,&obj1,&obj2,&obj3)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_enumerate" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); ecode2 = SWIG_AsVal_int(obj1, &val2); if (!SWIG_IsOK(ecode2)) { SWIG_exception_fail(SWIG_ArgError(ecode2), "in method '" "chm_enumerate" "', argument " "2"" of type '" "int""'"); } arg2 = (int)(val2); { if (!my_set_callback(self, obj2)) goto fail; arg3 = dummy_enumerator; } { if (!(arg4 = PyCapsule_New(obj3, "context", NULL))) goto fail; } result = 
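/* arg3 is always the dummy_enumerator() trampoline defined earlier: the
   real Python callable was stashed in the my_callback global by
   my_set_callback(), and the caller's context object rides along inside
   the "context" capsule built just above as arg4.  chm_enumerate() thus
   calls back into Python once per unit it visits, passing wrapped
   chmFile and chmUnitInfo objects plus the context capsule. */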
(int)chm_enumerate(arg1,arg2,arg3,arg4); resultobj = SWIG_From_int((int)(result)); return resultobj; fail: return NULL; } SWIGINTERN PyObject *_wrap_chm_enumerate_dir(PyObject *SWIGUNUSEDPARM(self), PyObject *args) { PyObject *resultobj = 0; struct chmFile *arg1 = (struct chmFile *) 0 ; char *arg2 = (char *) 0 ; int arg3 ; CHM_ENUMERATOR arg4 = (CHM_ENUMERATOR) 0 ; void *arg5 = (void *) 0 ; void *argp1 = 0 ; int res1 = 0 ; int res2 ; char *buf2 = 0 ; int alloc2 = 0 ; int val3 ; int ecode3 = 0 ; PyObject * obj0 = 0 ; PyObject * obj1 = 0 ; PyObject * obj2 = 0 ; PyObject * obj3 = 0 ; PyObject * obj4 = 0 ; int result; if (!PyArg_ParseTuple(args,(char *)"OOOOO:chm_enumerate_dir",&obj0,&obj1,&obj2,&obj3,&obj4)) SWIG_fail; res1 = SWIG_ConvertPtr(obj0, &argp1,SWIGTYPE_p_chmFile, 0 | 0 ); if (!SWIG_IsOK(res1)) { SWIG_exception_fail(SWIG_ArgError(res1), "in method '" "chm_enumerate_dir" "', argument " "1"" of type '" "struct chmFile *""'"); } arg1 = (struct chmFile *)(argp1); res2 = SWIG_AsCharPtrAndSize(obj1, &buf2, NULL, &alloc2); if (!SWIG_IsOK(res2)) { SWIG_exception_fail(SWIG_ArgError(res2), "in method '" "chm_enumerate_dir" "', argument " "2"" of type '" "char const *""'"); } arg2 = (char *)(buf2); ecode3 = SWIG_AsVal_int(obj2, &val3); if (!SWIG_IsOK(ecode3)) { SWIG_exception_fail(SWIG_ArgError(ecode3), "in method '" "chm_enumerate_dir" "', argument " "3"" of type '" "int""'"); } arg3 = (int)(val3); { if (!my_set_callback(self, obj3)) goto fail; arg4 = dummy_enumerator; } { if (!(arg5 = PyCapsule_New(obj4, "context", NULL))) goto fail; } result = (int)chm_enumerate_dir(arg1,(char const *)arg2,arg3,arg4,arg5); resultobj = SWIG_From_int((int)(result)); if (alloc2 == SWIG_NEWOBJ) free((char*)buf2); return resultobj; fail: if (alloc2 == SWIG_NEWOBJ) free((char*)buf2); return NULL; } static PyMethodDef SwigMethods[] = { { (char *)"SWIG_PyInstanceMethod_New", (PyCFunction)SWIG_PyInstanceMethod_New, METH_O, NULL}, { (char *)"chmUnitInfo_start_set", _wrap_chmUnitInfo_start_set, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_start_get", _wrap_chmUnitInfo_start_get, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_length_set", _wrap_chmUnitInfo_length_set, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_length_get", _wrap_chmUnitInfo_length_get, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_space_set", _wrap_chmUnitInfo_space_set, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_space_get", _wrap_chmUnitInfo_space_get, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_path_set", _wrap_chmUnitInfo_path_set, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_path_get", _wrap_chmUnitInfo_path_get, METH_VARARGS, NULL}, { (char *)"new_chmUnitInfo", _wrap_new_chmUnitInfo, METH_VARARGS, NULL}, { (char *)"delete_chmUnitInfo", _wrap_delete_chmUnitInfo, METH_VARARGS, NULL}, { (char *)"chmUnitInfo_swigregister", chmUnitInfo_swigregister, METH_VARARGS, NULL}, { (char *)"chm_open", _wrap_chm_open, METH_VARARGS, NULL}, { (char *)"chm_close", _wrap_chm_close, METH_VARARGS, NULL}, { (char *)"chm_set_param", _wrap_chm_set_param, METH_VARARGS, NULL}, { (char *)"chm_resolve_object", _wrap_chm_resolve_object, METH_VARARGS, NULL}, { (char *)"chm_retrieve_object", _wrap_chm_retrieve_object, METH_VARARGS, NULL}, { (char *)"chm_enumerate", _wrap_chm_enumerate, METH_VARARGS, NULL}, { (char *)"chm_enumerate_dir", _wrap_chm_enumerate_dir, METH_VARARGS, NULL}, { NULL, NULL, 0, NULL } }; /* -------- TYPE CONVERSION AND EQUIVALENCE RULES (BEGIN) -------- */ static swig_type_info _swigt__p_char = {"_p_char", "char *", 0, 0, (void*)0, 0}; static swig_type_info 
_swigt__p_chmFile = {"_p_chmFile", "struct chmFile *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_chmUnitInfo = {"_p_chmUnitInfo", "struct chmUnitInfo *|chmUnitInfo *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int = {"_p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int", "int (*)(struct chmFile *,struct chmUnitInfo *,void *)|CHM_ENUMERATOR", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_long_long = {"_p_long_long", "LONGINT64 *|long long *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_unsigned_char = {"_p_unsigned_char", "unsigned char *", 0, 0, (void*)0, 0}; static swig_type_info _swigt__p_unsigned_long_long = {"_p_unsigned_long_long", "LONGUINT64 *|unsigned long long *", 0, 0, (void*)0, 0}; static swig_type_info *swig_type_initial[] = { &_swigt__p_char, &_swigt__p_chmFile, &_swigt__p_chmUnitInfo, &_swigt__p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int, &_swigt__p_long_long, &_swigt__p_unsigned_char, &_swigt__p_unsigned_long_long, }; static swig_cast_info _swigc__p_char[] = { {&_swigt__p_char, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_chmFile[] = { {&_swigt__p_chmFile, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_chmUnitInfo[] = { {&_swigt__p_chmUnitInfo, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int[] = { {&_swigt__p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_long_long[] = { {&_swigt__p_long_long, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_unsigned_char[] = { {&_swigt__p_unsigned_char, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info _swigc__p_unsigned_long_long[] = { {&_swigt__p_unsigned_long_long, 0, 0, 0},{0, 0, 0, 0}}; static swig_cast_info *swig_cast_initial[] = { _swigc__p_char, _swigc__p_chmFile, _swigc__p_chmUnitInfo, _swigc__p_f_p_struct_chmFile_p_struct_chmUnitInfo_p_void__int, _swigc__p_long_long, _swigc__p_unsigned_char, _swigc__p_unsigned_long_long, }; /* -------- TYPE CONVERSION AND EQUIVALENCE RULES (END) -------- */ static swig_const_info swig_const_table[] = { {0, 0, 0, 0.0, 0, 0}}; #ifdef __cplusplus } #endif /* ----------------------------------------------------------------------------- * Type initialization: * This problem is tough by the requirement that no dynamic * memory is used. Also, since swig_type_info structures store pointers to * swig_cast_info structures and swig_cast_info structures store pointers back * to swig_type_info structures, we need some lookup code at initialization. * The idea is that swig generates all the structures that are needed. * The runtime then collects these partially filled structures. * The SWIG_InitializeModule function takes these initial arrays out of * swig_module, and does all the lookup, filling in the swig_module.types * array with the correct data and linking the correct swig_cast_info * structures together. * * The generated swig_type_info structures are assigned statically to an initial * array. We just loop through that array, and handle each type individually. * First we lookup if this type has been already loaded, and if so, use the * loaded structure instead of the generated one. Then we have to fill in the * cast linked list. The cast data is initially stored in something like a * two-dimensional array. Each row corresponds to a type (there are the same * number of rows as there are in the swig_type_initial array). 
Each entry in * a column is one of the swig_cast_info structures for that type. * The cast_initial array is actually an array of arrays, because each row has * a variable number of columns. So to actually build the cast linked list, * we find the array of casts associated with the type, and loop through it * adding the casts to the list. The one last trick we need to do is making * sure the type pointer in the swig_cast_info struct is correct. * * First off, we lookup the cast->type name to see if it is already loaded. * There are three cases to handle: * 1) If the cast->type has already been loaded AND the type we are adding * casting info to has not been loaded (it is in this module), THEN we * replace the cast->type pointer with the type pointer that has already * been loaded. * 2) If BOTH types (the one we are adding casting info to, and the * cast->type) are loaded, THEN the cast info has already been loaded by * the previous module so we just ignore it. * 3) Finally, if cast->type has not already been loaded, then we add that * swig_cast_info to the linked list (because the cast->type) pointer will * be correct. * ----------------------------------------------------------------------------- */ #ifdef __cplusplus extern "C" { #if 0 } /* c-mode */ #endif #endif #if 0 #define SWIGRUNTIME_DEBUG #endif SWIGRUNTIME void SWIG_InitializeModule(void *clientdata) { size_t i; swig_module_info *module_head, *iter; int init; /* check to see if the circular list has been setup, if not, set it up */ if (swig_module.next==0) { /* Initialize the swig_module */ swig_module.type_initial = swig_type_initial; swig_module.cast_initial = swig_cast_initial; swig_module.next = &swig_module; init = 1; } else { init = 0; } /* Try and load any already created modules */ module_head = SWIG_GetModule(clientdata); if (!module_head) { /* This is the first module loaded for this interpreter */ /* so set the swig module into the interpreter */ SWIG_SetModule(clientdata, &swig_module); } else { /* the interpreter has loaded a SWIG module, but has it loaded this one? */ iter=module_head; do { if (iter==&swig_module) { /* Our module is already in the list, so there's nothing more to do. */ return; } iter=iter->next; } while (iter!= module_head); /* otherwise we must add our module into the list */ swig_module.next = module_head->next; module_head->next = &swig_module; } /* When multiple interpreters are used, a module could have already been initialized in a different interpreter, but not yet have a pointer in this interpreter. In this case, we do not want to continue adding types... 
everything should be set up already */ if (init == 0) return; /* Now work on filling in swig_module.types */ #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: size %d\n", swig_module.size); #endif for (i = 0; i < swig_module.size; ++i) { swig_type_info *type = 0; swig_type_info *ret; swig_cast_info *cast; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); #endif /* if there is another module already loaded */ if (swig_module.next != &swig_module) { type = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, swig_module.type_initial[i]->name); } if (type) { /* Overwrite clientdata field */ #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: found type %s\n", type->name); #endif if (swig_module.type_initial[i]->clientdata) { type->clientdata = swig_module.type_initial[i]->clientdata; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: found and overwrite type %s \n", type->name); #endif } } else { type = swig_module.type_initial[i]; } /* Insert casting types */ cast = swig_module.cast_initial[i]; while (cast->type) { /* Don't need to add information already in the list */ ret = 0; #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: look cast %s\n", cast->type->name); #endif if (swig_module.next != &swig_module) { ret = SWIG_MangledTypeQueryModule(swig_module.next, &swig_module, cast->type->name); #ifdef SWIGRUNTIME_DEBUG if (ret) printf("SWIG_InitializeModule: found cast %s\n", ret->name); #endif } if (ret) { if (type == swig_module.type_initial[i]) { #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: skip old type %s\n", ret->name); #endif cast->type = ret; ret = 0; } else { /* Check for casting already in the list */ swig_cast_info *ocast = SWIG_TypeCheck(ret->name, type); #ifdef SWIGRUNTIME_DEBUG if (ocast) printf("SWIG_InitializeModule: skip old cast %s\n", ret->name); #endif if (!ocast) ret = 0; } } if (!ret) { #ifdef SWIGRUNTIME_DEBUG printf("SWIG_InitializeModule: adding cast %s\n", cast->type->name); #endif if (type->cast) { type->cast->prev = cast; cast->next = type->cast; } type->cast = cast; } cast++; } /* Set entry in modules->types array equal to the type */ swig_module.types[i] = type; } swig_module.types[i] = 0; #ifdef SWIGRUNTIME_DEBUG printf("**** SWIG_InitializeModule: Cast List ******\n"); for (i = 0; i < swig_module.size; ++i) { int j = 0; swig_cast_info *cast = swig_module.cast_initial[i]; printf("SWIG_InitializeModule: type %d %s\n", i, swig_module.type_initial[i]->name); while (cast->type) { printf("SWIG_InitializeModule: cast type %s\n", cast->type->name); cast++; ++j; } printf("---- Total casts: %d\n",j); } printf("**** SWIG_InitializeModule: Cast List ******\n"); #endif } /* This function will propagate the clientdata field of type to * any new swig_type_info structures that have been added into the list * of equivalent types. It is like calling * SWIG_TypeClientData(type, clientdata) a second time. 
*/ SWIGRUNTIME void SWIG_PropagateClientData(void) { size_t i; swig_cast_info *equiv; static int init_run = 0; if (init_run) return; init_run = 1; for (i = 0; i < swig_module.size; i++) { if (swig_module.types[i]->clientdata) { equiv = swig_module.types[i]->cast; while (equiv) { if (!equiv->converter) { if (equiv->type && !equiv->type->clientdata) SWIG_TypeClientData(equiv->type, swig_module.types[i]->clientdata); } equiv = equiv->next; } } } } #ifdef __cplusplus #if 0 { /* c-mode */ #endif } #endif #ifdef __cplusplus extern "C" { #endif /* Python-specific SWIG API */ #define SWIG_newvarlink() SWIG_Python_newvarlink() #define SWIG_addvarlink(p, name, get_attr, set_attr) SWIG_Python_addvarlink(p, name, get_attr, set_attr) #define SWIG_InstallConstants(d, constants) SWIG_Python_InstallConstants(d, constants) /* ----------------------------------------------------------------------------- * global variable support code. * ----------------------------------------------------------------------------- */ typedef struct swig_globalvar { char *name; /* Name of global variable */ PyObject *(*get_attr)(void); /* Return the current value */ int (*set_attr)(PyObject *); /* Set the value */ struct swig_globalvar *next; } swig_globalvar; typedef struct swig_varlinkobject { PyObject_HEAD swig_globalvar *vars; } swig_varlinkobject; SWIGINTERN PyObject * swig_varlink_repr(swig_varlinkobject *SWIGUNUSEDPARM(v)) { #if PY_VERSION_HEX >= 0x03000000 return PyUnicode_InternFromString(""); #else return PyString_FromString(""); #endif } SWIGINTERN PyObject * swig_varlink_str(swig_varlinkobject *v) { #if PY_VERSION_HEX >= 0x03000000 PyObject *str = PyUnicode_InternFromString("("); PyObject *tail; PyObject *joined; swig_globalvar *var; for (var = v->vars; var; var=var->next) { tail = PyUnicode_FromString(var->name); joined = PyUnicode_Concat(str, tail); Py_DecRef(str); Py_DecRef(tail); str = joined; if (var->next) { tail = PyUnicode_InternFromString(", "); joined = PyUnicode_Concat(str, tail); Py_DecRef(str); Py_DecRef(tail); str = joined; } } tail = PyUnicode_InternFromString(")"); joined = PyUnicode_Concat(str, tail); Py_DecRef(str); Py_DecRef(tail); str = joined; #else PyObject *str = PyString_FromString("("); swig_globalvar *var; for (var = v->vars; var; var=var->next) { PyString_ConcatAndDel(&str,PyString_FromString(var->name)); if (var->next) PyString_ConcatAndDel(&str,PyString_FromString(", ")); } PyString_ConcatAndDel(&str,PyString_FromString(")")); #endif return str; } SWIGINTERN int swig_varlink_print(swig_varlinkobject *v, FILE *fp, int SWIGUNUSEDPARM(flags)) { char *tmp; PyObject *str = swig_varlink_str(v); fprintf(fp,"Swig global variables "); fprintf(fp,"%s\n", tmp = SWIG_Python_str_AsChar(str)); SWIG_Python_str_DelForPy3(tmp); Py_DECREF(str); return 0; } SWIGINTERN void swig_varlink_dealloc(swig_varlinkobject *v) { swig_globalvar *var = v->vars; while (var) { swig_globalvar *n = var->next; free(var->name); free(var); var = n; } } SWIGINTERN PyObject * swig_varlink_getattr(swig_varlinkobject *v, char *n) { PyObject *res = NULL; swig_globalvar *var = v->vars; while (var) { if (strcmp(var->name,n) == 0) { res = (*var->get_attr)(); break; } var = var->next; } if (res == NULL && !PyErr_Occurred()) { PyErr_Format(PyExc_AttributeError, "Unknown C global variable '%s'", n); } return res; } SWIGINTERN int swig_varlink_setattr(swig_varlinkobject *v, char *n, PyObject *p) { int res = 1; swig_globalvar *var = v->vars; while (var) { if (strcmp(var->name,n) == 0) { res = (*var->set_attr)(p); break; } var = 
var->next; } if (res == 1 && !PyErr_Occurred()) { PyErr_Format(PyExc_AttributeError, "Unknown C global variable '%s'", n); } return res; } SWIGINTERN PyTypeObject* swig_varlink_type(void) { static char varlink__doc__[] = "Swig var link object"; static PyTypeObject varlink_type; static int type_init = 0; if (!type_init) { const PyTypeObject tmp = { /* PyObject header changed in Python 3 */ #if PY_VERSION_HEX >= 0x03000000 PyVarObject_HEAD_INIT(NULL, 0) #else PyObject_HEAD_INIT(NULL) 0, /* ob_size */ #endif (char *)"swigvarlink", /* tp_name */ sizeof(swig_varlinkobject), /* tp_basicsize */ 0, /* tp_itemsize */ (destructor) swig_varlink_dealloc, /* tp_dealloc */ (printfunc) swig_varlink_print, /* tp_print */ (getattrfunc) swig_varlink_getattr, /* tp_getattr */ (setattrfunc) swig_varlink_setattr, /* tp_setattr */ 0, /* tp_compare */ (reprfunc) swig_varlink_repr, /* tp_repr */ 0, /* tp_as_number */ 0, /* tp_as_sequence */ 0, /* tp_as_mapping */ 0, /* tp_hash */ 0, /* tp_call */ (reprfunc) swig_varlink_str, /* tp_str */ 0, /* tp_getattro */ 0, /* tp_setattro */ 0, /* tp_as_buffer */ 0, /* tp_flags */ varlink__doc__, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ #if PY_VERSION_HEX >= 0x02020000 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, /* tp_iter -> tp_weaklist */ #endif #if PY_VERSION_HEX >= 0x02030000 0, /* tp_del */ #endif #if PY_VERSION_HEX >= 0x02060000 0, /* tp_version_tag */ #endif #if PY_VERSION_HEX >= 0x03040000 0, /* tp_finalize */ #endif #ifdef COUNT_ALLOCS 0, /* tp_allocs */ 0, /* tp_frees */ 0, /* tp_maxalloc */ #if PY_VERSION_HEX >= 0x02050000 0, /* tp_prev */ #endif 0 /* tp_next */ #endif }; varlink_type = tmp; type_init = 1; #if PY_VERSION_HEX < 0x02020000 varlink_type.ob_type = &PyType_Type; #else if (PyType_Ready(&varlink_type) < 0) return NULL; #endif } return &varlink_type; } /* Create a variable linking object for use later */ SWIGINTERN PyObject * SWIG_Python_newvarlink(void) { swig_varlinkobject *result = PyObject_NEW(swig_varlinkobject, swig_varlink_type()); if (result) { result->vars = 0; } return ((PyObject*) result); } SWIGINTERN void SWIG_Python_addvarlink(PyObject *p, char *name, PyObject *(*get_attr)(void), int (*set_attr)(PyObject *p)) { swig_varlinkobject *v = (swig_varlinkobject *) p; swig_globalvar *gv = (swig_globalvar *) malloc(sizeof(swig_globalvar)); if (gv) { size_t size = strlen(name)+1; gv->name = (char *)malloc(size); if (gv->name) { strncpy(gv->name,name,size); gv->get_attr = get_attr; gv->set_attr = set_attr; gv->next = v->vars; } } v->vars = gv; } SWIGINTERN PyObject * SWIG_globals(void) { static PyObject *_SWIG_globals = 0; if (!_SWIG_globals) _SWIG_globals = SWIG_newvarlink(); return _SWIG_globals; } /* ----------------------------------------------------------------------------- * constants/methods manipulation * ----------------------------------------------------------------------------- */ /* Install Constants */ SWIGINTERN void SWIG_Python_InstallConstants(PyObject *d, swig_const_info constants[]) { PyObject *obj = 0; size_t i; for (i = 0; constants[i].type; ++i) { switch(constants[i].type) { case SWIG_PY_POINTER: obj = SWIG_InternalNewPointerObj(constants[i].pvalue, *(constants[i]).ptype,0); break; case SWIG_PY_BINARY: obj = SWIG_NewPackedObj(constants[i].pvalue, constants[i].lvalue, *(constants[i].ptype)); break; default: obj = 0; break; } if (obj) { PyDict_SetItemString(d, constants[i].name, obj); Py_DECREF(obj); } } } /* 
-----------------------------------------------------------------------------*/ /* Fix SwigMethods to carry the callback ptrs when needed */ /* -----------------------------------------------------------------------------*/ SWIGINTERN void SWIG_Python_FixMethods(PyMethodDef *methods, swig_const_info *const_table, swig_type_info **types, swig_type_info **types_initial) { size_t i; for (i = 0; methods[i].ml_name; ++i) { const char *c = methods[i].ml_doc; if (!c) continue; c = strstr(c, "swig_ptr: "); if (c) { int j; swig_const_info *ci = 0; const char *name = c + 10; for (j = 0; const_table[j].type; ++j) { if (strncmp(const_table[j].name, name, strlen(const_table[j].name)) == 0) { ci = &(const_table[j]); break; } } if (ci) { void *ptr = (ci->type == SWIG_PY_POINTER) ? ci->pvalue : 0; if (ptr) { size_t shift = (ci->ptype) - types; swig_type_info *ty = types_initial[shift]; size_t ldoc = (c - methods[i].ml_doc); size_t lptr = strlen(ty->name)+2*sizeof(void*)+2; char *ndoc = (char*)malloc(ldoc + lptr + 10); if (ndoc) { char *buff = ndoc; strncpy(buff, methods[i].ml_doc, ldoc); buff += ldoc; strncpy(buff, "swig_ptr: ", 10); buff += 10; SWIG_PackVoidPtr(buff, ptr, ty->name, lptr); methods[i].ml_doc = ndoc; } } } } } } #ifdef __cplusplus } #endif /* -----------------------------------------------------------------------------* * Partial Init method * -----------------------------------------------------------------------------*/ #ifdef __cplusplus extern "C" #endif SWIGEXPORT #if PY_VERSION_HEX >= 0x03000000 PyObject* #else void #endif SWIG_init(void) { PyObject *m, *d, *md; #if PY_VERSION_HEX >= 0x03000000 static struct PyModuleDef SWIG_module = { # if PY_VERSION_HEX >= 0x03020000 PyModuleDef_HEAD_INIT, # else { PyObject_HEAD_INIT(NULL) NULL, /* m_init */ 0, /* m_index */ NULL, /* m_copy */ }, # endif (char *) SWIG_name, NULL, -1, SwigMethods, NULL, NULL, NULL, NULL }; #endif #if defined(SWIGPYTHON_BUILTIN) static SwigPyClientData SwigPyObject_clientdata = { 0, 0, 0, 0, 0, 0, 0 }; static PyGetSetDef this_getset_def = { (char *)"this", &SwigPyBuiltin_ThisClosure, NULL, NULL, NULL }; static SwigPyGetSet thisown_getset_closure = { (PyCFunction) SwigPyObject_own, (PyCFunction) SwigPyObject_own }; static PyGetSetDef thisown_getset_def = { (char *)"thisown", SwigPyBuiltin_GetterClosure, SwigPyBuiltin_SetterClosure, NULL, &thisown_getset_closure }; PyObject *metatype_args; PyTypeObject *builtin_pytype; int builtin_base_count; swig_type_info *builtin_basetype; PyObject *tuple; PyGetSetDescrObject *static_getset; PyTypeObject *metatype; SwigPyClientData *cd; PyObject *public_interface, *public_symbol; PyObject *this_descr; PyObject *thisown_descr; PyObject *self = 0; int i; (void)builtin_pytype; (void)builtin_base_count; (void)builtin_basetype; (void)tuple; (void)static_getset; (void)self; /* metatype is used to implement static member variables. 
*/ metatype_args = Py_BuildValue("(s(O){})", "SwigPyObjectType", &PyType_Type); assert(metatype_args); metatype = (PyTypeObject *) PyType_Type.tp_call((PyObject *) &PyType_Type, metatype_args, NULL); assert(metatype); Py_DECREF(metatype_args); metatype->tp_setattro = (setattrofunc) &SwigPyObjectType_setattro; assert(PyType_Ready(metatype) >= 0); #endif /* Fix SwigMethods to carry the callback ptrs when needed */ SWIG_Python_FixMethods(SwigMethods, swig_const_table, swig_types, swig_type_initial); #if PY_VERSION_HEX >= 0x03000000 m = PyModule_Create(&SWIG_module); #else m = Py_InitModule((char *) SWIG_name, SwigMethods); #endif md = d = PyModule_GetDict(m); (void)md; SWIG_InitializeModule(0); #ifdef SWIGPYTHON_BUILTIN SwigPyObject_stype = SWIG_MangledTypeQuery("_p_SwigPyObject"); assert(SwigPyObject_stype); cd = (SwigPyClientData*) SwigPyObject_stype->clientdata; if (!cd) { SwigPyObject_stype->clientdata = &SwigPyObject_clientdata; SwigPyObject_clientdata.pytype = SwigPyObject_TypeOnce(); } else if (SwigPyObject_TypeOnce()->tp_basicsize != cd->pytype->tp_basicsize) { PyErr_SetString(PyExc_RuntimeError, "Import error: attempted to load two incompatible swig-generated modules."); # if PY_VERSION_HEX >= 0x03000000 return NULL; # else return; # endif } /* All objects have a 'this' attribute */ this_descr = PyDescr_NewGetSet(SwigPyObject_type(), &this_getset_def); (void)this_descr; /* All objects have a 'thisown' attribute */ thisown_descr = PyDescr_NewGetSet(SwigPyObject_type(), &thisown_getset_def); (void)thisown_descr; public_interface = PyList_New(0); public_symbol = 0; (void)public_symbol; PyDict_SetItemString(md, "__all__", public_interface); Py_DECREF(public_interface); for (i = 0; SwigMethods[i].ml_name != NULL; ++i) SwigPyBuiltin_AddPublicSymbol(public_interface, SwigMethods[i].ml_name); for (i = 0; swig_const_table[i].name != 0; ++i) SwigPyBuiltin_AddPublicSymbol(public_interface, swig_const_table[i].name); #endif SWIG_InstallConstants(d,swig_const_table); SWIG_Python_SetConstant(d, "CHM_UNCOMPRESSED",SWIG_From_int((int)((0)))); SWIG_Python_SetConstant(d, "CHM_COMPRESSED",SWIG_From_int((int)((1)))); SWIG_Python_SetConstant(d, "CHM_MAX_PATHLEN",SWIG_From_int((int)(256))); SWIG_Python_SetConstant(d, "CHM_PARAM_MAX_BLOCKS_CACHED",SWIG_From_int((int)(0))); SWIG_Python_SetConstant(d, "CHM_RESOLVE_SUCCESS",SWIG_From_int((int)((0)))); SWIG_Python_SetConstant(d, "CHM_RESOLVE_FAILURE",SWIG_From_int((int)((1)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_NORMAL",SWIG_From_int((int)((1)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_META",SWIG_From_int((int)((2)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_SPECIAL",SWIG_From_int((int)((4)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_FILES",SWIG_From_int((int)((8)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_DIRS",SWIG_From_int((int)((16)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATE_ALL",SWIG_From_int((int)((31)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATOR_FAILURE",SWIG_From_int((int)((0)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATOR_CONTINUE",SWIG_From_int((int)((1)))); SWIG_Python_SetConstant(d, "CHM_ENUMERATOR_SUCCESS",SWIG_From_int((int)((2)))); #if PY_VERSION_HEX >= 0x03000000 return m; #else return; #endif } recoll-1.26.3/python/pychm/README-RECOLL.txt0000644000175000017500000000045013533651561015130 00000000000000May 2018: pychm has no python3 version. The pull request I submitted for the port is sitting there, and so is the Debian bug. 
https://github.com/dottedmag/pychm/pull/5 Which is why Recoll bundles pychm, enhanced for Python3, for now. The source repo is here: https://github.com/medoc92/pychm recoll-1.26.3/python/pychm/setup.py.in0000644000175000017500000000277313533651561014605 00000000000000from setuptools import setup, Extension long_description = ''' Version of the chm package modified to support Python 3 and bundled with Recoll. The chm package provides three modules, chm, chmlib and extra, which provide access to the API implemented by the C library chmlib and some additional classes and functions. They are used to access MS-ITSS encoded files - Compressed Html Help files (.chm). ''' # For shadow builds: references to the source tree import os top = os.path.join('@srcdir@', '..', '..') pytop = '@srcdir@' setup(name="recollchm", version="0.8.4.1+git", description="Python package to handle CHM files", author="Rubens Ramos", author_email="rubensr@users.sourceforge.net", maintainer="Mikhail Gusarov", maintainer_email="dottedmag@dottedmag.net", url="https://github.com/dottedmag/pychm", license="GPL", long_description=long_description, package_dir = {'' : os.path.join(top, 'python', 'pychm')}, py_modules=["recollchm.chm", "recollchm.chmlib"], ext_modules=[Extension("recollchm._chmlib", [os.path.join(pytop, "recollchm/swig_chm.c")], libraries=["chm"], extra_compile_args=["-DSWIG_COBJECT_TYPES"]), Extension("recollchm.extra", [os.path.join(pytop, "recollchm/extra.c")], extra_compile_args=["-D__PYTHON__"], libraries=["chm"])] ) recoll-1.26.3/python/pychm/AUTHORS0000644000175000017500000000125613533651561013531 00000000000000Author ------ Rubens Ramos Maintainer ---------- Mikhail Gusarov Python3 port minor changes -------------------------- Jean-Francois Dockes Acknowledgements ---------------- This work would not have been possible without the existence of chmlib, developed by Jed Wing, and a lot of the python code used to parse the contents tree and to decode the index files was heavily based on the code implemented by Razvan Cojocaru for the xCHM viewer. Bug reports ----------- can3p, Chang (changshu), Hristo Iliev, Carlos Liu, Torsten Marek, Dmitri (nebraskin), Fredrik de Vibe, Glenn Washburn recoll-1.26.3/python/recoll/0000755000175000017500000000000013570165410012667 500000000000000recoll-1.26.3/python/recoll/pyrclextract.cpp0000644000175000017500000003000213533651561016040 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include #include #include #include #include #include #include "log.h" #include "rcldoc.h" #include "internfile.h" #include "rclconfig.h" #include "rclinit.h" #include "pyrecoll.h" using namespace std; // Imported from pyrecoll static PyObject *recoll_DocType; ////////////////////////////////////////////////////////////////////// /// Extractor object code typedef struct { PyObject_HEAD /* Type-specific fields go here. */ FileInterner *xtr; std::shared_ptr rclconfig; recoll_DocObject *docobject; } rclx_ExtractorObject; static void Extractor_dealloc(rclx_ExtractorObject *self) { LOGDEB("Extractor_dealloc\n" ); if (self->docobject) { Py_DECREF(&self->docobject); } self->rclconfig.reset(); delete self->xtr; Py_TYPE(self)->tp_free((PyObject*)self); } static PyObject * Extractor_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { LOGDEB("Extractor_new\n" ); rclx_ExtractorObject *self = (rclx_ExtractorObject *)type->tp_alloc(type, 0); if (self == 0) return 0; self->xtr = 0; self->docobject = 0; return (PyObject *)self; } static int Extractor_init(rclx_ExtractorObject *self, PyObject *args, PyObject *kwargs) { LOGDEB("Extractor_init\n" ); static const char* kwlist[] = {"doc", NULL}; PyObject *pdobj; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", (char**)kwlist, recoll_DocType, &pdobj)) return -1; recoll_DocObject *dobj = (recoll_DocObject *)pdobj; if (dobj->doc == 0) { PyErr_SetString(PyExc_AttributeError, "Null Doc ?"); return -1; } self->docobject = dobj; Py_INCREF(dobj); self->rclconfig = dobj->rclconfig; self->xtr = new FileInterner(*dobj->doc, self->rclconfig.get(), FileInterner::FIF_forPreview); return 0; } PyDoc_STRVAR(doc_Extractor_textextract, "textextract(ipath)\n" "Extract document defined by ipath and return a doc object. The doc.text\n" "field has the document text as either text/plain or text/html\n" "according to doc.mimetype.\n" ); static PyObject * Extractor_textextract(rclx_ExtractorObject* self, PyObject *args, PyObject *kwargs) { LOGDEB("Extractor_textextract\n" ); static const char* kwlist[] = {"ipath", NULL}; char *sipath = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "es:Extractor_textextract", (char**)kwlist, "utf-8", &sipath)) return 0; string ipath(sipath); PyMem_Free(sipath); if (self->xtr == 0) { PyErr_SetString(PyExc_AttributeError, "extract: null object"); return 0; } /* Call the doc class object to create a new doc. */ recoll_DocObject *result = (recoll_DocObject *)PyObject_CallObject((PyObject *)recoll_DocType, 0); if (!result) { PyErr_SetString(PyExc_AttributeError, "extract: doc create failed"); return 0; } FileInterner::Status status = self->xtr->internfile(*(result->doc), ipath); if (status != FileInterner::FIDone && status != FileInterner::FIAgain) { PyErr_SetString(PyExc_AttributeError, "internfile failure"); return 0; } string html = self->xtr->get_html(); if (!html.empty()) { result->doc->text = html; result->doc->mimetype = "text/html"; } // Is this actually needed ? Useful for url which is also formatted . 
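// The assignments just below copy the main dedicated Rcl::Doc fields into the // meta array (and store a printable form of the url), as movedocfields() does // in pyrecoll.cpp, so that these fields show up when the doc's keys are // enumerated from Python.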
Rcl::Doc *doc = result->doc; printableUrl(self->rclconfig->getDefCharset(), doc->url, doc->meta[Rcl::Doc::keyurl]); doc->meta[Rcl::Doc::keytp] = doc->mimetype; doc->meta[Rcl::Doc::keyipt] = doc->ipath; doc->meta[Rcl::Doc::keyfs] = doc->fbytes; doc->meta[Rcl::Doc::keyds] = doc->dbytes; return (PyObject *)result; } PyDoc_STRVAR(doc_Extractor_idoctofile, "idoctofile(ipath='', mimetype='', ofilename='')\n" "Extract document defined by ipath into a file, in its native format.\n" ); static PyObject * Extractor_idoctofile(rclx_ExtractorObject* self, PyObject *args, PyObject *kwargs) { LOGDEB("Extractor_idoctofile\n" ); static const char* kwlist[] = {"ipath", "mimetype", "ofilename", NULL}; char *sipath = 0; char *smt = 0; char *soutfile = 0; // no freeing if (!PyArg_ParseTupleAndKeywords(args,kwargs, "eses|s:Extractor_idoctofile", (char**)kwlist, "utf-8", &sipath, "utf-8", &smt, &soutfile)) return 0; string ipath(sipath); PyMem_Free(sipath); string mimetype(smt); PyMem_Free(smt); string outfile; if (soutfile && *soutfile) outfile.assign(soutfile); if (self->xtr == 0) { PyErr_SetString(PyExc_AttributeError, "idoctofile: null object"); return 0; } // If ipath is empty and we want the original mimetype, we can't // use FileInterner::internToFile() because the first conversion // was performed by the FileInterner constructor, so that we can't // reach the original object this way. Instead, if the data comes // from a file (m_fn set), we just copy it, else, we call // idoctofile, which will call topdoctofile (and re-fetch the // data, yes, wastefull) TempFile temp; bool status = false; LOGDEB("Extractor_idoctofile: ipath [" << ipath << "] mimetype [" << mimetype << "] doc mimetype [" << self->docobject->doc->mimetype << "\n"); if (ipath.empty() && !mimetype.compare(self->docobject->doc->mimetype)) { status = FileInterner::idocToFile(temp, outfile, self->rclconfig.get(), *self->docobject->doc); } else { self->xtr->setTargetMType(mimetype); status = self->xtr->interntofile(temp, outfile, ipath, mimetype); } if (!status) { PyErr_SetString(PyExc_AttributeError, "interntofile failure"); return 0; } if (outfile.empty()) temp.setnoremove(1); PyObject *result = outfile.empty() ? 
PyBytes_FromString(temp.filename()) : PyBytes_FromString(outfile.c_str()); return (PyObject *)result; } static PyMethodDef Extractor_methods[] = { {"textextract", (PyCFunction)Extractor_textextract, METH_VARARGS|METH_KEYWORDS, doc_Extractor_textextract}, {"idoctofile", (PyCFunction)Extractor_idoctofile, METH_VARARGS|METH_KEYWORDS, doc_Extractor_idoctofile}, {NULL} /* Sentinel */ }; PyDoc_STRVAR(doc_ExtractorObject, "Extractor()\n" "\n" "An Extractor object can extract data from a native simple or compound\n" "object.\n" ); static PyTypeObject rclx_ExtractorType = { PyVarObject_HEAD_INIT(NULL, 0) "rclextract.Extractor", /*tp_name*/ sizeof(rclx_ExtractorObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)Extractor_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ doc_ExtractorObject, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Extractor_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Extractor_init, /* tp_init */ 0, /* tp_alloc */ Extractor_new, /* tp_new */ }; ///////////////////////////////////// Module-level stuff static PyMethodDef rclextract_methods[] = { {NULL, NULL, 0, NULL} /* Sentinel */ }; PyDoc_STRVAR(rclx_doc_string, "This is an interface to the Recoll text extraction features."); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif #if PY_MAJOR_VERSION >= 3 static int rclextract_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int rclextract_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "rclextract", NULL, sizeof(struct module_state), rclextract_methods, NULL, rclextract_traverse, rclextract_clear, NULL }; #define INITERROR return NULL extern "C" PyObject * PyInit_rclextract(void) #else #define INITERROR return PyMODINIT_FUNC initrclextract(void) #endif { // We run recollinit. It's responsible for initializing some static data // which is distinct from pyrecoll's as we're separately dlopened. // The rclconfig object is not used, we'll get the config // data from the objects out of the recoll module. // Unfortunately, as we're not getting the actual config directory // from pyrecoll (we could, through a capsule), this needs at // least an empty default configuration directory to work. string reason; RclConfig *rclconfig = recollinit(RCLINIT_PYTHON, 0, 0, reason, 0); if (rclconfig == 0) { PyErr_SetString(PyExc_EnvironmentError, reason.c_str()); INITERROR; } else { delete rclconfig; } #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("rclextract", rclextract_methods); #endif if (module == NULL) INITERROR; struct module_state *st = GETSTATE(module); // The first parameter is a char *. Hopefully we don't initialize // modules too often... 
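// (The name string duplicated just below is never freed; this is harmless as // long as the module is not initialized repeatedly.)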
st->error = PyErr_NewException(strdup("rclextract.Error"), NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } PyModule_AddStringConstant(module, "__doc__", rclx_doc_string); if (PyType_Ready(&rclx_ExtractorType) < 0) INITERROR; Py_INCREF(&rclx_ExtractorType); PyModule_AddObject(module, "Extractor", (PyObject *)&rclx_ExtractorType); #if PY_MAJOR_VERSION >= 3 || (PY_MAJOR_VERSION >= 2 && PY_MINOR_VERSION >= 7) recoll_DocType = (PyObject*)PyCapsule_Import(PYRECOLL_PACKAGE "recoll.doctypeptr", 0); #else PyObject *module1 = PyImport_ImportModule(PYRECOLL_PACKAGE "recoll"); if (module1 != NULL) { PyObject *cobject = PyObject_GetAttrString(module1, "doctypeptr"); if (cobject == NULL) INITERROR; if (PyCObject_Check(cobject)) recoll_DocType = (PyObject*)PyCObject_AsVoidPtr(cobject); Py_DECREF(cobject); } #endif #if PY_MAJOR_VERSION >= 3 return module; #endif } recoll-1.26.3/python/recoll/pyrecoll.cpp0000644000175000017500000020357213566424763015173 00000000000000/* Copyright (C) 2007 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include #include #include #include #include #include "rclinit.h" #include "rclconfig.h" #include "rcldb.h" #include "searchdata.h" #include "rclquery.h" #include "pathut.h" #include "rclutil.h" #include "wasatorcl.h" #include "log.h" #include "pathut.h" #include "plaintorich.h" #include "hldata.h" #include "smallut.h" #include "pyrecoll.h" using namespace std; #if PY_MAJOR_VERSION >=3 # define Py_TPFLAGS_HAVE_ITER 0 #else #define PyLong_FromLong PyInt_FromLong #endif // To keep old code going after we moved the static rclconfig to the // db object (to fix multiple dbs issues), we keep a copy of the last // created rclconfig in RCLCONFIG. This is set into the doc objec by // doc_init, then reset to the db's by db::doc() or query::iter_next, // the proper Doc creators. static shared_ptr RCLCONFIG; ////////////////////////////////////////////////////////////////////// /// SEARCHDATA SearchData code typedef struct { PyObject_HEAD /* Type-specific fields go here. */ std::shared_ptr sd; } recoll_SearchDataObject; static void SearchData_dealloc(recoll_SearchDataObject *self) { LOGDEB("SearchData_dealloc. Releasing. Count before: " << self->sd.use_count() << "\n"); self->sd.reset(); Py_TYPE(self)->tp_free((PyObject*)self); } static PyObject * SearchData_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { LOGDEB("SearchData_new\n"); recoll_SearchDataObject *self; self = (recoll_SearchDataObject *)type->tp_alloc(type, 0); if (self == 0) return 0; return (PyObject *)self; } PyDoc_STRVAR(doc_SearchDataObject, "SearchData([type=AND|OR], [stemlang=somelanguage|null])\n" "\n" "A SearchData object describes a query. 
It has a number of global\n" "parameters and a chain of search clauses.\n" ); static int SearchData_init(recoll_SearchDataObject *self, PyObject *args, PyObject *kwargs) { LOGDEB("SearchData_init\n"); static const char* kwlist[] = {"type", "stemlang", NULL}; char *stp = 0; char *steml = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sz", (char**)kwlist, &stp, &steml)) return -1; Rcl::SClType tp = Rcl::SCLT_AND; if (stp && strcasecmp(stp, "or")) { tp = Rcl::SCLT_OR; } string stemlang; if (steml) { stemlang = steml; } else { stemlang = "english"; } self->sd = std::shared_ptr(new Rcl::SearchData(tp, stemlang)); return 0; } /* Note: addclause necessite And/Or vient du fait que le string peut avoir plusieurs mots. A transferer dans l'i/f Python ou pas ? */ PyDoc_STRVAR(doc_addclause, "addclause(type='and'|'or'|'filename'|'phrase'|'near'|'path'|'sub',\n" " qstring=string, slack=int, field=string, stemming=1|0,\n" " subSearch=SearchData, exclude=0|1, anchorstart=0|1, anchorend=0|1,\n" " casesens=0|1, diacsens=0|1)\n" "Adds a simple clause to the SearchData And/Or chain, or a subquery\n" "defined by another SearchData object\n" ); /* Forward declaration only, definition needs recoll_searchDataType */ static PyObject * SearchData_addclause(recoll_SearchDataObject* self, PyObject *args, PyObject *kwargs); static PyMethodDef SearchData_methods[] = { {"addclause", (PyCFunction)SearchData_addclause, METH_VARARGS|METH_KEYWORDS, doc_addclause}, {NULL} /* Sentinel */ }; static PyTypeObject recoll_SearchDataType = { PyVarObject_HEAD_INIT(NULL, 0) "recoll.SearchData", /*tp_name*/ sizeof(recoll_SearchDataObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)SearchData_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ doc_SearchDataObject, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ SearchData_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)SearchData_init, /* tp_init */ 0, /* tp_alloc */ SearchData_new, /* tp_new */ }; static PyObject * SearchData_addclause(recoll_SearchDataObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("SearchData_addclause\n"); if (!self->sd) { LOGERR("SearchData_addclause: not init??\n"); PyErr_SetString(PyExc_AttributeError, "sd"); return 0; } static const char *kwlist[] = {"type", "qstring", "slack", "field", "stemming", "subsearch", "exclude", "anchorstart", "anchorend", "casesens", "diacsens", NULL}; char *tp = 0; char *qs = 0; // needs freeing int slack = 0; char *fld = 0; // needs freeing PyObject *dostem = 0; recoll_SearchDataObject *sub = 0; PyObject *exclude = 0; PyObject *anchorstart = 0; PyObject *anchorend = 0; PyObject *casesens = 0; PyObject *diacsens = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ses|iesOO!OOOOO", (char**)kwlist, &tp, "utf-8", &qs, &slack, "utf-8", &fld, &dostem, &recoll_SearchDataType, &sub, &exclude, &anchorstart, &anchorend, &casesens, &diacsens )) return 0; Rcl::SearchDataClause *cl = 0; switch (tp[0]) { case 'a': case 'A': if (strcasecmp(tp, "and")) goto defaultcase; cl = new Rcl::SearchDataClauseSimple(Rcl::SCLT_AND, qs, fld?fld:""); 
break; case 'f': case 'F': if (strcasecmp(tp, "filename")) goto defaultcase; cl = new Rcl::SearchDataClauseFilename(qs); break; case 'o': case 'O': if (strcasecmp(tp, "or")) goto defaultcase; cl = new Rcl::SearchDataClauseSimple(Rcl::SCLT_OR, qs, fld?fld:""); break; case 'n': case 'N': if (strcasecmp(tp, "near")) goto defaultcase; cl = new Rcl::SearchDataClauseDist(Rcl::SCLT_NEAR, qs, slack, fld ? fld : ""); break; case 'p': case 'P': if (!strcasecmp(tp, "phrase")) { cl = new Rcl::SearchDataClauseDist(Rcl::SCLT_PHRASE, qs, slack, fld ? fld : ""); } else if (!strcasecmp(tp, "path")) { cl = new Rcl::SearchDataClausePath(qs); } else { goto defaultcase; } break; case 's': case 'S': if (strcasecmp(tp, "sub")) goto defaultcase; cl = new Rcl::SearchDataClauseSub(sub->sd); break; defaultcase: default: PyErr_SetString(PyExc_AttributeError, "Bad tp arg"); return 0; } PyMem_Free(qs); PyMem_Free(fld); if (dostem != 0 && !PyObject_IsTrue(dostem)) { cl->addModifier(Rcl::SearchDataClause::SDCM_NOSTEMMING); } if (exclude != 0 && !PyObject_IsTrue(exclude)) { cl->setexclude(true); } if (anchorstart && PyObject_IsTrue(anchorstart)) { cl->addModifier(Rcl::SearchDataClause::SDCM_ANCHORSTART); } if (anchorend && PyObject_IsTrue(anchorend)) { cl->addModifier(Rcl::SearchDataClause::SDCM_ANCHOREND); } if (casesens && PyObject_IsTrue(casesens)) { cl->addModifier(Rcl::SearchDataClause::SDCM_CASESENS); } if (diacsens && PyObject_IsTrue(diacsens)) { cl->addModifier(Rcl::SearchDataClause::SDCM_DIACSENS); } self->sd->addClause(cl); Py_RETURN_NONE; } /////////////////////////////////////////////////////////////////////// ///// DOC Doc code static void Doc_dealloc(recoll_DocObject *self) { LOGDEB("Doc_dealloc\n"); deleteZ(self->doc); Py_TYPE(self)->tp_free((PyObject*)self); } static PyObject * Doc_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { LOGDEB("Doc_new\n"); recoll_DocObject *self; self = (recoll_DocObject *)type->tp_alloc(type, 0); if (self == 0) return 0; self->doc = 0; return (PyObject *)self; } static int Doc_init(recoll_DocObject *self, PyObject *, PyObject *) { LOGDEB("Doc_init\n"); delete self->doc; self->doc = new Rcl::Doc; if (self->doc == 0) return -1; self->rclconfig = RCLCONFIG; return 0; } PyDoc_STRVAR(doc_Doc_getbinurl, "getbinurl(none) -> binary url\n" "\n" "Returns an URL with a path part which is a as bit for bit copy of the \n" "file system path, without encoding\n" ); static PyObject * Doc_getbinurl(recoll_DocObject *self) { LOGDEB0("Doc_getbinurl\n"); if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } return PyBytes_FromStringAndSize(self->doc->url.c_str(), self->doc->url.size()); } PyDoc_STRVAR(doc_Doc_setbinurl, "setbinurl(url) -> binary url\n" "\n" "Set the URL from binary path like file://may/contain/unencodable/bytes\n" ); static PyObject * Doc_setbinurl(recoll_DocObject *self, PyObject *value) { LOGDEB0("Doc_setbinurl\n"); if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc??"); return 0; } if (!PyByteArray_Check(value)) { PyErr_SetString(PyExc_TypeError, "setbinurl needs byte array argument"); return 0; } self->doc->url = string(PyByteArray_AsString(value), PyByteArray_Size(value)); Py_RETURN_NONE; } PyDoc_STRVAR(doc_Doc_keys, "keys() -> list of doc object keys (attribute names)\n" ); static PyObject * Doc_keys(recoll_DocObject *self) { LOGDEB0("Doc_keys\n"); if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } PyObject *pkeys = PyList_New(0); if (!pkeys) return 0; for (const auto& entry : self->doc->meta) { 
PyList_Append(pkeys, PyUnicode_Decode(entry.first.c_str(),entry.first.size(), "UTF-8", "replace")); } return pkeys; } PyDoc_STRVAR(doc_Doc_items, "items() -> dictionary of doc object keys/values\n" ); static PyObject * Doc_items(recoll_DocObject *self) { LOGDEB0("Doc_items\n"); if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } PyObject *pdict = PyDict_New(); if (!pdict) return 0; for (const auto& entry : self->doc->meta) { PyDict_SetItem(pdict, PyUnicode_Decode(entry.first.c_str(), entry.first.size(), "UTF-8", "replace"), PyUnicode_Decode(entry.second.c_str(), entry.second.size(), "UTF-8", "replace")); } return pdict; } static bool idocget(recoll_DocObject *self, const string& key, string& value) { switch (key.at(0)) { case 'u': if (!key.compare(Rcl::Doc::keyurl)) { value = self->doc->url; return true; } break; case 'f': if (!key.compare(Rcl::Doc::keyfs)) { value = self->doc->fbytes; return true; } else if (!key.compare(Rcl::Doc::keyfmt)) { value = self->doc->fmtime; return true; } break; case 'd': if (!key.compare(Rcl::Doc::keyds)) { value = self->doc->dbytes; return true; } else if (!key.compare(Rcl::Doc::keydmt)) { value = self->doc->dmtime; return true; } break; case 'i': if (!key.compare(Rcl::Doc::keyipt)) { value = self->doc->ipath; return true; } break; case 'm': if (!key.compare(Rcl::Doc::keytp)) { value = self->doc->mimetype; return true; } else if (!key.compare(Rcl::Doc::keymt)) { value = self->doc->dmtime.empty() ? self->doc->fmtime : self->doc->dmtime; return true; } break; case 'o': if (!key.compare(Rcl::Doc::keyoc)) { value = self->doc->origcharset; return true; } break; case 's': if (!key.compare(Rcl::Doc::keysig)) { value = self->doc->sig; return true; } else if (!key.compare(Rcl::Doc::keysz)) { value = self->doc->dbytes.empty() ? self->doc->fbytes : self->doc->dbytes; return true; } break; case 't': if (!key.compare("text")) { value = self->doc->text; return true; } break; case 'x': if (!key.compare("xdocid")) { ulltodecstr(self->doc->xdocid, value); return true; } break; } if (self->doc->getmeta(key, 0)) { value = self->doc->meta[key]; return true; } return false; } PyDoc_STRVAR(doc_Doc_get, "get(key) -> value\n" "Retrieve the named doc attribute\n" ); static PyObject * Doc_get(recoll_DocObject *self, PyObject *args) { LOGDEB1("Doc_get\n"); if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc??"); return 0; } char *sutf8 = 0; // needs freeing if (!PyArg_ParseTuple(args, "es:Doc_get", "utf-8", &sutf8)) { return 0; } string key(sutf8); PyMem_Free(sutf8); string value; if (idocget(self, key, value)) { return PyUnicode_Decode(value.c_str(), value.size(), "UTF-8","replace"); } Py_RETURN_NONE; } static PyMethodDef Doc_methods[] = { {"getbinurl", (PyCFunction)Doc_getbinurl, METH_NOARGS, doc_Doc_getbinurl}, {"setbinurl", (PyCFunction)Doc_setbinurl, METH_O, doc_Doc_setbinurl}, {"keys", (PyCFunction)Doc_keys, METH_NOARGS, doc_Doc_keys}, {"items", (PyCFunction)Doc_items, METH_NOARGS, doc_Doc_items}, {"get", (PyCFunction)Doc_get, METH_VARARGS, doc_Doc_get}, {NULL} /* Sentinel */ }; // Note that this returns None if the attribute is not found instead of raising // an exception as would be standard. We don't change it to keep existing code // working. 
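// For example, from Python, doc.nosuchfield simply evaluates to None instead of // raising AttributeError ("nosuchfield" being just an illustrative attribute name).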
static PyObject * Doc_getattro(recoll_DocObject *self, PyObject *nameobj) { if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } if (!self->rclconfig || !self->rclconfig->ok()) { PyErr_SetString(PyExc_AttributeError, "Configuration not initialized"); return 0; } PyObject *meth = PyObject_GenericGetAttr((PyObject*)self, nameobj); if (meth) { return meth; } PyErr_Clear(); string name; if (PyUnicode_Check(nameobj)) { PyObject* utf8o = PyUnicode_AsUTF8String(nameobj); if (utf8o == 0) { LOGERR("Doc_getattro: encoding name to utf8 failed\n"); PyErr_SetString(PyExc_AttributeError, "name??"); Py_RETURN_NONE; } name = PyBytes_AsString(utf8o); Py_DECREF(utf8o); } else if (PyBytes_Check(nameobj)) { name = PyBytes_AsString(nameobj); } else { PyErr_SetString(PyExc_AttributeError, "name not unicode nor string??"); Py_RETURN_NONE; } string key = self->rclconfig->fieldQCanon(name); string value; if (idocget(self, key, value)) { LOGDEB1("Doc_getattro: [" << key << "] -> [" << value << "]\n"); // Return a python unicode object return PyUnicode_Decode(value.c_str(), value.size(), "utf-8","replace"); } Py_RETURN_NONE; } static int Doc_setattr(recoll_DocObject *self, char *name, PyObject *value) { if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc??"); return -1; } if (!self->rclconfig || !self->rclconfig->ok()) { PyErr_SetString(PyExc_AttributeError, "Configuration not initialized"); return -1; } if (name == 0) { PyErr_SetString(PyExc_AttributeError, "name??"); return -1; } if (PyBytes_Check(value)) { value = PyUnicode_FromEncodedObject(value, "UTF-8", "strict"); if (value == 0) return -1; } if (!PyUnicode_Check(value)) { PyErr_SetString(PyExc_AttributeError, "value not unicode??"); return -1; } PyObject* putf8 = PyUnicode_AsUTF8String(value); if (putf8 == 0) { LOGERR("Doc_setmeta: encoding to utf8 failed\n"); PyErr_SetString(PyExc_AttributeError, "value??"); return -1; } string uvalue = PyBytes_AsString(putf8); Py_DECREF(putf8); string key = self->rclconfig->fieldQCanon(name); LOGDEB0("Doc_setattr: doc " << self->doc << " [" << key << "] (" << name << ") -> [" << uvalue << "]\n"); // We set the value in the meta array in all cases. Good idea ? or do it // only for fields without a dedicated Doc:: entry? 
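// The switch below additionally stores the value into the matching dedicated // Doc member for the recognized canonical keys (text, url, fbytes, fmtime, // dbytes, dmtime, ipath, mimetype, origcharset, sig).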
self->doc->meta[key] = uvalue; switch (key.at(0)) { case 't': if (!key.compare("text")) { self->doc->text.swap(uvalue); } break; case 'u': if (!key.compare(Rcl::Doc::keyurl)) { self->doc->url.swap(uvalue); } break; case 'f': if (!key.compare(Rcl::Doc::keyfs)) { self->doc->fbytes.swap(uvalue); } else if (!key.compare(Rcl::Doc::keyfmt)) { self->doc->fmtime.swap(uvalue); } break; case 'd': if (!key.compare(Rcl::Doc::keyds)) { self->doc->dbytes.swap(uvalue); } else if (!key.compare(Rcl::Doc::keydmt)) { self->doc->dmtime.swap(uvalue); } break; case 'i': if (!key.compare(Rcl::Doc::keyipt)) { self->doc->ipath.swap(uvalue); } break; case 'm': if (!key.compare(Rcl::Doc::keytp)) { self->doc->mimetype.swap(uvalue); } else if (!key.compare(Rcl::Doc::keymt)) { self->doc->dmtime.swap(uvalue); } break; case 'o': if (!key.compare(Rcl::Doc::keyoc)) { self->doc->origcharset.swap(uvalue); } break; case 's': if (!key.compare(Rcl::Doc::keysig)) { self->doc->sig.swap(uvalue); } else if (!key.compare(Rcl::Doc::keysz)) { self->doc->dbytes.swap(uvalue); } break; } return 0; } static Py_ssize_t Doc_length(recoll_DocObject *self) { if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc??"); return -1; } return self->doc->meta.size(); } static PyObject * Doc_subscript(recoll_DocObject *self, PyObject *key) { if (self->doc == 0) { PyErr_SetString(PyExc_AttributeError, "doc??"); return NULL; } if (!self->rclconfig || !self->rclconfig->ok()) { PyErr_SetString(PyExc_AttributeError, "Configuration not initialized"); return NULL; } string name; if (PyUnicode_Check(key)) { PyObject* utf8o = PyUnicode_AsUTF8String(key); if (utf8o == 0) { LOGERR("Doc_getitemo: encoding name to utf8 failed\n"); PyErr_SetString(PyExc_AttributeError, "name??"); Py_RETURN_NONE; } name = PyBytes_AsString(utf8o); Py_DECREF(utf8o); } else if (PyBytes_Check(key)) { name = PyBytes_AsString(key); } else { PyErr_SetString(PyExc_AttributeError, "key not unicode nor string??"); Py_RETURN_NONE; } string skey = self->rclconfig->fieldQCanon(name); string value; if (idocget(self, skey, value)) { return PyUnicode_Decode(value.c_str(), value.size(), "UTF-8","replace"); } Py_RETURN_NONE; } static PyMappingMethods doc_as_mapping = { (lenfunc)Doc_length, /*mp_length*/ (binaryfunc)Doc_subscript, /*mp_subscript*/ (objobjargproc)0, /*mp_ass_subscript*/ }; PyDoc_STRVAR(doc_DocObject, "Doc()\n" "\n" "A Doc object contains index data for a given document.\n" "The data is extracted from the index when searching, or set by the\n" "indexer program when updating. The Doc object has no useful methods but\n" "many attributes to be read or set by its user. It matches exactly the\n" "Rcl::Doc c++ object. Some of the attributes are predefined, but, \n" "especially when indexing, others can be set, the name of which will be\n" "processed as field names by the indexing configuration.\n" "Inputs can be specified as unicode or strings.\n" "Outputs are unicode objects.\n" "All dates are specified as unix timestamps, printed as strings\n" "Predefined attributes (index/query/both):\n" " text (index): document plain text\n" " url (both)\n" " fbytes (both) optional) file size in bytes\n" " filename (both)\n" " fmtime (both) optional file modification date. 
Unix time printed \n" " as string\n" " dbytes (both) document text bytes\n" " dmtime (both) document creation/modification date\n" " ipath (both) value private to the app.: internal access path\n" " inside file\n" " mtype (both) mime type for original document\n" " mtime (query) dmtime if set else fmtime\n" " origcharset (both) charset the text was converted from\n" " size (query) dbytes if set, else fbytes\n" " sig (both) app-defined file modification signature. \n" " For up to date checks\n" " relevancyrating (query)\n" " abstract (both)\n" " author (both)\n" " title (both)\n" " keywords (both)\n" ); static PyTypeObject recoll_DocType = { PyVarObject_HEAD_INIT(NULL, 0) "recoll.Doc", /*tp_name*/ sizeof(recoll_DocObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)Doc_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ (setattrfunc)Doc_setattr, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ &doc_as_mapping, /*tp_as_mapping */ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ (getattrofunc)Doc_getattro,/*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT, /*tp_flags*/ doc_DocObject, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Doc_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Doc_init, /* tp_init */ 0, /* tp_alloc */ Doc_new, /* tp_new */ }; ////////////////////////////////////////////////////// /// QUERY Query object typedef struct recoll_DbObject { PyObject_HEAD /* Type-specific fields go here. */ Rcl::Db *db; std::shared_ptr rclconfig; } recoll_DbObject; typedef struct { PyObject_HEAD /* Type-specific fields go here. */ Rcl::Query *query; int next; // Index of result to be fetched next or -1 if uninit int rowcount; // Number of records returned by last execute string *sortfield; // Need to allocate in here, main program is C. int ascending; int arraysize; // Default size for fetchmany recoll_DbObject* connection; bool fetchtext; } recoll_QueryObject; PyDoc_STRVAR(doc_Query_close, "close(). Deallocate query. Object is unusable after the call." ); static PyObject * Query_close(recoll_QueryObject *self) { LOGDEB("Query_close\n"); if (self->query) { deleteZ(self->query); } deleteZ(self->sortfield); if (self->connection) { Py_DECREF(self->connection); self->connection = 0; } Py_RETURN_NONE; } static void Query_dealloc(recoll_QueryObject *self) { LOGDEB("Query_dealloc\n"); PyObject *ret = Query_close(self); Py_DECREF(ret); Py_TYPE(self)->tp_free((PyObject*)self); } static PyObject * Query_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { LOGDEB("Query_new\n"); recoll_QueryObject *self; self = (recoll_QueryObject *)type->tp_alloc(type, 0); if (self == 0) return 0; self->query = 0; self->next = -1; self->rowcount = -1; self->sortfield = new string; self->ascending = 1; self->arraysize = 1; self->connection = 0; self->fetchtext = false; return (PyObject *)self; } // Query_init creates an unusable object. The only way to create a // valid Query Object is through db_query(). 
(or we'd need to add a Db // parameter to the Query object creation method) static int Query_init(recoll_QueryObject *self, PyObject *, PyObject *) { LOGDEB("Query_init\n"); delete self->query; self->query = 0; self->next = -1; self->ascending = true; return 0; } static PyObject * Query_iter(PyObject *self) { Py_INCREF(self); return self; } PyDoc_STRVAR(doc_Query_sortby, "sortby(field=fieldname, ascending=True)\n" "Sort results by 'fieldname', in ascending or descending order.\n" "Only one field can be used, no subsorts for now.\n" "Must be called before executing the search\n" ); static PyObject * Query_sortby(recoll_QueryObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_sortby\n"); static const char *kwlist[] = {"field", "ascending", NULL}; char *sfield = 0; PyObject *ascobj = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|O", (char**)kwlist, &sfield, &ascobj)) return 0; if (sfield) { self->sortfield->assign(sfield); } else { self->sortfield->clear(); } if (ascobj == 0) { self->ascending = true; } else { self->ascending = PyObject_IsTrue(ascobj); } Py_RETURN_NONE; } PyDoc_STRVAR(doc_Query_execute, "execute(query_string, stemming=1|0, stemlang=\"stemming language\", " "fetchtext=False)\n" "\n" "Starts a search for query_string, a Recoll search language string\n" "(mostly Xesam-compatible).\n" "The query can be a simple list of terms (and'ed by default), or more\n" "complicated with field specs etc. See the Recoll manual.\n" ); static PyObject * Query_execute(recoll_QueryObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_execute\n"); static const char *kwlist[] = {"query_string", "stemming", "stemlang", "fetchtext", "collapseduplicates", NULL}; char *sutf8 = 0; // needs freeing char *sstemlang = 0; PyObject *dostemobj = 0; PyObject *fetchtextobj = 0; PyObject *collapseobj = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "es|OesOO:Query_execute", (char**)kwlist, "utf-8", &sutf8, &dostemobj, "utf-8", &sstemlang, &fetchtextobj, &collapseobj)) { return 0; } bool dostem{true}; if (dostemobj != 0 && !PyObject_IsTrue(dostemobj)) dostem = false; if (fetchtextobj != 0 && PyObject_IsTrue(fetchtextobj)) { self->fetchtext = true; } else { self->fetchtext = false; } string utf8(sutf8); PyMem_Free(sutf8); string stemlang("english"); if (sstemlang) { stemlang.assign(sstemlang); PyMem_Free(sstemlang); } LOGDEB0("Query_execute: [" << utf8 << "] dostem " << dostem << " stemlang [" << stemlang << "]\n"); if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } if (collapseobj != 0 && PyObject_IsTrue(collapseobj)) { self->query->setCollapseDuplicates(true); } else { self->query->setCollapseDuplicates(false); } // SearchData defaults to stemming in english // Use default for now but need to add way to specify language string reason; Rcl::SearchData *sd = wasaStringToRcl( self->connection->rclconfig.get(),dostem ? 
stemlang : "", utf8, reason); if (!sd) { PyErr_SetString(PyExc_ValueError, reason.c_str()); return 0; } std::shared_ptr<Rcl::SearchData> rq(sd); self->query->setSortBy(*self->sortfield, self->ascending); self->query->setQuery(rq); int cnt = self->query->getResCnt(); self->next = 0; self->rowcount = cnt; return Py_BuildValue("i", cnt); } PyDoc_STRVAR(doc_Query_executesd, "executesd(SearchData, fetchtext=False)\n" "\n" "Starts a search for the query defined by the SearchData object.\n" ); static PyObject * Query_executesd(recoll_QueryObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_executeSD\n"); static const char *kwlist[] = {"searchdata", "fetchtext", "collapseduplicates", NULL}; recoll_SearchDataObject *pysd = 0; PyObject *fetchtextobj = 0; PyObject *collapseobj = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|OO:Query_execute", (char **)kwlist, &recoll_SearchDataType, &pysd, &fetchtextobj, &collapseobj)) { return 0; } if (pysd == 0 || self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } if (fetchtextobj != 0 && PyObject_IsTrue(fetchtextobj)) { self->fetchtext = true; } else { self->fetchtext = false; } if (collapseobj != 0 && PyObject_IsTrue(collapseobj)) { self->query->setCollapseDuplicates(true); } else { self->query->setCollapseDuplicates(false); } self->query->setSortBy(*self->sortfield, self->ascending); self->query->setQuery(pysd->sd); int cnt = self->query->getResCnt(); self->next = 0; self->rowcount = cnt; return Py_BuildValue("i", cnt); } // Move some data from the dedicated fields to the meta array to make // fetching attributes easier. Needed because we only use the meta // array when enumerating keys. The url is also stored in printable form. // But note that some fields are not copied, and are only reachable if // one knows their name (e.g. xdocid). 
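/* Illustrative Python-side sketch of how these copied fields are typically
 * consumed (an assumed usage example, not part of the original sources;
 * connect() and Db.query() are defined further down in this module):
 *
 *   from recoll import recoll
 *   db = recoll.connect()            # default configuration assumed to exist
 *   q = db.query()
 *   q.execute("some search terms")
 *   for doc in q:
 *       print(doc.url, doc.mimetype) # copied into meta by movedocfields()
 *       print(doc.get("xdocid"))     # not copied: must be asked for by name
 */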
static void movedocfields(const RclConfig* rclconfig, Rcl::Doc *doc) { printableUrl(rclconfig->getDefCharset(), doc->url, doc->meta[Rcl::Doc::keyurl]); doc->meta[Rcl::Doc::keytp] = doc->mimetype; doc->meta[Rcl::Doc::keyipt] = doc->ipath; doc->meta[Rcl::Doc::keyfs] = doc->fbytes; doc->meta[Rcl::Doc::keyds] = doc->dbytes; } static PyObject * Query_iternext(PyObject *_self) { LOGDEB0("Query_iternext\n"); recoll_QueryObject* self = (recoll_QueryObject*)_self; if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } int cnt = self->query->getResCnt(); if (cnt <= 0 || self->next < 0) { // This happens if there are no results and is not an error return 0; } recoll_DocObject *result = (recoll_DocObject *)PyObject_CallObject((PyObject *)&recoll_DocType, 0); if (!result) { PyErr_SetString(PyExc_EnvironmentError, "doc create failed"); return 0; } result->rclconfig = self->connection->rclconfig; // We used to check against rowcount here, but this was wrong: // xapian result count estimate are sometimes wrong, we must go on // fetching until we fail if (!self->query->getDoc(self->next, *result->doc, self->fetchtext)) { return 0; } self->next++; movedocfields(self->connection->rclconfig.get(), result->doc); return (PyObject *)result; } PyDoc_STRVAR(doc_Query_fetchone, "fetchone(None) -> Doc\n" "\n" "Fetches the next Doc object in the current search results.\n" ); static PyObject * Query_fetchone(PyObject *_self) { LOGDEB0("Query_fetchone/next\n"); recoll_DocObject *result = (recoll_DocObject *)Query_iternext(_self); if (!result) { Py_RETURN_NONE; } return (PyObject *)result; } PyDoc_STRVAR(doc_Query_fetchmany, "fetchmany([size=query.arraysize]) -> Doc list\n" "\n" "Fetches the next Doc objects in the current search results.\n" ); static PyObject * Query_fetchmany(PyObject* _self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_fetchmany\n"); recoll_QueryObject* self = (recoll_QueryObject*)_self; static const char *kwlist[] = {"size", NULL}; int size = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i", (char**)kwlist, &size)) return 0; if (size == 0) size = self->arraysize; PyObject *reslist = PyList_New(0); for (int i = 0; i < size; i++) { recoll_DocObject *docobj = (recoll_DocObject *)Query_iternext(_self); if (!docobj) { break; } PyList_Append(reslist, (PyObject*)docobj); Py_DECREF(docobj); } if (PyErr_Occurred()) { Py_DECREF(reslist); return NULL; } else { return reslist; } } PyDoc_STRVAR(doc_Query_scroll, "scroll(value, [, mode='relative'/'absolute' ]) -> new int position\n" "\n" "Adjusts the position in the current result set.\n" ); static PyObject * Query_scroll(recoll_QueryObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_scroll\n"); static const char *kwlist[] = {"position", "mode", NULL}; int pos = 0; char *smode = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|s", (char**)kwlist, &pos, &smode)) return 0; bool isrelative = 1; if (smode != 0) { if (!strcasecmp(smode, "relative")) { isrelative = 1; } else if (!strcasecmp(smode, "absolute")) { isrelative = 0; } else { PyErr_SetString(PyExc_ValueError, "bad mode value"); return 0; } } if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "null query"); return 0; } int newpos = isrelative ? 
self->next + pos : pos; if (newpos < 0 || newpos >= self->rowcount) { PyErr_SetString(PyExc_IndexError, "position out of range"); return 0; } self->next = newpos; return Py_BuildValue("i", newpos); } PyDoc_STRVAR(doc_Query_highlight, "highlight(text, ishtml = 0/1, methods = object))\n" "Will insert tags around the match areas\n" "in the input text and return the modified text\n" "ishtml can be set to indicate that the input text is html and html special\n" " characters should not be escaped\n" "methods if set should be an object with methods startMatch(i) and endMatch()\n" " which will be called for each match and should return a begin and end tag\n" ); class PyPlainToRich: public PlainToRich { public: PyPlainToRich(PyObject *methods, bool eolbr = false) : m_methods(methods) { m_eolbr = eolbr; } virtual ~PyPlainToRich() { } virtual string startMatch(unsigned int idx) { PyObject *res = 0; if (m_methods) res = PyObject_CallMethod(m_methods, (char *)"startMatch", (char *)"(i)", idx); if (res == 0) return ""; PyObject *res1 = res; if (PyUnicode_Check(res)) res1 = PyUnicode_AsUTF8String(res); return PyBytes_AsString(res1); } virtual string endMatch() { PyObject *res = 0; if (m_methods) res = PyObject_CallMethod(m_methods, (char *)"endMatch", 0); if (res == 0) return ""; PyObject *res1 = res; if (PyUnicode_Check(res)) res1 = PyUnicode_AsUTF8String(res); return PyBytes_AsString(res1); } PyObject *m_methods; }; static PyObject * Query_highlight(recoll_QueryObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Query_highlight\n"); static const char *kwlist[] = {"text", "ishtml", "eolbr", "methods", NULL}; char *sutf8 = 0; // needs freeing int ishtml = 0; PyObject *ishtmlobj = 0; int eolbr = 1; PyObject *eolbrobj = 0; PyObject *methods = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "es|OOO:Query_highlight", (char**)kwlist, "utf-8", &sutf8, &ishtmlobj, &eolbrobj, &methods)) { return 0; } string utf8(sutf8); PyMem_Free(sutf8); if (ishtmlobj && PyObject_IsTrue(ishtmlobj)) ishtml = 1; if (eolbrobj && !PyObject_IsTrue(eolbrobj)) eolbr = 0; LOGDEB0("Query_highlight: ishtml " << ishtml << "\n"); if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } std::shared_ptr sd = self->query->getSD(); if (!sd) { PyErr_SetString(PyExc_ValueError, "Query not initialized"); return 0; } HighlightData hldata; sd->getTerms(hldata); PyPlainToRich hler(methods, eolbr); hler.set_inputhtml(ishtml); list out; hler.plaintorich(utf8, out, hldata, 5000000); if (out.empty()) { PyErr_SetString(PyExc_ValueError, "Plaintorich failed"); return 0; } // cf python manual:The bytes will be interpreted as being UTF-8 encoded. PyObject* unicode = PyUnicode_FromStringAndSize(out.begin()->c_str(), out.begin()->size()); // We used to return a copy of the unicode object. Can't see why any more return unicode; } PyDoc_STRVAR(doc_Query_makedocabstract, "makedocabstract(doc, methods = object))\n" "Will create a snippets abstract for doc by selecting text around the match\n" " terms\n" "If methods is set, will also perform highlighting. 
See the highlight method\n" ); static PyObject * Query_makedocabstract(recoll_QueryObject* self, PyObject *args,PyObject *kwargs) { LOGDEB0("Query_makeDocAbstract\n"); static const char *kwlist[] = {"doc", "methods", NULL}; recoll_DocObject *pydoc = 0; PyObject *hlmethods = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O:Query_makeDocAbstract", (char **)kwlist, &recoll_DocType, &pydoc, &hlmethods)) { return 0; } if (pydoc->doc == 0) { LOGERR("Query_makeDocAbstract: doc not found " << pydoc->doc << "\n"); PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } if (self->query == 0) { LOGERR("Query_makeDocAbstract: query not found " << self->query<< "\n"); PyErr_SetString(PyExc_AttributeError, "query"); return 0; } std::shared_ptr sd = self->query->getSD(); if (!sd) { PyErr_SetString(PyExc_ValueError, "Query not initialized"); return 0; } string abstract; if (hlmethods == 0) { // makeDocAbstract() can fail if there are no query terms (e.g. for // a query like [ext:odt]. This should not cause an exception self->query->makeDocAbstract(*(pydoc->doc), abstract); } else { HighlightData hldata; sd->getTerms(hldata); PyPlainToRich hler(hlmethods); hler.set_inputhtml(0); vector vabs; self->query->makeDocAbstract(*pydoc->doc, vabs); for (unsigned int i = 0; i < vabs.size(); i++) { if (vabs[i].empty()) continue; list lr; // There may be data like page numbers before the snippet text. // will be in brackets. string::size_type bckt = vabs[i].find("]"); if (bckt == string::npos) { hler.plaintorich(vabs[i], lr, hldata); } else { hler.plaintorich(vabs[i].substr(bckt), lr, hldata); lr.front() = vabs[i].substr(0, bckt) + lr.front(); } abstract += lr.front(); abstract += "..."; } } // Return a python unicode object return PyUnicode_Decode(abstract.c_str(), abstract.size(), "UTF-8", "replace"); } PyDoc_STRVAR(doc_Query_getxquery, "getxquery(None) -> Unicode string\n" "\n" "Retrieves the Xapian query description as a Unicode string.\n" "Meaningful only after executexx\n" ); static PyObject * Query_getxquery(recoll_QueryObject* self, PyObject *, PyObject *) { LOGDEB0("Query_getxquery self->query " << self->query << "\n"); if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } std::shared_ptr sd = self->query->getSD(); if (!sd) { PyErr_SetString(PyExc_ValueError, "Query not initialized"); return 0; } string desc = sd->getDescription(); return PyUnicode_Decode(desc.c_str(), desc.size(), "UTF-8", "replace"); } PyDoc_STRVAR(doc_Query_getgroups, "getgroups(None) -> a list of pairs\n" "\n" "Retrieves the expanded query terms. Meaningful only after executexx\n" "In each pair, the first entry is a list of user terms, the second a list of\n" "query terms as derived from the user terms and used in the Xapian Query.\n" "The size of each list is one for simple terms, or more for group and phrase\n" "clauses\n" ); static PyObject * Query_getgroups(recoll_QueryObject* self, PyObject *, PyObject *) { LOGDEB0("Query_getgroups\n"); if (self->query == 0) { PyErr_SetString(PyExc_AttributeError, "query"); return 0; } std::shared_ptr sd = self->query->getSD(); if (!sd) { PyErr_SetString(PyExc_ValueError, "Query not initialized"); return 0; } HighlightData hld; sd->getTerms(hld); PyObject *mainlist = PyList_New(0); PyObject *ulist; PyObject *xlist; // We walk the groups vector. For each we retrieve the user group, // make a python list of each, then group those in a pair, and // append this to the main list. 
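// For example (values hypothetical), a stem-expanded single-term query on
// "address" could produce [(["address"], ["address", "addressed", "addresses"])]:
// one (user terms, index terms) pair per term or group clause.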
for (unsigned int i = 0; i < hld.index_term_groups.size(); i++) { HighlightData::TermGroup& tg{hld.index_term_groups[i]}; unsigned int ugidx = tg.grpsugidx; ulist = PyList_New(hld.ugroups[ugidx].size()); for (unsigned int j = 0; j < hld.ugroups[ugidx].size(); j++) { PyList_SetItem(ulist, j, PyUnicode_Decode(hld.ugroups[ugidx][j].c_str(), hld.ugroups[ugidx][j].size(), "UTF-8", "replace")); } // Not sure that this makes any sense after we changed from // multiply_groups to using or-plists. TBD: check if (tg.kind == HighlightData::TermGroup::TGK_TERM) { xlist = PyList_New(1); PyList_SetItem(xlist, 0, PyUnicode_Decode(tg.term.c_str(), tg.term.size(), "UTF-8", "replace")); } else { xlist = PyList_New(tg.orgroups.size()); for (unsigned int j = 0; j < tg.orgroups.size(); j++) { PyList_SetItem(xlist, j, PyUnicode_Decode(tg.orgroups[j][0].c_str(), tg.orgroups[j][0].size(), "UTF-8", "replace")); } } PyList_Append(mainlist, Py_BuildValue("(OO)", ulist, xlist)); } return mainlist; } static PyMethodDef Query_methods[] = { {"execute", (PyCFunction)Query_execute, METH_VARARGS|METH_KEYWORDS, doc_Query_execute}, {"executesd", (PyCFunction)Query_executesd, METH_VARARGS|METH_KEYWORDS, doc_Query_executesd}, {"next", (PyCFunction)Query_fetchone, METH_NOARGS, doc_Query_fetchone}, {"fetchone", (PyCFunction)Query_fetchone, METH_NOARGS, doc_Query_fetchone}, {"fetchmany", (PyCFunction)Query_fetchmany, METH_VARARGS|METH_KEYWORDS, doc_Query_fetchmany}, {"close", (PyCFunction)Query_close, METH_NOARGS, doc_Query_close}, {"sortby", (PyCFunction)Query_sortby, METH_VARARGS|METH_KEYWORDS, doc_Query_sortby}, {"highlight", (PyCFunction)Query_highlight, METH_VARARGS|METH_KEYWORDS, doc_Query_highlight}, {"getxquery", (PyCFunction)Query_getxquery, METH_NOARGS, doc_Query_getxquery}, {"getgroups", (PyCFunction)Query_getgroups, METH_NOARGS, doc_Query_getgroups}, {"makedocabstract", (PyCFunction)Query_makedocabstract, METH_VARARGS|METH_KEYWORDS, doc_Query_makedocabstract}, {"scroll", (PyCFunction)Query_scroll, METH_VARARGS|METH_KEYWORDS, doc_Query_scroll}, {NULL} /* Sentinel */ }; static PyMemberDef Query_members[] = { {(char*)"rownumber", T_INT, offsetof(recoll_QueryObject, next), 0, (char*)"Next index to be fetched from results. Normally increments after\n" "each fetchone() call, but can be set/reset before the call effect\n" "seeking. Starts at 0" }, {(char*)"rowcount", T_INT, offsetof(recoll_QueryObject, rowcount), READONLY, (char*)"Number of records returned by the last execute" }, {(char*)"arraysize", T_INT, offsetof(recoll_QueryObject, arraysize), 0, (char*)"Default number of records processed by fetchmany (r/w)" }, {(char*)"connection", T_OBJECT_EX, offsetof(recoll_QueryObject, connection), 0, (char*)"Connection object this is from" }, {NULL} /* Sentinel */ }; PyDoc_STRVAR(doc_QueryObject, "Recoll Query objects are used to execute index searches. 
\n" "They must be created by the Db.query() method.\n" ); static PyTypeObject recoll_QueryType = { PyVarObject_HEAD_INIT(NULL, 0) "recoll.Query", /*tp_name*/ sizeof(recoll_QueryObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)Query_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_ITER, /*tp_flags*/ doc_QueryObject, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ Query_iter, /* tp_iter */ Query_iternext, /* tp_iternext */ Query_methods, /* tp_methods */ Query_members, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Query_init, /* tp_init */ 0, /* tp_alloc */ Query_new, /* tp_new */ }; /////////////////////////////////////////////// ////// DB Db object code static PyObject * Db_close(recoll_DbObject *self) { LOGDEB("Db_close. self " << self << "\n"); if (self->db) { delete self->db; self->db = 0; } self->rclconfig.reset(); Py_RETURN_NONE; } static void Db_dealloc(recoll_DbObject *self) { LOGDEB("Db_dealloc\n"); PyObject *ret = Db_close(self); Py_DECREF(ret); Py_TYPE(self)->tp_free((PyObject*)self); } static PyObject * Db_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { LOGDEB2("Db_new\n"); recoll_DbObject *self; self = (recoll_DbObject *)type->tp_alloc(type, 0); if (self == 0) return 0; self->db = 0; return (PyObject *)self; } static int Db_init(recoll_DbObject *self, PyObject *args, PyObject *kwargs) { static const char *kwlist[] = {"confdir", "extra_dbs", "writable", NULL}; PyObject *extradbs = 0; char *confdir = 0; int writable = 0; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|sOi", (char**)kwlist, &confdir, &extradbs, &writable)) return -1; // If the user creates several dbs, changing the confdir, we call // recollinit repeatedly, which *should* be ok, except that it // resets the log file. string reason; if (confdir) { string cfd = confdir; self->rclconfig = std::shared_ptr( recollinit(RCLINIT_PYTHON, 0, 0, reason, &cfd)); } else { self->rclconfig = std::shared_ptr( recollinit(RCLINIT_PYTHON, 0, 0, reason, 0)); } RCLCONFIG = self->rclconfig; LOGDEB("Db_init\n"); if (!self->rclconfig) { PyErr_SetString(PyExc_EnvironmentError, reason.c_str()); return -1; } if (!self->rclconfig->ok()) { PyErr_SetString(PyExc_EnvironmentError, "Bad config ?"); return -1; } delete self->db; self->db = new Rcl::Db(self->rclconfig.get()); if (!self->db->open(writable ? 
Rcl::Db::DbUpd : Rcl::Db::DbRO)) { LOGERR("Db_init: db open error\n"); PyErr_SetString(PyExc_EnvironmentError, "Can't open index"); return -1; } if (extradbs) { if (!PySequence_Check(extradbs)) { PyErr_SetString(PyExc_TypeError, "extra_dbs must be a sequence"); deleteZ(self->db); return -1; } int dbcnt = PySequence_Size(extradbs); if (dbcnt == -1) { PyErr_SetString(PyExc_TypeError, "extra_dbs could not be sized"); deleteZ(self->db); return -1; } for (int i = 0; i < dbcnt; i++) { PyObject *item = PySequence_GetItem(extradbs, i); const char *s = PyBytes_AsString(item); if (s == nullptr) { PyErr_SetString(PyExc_TypeError, "extra_dbs must contain strings"); deleteZ(self->db); Py_DECREF(item); return -1; } string dbname(s); Py_DECREF(item); if (!self->db->addQueryDb(dbname)) { PyErr_SetString(PyExc_EnvironmentError, "extra db could not be opened"); deleteZ(self->db); return -1; } } } return 0; } static PyObject * Db_query(recoll_DbObject* self) { LOGDEB("Db_query\n"); if (self->db == 0) { LOGERR("Db_query: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); return 0; } recoll_QueryObject *result = (recoll_QueryObject *) PyObject_CallObject((PyObject *)&recoll_QueryType, 0); if (!result) return 0; result->query = new Rcl::Query(self->db); result->connection = self; Py_INCREF(self); return (PyObject *)result; } static PyObject * Db_doc(recoll_DbObject* self) { LOGDEB("Db_doc\n"); if (self->db == 0) { LOGERR("Db_doc: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); return 0; } recoll_DocObject *result = (recoll_DocObject *) PyObject_CallObject((PyObject *)&recoll_DocType, 0); if (!result) return 0; result->rclconfig = self->rclconfig; Py_INCREF(self); return (PyObject *)result; } static PyObject * Db_setAbstractParams(recoll_DbObject *self, PyObject *args, PyObject *kwargs) { LOGDEB0("Db_setAbstractParams\n"); static const char *kwlist[] = {"maxchars", "contextwords", NULL}; int ctxwords = -1, maxchars = -1; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", (char**)kwlist, &maxchars, &ctxwords)) return 0; if (self->db == 0) { LOGERR("Db_setAbstractParams: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db id not found"); return 0; } LOGDEB0("Db_setAbstractParams: mxchrs " << maxchars << ", ctxwrds " << ctxwords << "\n"); self->db->setAbstractParams(-1, maxchars, ctxwords); Py_RETURN_NONE; } static PyObject * Db_makeDocAbstract(recoll_DbObject* self, PyObject *args) { LOGDEB0("Db_makeDocAbstract\n"); recoll_DocObject *pydoc = 0; recoll_QueryObject *pyquery = 0; if (!PyArg_ParseTuple(args, "O!O!:Db_makeDocAbstract", &recoll_DocType, &pydoc, &recoll_QueryType, &pyquery)) { return 0; } if (self->db == 0) { LOGERR("Db_makeDocAbstract: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); return 0; } if (pydoc->doc == 0) { LOGERR("Db_makeDocAbstract: doc not found " << pydoc->doc << "\n"); PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } if (pyquery->query == 0) { LOGERR("Db_makeDocAbstract: query not found "<< pyquery->query << "\n"); PyErr_SetString(PyExc_AttributeError, "query"); return 0; } string abstract; if (!pyquery->query->makeDocAbstract(*(pydoc->doc), abstract)) { PyErr_SetString(PyExc_EnvironmentError, "rcl makeDocAbstract failed"); return 0; } // Return a python unicode object return PyUnicode_Decode(abstract.c_str(), abstract.size(), "UTF-8", "replace"); } PyDoc_STRVAR( doc_Db_termMatch, "termMatch(match_type='wildcard|regexp|stem', expr, field='', " 
"maxlen=-1, casesens=False, diacsens=False, lang='english', freqs=False)" " returns the expanded term list\n" "\n" "Expands the input expression according to the mode and parameters and " "returns the expanded term list, as raw terms if freqs is False, or " "(term, totcnt, docnt) tuples if freqs is True.\n" ); static PyObject * Db_termMatch(recoll_DbObject* self, PyObject *args, PyObject *kwargs) { LOGDEB0("Db_termMatch\n"); static const char *kwlist[] = {"type", "expr", "field", "maxlen", "casesens", "diacsens", "freqs", "lang", NULL}; char *tp = 0; char *expr = 0; // needs freeing char *field = 0; // needs freeing int maxlen = -1; PyObject *casesens = 0; PyObject *diacsens = 0; PyObject *freqs = 0; char *lang = 0; // needs freeing PyObject *ret = 0; int typ_sens = 0; Rcl::TermMatchResult result; bool showfreqs = false; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ses|esiOOOes", (char**)kwlist, &tp, "utf-8", &expr, "utf-8", &field, &maxlen, &casesens, &diacsens, &freqs, "utf-8", &lang)) return 0; if (self->db == 0) { LOGERR("Db_termMatch: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); goto out; } if (!strcasecmp(tp, "wildcard")) { typ_sens = Rcl::Db::ET_WILD; } else if (!strcasecmp(tp, "regexp")) { typ_sens = Rcl::Db::ET_REGEXP; } else if (!strcasecmp(tp, "stem")) { typ_sens = Rcl::Db::ET_STEM; } else { PyErr_SetString(PyExc_AttributeError, "Bad type arg"); goto out; } if (casesens != 0 && PyObject_IsTrue(casesens)) { typ_sens |= Rcl::Db::ET_CASESENS; } if (diacsens != 0 && PyObject_IsTrue(diacsens)) { typ_sens |= Rcl::Db::ET_DIACSENS; } if (freqs != 0 && PyObject_IsTrue(freqs)) { showfreqs = true; } if (!self->db->termMatch(typ_sens, lang ? lang : "english", expr, result, maxlen, field ? field : "")) { LOGERR("Db_termMatch: db termMatch error\n"); PyErr_SetString(PyExc_AttributeError, "rcldb termMatch error"); goto out; } ret = PyList_New(result.entries.size()); for (unsigned int i = 0; i < result.entries.size(); i++) { PyObject *term = PyUnicode_FromString( Rcl::strip_prefix(result.entries[i].term).c_str()); if (showfreqs) { PyObject *totcnt = PyLong_FromLong(result.entries[i].wcf); PyObject *doccnt = PyLong_FromLong(result.entries[i].docs); PyObject *tup = PyTuple_New(3); PyTuple_SetItem(tup, 0, term); PyTuple_SetItem(tup, 1, totcnt); PyTuple_SetItem(tup, 2, doccnt); PyList_SetItem(ret, i, tup); } else { PyList_SetItem(ret, i, term); } } out: PyMem_Free(expr); PyMem_Free(field); PyMem_Free(lang); return ret; } static PyObject * Db_needUpdate(recoll_DbObject* self, PyObject *args, PyObject *kwds) { LOGDEB0("Db_needUpdate\n"); char *udi = 0; // needs freeing char *sig = 0; // needs freeing if (!PyArg_ParseTuple(args, "eses:Db_needUpdate", "utf-8", &udi, "utf-8", &sig)) { return 0; } if (self->db == 0) { LOGERR("Db_needUpdate: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); PyMem_Free(udi); PyMem_Free(sig); return 0; } bool result = self->db->needUpdate(udi, sig); PyMem_Free(udi); PyMem_Free(sig); return Py_BuildValue("i", result); } static PyObject * Db_delete(recoll_DbObject* self, PyObject *args, PyObject *kwds) { LOGDEB0("Db_delete\n"); char *udi = 0; // needs freeing if (!PyArg_ParseTuple(args, "es:Db_delete", "utf-8", &udi)) { return 0; } if (self->db == 0) { LOGERR("Db_delete: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); PyMem_Free(udi); return 0; } bool result = self->db->purgeFile(udi); PyMem_Free(udi); return Py_BuildValue("i", result); } static PyObject * 
Db_purge(recoll_DbObject* self) { LOGDEB0("Db_purge\n"); if (self->db == 0) { LOGERR("Db_purge: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); return 0; } bool result = self->db->purge(); return Py_BuildValue("i", result); } static PyObject * Db_addOrUpdate(recoll_DbObject* self, PyObject *args, PyObject *) { LOGDEB0("Db_addOrUpdate\n"); char *sudi = 0; // needs freeing char *sparent_udi = 0; // needs freeing recoll_DocObject *pydoc; if (!PyArg_ParseTuple(args, "esO!|es:Db_addOrUpdate", "utf-8", &sudi, &recoll_DocType, &pydoc, "utf-8", &sparent_udi)) { return 0; } string udi(sudi); string parent_udi(sparent_udi ? sparent_udi : ""); PyMem_Free(sudi); PyMem_Free(sparent_udi); if (self->db == 0) { LOGERR("Db_addOrUpdate: db not found " << self->db << "\n"); PyErr_SetString(PyExc_AttributeError, "db"); return 0; } if (pydoc->doc == 0) { LOGERR("Db_addOrUpdate: doc not found " << pydoc->doc << "\n"); PyErr_SetString(PyExc_AttributeError, "doc"); return 0; } if (!self->db->addOrUpdate(udi, parent_udi, *pydoc->doc)) { LOGERR("Db_addOrUpdate: rcldb error\n"); PyErr_SetString(PyExc_AttributeError, "rcldb error"); return 0; } Py_RETURN_NONE; } static PyMethodDef Db_methods[] = { {"close", (PyCFunction)Db_close, METH_NOARGS, "close() closes the index connection. The object is unusable after this." }, {"query", (PyCFunction)Db_query, METH_NOARGS, "query() -> Query. Return a new, blank query object for this index." }, {"doc", (PyCFunction)Db_doc, METH_NOARGS, "doc() -> Doc. Return a new, blank doc object for this index." }, {"cursor", (PyCFunction)Db_query, METH_NOARGS, "cursor() -> Query. Alias for query(). Return query object." }, {"setAbstractParams", (PyCFunction)Db_setAbstractParams, METH_VARARGS|METH_KEYWORDS, "setAbstractParams(maxchars, contextwords).\n" "Set the parameters used to build 'keyword-in-context' abstracts" }, {"makeDocAbstract", (PyCFunction)Db_makeDocAbstract, METH_VARARGS, "makeDocAbstract(Doc, Query) -> string\n" "Build and return 'keyword-in-context' abstract for document\n" "and query." }, {"termMatch", (PyCFunction)Db_termMatch, METH_VARARGS|METH_KEYWORDS, doc_Db_termMatch }, {"needUpdate", (PyCFunction)Db_needUpdate, METH_VARARGS, "needUpdate(udi, sig) -> Bool.\n" "Check if the index is up to date for the document defined by udi,\n" "having the current signature sig." }, {"delete", (PyCFunction)Db_delete, METH_VARARGS, "delete(udi) -> Bool.\n" "Purge index from all data for udi. If udi matches a container\n" "document, purge all subdocs (docs with a parent_udi matching udi)." }, {"purge", (PyCFunction)Db_purge, METH_NOARGS, "purge() -> Bool.\n" "Delete all documents that were not touched during the just finished\n" "indexing pass (since open-for-write). These are the documents for\n" "the needUpdate() call was not performed, indicating that they no\n" "longer exist in the primary storage system.\n" }, {"addOrUpdate", (PyCFunction)Db_addOrUpdate, METH_VARARGS, "addOrUpdate(udi, doc, parent_udi=None) -> None\n" "Add or update index data for a given document\n" "The udi string must define a unique id for the document. It is not\n" "interpreted inside Recoll\n" "doc is a Doc object\n" "if parent_udi is set, this is a unique identifier for the\n" "top-level container (ie mbox file)" }, {NULL} /* Sentinel */ }; PyDoc_STRVAR(doc_DbObject, "Db([confdir=None], [extra_dbs=None], [writable = False])\n" "\n" "A Db object holds a connection to a Recoll index. 
Use the connect()\n" "function to create one.\n" "confdir specifies a Recoll configuration directory (default: \n" " $RECOLL_CONFDIR or ~/.recoll).\n" "extra_dbs is a list of external databases (xapian directories)\n" "writable decides if we can index new data through this connection\n" ); static PyTypeObject recoll_DbType = { PyVarObject_HEAD_INIT(NULL, 0) "recoll.Db", /*tp_name*/ sizeof(recoll_DbObject), /*tp_basicsize*/ 0, /*tp_itemsize*/ (destructor)Db_dealloc, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ 0, /*tp_compare*/ 0, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash */ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /*tp_flags*/ doc_DbObject, /* tp_doc */ 0, /* tp_traverse */ 0, /* tp_clear */ 0, /* tp_richcompare */ 0, /* tp_weaklistoffset */ 0, /* tp_iter */ 0, /* tp_iternext */ Db_methods, /* tp_methods */ 0, /* tp_members */ 0, /* tp_getset */ 0, /* tp_base */ 0, /* tp_dict */ 0, /* tp_descr_get */ 0, /* tp_descr_set */ 0, /* tp_dictoffset */ (initproc)Db_init, /* tp_init */ 0, /* tp_alloc */ Db_new, /* tp_new */ }; ////////////////////////////////////////////////////////////////////////// // Module methods static PyObject * recoll_connect(PyObject *self, PyObject *args, PyObject *kwargs) { LOGDEB2("recoll_connect\n"); recoll_DbObject *db = (recoll_DbObject *) PyObject_Call((PyObject *)&recoll_DbType, args, kwargs); return (PyObject *)db; } PyDoc_STRVAR(doc_connect, "connect([confdir=None], [extra_dbs=None], [writable = False])\n" " -> Db.\n" "\n" "Connects to a Recoll database and returns a Db object.\n" "confdir specifies a Recoll configuration directory\n" "(the default is built like for any Recoll program).\n" "extra_dbs is a list of external databases (xapian directories)\n" "writable decides if we can index new data through this connection\n" ); static PyMethodDef recoll_methods[] = { {"connect", (PyCFunction)recoll_connect, METH_VARARGS|METH_KEYWORDS, doc_connect}, {NULL, NULL, 0, NULL} /* Sentinel */ }; PyDoc_STRVAR(pyrecoll_doc_string, "This is an interface to the Recoll full text indexer."); struct module_state { PyObject *error; }; #if PY_MAJOR_VERSION >= 3 #define GETSTATE(m) ((struct module_state*)PyModule_GetState(m)) #else #define GETSTATE(m) (&_state) static struct module_state _state; #endif #if PY_MAJOR_VERSION >= 3 static int recoll_traverse(PyObject *m, visitproc visit, void *arg) { Py_VISIT(GETSTATE(m)->error); return 0; } static int recoll_clear(PyObject *m) { Py_CLEAR(GETSTATE(m)->error); return 0; } static struct PyModuleDef moduledef = { PyModuleDef_HEAD_INIT, "recoll", NULL, sizeof(struct module_state), recoll_methods, NULL, recoll_traverse, recoll_clear, NULL }; #define INITERROR return NULL extern "C" PyObject * PyInit_recoll(void) #else #define INITERROR return PyMODINIT_FUNC initrecoll(void) #endif { // Note: we can't call recollinit here, because the confdir is only really // known when the first db object is created (it is an optional parameter). // Using a default here may end up with variables such as stripchars being // wrong #if PY_MAJOR_VERSION >= 3 PyObject *module = PyModule_Create(&moduledef); #else PyObject *module = Py_InitModule("recoll", recoll_methods); #endif if (module == NULL) INITERROR; struct module_state *st = GETSTATE(module); // The first parameter is a char *. Hopefully we don't initialize // modules too often... 
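// What follows registers the recoll.Error exception, then the Db, Query, Doc
// and SearchData types, and finally exports a pointer to the Doc type (wrapped
// in a capsule on recent Pythons) for use by the other recoll extension modules.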
st->error = PyErr_NewException(strdup("recoll.Error"), NULL, NULL); if (st->error == NULL) { Py_DECREF(module); INITERROR; } if (PyType_Ready(&recoll_DbType) < 0) INITERROR; Py_INCREF((PyObject*)&recoll_DbType); PyModule_AddObject(module, "Db", (PyObject *)&recoll_DbType); if (PyType_Ready(&recoll_QueryType) < 0) INITERROR; Py_INCREF((PyObject*)&recoll_QueryType); PyModule_AddObject(module, "Query", (PyObject *)&recoll_QueryType); if (PyType_Ready(&recoll_DocType) < 0) INITERROR; Py_INCREF((PyObject*)&recoll_DocType); PyModule_AddObject(module, "Doc", (PyObject *)&recoll_DocType); if (PyType_Ready(&recoll_SearchDataType) < 0) INITERROR; Py_INCREF((PyObject*)&recoll_SearchDataType); PyModule_AddObject(module, "SearchData", (PyObject *)&recoll_SearchDataType); PyModule_AddStringConstant(module, "__doc__", pyrecoll_doc_string); PyObject *doctypecobject; #if PY_MAJOR_VERSION >= 3 || (PY_MAJOR_VERSION >= 2 && PY_MINOR_VERSION >= 7) // Export a few pointers for the benefit of other recoll python modules doctypecobject= PyCapsule_New(&recoll_DocType, PYRECOLL_PACKAGE "recoll.doctypeptr", 0); #else doctypecobject = PyCObject_FromVoidPtr(&recoll_DocType, NULL); #endif PyModule_AddObject(module, "doctypeptr", doctypecobject); #if PY_MAJOR_VERSION >= 3 return module; #endif } recoll-1.26.3/python/recoll/recoll/0000755000175000017500000000000013570165410014147 500000000000000recoll-1.26.3/python/recoll/recoll/rclconfig.py0000644000175000017500000001061213566424763016425 00000000000000#!/usr/bin/env python3 from __future__ import print_function import locale import re import os import sys import base64 import platform try: from . import conftree except: import conftree class RclDynConf: def __init__(self, fname): self.data = conftree.ConfSimple(fname) def getStringList(self, sk): nms = self.data.getNames(sk) out = [] if nms is not None: for nm in nms: out.append(base64.b64decode(self.data.get(nm, sk))) return out class RclConfig: def __init__(self, argcnf = None): self.config = None self.mimemap = None platsys = platform.system() # Find configuration directory if argcnf is not None: self.confdir = os.path.abspath(argcnf) elif "RECOLL_CONFDIR" in os.environ: self.confdir = os.environ["RECOLL_CONFDIR"] else: if platsys == "Windows": if "LOCALAPPDATA" in os.environ: dir = os.environ["LOCALAPPDATA"] else: dir = os.path.expanduser("~") self.confdir = os.path.join(dir, "Recoll") else: self.confdir = os.path.expanduser("~/.recoll") #print("Confdir: [%s]" % self.confdir, file=sys.stderr) # Also find datadir. This is trickier because this is set by # "configure" in the C code. We can only do our best. Have to # choose a preference order. 
Use RECOLL_DATADIR if the order is wrong self.datadir = None if "RECOLL_DATADIR" in os.environ: self.datadir = os.environ["RECOLL_DATADIR"] else: if platsys == "Windows": self.datadir = os.path.join(os.path.dirname(sys.argv[0]), "..") else: dirs = ("/opt/local", "/usr", "/usr/local") for dir in dirs: dd = os.path.join(dir, "share/recoll") if os.path.exists(dd): self.datadir = dd if self.datadir is None: self.datadir = "/usr/share/recoll" #print("Datadir: [%s]" % self.datadir, file=sys.stderr) self.cdirs = [] # Additional config directory, values override user ones if "RECOLL_CONFTOP" in os.environ: self.cdirs.append(os.environ["RECOLL_CONFTOP"]) self.cdirs.append(self.confdir) # Additional config directory, overrides system's, overridden by user's if "RECOLL_CONFMID" in os.environ: self.cdirs.append(os.environ["RECOLL_CONFMID"]) self.cdirs.append(os.path.join(self.datadir, "examples")) #print("Config dirs: %s" % self.cdirs, file=sys.stderr) self.keydir = '' def getConfDir(self): return self.confdir def getDataDir(self): return self.datadir def setKeyDir(self, dir): self.keydir = dir def getConfParam(self, nm): if not self.config: self.config = conftree.ConfStack("recoll.conf", self.cdirs, "tree") return self.config.get(nm, self.keydir) # This is a simplified version of the c++ code, intended mostly for the # test mode of rclexecm.py. We don't attempt to check the data, so this # will not work on extension-less paths (e.g. mbox/mail/etc.) def mimeType(self, path): if not self.mimemap: self.mimemap = conftree.ConfStack("mimemap", self.cdirs, "tree") if os.path.exists(path): if os.path.isdir(path): return "inode/directory" if os.path.islink(path): return "inode/symlink" if not os.path.isfile(path): return "inode/x-fsspecial" try: size = os.path.getsize(path) if size == 0: return "inode/x-empty" except: pass ext = os.path.splitext(path)[1] return self.mimemap.get(ext, self.keydir) class RclExtraDbs: def __init__(self, config): self.config = config def getActDbs(self): dyncfile = os.path.join(self.config.getConfDir(), "history") dync = RclDynConf(dyncfile) return dync.getStringList("actExtDbs") if __name__ == '__main__': config = RclConfig() print("topdirs = %s" % config.getConfParam("topdirs")) extradbs = RclExtraDbs(config) print(extradbs.getActDbs()) recoll-1.26.3/python/recoll/recoll/conftree.py0000644000175000017500000002354313533651561016263 00000000000000# Copyright (C) 2016 J.F.Dockes # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. from __future__ import print_function import locale import re import os import sys import base64 import platform import shlex def _debug(s): print("%s"%s, file=sys.stderr) class ConfSimple(object): """A ConfSimple class reads a recoll configuration file, which is a typical ini file (see the Recoll manual). 
It's a dictionary of dictionaries which lets you retrieve named values from the top level or a subsection""" def __init__(self, confname, tildexp = False, readonly = True): self.submaps = {} self.dotildexpand = tildexp self.readonly = readonly self.confname = confname try: f = open(confname, 'rb') except Exception as exc: #_debug("Open Exception: %s" % exc) # File does not exist -> empty config, not an error. self.submaps = {} self.submaps[b''] = {} return self._parseinput(f) def _parseinput(self, f): appending = False line = b'' submapkey = b'' for cline in f: cline = cline.rstrip(b'\r\n') if appending: line = line + cline else: line = cline line = line.strip() if line == b'' or line[0] == b'#'[0]: continue if line[len(line)-1] == b'\\'[0]: line = line[0:len(line)-1] appending = True continue appending = False #_debug(line) if line[0] == b'['[0]: line = line.strip(b'[]') if self.dotildexpand: submapkey = os.path.expanduser(line) if type(submapkey) == type(u''): submapkey = submapkey.encode('utf-8') else: submapkey = line #_debug("Submapkey: [%s]" % submapkey) continue nm, sep, value = line.partition(b'=') if sep == b'': # No equal sign in line -> considered comment continue nm = nm.strip() value = value.strip() #_debug("sk [%s] nm: [%s] value: [%s]" % (submapkey, nm, value)) if not submapkey in self.submaps: self.submaps[submapkey] = {} self.submaps[submapkey][nm] = value def getbin(self, nm, sk = b''): '''Returns None if not found, empty string if found empty''' if type(nm) != type(b'') or type(sk) != type(b''): raise TypeError("getbin: parameters must be binary not unicode") #_debug("ConfSimple::getbin nm [%s] sk [%s]" % (nm, sk)) if not sk in self.submaps: return None if not nm in self.submaps[sk]: return None return self.submaps[sk][nm] def get(self, nm, sk = b''): dodecode = False if type(nm) == type(u''): dodecode = True nm = nm.encode('utf-8') if type(sk) == type(u''): sk = sk.encode('utf-8') #v = ConfSimple.getbin(self, nm, sk) v = self.getbin(nm, sk) if v and dodecode: v = v.decode('utf-8') return v def getNamesbin(self, sk = b''): if not sk in self.submaps: return None return list(self.submaps[sk].keys()) def getNames(self, sk = ''): if not sk in self.submaps: return None dodecode = False if type(sk) == type(u''): dodecode = True sk = sk.encode('utf-8') names = self.getNamesbin(sk) if names and dodecode: names = [nm.decode('utf-8') for nm in names] return names def _rewrite(self): if self.readonly: raise Exception("ConfSimple is readonly") tname = self.confname + "-" f = open(tname, 'wb') # First output null subkey submap if b'' in self.submaps: for nm,value in self.submaps[b''].items(): f.write(nm + b'=' + value + b'\n') for sk,mp in self.submaps.items(): if sk == b'': continue f.write(b'[' + sk + b']\n') for nm,value in mp.items(): f.write(nm + b'=' + value + b'\n') f.close() try: # os.replace works on Windows even if dst exists, but py3 only os.replace(tname, self.confname) except: try: os.rename(tname, self.confname) except: import shutil shutil.move(tname, self.confname) def setbin(self, nm, value, sk = b''): if self.readonly: raise Exception("ConfSimple is readonly") if sk not in self.submaps: self.submaps[sk] = {} self.submaps[sk][nm] = value self._rewrite() return True def set(self, nm, value, sk = b''): if self.readonly: raise Exception("ConfSimple is readonly") if type(nm) == type(u''): nm = nm.encode('utf-8') if type(value) == type(u''): value = value.encode('utf-8') if type(sk) == type(u''): sk = sk.encode('utf-8') return self.setbin(nm, value, sk) class 
ConfTree(ConfSimple):
    """A ConfTree adds path-hierarchical interpretation of the section keys,
    which should be '/'-separated values. When a value is requested for a
    given path, it will also be searched in the sections corresponding to the
    ancestors. E.g. get(name, '/a/b') will also look in sections '/a' and '/'
    or '' (the last 2 are equivalent)"""

    def getbin(self, nm, sk = b''):
        if type(nm) != type(b'') or type(sk) != type(b''):
            raise TypeError("getbin: parameters must be binary not unicode")
        #_debug("ConfTree::getbin: nm [%s] sk [%s]" % (nm, sk))
        if sk == b'' or sk[0] != b'/'[0]:
            return ConfSimple.getbin(self, nm, sk)
        if sk[len(sk)-1] == b'/'[0]:
            sk = sk[:len(sk)-1]
        # Try all sk ancestors as submaps (/a/b/c-> /a/b/c, /a/b, /a, b'')
        while sk:
            if sk in self.submaps:
                return ConfSimple.getbin(self, nm, sk)
            if sk + b'/' in self.submaps:
                return ConfSimple.getbin(self, nm, sk+b'/')
            i = sk.rfind(b'/')
            if i == -1:
                break
            sk = sk[:i]
        return ConfSimple.getbin(self, nm)


class ConfStack(object):
    """ A ConfStack manages the superposition of a list of Configuration
    objects. Values are looked for in each object from the list until found.
    This typically provides for defaults overridden by sparse values in the
    topmost file."""

    def __init__(self, nm, dirs, tp = 'simple'):
        fnames = []
        for dir in dirs:
            fnm = os.path.join(dir, nm)
            fnames.append(fnm)
        self._construct(tp, fnames)

    def _construct(self, tp, fnames):
        self.confs = []
        for fname in fnames:
            if tp.lower() == 'simple':
                conf = ConfSimple(fname)
            else:
                conf = ConfTree(fname)
            self.confs.append(conf)

    # Accepts / returns binary strings (non-unicode)
    def getbin(self, nm, sk = b''):
        if type(nm) != type(b'') or type(sk) != type(b''):
            raise TypeError("getbin: parameters must be binary not unicode")
        for conf in self.confs:
            value = conf.getbin(nm, sk)
            if value is not None:
                return value
        return None

    def get(self, nm, sk = b''):
        dodecode = False
        if type(nm) == type(u''):
            dodecode = True
            nm = nm.encode('utf-8')
        if type(sk) == type(u''):
            sk = sk.encode('utf-8')
        #v = ConfSimple.getbin(self, nm, sk)
        v = self.getbin(nm, sk)
        if v and dodecode:
            v = v.decode('utf-8')
        return v


# Split string of strings, with possible quoting and escaping.
# The default is to do Recoll stringToStrings emulation: whitespace
# separated, and doublequotes only (C-style).
E.G.: # word1 word2 "compound \\"quoted\\" string" -> # ['word1', 'word2', 'compound "quoted string'] # # This is not the shlex default and can be changed by setting the # parameters def stringToStrings(s, quotes = '"', escape = '\\', escapedquotes = '"', whitespace = None): lex = shlex.shlex(s, posix=True) lex.whitespace_split = True if quotes is not None: lex.quotes = quotes if escape is not None: lex.escape = escape if escapedquotes is not None: lex.escapedquotes = escapedquotes if whitespace is not None: lex.whitespace = whitespace l = [] while True: tok = lex.get_token() if not tok: break l.append(tok) return l def stringsToString(vs): out = [] for s in vs: if s.find(" ") != -1 or s.find("\t") != -1 or s.find("\\") != -1 or \ s.find('"') != -1: out.append('"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"') else: out.append(s) return " ".join(out) recoll-1.26.3/python/recoll/recoll/__init__.py0000644000175000017500000000000013303776060016171 00000000000000recoll-1.26.3/python/recoll/pyrecoll.h0000644000175000017500000000226213533651561014621 00000000000000/* Copyright (C) 2012 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _PYRECOLL_H_INCLUDED_ #define _PYRECOLL_H_INCLUDED_ #include #include class RclConfig; typedef struct { PyObject_HEAD /* Type-specific fields go here. */ Rcl::Doc *doc; /* Each doc object has a pointer to the global config, for convenience */ std::shared_ptr rclconfig; } recoll_DocObject; #define PYRECOLL_PACKAGE "recoll." #endif // _PYRECOLL_H_INCLUDED_ recoll-1.26.3/python/recoll/setup.py.in0000644000175000017500000000661513533651561014744 00000000000000from setuptools import setup, Extension import os import sys sysname = os.uname()[0] # For shadow builds: references to the source tree top = os.path.join('@srcdir@', '..', '..') pytop = '@srcdir@' # For shadow builds: reference to the top of the local tree (for finding # generated .h files, e.g. 
autoconfig.h) localtop = os.path.join(os.path.dirname(__file__), '..', '..') library_dirs = [os.path.join(localtop, '.libs')] if "CYGWIN" in os.environ: libraries = ['recoll', 'xapian', 'iconv', 'z'] else: libraries = ['recoll'] extra_compile_args = ['-std=c++11'] if 'libdir' in os.environ and os.environ['libdir'] != "": runtime_library_dirs = [os.path.join(os.environ['libdir'], 'recoll')] else: runtime_library_dirs = [os.path.join('@prefix@', 'lib', 'recoll')] module1 = Extension('recoll', define_macros = [('MAJOR_VERSION', '1'), ('MINOR_VERSION', '0'), ('UNAC_VERSION', '"1.0.7"'), ('RECOLL_DATADIR', '"@RECOLL_DATADIR@"') ], include_dirs = ['/usr/local/include', os.path.join(top, 'utils'), os.path.join(top, 'common'), os.path.join(localtop, 'common'), os.path.join(top, 'common'), os.path.join(top, 'rcldb'), os.path.join(top, 'query'), os.path.join(top, 'unac') ], extra_compile_args = extra_compile_args, libraries = libraries, library_dirs = library_dirs, runtime_library_dirs = runtime_library_dirs, sources = [os.path.join(pytop, 'pyrecoll.cpp')]) module2 = Extension('rclextract', define_macros = [('MAJOR_VERSION', '1'), ('MINOR_VERSION', '0'), ('UNAC_VERSION', '"1.0.7"'), ('RECOLL_DATADIR', '"@RECOLL_DATADIR@"') ], include_dirs = ['/usr/local/include', os.path.join(top, 'utils'), os.path.join(top, 'common'), os.path.join(localtop, 'common'), os.path.join(top, 'internfile'), os.path.join(top, 'rcldb'), ], extra_compile_args = extra_compile_args, libraries = libraries, library_dirs = library_dirs, runtime_library_dirs = runtime_library_dirs, sources = [os.path.join(pytop, 'pyrclextract.cpp')]) setup (name = 'Recoll', version = '1.0', description = 'Query/Augment a Recoll full text index', author = 'J.F. Dockes', author_email = 'jfd@recoll.org', url = 'http://www.recoll.org', license = 'GPL', package_dir = {'' : os.path.join(top, 'python', 'recoll')}, long_description = ''' ''', packages = ['recoll'], ext_package = 'recoll', ext_modules = [module1, module2]) recoll-1.26.3/python/recoll/Makefile0000644000175000017500000000037713430577215014263 00000000000000all: echo libdir: $(libdir) test '.' = '.' || cp -rp ./recoll . libdir=$(libdir) python setup.py build install: sudo python setup.py install clean: rm -rf build rm -f recoll/__init__.pyc rm -rf recoll/__pycache__ distclean: clean rm -f setup.py recoll-1.26.3/python/README.txt0000644000175000017500000000020013303776060013020 00000000000000How to build and use the python interface is documented in the Recoll user manual, inside the "Programming interface" chapter. recoll-1.26.3/python/samples/0000755000175000017500000000000013570165410013053 500000000000000recoll-1.26.3/python/samples/mutt-recoll.py0000755000175000017500000000550013303776060015622 00000000000000#!/usr/bin/env python """ Modified from github:honza/mutt-notmuch-py This is a recoll version of the original mutt-notmuch script. It will interactively ask you for a search query and then symlink the matching messages to $HOME/.cache/mutt_results. Add this to your .muttrc. macro index / "unset wait_keymutt-recoll.py~/.cache/mutt_results" \ "search mail (using recoll)" This script overrides the $HOME/.cache/mutt_results each time you run a query. Install this by adding this file somewhere on your PATH. 
(c) 2012 - Honza Pokorny (c) 2014 - Jean-Francois Dockes Licensed under BSD """ import os import hashlib from commands import getoutput from mailbox import Maildir from optparse import OptionParser from collections import defaultdict def digest(filename): with open(filename) as f: return hashlib.sha1(f.read()).hexdigest() def pick_all_mail(messages): for m in messages: if 'All Mail' in m: return m def empty_dir(directory): box = Maildir(directory) box.clear() def command(cmd): return getoutput(cmd) def main(dest_box, is_gmail): query = raw_input('Query: ') command('mkdir -p %s/cur' % dest_box) command('mkdir -p %s/new' % dest_box) empty_dir(dest_box) files = command('recoll -t -b -q %s' % query).split('\n') data = defaultdict(list) messages = [] for f in files: # Recoll outputs file:// urls f = f[7:] if not f: continue try: sha = digest(f) data[sha].append(f) except IOError: print('File %s does not exist' % f) for sha in data: if is_gmail and len(data[sha]) > 1: messages.append(pick_all_mail(data[sha])) else: messages.append(data[sha][0]) for m in messages: if not m: continue target = os.path.join(dest_box, 'cur', os.path.basename(m)) if not os.path.exists(target): print "symlink [%s] -> [%s]" % (m, target) os.symlink(m, target) if __name__ == '__main__': p = OptionParser("usage: %prog [OPTIONS] [RESULTDIR]") p.add_option('-g', '--gmail', dest='gmail', action='store_true', default=True, help='gmail-specific behavior') p.add_option('-G', '--not-gmail', dest='gmail', action='store_false', help='gmail-specific behavior') (options, args) = p.parse_args() if args: dest = args[0] else: dest = os.path.expanduser('~/.cache/mutt_results') if not os.path.exists(dest): os.makedirs(dest) # Use expanduser() so that os.symlink() won't get weirded out by tildes. main(os.path.expanduser(dest).rstrip('/'), options.gmail) recoll-1.26.3/python/samples/rclmbox.py0000644000175000017500000001675213303776060015031 00000000000000#!/usr/bin/env python """This sample uses the Recoll Python API to index a directory containing mbox files. This is not particularly useful as Recoll itself can do this better (e.g. this script does not process attachments), but it shows the use of most of the Recoll interface features, except 'parent_udi' (we do not create a 'self' document to act as the parent).""" from __future__ import print_function import sys import glob import os import stat import mailbox import email.header import email.utils try: from recoll import recoll except: import recoll # EDIT # Change this for some directory with mbox files, such as a # Thunderbird/Icedove mail storage directory. mbdir = os.path.expanduser("~/mail") #mbdir = os.path.expanduser("~/.icedove/n8n19644.default/Mail/Local Folders/") # EDIT # Change this to wherever you want your recoll data to live. 
Create
# the directory with a (possibly empty) recoll.conf in it before first
# running the script
rclconf = os.path.expanduser("~/.recoll-extern")

# Utility: extract text for named header
def header_value(msg, nm, to_utf = False):
    value = msg.get(nm)
    if value == None:
        return ""
    #value = value.replace("\n", "")
    #value = value.replace("\r", "")
    parts = email.header.decode_header(value)
    univalue = u""
    for part in parts:
        try:
            if part[1] != None:
                univalue += part[0].decode(part[1]) + u" "
            else:
                if isinstance(part[0], bytes):
                    univalue += part[0].decode("cp1252") + u" "
                else:
                    univalue += part[0] + u" "
        except Exception as err:
            print("Failed decoding header: %s" % err, file=sys.stderr)
            pass
    if to_utf:
        return univalue.encode('utf-8')
    else:
        return univalue

# Utility: extract text parts from body
def extract_text(msg):
    """Extract and decode all text/plain parts from the message"""
    text = u""
    # We only output the headers for previewing, else they're already
    # output/indexed as fields.
    if "RECOLL_FILTER_FORPREVIEW" in os.environ and \
       os.environ["RECOLL_FILTER_FORPREVIEW"] == "yes":
        text += u"From: " + header_value(msg, "From") + u"\n"
        text += u"To: " + header_value(msg, "To") + u"\n"
        text += u"Subject: " + header_value(msg, "Subject") + u"\n"
        # text += u"Content-Type: text/plain; charset=UTF-8\n"
        #text += u"Message-ID: " + header_value(msg, "Message-ID") + u"\n"
        text += u"\n"
    for part in msg.walk():
        if part.is_multipart():
            pass
        else:
            ct = part.get_content_type()
            if ct.lower() == "text/plain":
                charset = part.get_content_charset("cp1252")
                try:
                    ntxt = part.get_payload(None, True).decode(charset)
                    text += ntxt
                except Exception as err:
                    print("Failed decoding payload: %s" % err, file=sys.stderr)
                    pass
    return text

class mbox_indexer:
    """The indexer class. An object is created for indexing one mbox folder"""

    def __init__(self, db, mbfile):
        """Initialize for writable db recoll.Db object and mbfile mbox file.
        We retrieve the file size and mtime."""
        self.db = db
        self.mbfile = mbfile
        stdata = os.stat(mbfile)
        self.fmtime = stdata[stat.ST_MTIME]
        self.fbytes = stdata[stat.ST_SIZE]
        self.msgnum = 1

    def sig(self):
        """Create update verification value for mbox file: modification time
        concatenated with size should cover most cases"""
        return str(self.fmtime) + ":" + str(self.fbytes)

    def udi(self, msgnum):
        """Create unique document identifier for message.
This should be shorter than 150 bytes, which we optimistically don't check here, as we just concatenate the mbox file name and message number""" return self.mbfile + ":" + str(msgnum) def index(self): if not self.db.needUpdate(self.udi(1), self.sig()): print("Index is up to date for %s"%self.mbfile, file=sys.stderr); return None mb = mailbox.mbox(self.mbfile) for msg in mb.values(): print("Indexing message %d" % self.msgnum, file=sys.stderr); self.index_message(msg) self.msgnum += 1 def getdata(self, ipath): """Implements the 'fetch' data access interface (called at query time from the command line).""" #print("mbox::getdata: ipath: %s" % ipath, file=sys.stderr) imsgnum = int(ipath) mb = mailbox.mbox(self.mbfile) msgnum = 0; for msg in mb.values(): msgnum += 1 if msgnum == imsgnum: return extract_text(msg) return "" def index_message(self, msg): doc = recoll.Doc() # Misc standard recoll fields doc.author = header_value(msg, "From") doc.recipient = header_value(msg, "To") + " " + header_value(msg, "Cc") dte = header_value(msg, "Date") tm = email.utils.parsedate_tz(dte) if tm == None: doc.mtime = str(self.fmtime) else: doc.mtime = str(email.utils.mktime_tz(tm)) doc.title = header_value(msg, "Subject") doc.fbytes = str(self.fbytes) # Custom field doc.myfield = "some value" # Main document text and MIME type doc.text = extract_text(msg) doc.dbytes = str(len(doc.text.encode('UTF-8'))) doc.mimetype = "text/plain" # Store data for later "up to date" checks doc.sig = self.sig() # The rclbes field is the link between the index data and this # script when used at query time doc.rclbes = "MBOX" # These get stored inside the index, and returned at query # time, but the main identifier is the condensed 'udi' doc.url = "file://" + self.mbfile doc.ipath = str(self.msgnum) # The udi is the unique document identifier, later used if we # want to e.g. delete the document index data (and other ops). udi = self.udi(self.msgnum) self.db.addOrUpdate(udi, doc) # Index a directory containing mbox files def index_mboxdir(dir): db = recoll.connect(confdir=rclconf, writable=1) entries = glob.glob(dir + "/*") for ent in entries: if '.' in os.path.basename(ent): # skip .log etc. our mboxes have no exts continue if not os.path.isfile(ent): continue print("Processing %s"%ent) mbidx = mbox_indexer(db, ent) mbidx.index() db.purge() usage_string='''Usage: rclmbox.py Index the directory (the path is hard-coded inside the script) rclmbox.py [fetch|makesig] udi url ipath fetch subdoc data or make signature (query time) ''' def usage(): print("%s" % usage_string, file=sys.stderr) sys.exit(1) if len(sys.argv) == 1: index_mboxdir(mbdir) else: # cmd [fetch|makesig] udi url ipath if len(sys.argv) != 5: usage() cmd = sys.argv[1] udi = sys.argv[2] url = sys.argv[3] ipath = sys.argv[4] mbfile = url.replace('file://', '') # no need for a db for getdata or makesig. mbidx = mbox_indexer(None, mbfile) if cmd == 'fetch': print("%s"%mbidx.getdata(ipath).encode('UTF-8'), end="") elif cmd == 'makesig': print(mbidx.sig(), end="") else: usage() sys.exit(0) recoll-1.26.3/python/samples/docdups.py0000755000175000017500000000614313533651561015023 00000000000000#!/usr/bin/env python from __future__ import print_function import sys import xapian o_index_stripchars = True md5wpref = "XM" # Handle caps/diac-stripping option. 
If the db is raw the prefixes are # wrapped with ":" def wrap_prefix(prefix): if o_index_stripchars: return prefix else: return ":" + prefix + ":" def init_stripchars(xdb): global o_index_stripchars global md5wpref t = xdb.allterms() t.skip_to(":") for term in t: if term.term.find(":") == 0: o_index_stripchars = False break md5wpref = wrap_prefix("XM") # Retrieve named value from document data record. # The record format is a sequence of nm=value lines def get_attribute(xdb, docid, fld): doc = xdb.get_document(docid) data = doc.get_data() s = data.find(fld+"=") if s == -1: return "" e = data.find("\n", s) return data[s+len(fld)+1:e] # Convenience: retrieve postings as Python list def get_postlist(xdb, term): ret = list() for posting in xdb.postlist(term): ret.append(posting.docid) return ret # Return list of docids having same md5 including self def get_dups(xdb, docid): doc = xdb.get_document(int(docid)) # It would be more efficient to retrieve the value, but it's # binary so we'd have to decode it md5term = doc.termlist().skip_to(md5wpref).term if not md5term.startswith(md5wpref): return posts = get_postlist(xdb, md5term) return posts # Retrieve all sets of duplicates: # walk the list of all MD5 terms, look up their posting lists, and # store the docids where the list is longer than one. def find_all_dups(xdb): alldups = list() # Walk the MD5 terms t = xdb.allterms() t.skip_to(md5wpref) for term in t: if not term.term.startswith(md5wpref): break # Check postlist for term, if it's not of length 1, we have a dup dups = get_postlist(xdb, term.term) if len(dups) != 1: alldups.append(dups) return alldups # Print docid url ipath for list of docids def print_urlipath(xdb, doclist): for docid in doclist: url = get_attribute(xdb, docid, "url") ipath = get_attribute(xdb, docid, "ipath") print("%s %s %s" % (docid, url, ipath)) def msg(s): print("%s" % s, file = sys.stderr) ########## Main program if len(sys.argv) < 2: msg("Usage: %s /path/to/db [docid [docid ...]]" % \ sys.argv[0]) msg(" will print all sets of dups if no docid is given") msg(" else only the duplicates for the given docids") sys.exit(1) xdbpath = sys.argv[1] xdb = xapian.Database(xdbpath) init_stripchars(xdb) try: if len(sys.argv) == 2: # No docid args, alldups = find_all_dups(xdb) for dups in alldups: print_urlipath(xdb, dups) print("") else: for docid in sys.argv[2:]: dups = get_dups(xdb, docid) if dups is not None and len(dups) > 1: print_urlipath(xdb, dups) except Exception as e: msg("Xapian error: %s" % str(e)) sys.exit(1) recoll-1.26.3/python/samples/recollqsd.py0000755000175000017500000000241713303776060015347 00000000000000#!/usr/bin/env python """Example for using the ''searchdata''' structured query interface. 
Not good for anything except showing/trying the code.""" import sys from recoll import recoll def dotest(db, q): query = db.query() query.sortby("title", 1) nres = query.executesd(q) print "Result count: ", nres print "Query: ", query.getxquery().encode('utf-8') if nres > 10: nres = 10 for i in range(nres): doc = query.fetchone() print query.next if type(query.next) == int else query.rownumber for k in ("url", "mtime", "title", "author", "abstract"): if getattr(doc, k): print k, ":", getattr(doc, k).encode('utf-8') else: print k, ": None" print # End dotest # addclause(type='and'|'or'|'filename'|'phrase'|'near'|'path'|'sub' # qstring=string, slack=int, field=string, stemming=1|0, # subSearch=SearchData, exclude=0|1, anchorstart=0|1, anchorend=0|1 # casesens=0|1, diacsens=0|1) #sd.addclause("and", "dockes", field="author") #sd.addclause("phrase", "jean francois", 1) db = recoll.connect(confdir="/home/dockes/.recoll-prod") sd = recoll.SearchData(stemlang="english") sd.addclause('filename', "recollqsd*") dotest(db, sd) sys.exit(0) recoll-1.26.3/python/samples/recollq.py0000755000175000017500000000744013402532452015014 00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- """A python version of the command line query tool recollq (a bit simplified) The input string is always interpreted as a query language string. This could actually be useful for something after some customization """ import sys import locale from getopt import getopt if sys.version_info[0] >= 3: ISP3 = True else: ISP3 = False try: from recoll import recoll from recoll import rclextract hasextract = True except: import recoll hasextract = False allmeta = ("title", "keywords", "abstract", "url", "mimetype", "mtime", "ipath", "fbytes", "dbytes", "relevancyrating") def Usage(): print("Usage: recollq.py [-c conf] [-i extra_index] ") sys.exit(1); class ptrmeths: def __init__(self, groups): self.groups = groups def startMatch(self, idx): ugroup = " ".join(self.groups[idx][1]) return '' % (idx, ugroup) def endMatch(self): return '' def extract(doc): extractor = rclextract.Extractor(doc) newdoc = extractor.textextract(doc.ipath) return newdoc def extractofile(doc, outfilename=""): extractor = rclextract.Extractor(doc) outfilename = extractor.idoctofile(doc.ipath, doc.mimetype, \ ofilename=outfilename) return outfilename def utf8string(s): if ISP3: return s else: return s.encode('utf8') def doquery(db, q): # Get query object query = db.query() #query.sortby("dmtime", ascending=True) # Parse/run input query string nres = query.execute(q, stemming = 0, stemlang="english") qs = "Xapian query: [%s]" % query.getxquery() print(utf8string(qs)) groups = query.getgroups() m = ptrmeths(groups) # Print results: print("Result count: %d %d" % (nres, query.rowcount)) if nres > 20: nres = 20 #results = query.fetchmany(nres) #for doc in results: for i in range(nres): doc = query.fetchone() rownum = query.next if type(query.next) == int else \ query.rownumber print("%d:"%(rownum,)) #for k,v in doc.items().items(): #print "KEY:", utf8string(k), "VALUE", utf8string(v) #continue #outfile = extractofile(doc) #print "outfile:", outfile, "url", utf8string(doc.url) for k in ("title", "mtime", "author"): value = getattr(doc, k) # value = doc.get(k) if value is None: print("%s: (None)"%(k,)) else: print("%s : %s"%(k, utf8string(value))) #doc.setbinurl(bytearray("toto")) #burl = doc.getbinurl(); print("Bin URL : [%s]"%(doc.getbinurl(),)) abs = query.makedocabstract(doc, methods=m) print(utf8string(abs)) print('') # fulldoc = extract(doc) # print "FULLDOC 
MIMETYPE", fulldoc.mimetype, "TEXT:", fulldoc.text.encode("utf-8") ########################################### MAIN if len(sys.argv) < 2: Usage() language, localecharset = locale.getdefaultlocale() confdir="" extra_dbs = [] # Snippet params maxchars = 120 contextwords = 4 # Process options: [-c confdir] [-i extra_db [-i extra_db] ...] options, args = getopt(sys.argv[1:], "c:i:") for opt,val in options: if opt == "-c": confdir = val elif opt == "-i": extra_dbs.append(val) else: print("Bad opt: %s"%(opt,)) Usage() # The query should be in the remaining arg(s) if len(args) == 0: print("No query found in command line") Usage() q = '' for word in args: q += word + ' ' print("QUERY: [%s]"%(q,)) db = recoll.connect(confdir=confdir, extra_dbs=extra_dbs) db.setAbstractParams(maxchars=maxchars, contextwords=contextwords) doquery(db, q) recoll-1.26.3/python/samples/recollgui/0000755000175000017500000000000013570165410015040 500000000000000recoll-1.26.3/python/samples/recollgui/Makefile0000644000175000017500000000020013303776060016413 00000000000000PYUIC = pyuic5 all: rclmain.py rclmain.py: rclmain.ui $(PYUIC) -o rclmain.py rclmain.ui clean: rm -f rclmain.py rclmain.pyc recoll-1.26.3/python/samples/recollgui/qrecoll.py0000755000175000017500000002050013533651561017001 00000000000000#!/usr/bin/env python from __future__ import print_function import sys import datetime try: from recoll import recoll from recoll import rclextract hasextract = True except: import recoll hasextract = False import rclmain from getopt import getopt from PyQt5 import QtCore from PyQt5.QtCore import pyqtSlot from PyQt5.QtGui import QKeySequence from PyQt5.QtWidgets import * #################### # Highlighting methods. Just for showing the groups usage, we add the # original string for the match to the highlighted text. I don't think # you'd want to do this in a real app, but maybe some kind of tooltip? class HlMeths: def __init__(self, groups): self.groups = groups def startMatch(self, idx): ugroup = " ".join(self.groups[idx][0]) return ''+ugroup+'' def endMatch(self): return '' ############ # Data extraction. The 2 following methods use the extractor module # and get the data from the original document # # Extract and return document text (in text or html format, indicated # by newdoc.mimetype) def textextract(doc): extractor = rclextract.Extractor(doc) newdoc = extractor.textextract(doc.ipath) return newdoc # Extract document in original format (ie: application/msword) and # save it to a file. This only works if ipath is not null (else just # use the url !) 
def extractofile(doc, outfilename=""): extractor = rclextract.Extractor(doc) outfilename = extractor.idoctofile(doc.ipath, doc.mimetype, \ ofilename=outfilename) return outfilename ######### # RecollQuery wraps a recoll.query object in a Qt model class RecollQuery(QtCore.QAbstractTableModel): def __init__(self): QtCore.QAbstractTableModel.__init__(self) self.totres = -1 self.db = None self.query = None self.qtext = "" self.docs = [] self.pagelen = 10 self.attrs = ("filename", "title", "mtime", "url", "ipath") def rowCount(self, parent): ret = len(self.docs) #print("RecollQuery.rowCount(): %d"% ret) return ret def columnCount(self, parent): #print("RecollQuery.columnCount()") if parent.isValid(): return 0 else: return len(self.attrs) def setquery(self, db, q, sortfield="", ascending=True): """Parse and execute query on open db""" #print("RecollQuery.setquery():") # Get query object self.query = db.query() if sortfield: self.query.sortby(sortfield, ascending) # Parse/run input query string self.totres = self.query.execute(q) self.qtext = q self.db = db self.docs = [] self.fetchMore(None) def getdoc(self, index): if index.row() < len(self.docs): return self.docs[index.row()] else: return None def sort(self, col, order): #print("sort %s %s", (col, order)) self.setquery(self.db, self.qtext, sortfield=self.attrs[col], ascending = order) def headerData(self, idx, orient, role): if orient == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole: return self.attrs[idx] return None def data(self, index, role): #print("RecollQuery.data: row %d, role: %s" % (index.row(),role)) if not index.isValid(): return QtCore.QVariant() if index.row() >= len(self.docs): return QtCore.QVariant() if role == QtCore.Qt.DisplayRole: #print("RecollQuery.data: row %d, col %d role: %s" % \ # (index.row(), index.column() role)) attr = self.attrs[index.column()] value = getattr(self.docs[index.row()], attr) if attr == "mtime": dte = datetime.datetime.fromtimestamp(int(value)) value = str(dte) return value else: return QtCore.QVariant() def canFetchMore(self, parent): #print("RecollQuery.canFetchMore:") if len(self.docs) < self.totres: return True else: return False def fetchMore(self, parent): #print("RecollQuery.fetchMore:") self.beginInsertRows(QtCore.QModelIndex(), len(self.docs), \ len(self.docs) + self.pagelen) for count in range(self.pagelen): try: self.docs.append(self.query.fetchone()) except: break self.endInsertRows() ### # UI interaction code class RclGui_Main(QMainWindow): def __init__(self, db, parent=None): QMainWindow.__init__(self, parent) self.ui = rclmain.Ui_MainWindow() self.ui.setupUi(self) self.db = db self.qmodel = RecollQuery() scq = QShortcut(QKeySequence("Ctrl+Q"), self) scq.activated.connect(self.onexit) header = self.ui.resTable.horizontalHeader() header.setSortIndicatorShown(True) header.setSortIndicator(-1, QtCore.Qt.AscendingOrder) self.ui.resTable.setSortingEnabled(True) self.currentindex = -1 self.currentdoc = None def on_searchEntry_returnPressed(self): self.startQuery() def on_resTable_clicked(self, index): doc = self.qmodel.getdoc(index) self.currentindex = index self.currentdoc = doc if doc is None: print("NO DoC") return query = self.qmodel.query groups = query.getgroups() meths = HlMeths(groups) abs = query.makedocabstract(doc, methods=meths) self.ui.resDetail.setText(abs) if hasextract: ipath = doc.get('ipath') #print("ipath[%s]" % ipath) self.ui.previewPB.setEnabled(True) if ipath: self.ui.savePB.setEnabled(True) else: self.ui.savePB.setEnabled(False) @pyqtSlot() def 
on_previewPB_clicked(self): print("on_previewPB_clicked(self)") newdoc = textextract(self.currentdoc) query = self.qmodel.query; groups = query.getgroups() meths = HlMeths(groups) #print("newdoc.mimetype:", newdoc.mimetype) if newdoc.mimetype == 'text/html': ishtml = True else: ishtml = False text = '' + \ query.highlight(newdoc.text, methods=meths, ishtml=ishtml, eolbr=True) text += '' self.ui.resDetail.setText(text) @pyqtSlot() def on_savePB_clicked(self): print("on_savePB_clicked(self)") doc = self.currentdoc ipath = doc.ipath if not ipath: return fn = QFileDialog.getSaveFileName(self) if fn: docitems = doc.items() fn = extractofile(doc, str(fn.toLocal8Bit())) print("Saved as %s" % fn) else: print("Canceled", file=sys.stderr) def startQuery(self): self.qmodel.setquery(self.db, self.ui.searchEntry.text()) self.ui.resTable.setModel(self.qmodel) def onexit(self): self.close() def Usage(): print('''Usage: qt.py [ [ ...]]''', file=sys.stderr) sys.exit(1) def main(args): app = QApplication(args) confdir="" extra_dbs = [] # Snippet params maxchars = 300 contextwords = 6 # Process options: [-c confdir] [-i extra_db [-i extra_db] ...] options, args = getopt(args[1:], "c:i:") for opt,val in options: if opt == "-c": confdir = val elif opt == "-i": extra_dbs.append(val) else: print("Bad opt: %s"% opt, file=sys.stderr) Usage() # The query should be in the remaining arg(s) q = None if len(args) > 0: q = "" for word in args: q += word + " " db = recoll.connect(confdir=confdir, extra_dbs=extra_dbs) db.setAbstractParams(maxchars=maxchars, contextwords=contextwords) topwindow = RclGui_Main(db) topwindow.show() if q is not None: topwindow.ui.searchEntry.setText(q) topwindow.startQuery() sys.exit(app.exec_()) if __name__=="__main__": main(sys.argv) recoll-1.26.3/python/samples/recollgui/rclmain.ui0000644000175000017500000000615613303776060016757 00000000000000 MainWindow 0 0 800 600 MainWindow Search string + CR 0 4 QAbstractItemView::NoEditTriggers false 0 0 false Preview false Save 0 0 800 23 File Exit actionExit triggered() MainWindow close() -1 -1 399 299 recoll-1.26.3/python/samples/rcldlkp.py0000755000175000017500000000532413303776060015012 00000000000000#!/usr/bin/env python __doc__ = """ An exemple indexer for an arbitrary multi-document file format. Not supposed to run ''as-is'' or be really useful. ''Lookup'' notes file indexing The file format has text notes separated by lines with a single '%' character If the script is called with just the file name as an argument, it will (re)index the contents. 
If the script is called with second numeric argument, it will retrieve the specified record and output it in html """ import os import stat import sys import re rclconf = "/Users/dockes/.recoll-dlkp" def udi(docfile, numrec): return docfile + "#" + str(numrec) ############################################################### def index_rec(db, numrec, rec): doc = recoll.Doc() # url doc.url = "file://" + docfile # utf8fn # ipath doc.ipath = str(numrec) # mimetype doc.mimetype = "text/plain" # mtime # origcharset # title lines = rec.split("\n") if len(lines) >= 2: doc.title = unicode(lines[1], "iso-8859-1") if len(doc.title.strip()) == 0 and len(lines) >= 3: doc.title = unicode(lines[2], "iso-8859-1") # keywords # abstract # author # fbytes doc.fbytes = str(fbytes) # text doc.text = unicode(rec, "iso-8859-1") # dbytes doc.dbytes = str(len(rec)) # sig if numrec == 0: doc.sig = str(fmtime) db.addOrUpdate(udi(docfile, numrec), doc) def output_rec(rec): # Escape html rec = unicode(rec, "iso-8859-1").encode("utf-8") rec = rec.replace("<", "<"); rec = rec.replace("&", "&"); rec = rec.replace('"', "&dquot;"); print '' print '' print '
'
    print rec
    print '
' ################################################################ def usage(): sys.stderr.write("Usage: rcldlkp.py []\n") exit(1) if len(sys.argv) < 2: usage() docfile = sys.argv[1] if len(sys.argv) > 2: targetnum = int(sys.argv[2]) else: targetnum = None #print docfile, targetnum stdata = os.stat(docfile) fmtime = stdata[stat.ST_MTIME] fbytes = stdata[stat.ST_SIZE] f = open(docfile, 'r') if targetnum == None: import recoll db = recoll.connect(confdir=rclconf, writable=1) if not db.needUpdate(udi(docfile, 0), str(fmtime)): exit(0) rec = "" numrec = 1 for line in f: if re.compile("^%[ \t]*").match(line): if targetnum == None: index_rec(db, numrec, rec) elif targetnum == numrec: output_rec(rec) exit(0) numrec += 1 rec = "" else: rec += line if targetnum == None: index_rec(db, 0, "") recoll-1.26.3/internfile/0000755000175000017500000000000013570165407012233 500000000000000recoll-1.26.3/internfile/extrameta.cpp0000644000175000017500000001115213533651561014651 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include "rclconfig.h" #include "pxattr.h" #include "log.h" #include "cstr.h" #include "rcldoc.h" #include "execmd.h" using std::string; using std::map; static void docfieldfrommeta(RclConfig* cfg, const string& name, const string &value, Rcl::Doc& doc) { string fieldname = cfg->fieldCanon(name); LOGDEB0("Internfile:: setting [" << fieldname << "] from cmd/xattr value [" << value << "]\n"); if (fieldname == cstr_dj_keymd) { doc.dmtime = value; } else { doc.meta[fieldname] = value; } } void reapXAttrs(const RclConfig* cfg, const string& path, map& xfields) { LOGDEB2("reapXAttrs: [" << path << "]\n"); #ifndef _WIN32 // Retrieve xattrs names from files and mapping table from config vector xnames; if (!pxattr::list(path, &xnames)) { if (errno == ENOTSUP) { LOGDEB("FileInterner::reapXattrs: pxattr::list: errno " << errno << "\n"); } else { LOGERR("FileInterner::reapXattrs: pxattr::list: errno " << errno << "\n"); } return; } const map& xtof = cfg->getXattrToField(); // Record the xattrs: names found in the config are either skipped // or mapped depending if the translation is empty. Other names // are recorded as-is for (vector::const_iterator it = xnames.begin(); it != xnames.end(); it++) { string key = *it; map::const_iterator mit = xtof.find(*it); if (mit != xtof.end()) { if (mit->second.empty()) { continue; } else { key = mit->second; } } string value; if (!pxattr::get(path, *it, &value, pxattr::PXATTR_NOFOLLOW)) { LOGERR("FileInterner::reapXattrs: pxattr::get failed for " << *it << ", errno " << errno << "\n"); continue; } // Encode should we ? 
xfields[key] = value; LOGDEB2("reapXAttrs: [" << key << "] -> [" << value << "]\n"); } #endif } void docFieldsFromXattrs(RclConfig *cfg, const map& xfields, Rcl::Doc& doc) { for (map::const_iterator it = xfields.begin(); it != xfields.end(); it++) { docfieldfrommeta(cfg, it->first, it->second, doc); } } void reapMetaCmds(RclConfig* cfg, const string& path, map& cfields) { const vector& reapers = cfg->getMDReapers(); if (reapers.empty()) return; map smap = {{'f', path}}; for (vector::const_iterator rp = reapers.begin(); rp != reapers.end(); rp++) { vector cmd; for (vector::const_iterator it = rp->cmdv.begin(); it != rp->cmdv.end(); it++) { string s; pcSubst(*it, s, smap); cmd.push_back(s); } string output; if (ExecCmd::backtick(cmd, output)) { cfields[rp->fieldname] = output; } } } // Set fields from external commands // These override those from xattrs and can be later augmented by // values from inside the file. // // This is a bit atrocious because some entry names are special: // "modificationdate" will set mtime instead of an ordinary field, // and the output from anything beginning with "rclmulti" will be // interpreted as multiple fields in configuration file format... void docFieldsFromMetaCmds(RclConfig *cfg, const map& cfields, Rcl::Doc& doc) { for (map::const_iterator it = cfields.begin(); it != cfields.end(); it++) { if (!it->first.compare(0, 8, "rclmulti")) { ConfSimple simple(it->second); if (simple.ok()) { vector names = simple.getNames(""); for (vector::const_iterator nm = names.begin(); nm != names.end(); nm++) { string value; if (simple.get(*nm, value)) { docfieldfrommeta(cfg, *nm, value, doc); } } } } else { docfieldfrommeta(cfg, it->first, it->second, doc); } } } recoll-1.26.3/internfile/internfile.cpp0000644000175000017500000012373313533651561015027 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include "safefcntl.h" #include #include "safesysstat.h" #include "safeunistd.h" #include #include #include #include using namespace std; #include "cstr.h" #include "internfile.h" #include "rcldoc.h" #include "mimetype.h" #include "log.h" #include "mimehandler.h" #include "execmd.h" #include "pathut.h" #include "rclconfig.h" #include "mh_html.h" #include "fileudi.h" #include "cancelcheck.h" #include "copyfile.h" #include "fetcher.h" #include "extrameta.h" #include "uncomp.h" // The internal path element separator. This can't be the same as the rcldb // file to ipath separator : "|" // We replace it with a control char if it comes out of a filter (ie: // rclzip or rclchm can do this). If you want the SOH control char // inside an ipath, you're out of luck (and a bit weird). 
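// As an illustration (hypothetical element value): a zip member named
// "notes:2021.txt" is stored in the ipath as "notes\x012021.txt" by
// colon_hide() below, and colon_restore() turns it back into
// "notes:2021.txt" when the ipath is split again at retrieval time.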
static const string cstr_isep(":"); static const char cchar_colon_repl = '\x01'; static string colon_hide(const string& in) { string out; for (string::const_iterator it = in.begin(); it != in.end(); it++) { out += *it == ':' ? cchar_colon_repl : *it; } return out; } static string colon_restore(const string& in) { string out; for (string::const_iterator it = in.begin(); it != in.end(); it++) { out += *it == cchar_colon_repl ? ':' : *it; } return out; } // This is used when the user wants to retrieve a search result doc's parent // (ie message having a given attachment) bool FileInterner::getEnclosingUDI(const Rcl::Doc &doc, string& udi) { LOGDEB("FileInterner::getEnclosingUDI(): url [" << doc.url << "] ipath [" << doc.ipath << "]\n"); string eipath = doc.ipath; string::size_type colon; if (eipath.empty()) return false; if ((colon = eipath.find_last_of(cstr_isep)) != string::npos) { eipath.erase(colon); } else { eipath.erase(); } make_udi(url_gpath(doc.idxurl.empty() ? doc.url : doc.idxurl), eipath, udi); return true; } string FileInterner::getLastIpathElt(const string& ipath) { string::size_type sep; if ((sep = ipath.find_last_of(cstr_isep)) != string::npos) { return ipath.substr(sep + 1); } else { return ipath; } } bool FileInterner::ipathContains(const string& parent, const string& child) { return child.find(parent) == 0 && child.find(cstr_isep, parent.size()) == parent.size(); } // Constructor: identify the input file, possibly create an // uncompressed temporary copy, and create the top filter for the // uncompressed file type. // // Empty handler on return says that we're in error, this will be // processed by the first call to internfile(). // Split into "constructor calls init()" to allow use from other constructor FileInterner::FileInterner(const string &fn, const struct stat *stp, RclConfig *cnf, int flags, const string *imime) { LOGDEB0("FileInterner::FileInterner(fn=" << fn << ")\n"); if (fn.empty()) { LOGERR("FileInterner::FileInterner: empty file name!\n"); return; } initcommon(cnf, flags); init(fn, stp, cnf, flags, imime); } // Note that we always succeed (set m_ok = true), except in internal // inconsistency cases (which could just as well abort()). Errors // will be detected when internfile() is called. This is done so that // our caller creates a doc record in all cases (with an error-flagged // signature), so that the appropriate retry choices can be made. This // used to not be the case, and was changed because this was the // simplest way to solve the retry issues (simpler than changing the // caller in e.g. fsindexer). void FileInterner::init(const string &f, const struct stat *stp, RclConfig *cnf, int flags, const string *imime) { if (f.empty()) { LOGERR("FileInterner::init: empty file name!\n"); return; } m_fn = f; // Compute udi for the input file. This is used by filters which // manage some kind of cache. Indexing by udi makes things easier // because they sometimes get a temp as actual input. string udi; make_udi(f, cstr_null, udi); cnf->setKeyDir(path_getfather(m_fn)); string l_mime; bool usfci = false; cnf->getConfParam("usesystemfilecommand", &usfci); // In general, even when the input mime type is set (when // previewing), we can't use it: it's the type for the actual // document, but this can be part of a compound document, and // we're dealing with the top level file here, or this could be a // compressed file. The flag tells us we really can use it // (e.g. the web indexer sets it). 
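// For instance (hypothetical case), when previewing an attachment stored
// inside a mail folder, imime describes the attachment itself, while the
// file we are opening here is the folder: we must re-identify the folder's
// own type and then walk down to the attachment through its ipath.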
if (flags & FIF_doUseInputMimetype) { if (!imime) { LOGERR("FileInterner:: told to use null imime\n"); return; } l_mime = *imime; } else { LOGDEB("FileInterner::init fn [" << f << "] mime [" << (imime ? imime->c_str() : "(null)") << "] preview " << m_forPreview << "\n"); // Run mime type identification in any case (see comment above). l_mime = mimetype(m_fn, stp, m_cfg, usfci); // If identification fails, try to use the input parameter. This // is then normally not a compressed type (it's the mime type from // the db), and is only set when previewing, not for indexing if (l_mime.empty() && imime) l_mime = *imime; } int64_t docsize = stp->st_size; if (!l_mime.empty()) { // Has mime: check for a compressed file. If so, create a // temporary uncompressed file, and rerun the mime type // identification, then do the rest with the temp file. vectorucmd; if (m_cfg->getUncompressor(l_mime, ucmd)) { // Check for compressed size limit int maxkbs = -1; if (!m_cfg->getConfParam("compressedfilemaxkbs", &maxkbs) || maxkbs < 0 || !stp || int(stp->st_size / 1024) < maxkbs) { if (!m_uncomp->uncompressfile(m_fn, ucmd, m_tfile)) { m_ok = true; return; } LOGDEB1("FileInterner:: after ucomp: tfile " << m_tfile <<"\n"); m_fn = m_tfile; // Stat the uncompressed file, mainly to get the size struct stat ucstat; if (path_fileprops(m_fn, &ucstat) != 0) { LOGERR("FileInterner: can't stat the uncompressed file[" << m_fn << "] errno " << errno << "\n"); m_ok = true; return; } else { docsize = ucstat.st_size; } l_mime = mimetype(m_fn, &ucstat, m_cfg, usfci); if (l_mime.empty() && imime) l_mime = *imime; } else { LOGINFO("FileInterner:: " << m_fn << " over size limit " << maxkbs << " kbs\n"); } } } if (l_mime.empty()) { // No mime type. We let it through as config may warrant that // we index all file names LOGDEB0("FileInterner:: no mime: [" << m_fn << "]\n"); } // Get fields computed from extended attributes. We use the // original file, not the m_fn which may be the uncompressed temp // file if (!m_noxattrs) reapXAttrs(m_cfg, f, m_XAttrsFields); // Gather metadata from external commands as configured. reapMetaCmds(m_cfg, f, m_cmdFields); m_mimetype = l_mime; // Look for appropriate handler (might still return empty) RecollFilter *df = getMimeHandler(l_mime, m_cfg, !m_forPreview); if (!df || df->is_unknown()) { // No real handler for this type, for now :( LOGDEB("FileInterner:: unprocessed mime: [" << l_mime << "] [" << f << "]\n"); if (!df) return; } df->set_property(Dijon::Filter::OPERATING_MODE, m_forPreview ? "view" : "index"); df->set_property(Dijon::Filter::DJF_UDI, udi); df->set_docsize(docsize); // Don't process init errors here: doing it later allows indexing // the file name of even a totally unparsable file (void)df->set_document_file(l_mime, m_fn); m_handlers.push_back(df); LOGDEB("FileInterner:: init ok " << l_mime << " [" << m_fn << "]\n"); m_ok = true; } // Setup from memory data (ie: out of the web cache). imime needs to be set. 
FileInterner::FileInterner(const string &data, RclConfig *cnf, int flags, const string& imime) { LOGDEB0("FileInterner::FileInterner(data)\n"); initcommon(cnf, flags); init(data, cnf, flags, imime); } void FileInterner::init(const string &data, RclConfig *cnf, int flags, const string& imime) { if (imime.empty()) { LOGERR("FileInterner: inmemory constructor needs input mime type\n"); return; } m_mimetype = imime; // Look for appropriate handler (might still return empty) RecollFilter *df = getMimeHandler(m_mimetype, m_cfg, !m_forPreview); if (!df) { // No handler for this type, for now :( if indexallfilenames // is set in the config, this normally wont happen (we get mh_unknown) LOGDEB("FileInterner:: unprocessed mime [" << m_mimetype << "]\n"); return; } df->set_property(Dijon::Filter::OPERATING_MODE, m_forPreview ? "view" : "index"); df->set_docsize(data.length()); if (df->is_data_input_ok(Dijon::Filter::DOCUMENT_STRING)) { (void)df->set_document_string(m_mimetype, data); } else if (df->is_data_input_ok(Dijon::Filter::DOCUMENT_DATA)) { (void)df->set_document_data(m_mimetype, data.c_str(), data.length()); } else if (df->is_data_input_ok(Dijon::Filter::DOCUMENT_FILE_NAME)) { TempFile temp = dataToTempFile(data, m_mimetype); if (temp.ok()) { (void)df->set_document_file(m_mimetype, temp.filename()); m_tmpflgs[m_handlers.size()] = true; m_tempfiles.push_back(temp); } } // Don't process init errors here: doing it later allows indexing // the file name of even a totally unparsable file m_handlers.push_back(df); m_ok = true; } void FileInterner::initcommon(RclConfig *cnf, int flags) { m_cfg = cnf; m_forPreview = ((flags & FIF_forPreview) != 0); m_uncomp = new Uncomp(m_forPreview); // Initialize handler stack. m_handlers.reserve(MAXHANDLERS); for (unsigned int i = 0; i < MAXHANDLERS; i++) m_tmpflgs[i] = false; m_targetMType = cstr_textplain; m_cfg->getConfParam("noxattrfields", &m_noxattrs); m_direct = false; } FileInterner::FileInterner(const Rcl::Doc& idoc, RclConfig *cnf, int flags) { LOGDEB0("FileInterner::FileInterner(idoc)\n"); initcommon(cnf, flags); std::unique_ptr fetcher(docFetcherMake(cnf, idoc)); if (!fetcher) { LOGERR("FileInterner:: no backend\n"); return; } DocFetcher::RawDoc rawdoc; if (!fetcher->fetch(cnf, idoc, rawdoc)) { LOGERR("FileInterner:: fetcher failed\n"); return; } switch (rawdoc.kind) { case DocFetcher::RawDoc::RDK_FILENAME: init(rawdoc.data, &rawdoc.st, cnf, flags, &idoc.mimetype); break; case DocFetcher::RawDoc::RDK_DATA: init(rawdoc.data, cnf, flags, idoc.mimetype); break; case DocFetcher::RawDoc::RDK_DATADIRECT: // Note: only used for demo with the sample python external // mbox indexer at this point. The external program is // responsible for all the extraction process. 
init(rawdoc.data, cnf, flags, idoc.mimetype); m_direct = true; break; default: LOGERR("FileInterner::FileInterner(idoc): bad rawdoc kind ??\n"); } return; } FileInterner::ErrorPossibleCause FileInterner::tryGetReason(RclConfig *cnf, const Rcl::Doc& idoc) { LOGDEB0("FileInterner::tryGetReason(idoc)\n"); std::unique_ptr fetcher(docFetcherMake(cnf, idoc)); if (!fetcher) { LOGERR("FileInterner:: no backend\n"); return FileInterner::FetchNoBackend; } DocFetcher::Reason fetchreason = fetcher->testAccess(cnf, idoc); switch (fetchreason) { case DocFetcher::FetchNotExist: return FileInterner::FetchMissing; case DocFetcher::FetchNoPerm: return FileInterner::FetchPerm; default: return FileInterner::InternfileOther; } } bool FileInterner::makesig(RclConfig *cnf, const Rcl::Doc& idoc, string& sig) { std::unique_ptr fetcher(docFetcherMake(cnf, idoc)); if (!fetcher) { LOGERR("FileInterner::makesig no backend for doc\n"); return false; } bool ret = fetcher->makesig(cnf, idoc, sig); return ret; } FileInterner::~FileInterner() { for (auto& entry: m_handlers) { returnMimeHandler(entry); } delete m_uncomp; // m_tempfiles will take care of itself } // Create a temporary file for a block of data (ie: attachment) found // while walking the internal document tree, with a type for which the // handler needs an actual file (ie : external script). TempFile FileInterner::dataToTempFile(const string& dt, const string& mt) { // Create temp file with appropriate suffix for mime type TempFile temp(m_cfg->getSuffixFromMimeType(mt)); if (!temp.ok()) { LOGERR("FileInterner::dataToTempFile: cant create tempfile: " << temp.getreason() << "\n"); return TempFile(); } string reason; if (!stringtofile(dt, temp.filename(), reason)) { LOGERR("FileInterner::dataToTempFile: stringtofile: " < verr; stringToStrings(msg, verr); if (verr.size() > 2) { vector::iterator it = verr.begin(); it++; if (*it == "HELPERNOTFOUND") { it++; for (; it != verr.end(); it++) { m_missingdatap->addMissing(*it, mt); } } } } } void FIMissingStore::getMissingExternal(string& out) { for (map >::const_iterator it = m_typesForMissing.begin(); it != m_typesForMissing.end(); it++) { out += string(" ") + it->first; } trimstring(out); } void FIMissingStore::getMissingDescription(string& out) { out.erase(); for (map >::const_iterator it = m_typesForMissing.begin(); it != m_typesForMissing.end(); it++) { out += it->first + " ("; set::const_iterator it3; for (it3 = it->second.begin(); it3 != it->second.end(); it3++) { out += *it3 + " "; } trimstring(out); out += ")"; out += "\n"; } } FIMissingStore::FIMissingStore(const string& in) { // The "missing" file is text. Each line defines a missing filter // and the list of mime types actually encountered that needed it // (see method getMissingDescription()) vector lines; stringToTokens(in, lines, "\n"); for (vector::const_iterator it = lines.begin(); it != lines.end(); it++) { // Lines from the file are like: // // filter name string (mime1 mime2) // // We can't be too sure that there will never be a parenthesis // inside the filter string as this comes from the filter // itself. The list part is safer, so we start from the end. 
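// For instance a (made-up) line "somefilter.py (application/x-foo
// application/x-bar)" yields the filter name "somefilter.py" and the two
// mime types, parsed right to left from the closing parenthesis.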
const string& line = *it; string::size_type lastopen = line.find_last_of("("); if (lastopen == string::npos) continue; string::size_type lastclose = line.find_last_of(")"); if (lastclose == string::npos || lastclose <= lastopen + 1) continue; string smtypes = line.substr(lastopen+1, lastclose - lastopen - 1); vector mtypes; stringToTokens(smtypes, mtypes); string filter = line.substr(0, lastopen); trimstring(filter); if (filter.empty()) continue; for (vector::const_iterator itt = mtypes.begin(); itt != mtypes.end(); itt++) { m_typesForMissing[filter].insert(*itt); } } } // Helper for extracting a value from a map. static inline bool getKeyValue(const map& docdata, const string& key, string& value) { auto it = docdata.find(key); if (it != docdata.end()) { value = it->second; LOGDEB2("getKeyValue: [" << key << "]->[" << value << "]\n"); return true; } LOGDEB2("getKeyValue: no value for [" << key << "]\n"); return false; } // Copy most metadata fields from the top filter to the recoll // doc. Some fields need special processing, because they go into // struct fields instead of metadata entry, or because we don't want // to copy them. bool FileInterner::dijontorcl(Rcl::Doc& doc) { RecollFilter *df = m_handlers.back(); if (df == 0) { //?? LOGERR("FileInterner::dijontorcl: null top handler ??\n"); return false; } for (const auto& ent : df->get_meta_data()) { if (ent.first == cstr_dj_keycontent) { doc.text = ent.second; if (doc.fbytes.empty()) { // It's normally set by walking the filter stack, in // collectIpathAndMt, which was called before us. It // can happen that the doc size is still empty at this // point if the last container filter is directly // returning text/plain content, so that there is no // ipath-less filter at the top lltodecstr(doc.text.length(), doc.fbytes); LOGDEB("FileInterner::dijontorcl: fbytes->" << doc.fbytes << endl); } } else if (ent.first == cstr_dj_keymd) { doc.dmtime = ent.second; } else if (ent.first == cstr_dj_keyanc) { doc.haschildren = true; } else if (ent.first == cstr_dj_keyorigcharset) { doc.origcharset = ent.second; } else if (ent.first == cstr_dj_keyfn) { // Only if not set during the stack walk const string *fnp = 0; if (!doc.peekmeta(Rcl::Doc::keyfn, &fnp) || fnp->empty()) doc.meta[Rcl::Doc::keyfn] = ent.second; } else if (ent.first == cstr_dj_keymt || ent.first == cstr_dj_keycharset) { // don't need/want these. } else { LOGDEB2("dijontorcl: " << m_cfg->fieldCanon(ent.first) << " -> " << ent.second << endl); doc.addmeta(m_cfg->fieldCanon(ent.first), ent.second); } } if (doc.meta[Rcl::Doc::keyabs].empty() && !doc.meta[cstr_dj_keyds].empty()) { doc.meta[Rcl::Doc::keyabs] = doc.meta[cstr_dj_keyds]; doc.meta.erase(cstr_dj_keyds); } return true; } const set nocopyfields{cstr_dj_keycontent, cstr_dj_keymd, cstr_dj_keyanc, cstr_dj_keyorigcharset, cstr_dj_keyfn, cstr_dj_keymt, cstr_dj_keycharset, cstr_dj_keyds}; static void copymeta(const RclConfig *cfg,Rcl::Doc& doc, const RecollFilter* hp) { for (const auto& entry : hp->get_meta_data()) { if (nocopyfields.find(entry.first) == nocopyfields.end()) { doc.addmeta(cfg->fieldCanon(entry.first), entry.second); } } } // Collect the ipath from the filter stack. // While we're at it, we also set the mimetype and filename, // which are special properties: we want to get them from the topmost // doc with an ipath, not the last one which is usually text/plain We // also set the author and modification time from the last doc which // has them. 
// // The stack can contain objects with an ipath element (corresponding // to actual embedded documents), and, towards the top, elements // without an ipath element, for format translations of the last doc. // // The docsize is fetched from the first element without an ipath // (first non container). If the last element directly returns // text/plain so that there is no ipath-less element, the value will // be set in dijontorcl(). // // The whole thing is a bit messy but it's not obvious how it should // be cleaned up as the "inheritance" rules inside the stack are // actually complicated. void FileInterner::collectIpathAndMT(Rcl::Doc& doc) const { LOGDEB2("FileInterner::collectIpathAndMT\n"); // Set to true if any element in the stack sets an ipath. (at least one of // the docs is a compound). bool hasipath = false; if (!m_noxattrs) { docFieldsFromXattrs(m_cfg, m_XAttrsFields, doc); } docFieldsFromMetaCmds(m_cfg, m_cmdFields, doc); // If there is no ipath stack, the mimetype is the one from the // file, else we'll change it further down. doc.mimetype = m_mimetype; string pathelprev; for (unsigned int i = 0; i < m_handlers.size(); i++) { const map& docdata = m_handlers[i]->get_meta_data(); string ipathel; getKeyValue(docdata, cstr_dj_keyipath, ipathel); if (!ipathel.empty()) { // Non-empty ipath. This stack element is for an // actual embedded document, not a format translation. hasipath = true; doc.ipath += colon_hide(ipathel) + cstr_isep; getKeyValue(docdata, cstr_dj_keymt, doc.mimetype); getKeyValue(docdata, cstr_dj_keyfn, doc.meta[Rcl::Doc::keyfn]); } else { // We copy all the metadata from the topmost actual // document: either the first if it has no ipath, or the // last one with an ipath (before pure format // translations). This would allow, for example mh_execm // handlers to use setfield() instead of embedding // metadata in the HTML meta tags. if (i == 0 || !pathelprev.empty()) { copymeta(m_cfg, doc, m_handlers[i]); } if (doc.fbytes.empty()) { lltodecstr(m_handlers[i]->get_docsize(), doc.fbytes); LOGDEB("collectIpath..: fbytes->" << doc.fbytes << endl); } } // We set the author field from the innermost doc which has // one: allows finding, e.g. an image attachment having no // metadata by a search on the sender name. Only do this for // actually embedded documents (avoid replacing values from // metacmds for the topmost one). For a topmost doc, author // will be merged by dijontorcl() later on. About same for // dmtime, but an external value will be replaced, not // augmented if dijontorcl() finds an internal value. if (hasipath) { getKeyValue(docdata, cstr_dj_keyauthor, doc.meta[Rcl::Doc::keyau]); getKeyValue(docdata, cstr_dj_keymd, doc.dmtime); } pathelprev = ipathel; } if (hasipath) { // Trim ending ipath separator LOGDEB2("IPATH [" << doc.ipath << "]\n"); if (doc.ipath.back() == cstr_isep[0]) { doc.ipath.erase(doc.ipath.end()-1); } } } // Remove handler from stack. Clean up temp file if needed. void FileInterner::popHandler() { if (m_handlers.empty()) return; size_t i = m_handlers.size() - 1; if (m_tmpflgs[i]) { m_tempfiles.pop_back(); m_tmpflgs[i] = false; } returnMimeHandler(m_handlers.back()); m_handlers.pop_back(); } enum addResols {ADD_OK, ADD_CONTINUE, ADD_BREAK, ADD_ERROR}; // Just got document from current top handler. 
See what type it is, // and possibly add a filter/handler to the stack int FileInterner::addHandler() { const map& docdata = m_handlers.back()->get_meta_data(); string charset, mimetype; getKeyValue(docdata, cstr_dj_keycharset, charset); getKeyValue(docdata, cstr_dj_keymt, mimetype); LOGDEB("FileInterner::addHandler: back() is " << mimetype << " target [" << m_targetMType << "]\n"); // If we find a document of the target type (text/plain in // general), we're done decoding. If we hit text/plain, we're done // in any case if (!stringicmp(mimetype, m_targetMType) || !stringicmp(mimetype, cstr_textplain)) { m_reachedMType = mimetype; LOGDEB1("FileInterner::addHandler: target reached\n"); return ADD_BREAK; } // We need to stack another handler. Check stack size if (m_handlers.size() >= MAXHANDLERS) { // Stack too big. Skip this and go on to check if there is // something else in the current back() LOGERR("FileInterner::addHandler: stack too high\n"); return ADD_CONTINUE; } // We must not filter out HTML when it is an intermediate // conversion format. We discriminate between e.g. an HTML email // attachment (needs filtering) and a result of pdf conversion // (must process) by looking at the last ipath element: a // conversion will have an empty one (same test as in // collectIpathAndMT). string ipathel; getKeyValue(docdata, cstr_dj_keyipath, ipathel); bool dofilter = !m_forPreview && (mimetype.compare(cstr_texthtml) || !ipathel.empty()); RecollFilter *newflt = getMimeHandler(mimetype, m_cfg, dofilter); if (!newflt) { // If we can't find a handler, this doc can't be handled // but there can be other ones so we go on LOGINFO("FileInterner::addHandler: no filter for [" << mimetype << "]\n"); return ADD_CONTINUE; } newflt->set_property(Dijon::Filter::OPERATING_MODE, m_forPreview ? "view" : "index"); if (!charset.empty()) newflt->set_property(Dijon::Filter::DEFAULT_CHARSET, charset); // Get current content: we don't use getkeyvalue() here to avoid // copying the text, which may be big. string ns; const string *txt = &ns; { map::const_iterator it; it = docdata.find(cstr_dj_keycontent); if (it != docdata.end()) txt = &it->second; } bool setres = false; newflt->set_docsize(txt->length()); if (newflt->is_data_input_ok(Dijon::Filter::DOCUMENT_STRING)) { setres = newflt->set_document_string(mimetype, *txt); } else if (newflt->is_data_input_ok(Dijon::Filter::DOCUMENT_DATA)) { setres = newflt->set_document_data(mimetype,txt->c_str(),txt->length()); } else if (newflt->is_data_input_ok(Dijon::Filter::DOCUMENT_FILE_NAME)) { TempFile temp = dataToTempFile(*txt, mimetype); if (temp.ok() && (setres = newflt->set_document_file(mimetype, temp.filename()))) { m_tmpflgs[m_handlers.size()] = true; m_tempfiles.push_back(temp); // Hack here, but really helps perfs: if we happen to // create a temp file for, ie, an image attachment, keep // it around for preview to use it through get_imgtmp() if (!mimetype.compare(0, 6, "image/")) { m_imgtmp = m_tempfiles.back(); } } } if (!setres) { LOGINFO("FileInterner::addHandler: set_doc failed inside [" << m_fn << "] for mtype " << mimetype << "\n"); } // Add handler and go on, maybe this one will give us text... m_handlers.push_back(newflt); LOGDEB1("FileInterner::addHandler: added\n"); return setres ? 
ADD_OK : ADD_BREAK; } // Information and debug after a next_document error void FileInterner::processNextDocError(Rcl::Doc &doc) { collectIpathAndMT(doc); m_reason = m_handlers.back()->get_error(); checkExternalMissing(m_reason, doc.mimetype); LOGERR("FileInterner::internfile: next_document error [" << m_fn << (doc.ipath.empty() ? "" : "|") << doc.ipath << "] " << doc.mimetype << " " << m_reason << "\n"); } FileInterner::Status FileInterner::internfile(Rcl::Doc& doc,const string& ipath) { LOGDEB("FileInterner::internfile. ipath [" << ipath << "]\n"); // Get rid of possible image tempfile from older call m_imgtmp = TempFile(); if (m_handlers.size() < 1) { // Just means the constructor failed LOGDEB("FileInterner::internfile: no handler: constructor failed\n"); return FIError; } // Input Ipath vector when retrieving a given subdoc for previewing vector vipath; if (!ipath.empty() && !m_direct) { stringToTokens(ipath, vipath, cstr_isep, true); for (auto& entry: vipath) { entry = colon_restore(entry); } if (!m_handlers.back()->skip_to_document(vipath[m_handlers.size()-1])){ LOGERR("FileInterner::internfile: can't skip\n"); return FIError; } } // Try to get doc from the topmost handler // Security counter: looping happens when we stack one other // handler or when walking the file document tree without finding // something to index (typical exemple: email with multiple image // attachments and no image filter installed). So we need to be // quite generous here, especially because there is another // security in the form of a maximum handler stack size. int loop = 0; while (!m_handlers.empty()) { CancelCheck::instance().checkCancel(); if (loop++ > 1000) { LOGERR("FileInterner:: looping!\n"); return FIError; } // If there are no more docs at the current top level we pop and // see if there is something at the previous one if (!m_handlers.back()->has_documents()) { // If looking for a specific doc, this is an error. Happens if // the index is stale, and the ipath points to the wrong message // for exemple (one with less attachments) if (m_forPreview) { m_reason += "Requested document does not exist. "; m_reason += m_handlers.back()->get_error(); LOGERR("FileInterner: requested document does not exist\n"); return FIError; } popHandler(); continue; } // While indexing, don't stop on next_document() error. There // might be ie an error while decoding an attachment, but we // still want to process the rest of the mbox! For preview: fatal. if (!m_handlers.back()->next_document()) { // Using a temp doc here because else we'd need to pop the // last ipath element when we do the pophandler (else the // ipath continues to grow in the current doc with each // consecutive error). It would be better to have // something like ipath.pop(). We do need the MIME type Rcl::Doc doc1 = doc; processNextDocError(doc1); doc.mimetype = doc1.mimetype; if (m_forPreview) { m_reason += "Requested document does not exist. "; m_reason += m_handlers.back()->get_error(); LOGERR("FileInterner: requested document does not exist\n"); return FIError; } popHandler(); continue; } // Look at the type for the next document and possibly add // handler to stack. 
switch (addHandler()) { case ADD_OK: // Just go through: handler has been stacked, use it LOGDEB2("addHandler returned OK\n"); break; case ADD_CONTINUE: // forget this doc and retrieve next from current handler // (ipath stays same) LOGDEB2("addHandler returned CONTINUE\n"); continue; case ADD_BREAK: // Stop looping: doc type ok, need complete its processing // and return it LOGDEB2("addHandler returned BREAK\n"); goto breakloop; // when you have to you have to case ADD_ERROR: LOGDEB2("addHandler returned ERROR\n"); return FIError; } // If we have an ipath, meaning that we are seeking a specific // document (ie: previewing a search result), we may have to // seek to the correct entry of a compound doc (ie: archive or // mail). When we are out of ipath entries, we stop seeking, // the handlers stack may still grow for translation (ie: if // the target doc is msword, we'll still stack the // word-to-text translator). if (!ipath.empty()) { if (m_handlers.size() <= vipath.size() && !m_handlers.back()->skip_to_document(vipath[m_handlers.size()-1])) { LOGERR("FileInterner::internfile: can't skip\n"); return FIError; } } } breakloop: if (m_handlers.empty()) { LOGDEB("FileInterner::internfile: conversion ended with no doc\n"); return FIError; } // Compute ipath and significant mimetype. ipath is returned // through doc.ipath. We also retrieve some metadata fields from // the ancesters (like date or author). This is useful for email // attachments. The values will be replaced by those internal to // the document (by dijontorcl()) if any, so the order of calls is // important. We used to only do this when indexing, but the aux // fields like filename and author may be interesting when // previewing too collectIpathAndMT(doc); if (m_forPreview) { doc.mimetype = m_reachedMType; } // Keep this AFTER collectIpathAndMT dijontorcl(doc); // Possibly destack so that we can test for FIDone. While doing this // possibly set aside an ancestor html text (for the GUI preview) while (!m_handlers.empty() && !m_handlers.back()->has_documents()) { if (m_forPreview) { MimeHandlerHtml *hth = dynamic_cast(m_handlers.back()); if (hth) { m_html = hth->get_html(); } } popHandler(); } if (m_handlers.empty()) return FIDone; else return FIAgain; } bool FileInterner::tempFileForMT(TempFile& otemp, RclConfig* cnf, const string& mimetype) { TempFile temp(cnf->getSuffixFromMimeType(mimetype)); if (!temp.ok()) { LOGERR("FileInterner::tempFileForMT: can't create temp file\n"); return false; } otemp = temp; return true; } // Static method, creates a FileInterner object to do the job. bool FileInterner::idocToFile( TempFile& otemp, const string& tofile, RclConfig *cnf, const Rcl::Doc& idoc, bool uncompress) { LOGDEB("FileInterner::idocToFile\n"); if (idoc.ipath.empty()) { // Because of the mandatory first conversion in the // FileInterner constructor, need to use a specific method. return topdocToFile(otemp, tofile, cnf, idoc, uncompress); } // We set FIF_forPreview for consistency with the previous version // which determined this by looking at mtype!=null. Probably // doesn't change anything in this case. FileInterner interner(idoc, cnf, FIF_forPreview); interner.setTargetMType(idoc.mimetype); return interner.interntofile(otemp, tofile, idoc.ipath, idoc.mimetype); } // This is only needed because the FileInterner constructor always performs // the first conversion, so that we need another approach for accessing the // original document (targetmtype won't do). 
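// Callers normally go through idocToFile() above rather than calling this
// helper directly. A sketch of such a call (doc and cnf come from the
// caller, uncompression left enabled):
//
//   TempFile temp;
//   if (FileInterner::idocToFile(temp, std::string(), cnf, doc, true)) {
//       // the extracted copy is now available as temp.filename()
//   }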
bool FileInterner::topdocToFile( TempFile& otemp, const string& tofile, RclConfig *cnf, const Rcl::Doc& idoc, bool uncompress) { std::unique_ptr fetcher(docFetcherMake(cnf, idoc)); if (!fetcher) { LOGERR("FileInterner::topdocToFile no backend\n"); return false; } DocFetcher::RawDoc rawdoc; if (!fetcher->fetch(cnf, idoc, rawdoc)) { LOGERR("FileInterner::topdocToFile fetcher failed\n"); return false; } const char *filename = ""; TempFile temp; if (tofile.empty()) { if (!tempFileForMT(temp, cnf, idoc.mimetype)) { return false; } filename = temp.filename(); } else { filename = tofile.c_str(); } string reason; switch (rawdoc.kind) { case DocFetcher::RawDoc::RDK_FILENAME: { string fn(rawdoc.data); TempFile temp; if (uncompress && isCompressed(fn, cnf)) { if (!maybeUncompressToTemp(temp, fn, cnf, idoc)) { LOGERR("FileInterner::idocToFile: uncompress failed\n"); return false; } } fn = temp.ok() ? temp.filename() : rawdoc.data; if (!copyfile(fn.c_str(), filename, reason)) { LOGERR("FileInterner::idocToFile: copyfile: " << reason << "\n"); return false; } } break; case DocFetcher::RawDoc::RDK_DATA: case DocFetcher::RawDoc::RDK_DATADIRECT: if (!stringtofile(rawdoc.data, filename, reason)) { LOGERR("FileInterner::idocToFile: stringtofile: " << reason <<"\n"); return false; } break; default: LOGERR("FileInterner::FileInterner(idoc): bad rawdoc kind ??\n"); } if (tofile.empty()) otemp = temp; return true; } bool FileInterner::interntofile(TempFile& otemp, const string& tofile, const string& ipath, const string& mimetype) { if (!ok()) { LOGERR("FileInterner::interntofile: constructor failed\n"); return false; } Rcl::Doc doc; Status ret = internfile(doc, ipath); if (ret == FileInterner::FIError) { LOGERR("FileInterner::interntofile: internfile() failed\n"); return false; } // Specialcase text/html. This is to work around a bug that will // get fixed some day: the internfile constructor always loads the // first handler so that at least one conversion is always // performed (and the access to the original data may be lost). A // common case is an "Open" on an HTML file (we end up // with text/plain content). As the HTML version is saved in this // case, use it. if (!stringlowercmp(cstr_texthtml, mimetype) && !get_html().empty()) { doc.text = get_html(); doc.mimetype = cstr_texthtml; } const char *filename; TempFile temp; if (tofile.empty()) { if (!tempFileForMT(temp, m_cfg, mimetype)) { return false; } filename = temp.filename(); } else { filename = tofile.c_str(); } string reason; if (!stringtofile(doc.text, filename, reason)) { LOGERR("FileInterner::interntofile: stringtofile : " << reason << "\n"); return false; } if (tofile.empty()) otemp = temp; return true; } bool FileInterner::isCompressed(const string& fn, RclConfig *cnf) { LOGDEB("FileInterner::isCompressed: [" << fn << "]\n"); struct stat st; if (path_fileprops(fn, &st) < 0) { LOGERR("FileInterner::isCompressed: can't stat [" << fn << "]\n"); return false; } string l_mime = mimetype(fn, &st, cnf, true); if (l_mime.empty()) { LOGERR("FileInterner::isUncompressed: can't get mime for [" << fn << "]\n"); return false; } vector ucmd; if (cnf->getUncompressor(l_mime, ucmd)) { return true; } return false; } // Static. 
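// Called from topdocToFile() above when the fetched file turns out to be
// compressed: expand it into a temp file named with the suffix matching
// doc.mimetype, while honoring the compressedfilemaxkbs limit from the
// configuration.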
bool FileInterner::maybeUncompressToTemp(TempFile& temp, const string& fn, RclConfig *cnf, const Rcl::Doc& doc) { LOGDEB("FileInterner::maybeUncompressToTemp: [" << fn << "]\n"); struct stat st; if (path_fileprops(fn.c_str(), &st) < 0) { LOGERR("FileInterner::maybeUncompressToTemp: can't stat [" <ucmd; if (!cnf->getUncompressor(l_mime, ucmd)) { return true; } // Check for compressed size limit int maxkbs = -1; if (cnf->getConfParam("compressedfilemaxkbs", &maxkbs) && maxkbs >= 0 && int(st.st_size / 1024) > maxkbs) { LOGINFO("FileInterner:: " << fn << " over size limit " << maxkbs << " kbs\n"); return false; } temp = TempFile(cnf->getSuffixFromMimeType(doc.mimetype)); if (!temp.ok()) { LOGERR("FileInterner: cant create temporary file\n"); return false; } Uncomp uncomp; string uncomped; if (!uncomp.uncompressfile(fn, ucmd, uncomped)) { return false; } // uncompressfile choses the output file name, there is good // reason for this, but it's not nice here. Have to move, the // uncompressed file, hopefully staying on the same dev. string reason; if (!renameormove(uncomped.c_str(), temp.filename(), reason)) { LOGERR("FileInterner::maybeUncompress: move [" << uncomped << "] -> [" << temp.filename() << "] failed: " << reason << "\n"); return false; } return true; } recoll-1.26.3/internfile/htmlparse.h0000644000175000017500000000317513303776060014326 00000000000000/* This file was copied from xapian-omega-1.0.1 and modified */ /* htmlparse.h: simple HTML parser for omega indexer * * Copyright 1999,2000,2001 BrightStation PLC * Copyright 2002,2006 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #ifndef INCLUDED_HTMLPARSE_H #define INCLUDED_HTMLPARSE_H #include #include using std::string; using std::map; class HtmlParser { map parameters; protected: virtual void decode_entities(string &s); bool in_script; string charset; static map named_ents; bool get_parameter(const string & param, string & value) const; public: virtual void process_text(const string &/*text*/) { } virtual bool opening_tag(const string &/*tag*/) { return true; } virtual bool closing_tag(const string &/*tag*/) { return true; } virtual void parse_html(const string &text); virtual void do_eof() {} HtmlParser(); virtual ~HtmlParser() { } }; #endif recoll-1.26.3/internfile/mh_mail.h0000644000175000017500000000536613533651561013744 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _MAIL_H_INCLUDED_ #define _MAIL_H_INCLUDED_ #include #include #include #include "mimehandler.h" namespace Binc { class MimeDocument; class MimePart; } class MHMailAttach; /** * Process a mail message (rfc822) into internal documents. */ class MimeHandlerMail : public RecollFilter { public: MimeHandlerMail(RclConfig *cnf, const std::string &id); virtual ~MimeHandlerMail(); virtual bool is_data_input_ok(DataInput input) const override { return (input == DOCUMENT_FILE_NAME || input == DOCUMENT_STRING); } virtual bool next_document() override; virtual bool skip_to_document(const std::string& ipath) override; virtual void clear_impl() override; protected: virtual bool set_document_file_impl(const std::string& mt, const std::string& file_path) override; virtual bool set_document_string_impl(const std::string& mt, const std::string& data) override; private: bool processMsg(Binc::MimePart *doc, int depth); void walkmime(Binc::MimePart* doc, int depth); bool processAttach(); Binc::MimeDocument *m_bincdoc; int m_fd; std::stringstream *m_stream; // Current index in parts. starts at -1 for self, then index into // attachments int m_idx; // Start of actual text (after the reprinted headers. This is for // generating a semi-meaningful "abstract") std::string::size_type m_startoftext; std::string m_subject; std::vector m_attachments; // Additional headers to be processed as per config + field name translation std::map m_addProcdHdrs; }; class MHMailAttach { public: std::string m_contentType; std::string m_filename; std::string m_charset; std::string m_contentTransferEncoding; Binc::MimePart *m_part; }; #endif /* _MAIL_H_INCLUDED_ */ recoll-1.26.3/internfile/mh_exec.h0000644000175000017500000001057113533651561013740 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _MH_EXEC_H_INCLUDED_ #define _MH_EXEC_H_INCLUDED_ #include #include #include "mimehandler.h" #include "execmd.h" class HandlerTimeout {}; /** * Turn external document into internal one by executing an external command. * * The command to execute, and its parameters, are defined in the mimeconf * configuration file, and stored by mimehandler.cpp in the object when it is * built. This data is not reset by a clear() call. * * The output MIME type (typically text/plain or text/html) and output * character set are also defined in mimeconf. The default is text/html, utf-8 * * The command will write the document text to stdout. Its only way to * set metadata is through "meta" tags if the output MIME is * text/html. 
* * As any RecollFilter, a MimeHandlerExec object can be reset * by calling clear(), and will stay initialised for the same mtype * (cmd, params etc.) */ class MimeHandlerExec : public RecollFilter { public: /////////////////////// // Members not reset by clear(). params, cfgFilterOutputMtype and // cfgFilterOutputCharset // define what I am. missingHelper is a permanent error // (no use to try and execute over and over something that's not // here). // Parameters: this has been built by our creator, from config file // data. We always add the file name at the end before actual execution std::vector params; // Filter output type. The default for ext. filters is to output html, // but some don't, in which case the type is defined in the config. std::string cfgFilterOutputMtype; // Output character set if the above type is not text/html. For // those filters, the output charset has to be known: ie set by a command // line option. std::string cfgFilterOutputCharset; bool missingHelper; // Resource management values int m_filtermaxseconds; int m_filtermaxmbytes; //////////////// MimeHandlerExec(RclConfig *cnf, const std::string& id); virtual bool next_document() override; virtual bool skip_to_document(const std::string& ipath) override; virtual void clear_impl() override { m_fn.erase(); m_ipath.erase(); } protected: virtual bool set_document_file_impl(const std::string& mt, const std::string& file_path) override; std::string m_fn; std::string m_ipath; // md5 computation excluded by handler name: can't change after init bool m_handlernomd5; bool m_hnomd5init; // If md5 not excluded by handler name, allow/forbid depending on mime bool m_nomd5; // Set the character set field and possibly transcode text/plain // output. // // @param mt the MIME type. A constant for mh_exec, but may depend on the // subdocument entry for mh_execm. // @param charset Document character set. A constant (empty // parameter) for mh_exec (we use the value defined in mimeconf), // possibly sent from the command for mh_execm. virtual void handle_cs(const std::string& mt, const std::string& charset = std::string()); private: virtual void finaldetails(); }; // This is called periodically by ExeCmd when it is waiting for data, // or when it does receive some. We may choose to interrupt the // command. class MEAdv : public ExecCmdAdvise { public: MEAdv(int maxsecs = 900); // Reset start time to now void reset(); void setmaxsecs(int maxsecs) { m_filtermaxseconds = maxsecs; } void newData(int n); private: time_t m_start; int m_filtermaxseconds; }; #endif /* _MH_EXEC_H_INCLUDED_ */ recoll-1.26.3/internfile/mh_exec.cpp0000644000175000017500000001677213533651561014304 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include "safesyswait.h" #include #include "cstr.h" #include "execmd.h" #include "mh_exec.h" #include "mh_html.h" #include "log.h" #include "cancelcheck.h" #include "smallut.h" #include "md5ut.h" #include "rclconfig.h" using namespace std; MEAdv::MEAdv(int maxsecs) : m_filtermaxseconds(maxsecs) { m_start = time(0L); } void MEAdv::reset() { m_start = time(0L); } void MEAdv::newData(int n) { LOGDEB2("MHExec:newData(" << n << ")\n"); if (m_filtermaxseconds > 0 && time(0L) - m_start > m_filtermaxseconds) { LOGERR("MimeHandlerExec: filter timeout (" << m_filtermaxseconds << " S)\n"); throw HandlerTimeout(); } // If a cancel request was set by the signal handler (or by us // just above), this will raise an exception. Another approach // would be to call ExeCmd::setCancel(). CancelCheck::instance().checkCancel(); } MimeHandlerExec::MimeHandlerExec(RclConfig *cnf, const std::string& id) : RecollFilter(cnf, id), missingHelper(false), m_filtermaxseconds(900), m_filtermaxmbytes(0), m_handlernomd5(false), m_hnomd5init(false), m_nomd5(false) { m_config->getConfParam("filtermaxseconds", &m_filtermaxseconds); m_config->getConfParam("filtermaxmbytes", &m_filtermaxmbytes); } bool MimeHandlerExec::set_document_file_impl(const std::string& mt, const std::string &file_path) { // Can't do this in constructor as script name not set yet. Do it // once on first call unordered_set nomd5tps; bool tpsread(false); if (false == m_hnomd5init) { m_hnomd5init = true; if (m_config->getConfParam("nomd5types", &nomd5tps)) { tpsread = true; if (!nomd5tps.empty()) { if (params.size() && nomd5tps.find(path_getsimple(params[0])) != nomd5tps.end()) { m_handlernomd5 = true; } // On windows the 1st param is often a script interp // name (e.g. "python", and the script name is 2nd if (params.size() > 1 && nomd5tps.find(path_getsimple(params[1])) != nomd5tps.end()) { m_handlernomd5 = true; } } } } m_nomd5 = m_handlernomd5; if (!m_nomd5) { // Check for MIME type based md5 suppression if (!tpsread) { m_config->getConfParam("nomd5types", &nomd5tps); } if (nomd5tps.find(mt) != nomd5tps.end()) { m_nomd5 = true; } } m_fn = file_path; m_havedoc = true; return true; } bool MimeHandlerExec::skip_to_document(const string& ipath) { LOGDEB("MimeHandlerExec:skip_to_document: [" << ipath << "]\n"); m_ipath = ipath; return true; } // Execute an external program to translate a file from its native // format to text or html. bool MimeHandlerExec::next_document() { if (m_havedoc == false) return false; m_havedoc = false; if (missingHelper) { LOGDEB("MimeHandlerExec::next_document(): helper known missing\n"); return false; } if (params.empty()) { // Hu ho LOGERR("MimeHandlerExec::next_document: empty params\n"); m_reason = "RECFILTERROR BADCONFIG"; return false; } // Command name string cmd = params.front(); // Build parameter vector: delete cmd name and add the file name vectormyparams(params.begin() + 1, params.end()); myparams.push_back(m_fn); if (!m_ipath.empty()) myparams.push_back(m_ipath); // Execute command, store the output string& output = m_metaData[cstr_dj_keycontent]; output.erase(); ExecCmd mexec; MEAdv adv(m_filtermaxseconds); mexec.setAdvise(&adv); mexec.putenv("RECOLL_CONFDIR", m_config->getConfDir()); mexec.putenv(m_forPreview ? 
"RECOLL_FILTER_FORPREVIEW=yes" : "RECOLL_FILTER_FORPREVIEW=no"); mexec.setrlimit_as(m_filtermaxmbytes); int status; try { status = mexec.doexec(cmd, myparams, 0, &output); } catch (HandlerTimeout) { LOGERR("MimeHandlerExec: handler timeout\n" ); status = 0x110f; } catch (CancelExcept) { LOGERR("MimeHandlerExec: cancelled\n" ); status = 0x110f; } if (status) { LOGERR("MimeHandlerExec: command status 0x" << std::hex << status << std::dec << " for " << cmd << "\n"); if (WIFEXITED(status) && WEXITSTATUS(status) == 127) { // That's how execmd signals a failed exec (most probably // a missing command). Let'hope no filter uses the same value as // an exit status... Disable myself permanently and signal the // missing cmd. missingHelper = true; m_reason = string("RECFILTERROR HELPERNOTFOUND ") + cmd; } else if (output.find("RECFILTERROR") == 0) { // If the output string begins with RECFILTERROR, then it's // interpretable error information out from a recoll script m_reason = output; list lerr; stringToStrings(output, lerr); if (lerr.size() > 2) { list::iterator it = lerr.begin(); it++; if (*it == "HELPERNOTFOUND") { // No use trying again and again to execute this filter, // it won't work. missingHelper = true; } } } return false; } finaldetails(); return true; } void MimeHandlerExec::handle_cs(const string& mt, const string& icharset) { string charset(icharset); // cfgFilterOutputCharset comes from the mimeconf filter // definition line and defaults to UTF-8 if empty. If the value is // "default", we use the default input charset value defined in // recoll.conf (which may vary depending on directory) if (charset.empty()) { charset = cfgFilterOutputCharset.empty() ? cstr_utf8 : cfgFilterOutputCharset; if (!stringlowercmp("default", charset)) { charset = m_dfltInputCharset; } } m_metaData[cstr_dj_keyorigcharset] = charset; // If this is text/plain transcode_to/check utf-8 if (!mt.compare(cstr_textplain)) { (void)txtdcode("mh_exec/m"); } else { m_metaData[cstr_dj_keycharset] = charset; } } void MimeHandlerExec::finaldetails() { // The default output mime type is html, but it may be defined // otherwise in the filter definition. m_metaData[cstr_dj_keymt] = cfgFilterOutputMtype.empty() ? cstr_texthtml : cfgFilterOutputMtype; if (!m_forPreview && !m_nomd5) { string md5, xmd5, reason; if (MD5File(m_fn, md5, &reason)) { m_metaData[cstr_dj_keymd5] = MD5HexPrint(md5, xmd5); } else { LOGERR("MimeHandlerExec: cant compute md5 for [" << m_fn << "]: " << reason << "\n"); } } handle_cs(m_metaData[cstr_dj_keymt]); } recoll-1.26.3/internfile/mh_text.cpp0000644000175000017500000001400113533651561014323 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #include "autoconfig.h" #include #include #include "safefcntl.h" #include #include "safeunistd.h" #include #include #include "cstr.h" #include "mh_text.h" #include "log.h" #include "readfile.h" #include "md5ut.h" #include "rclconfig.h" #include "pxattr.h" #include "pathut.h" using namespace std; const int MB = 1024*1024; const int KB = 1024; // Process a plain text file bool MimeHandlerText::set_document_file_impl(const string& mt, const string &fn) { LOGDEB("MimeHandlerText::set_document_file: [" << fn << "] offs " << m_offs << "\n"); m_fn = fn; // This should not be necessary, but it happens on msw that offset is large // negative at this point, could not find the reason (still trying). m_offs = 0; // file size for oversize check long long fsize = path_filesize(m_fn); if (fsize < 0) { LOGERR("MimeHandlerText::set_document_file: stat " << m_fn << " errno " << errno << "\n"); return false; } #ifndef _WIN32 // Check for charset defined in extended attribute as per: // http://freedesktop.org/wiki/CommonExtendedAttributes pxattr::get(m_fn, "charset", &m_charsetfromxattr); #endif // Max file size parameter: texts over this size are not indexed int maxmbs = 20; m_config->getConfParam("textfilemaxmbs", &maxmbs); if (maxmbs == -1 || fsize / MB <= maxmbs) { // Text file page size: if set, we split text files into // multiple documents int ps = 1000; m_config->getConfParam("textfilepagekbs", &ps); if (ps != -1) { ps *= KB; m_paging = true; } // Note: size_t is guaranteed unsigned, so max if ps is -1 m_pagesz = size_t(ps); if (!readnext()) return false; } else { LOGINF("MimeHandlerText: file too big (textfilemaxmbs=" << maxmbs << "), contents will not be indexed: " << fn << endl); } if (!m_forPreview) { string md5, xmd5; MD5String(m_text, md5); m_metaData[cstr_dj_keymd5] = MD5HexPrint(md5, xmd5); } m_havedoc = true; return true; } bool MimeHandlerText::set_document_string_impl(const string& mt, const string& otext) { m_text = otext; if (!m_forPreview) { string md5, xmd5; MD5String(m_text, md5); m_metaData[cstr_dj_keymd5] = MD5HexPrint(md5, xmd5); } m_havedoc = true; return true; } bool MimeHandlerText::skip_to_document(const string& ipath) { char *endptr; int64_t t = strtoll(ipath.c_str(), &endptr, 10); if (endptr == ipath.c_str()) { LOGERR("MimeHandlerText::skip_to_document: bad ipath offs [" << ipath << "]\n"); return false; } m_offs = t; readnext(); return true; } bool MimeHandlerText::next_document() { LOGDEB("MimeHandlerText::next_document: m_havedoc " << m_havedoc << "\n"); if (m_havedoc == false) return false; if (m_charsetfromxattr.empty()) m_metaData[cstr_dj_keyorigcharset] = m_dfltInputCharset; else m_metaData[cstr_dj_keyorigcharset] = m_charsetfromxattr; m_metaData[cstr_dj_keymt] = cstr_textplain; size_t srclen = m_text.length(); m_metaData[cstr_dj_keycontent].swap(m_text); // We transcode even if defcharset is supposedly already utf-8: // this validates the encoding. // txtdcode() truncates the text if transcoding fails (void)txtdcode("mh_text"); // If the text length is 0 (the file is empty or oversize), or we are // not paging, we're done if (srclen == 0 || !m_paging) { m_havedoc = false; return true; } else { // Paging: set ipath then read next chunk. // Don't set ipath for the first chunk to avoid having 2 // records for small files (one for the file, one for the // first chunk). This is a hack. The right thing to do would // be to use a different mtype for files over the page size, // and keep text/plain only for smaller files. 
string buf = lltodecstr(m_offs - srclen); if (m_offs - srclen != 0) m_metaData[cstr_dj_keyipath] = buf; readnext(); // This ensures that the first chunk (offs==srclen) of a // multi-chunk file does have an ipath. Else it stands for the // whole file, which used to be the case but does not seem // right if (m_havedoc) m_metaData[cstr_dj_keyipath] = buf; return true; } } bool MimeHandlerText::readnext() { string reason; m_text.clear(); if (!file_to_string(m_fn, m_text, m_offs, m_pagesz, &reason)) { LOGERR("MimeHandlerText: can't read file: " << reason << "\n" ); m_havedoc = false; return false; } if (m_text.length() == 0) { // EOF m_havedoc = false; return true; } // If possible try to adjust the chunk to end right after a line // Don't do this for the last chunk. Last chunk of exactly the // page size might be unduly split, no big deal if (m_text.length() == m_pagesz) { string::size_type pos = m_text.find_last_of("\n\r"); if (pos != string::npos && pos != 0) { m_text.erase(pos); } } m_offs += m_text.length(); return true; } recoll-1.26.3/internfile/mh_mbox.cpp0000644000175000017500000004551613570165013014314 00000000000000/* Copyright (C) 2005-2019 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #define _FILE_OFFSET_BITS 64 #include #include #include #include #include #include #include #include "cstr.h" #include "mimehandler.h" #include "log.h" #include "mh_mbox.h" #include "smallut.h" #include "rclconfig.h" #include "md5ut.h" #include "conftree.h" #include "pathut.h" using namespace std; // Define maximum message size for safety. 100MB would seem reasonable static unsigned int max_mbox_member_size = 100 * 1024 * 1024; // The mbox format uses lines beginning with 'From ' as separator. // Mailers are supposed to quote any other lines beginning with // 'From ', turning it into '>From '. This should make it easy to detect // message boundaries by matching a '^From ' regular expression // Unfortunately this quoting is quite often incorrect in the real world. // // The rest of the format for the line is somewhat variable, but there will // be a 4 digit year somewhere... // The canonic format is the following, with a 24 characters date: // From toto@tutu.com Sat Sep 30 16:44:06 2000 // This resulted into the pattern for versions up to 1.9.0: // "^From .* [1-2][0-9][0-9][0-9]$" // // Some mailers add a time zone to the date, this is non-"standard", // but happens, like in: // From toto@truc.com Sat Sep 30 16:44:06 2000 -0400 // // This is taken into account in the new regexp, which also matches more // of the date format, to catch a few actual issues like // From http://www.itu.int/newsroom/press/releases/1998/NP-2.html: // Note that this *should* have been quoted. 
// // http://www.qmail.org/man/man5/mbox.html seems to indicate that the // fact that From_ is normally preceded by a blank line should not be // used, but we do it anyway (for now). // The same source indicates that arbitrary data can follow the date field // // A variety of pathologic From_ lines: // Bad date format: // From uucp Wed May 22 11:28 GMT 1996 // Added timezone at the end (ok, part of the "any data" after the date) // From qian2@fas.harvard.edu Sat Sep 30 16:44:06 2000 -0400 // Emacs VM botch ? Adds tz between hour and year // From dockes Wed Feb 23 10:31:20 +0100 2005 // From dockes Fri Dec 1 20:36:39 +0100 2006 // The modified regexp gives the exact same results on the ietf mail archive // and my own's. // Update, 2008-08-29: some old? Thunderbird versions apparently use a date // in "Date: " header format, like: From - Mon, 8 May 2006 10:57:32 // This was added as an alternative format. By the way it also fools "mail" and // emacs-vm, Recoll is not alone // Update: 2009-11-27: word after From may be quoted string: From "john bull" static const string frompat{ "^From[ ]+([^ ]+|\"[^\"]+\")[ ]+" // 'From (toto@tutu|"john bull") ' "[[:alpha:]]{3}[ ]+[[:alpha:]]{3}[ ]+[0-3 ][0-9][ ]+" // Fri Oct 26 "[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?[ ]+" // Time, seconds optional "([^ ]+[ ]+)?" // Optional tz "[12][0-9][0-9][0-9]" // Year, unanchored, more data may follow "|" // Or standard mail Date: header format "^From[ ]+[^ ]+[ ]+" // From toto@tutu "[[:alpha:]]{3},[ ]+[0-3]?[0-9][ ]+[[:alpha:]]{3}[ ]+" // Mon, 8 May "[12][0-9][0-9][0-9][ ]+" // Year "[0-2][0-9]:[0-5][0-9](:[0-5][0-9])?" // Time, secs optional }; // Extreme thunderbird brokiness. Will sometimes use From lines // exactly like: From ^M (From followed by space and eol). We only // test for this if QUIRKS_TBIRD is set static const string miniTbirdFrom{"^From $"}; static SimpleRegexp fromregex(frompat, SimpleRegexp::SRE_NOSUB); static SimpleRegexp minifromregex(miniTbirdFrom, SimpleRegexp::SRE_NOSUB); static std::mutex o_mcache_mutex; /** * Handles a cache for message numbers to offset translations. Permits direct * accesses inside big folders instead of having to scan up to the right place * * Message offsets are saved to files stored under cfg(mboxcachedir), default * confdir/mboxcache. Mbox files smaller than cfg(mboxcacheminmbs) are not * cached. * Cache files are named as the md5 of the file UDI, which is kept in * the first block for possible collision detection. The 64 bits * offsets for all message "From_" lines follow. The format is purely * binary, values are not even byte-swapped to be proc-idependant. */ #define M_o_b1size 1024 class MboxCache { public: MboxCache() { // Can't access rclconfig here, we're a static object, would // have to make sure it's initialized. 
} ~MboxCache() {} int64_t get_offset(RclConfig *config, const string& udi, int msgnum, int64_t filesize) { LOGDEB0("MboxCache::get_offset: udi [" << udi << "] msgnum " << msgnum << "\n"); if (!ok(config)) { LOGDEB("MboxCache::get_offset: init failed\n"); return -1; } std::unique_lock locker(o_mcache_mutex); string fn = makefilename(udi); ifstream instream(fn.c_str(), std::ifstream::binary); if (!instream.good()) { if (filesize > m_minfsize) { LOGSYSERR("MboxCache::get_offset", "open", fn); } else { LOGDEB("MboxCache::get_offset: no cache for " << fn << endl); } return -1; } char blk1[M_o_b1size]; instream.read(blk1, M_o_b1size); if (!instream.good()) { LOGSYSERR("MboxCache::get_offset", "read blk1", ""); return -1; } ConfSimple cf(string(blk1, M_o_b1size)); string fudi; if (!cf.get("udi", fudi) || fudi.compare(udi)) { LOGINFO("MboxCache::get_offset:badudi fn " << fn << " udi [" << udi << "], fudi [" << fudi << "]\n"); return -1; } LOGDEB1("MboxCache::get_offset: reading offsets file at offs " << cacheoffset(msgnum) << "\n"); instream.seekg(cacheoffset(msgnum)); if (!instream.good()) { LOGSYSERR("MboxCache::get_offset", "seek", lltodecstr(cacheoffset(msgnum))); return -1; } int64_t offset = -1; instream.read((char*)&offset, sizeof(int64_t)); if (!instream.good()) { LOGSYSERR("MboxCache::get_offset", "read", ""); return -1; } LOGDEB0("MboxCache::get_offset: ret " << offset << "\n"); return offset; } // Save array of offsets for a given file, designated by Udi void put_offsets(RclConfig *config, const string& udi, int64_t fsize, vector& offs) { LOGDEB0("MboxCache::put_offsets: " << offs.size() << " offsets\n"); if (!ok(config) || !maybemakedir()) return; if (fsize < m_minfsize) { LOGDEB0("MboxCache::put_offsets: fsize " << fsize << " < minsize " << m_minfsize << endl); return; } std::unique_lock locker(o_mcache_mutex); string fn = makefilename(udi); std::ofstream os(fn.c_str(), std::ios::out|std::ios::binary); if (!os.good()) { LOGSYSERR("MboxCache::put_offsets", "open", fn); return; } string blk1("udi="); blk1.append(udi); blk1.append(cstr_newline); blk1.resize(M_o_b1size, 0); os.write(blk1.c_str(), M_o_b1size); if (!os.good()) { LOGSYSERR("MboxCache::put_offsets", "write blk1", ""); return; } for (const auto& off : offs) { LOGDEB1("MboxCache::put_offsets: writing value " << off << " at offset " << ftello(fp) << endl); os.write((char*)&off, sizeof(int64_t)); if (!os.good()) { LOGSYSERR("MboxCache::put_offsets", "write", ""); return; } } os.flush(); if (!os.good()) { LOGSYSERR("MboxCache::put_offsets", "flush", ""); return; } } // Check state, possibly initialize bool ok(RclConfig *config) { std::unique_lock locker(o_mcache_mutex); if (m_minfsize == -1) return false; if (!m_ok) { int minmbs = 5; config->getConfParam("mboxcacheminmbs", &minmbs); if (minmbs < 0) { // minmbs set to negative to disable cache m_minfsize = -1; return false; } m_minfsize = minmbs * 1000 * 1000; m_dir = config->getMboxcacheDir(); m_ok = true; } return m_ok; } private: bool m_ok{false}; // Place where we store things string m_dir; // Don't cache smaller files. If -1, don't do anything. 
int64_t m_minfsize{0}; // Create the cache directory if it does not exist bool maybemakedir() { if (!path_makepath(m_dir, 0700)) { LOGSYSERR("MboxCache::maybemakedir", "path_makepath", m_dir); return false; } return true; } // Compute file name from udi string makefilename(const string& udi) { string digest, xdigest; MD5String(udi, digest); MD5HexPrint(digest, xdigest); return path_cat(m_dir, xdigest); } // Compute offset in cache file for the mbox offset of msgnum // Msgnums are from 1 int64_t cacheoffset(int msgnum) { return M_o_b1size + (msgnum-1) * sizeof(int64_t); } }; static class MboxCache o_mcache; static const string cstr_keyquirks("mhmboxquirks"); enum Quirks {MBOXQUIRK_TBIRD=1}; class MimeHandlerMbox::Internal { public: Internal(MimeHandlerMbox *p) : pthis(p) {} std::string fn; // File name std::string ipath; ifstream instream; int msgnum{0}; // Current message number in folder. Starts at 1 int64_t lineno{0}; // debug int64_t fsize{0}; std::vector offsets; int quirks; MimeHandlerMbox *pthis; bool tryUseCache(int mtarg); }; MimeHandlerMbox::MimeHandlerMbox(RclConfig *cnf, const std::string& id) : RecollFilter(cnf, id) { m = new Internal(this); string smbs; m_config->getConfParam("mboxmaxmsgmbs", smbs); if (!smbs.empty()) { max_mbox_member_size = (unsigned int)atoi(smbs.c_str()) *1024*1024; } LOGDEB0("MimeHandlerMbox::MimeHandlerMbox: max_mbox_member_size (MB): " << max_mbox_member_size / (1024*1024) << endl); } MimeHandlerMbox::~MimeHandlerMbox() { if (m) { clear(); delete m; } } void MimeHandlerMbox::clear_impl() { m->fn.erase(); m->ipath.erase(); m->instream = ifstream(); m->msgnum = m->lineno = m->fsize = 0; m->offsets.clear(); m->quirks = 0; } bool MimeHandlerMbox::skip_to_document(const std::string& ipath) { m->ipath = ipath; return true; } bool MimeHandlerMbox::set_document_file_impl(const string& mt, const string &fn) { LOGDEB("MimeHandlerMbox::set_document_file(" << fn << ")\n"); clear_impl(); m->fn = fn; m->instream = ifstream(fn.c_str(), std::ifstream::binary); if (!m->instream.good()) { LOGSYSERR("MimeHandlerMail::set_document_file", "ifstream", fn); return false; } // TBD #if 0 && defined O_NOATIME && O_NOATIME != 0 if (fcntl(fileno((FILE *)m->vfp), F_SETFL, O_NOATIME) < 0) { // perror("fcntl"); } #endif m->fsize = path_filesize(fn); m_havedoc = true; // Check for location-based quirks: string quirks; if (m_config && m_config->getConfParam(cstr_keyquirks, quirks)) { if (quirks == "tbird") { LOGDEB("MimeHandlerMbox: setting quirks TBIRD\n"); m->quirks |= MBOXQUIRK_TBIRD; } } // And double check for thunderbird string tbirdmsf = fn + ".msf"; if (!(m->quirks & MBOXQUIRK_TBIRD) && path_exists(tbirdmsf)) { LOGDEB("MimeHandlerMbox: detected unconf'd tbird mbox in "<< fn <<"\n"); m->quirks |= MBOXQUIRK_TBIRD; } return true; } bool MimeHandlerMbox::Internal::tryUseCache(int mtarg) { bool cachefound = false; string line; int64_t off; LOGDEB0("MimeHandlerMbox::next_doc: mtarg " << mtarg << " m_udi[" << pthis->m_udi << "]\n"); if (pthis->m_udi.empty()) { goto out; } if ((off = o_mcache.get_offset(pthis->m_config, pthis->m_udi, mtarg, fsize)) < 0) { goto out; } instream.seekg(off); if (!instream.good()) { LOGSYSERR("tryUseCache", "seekg", ""); goto out; } getline(instream, line, '\n'); if (!instream.good()) { LOGSYSERR("tryUseCache", "getline", ""); goto out; } LOGDEB1("MimeHandlerMbox::tryUseCache:getl ok. 
line:[" << line << "]\n"); if ((fromregex(line) || ((quirks & MBOXQUIRK_TBIRD) && minifromregex(line))) ) { LOGDEB0("MimeHandlerMbox: Cache: From_ Ok\n"); instream.seekg(off); msgnum = mtarg -1; cachefound = true; } else { LOGDEB0("MimeHandlerMbox: cache: regex failed for [" << line << "]\n"); } out: if (!cachefound) { // No cached result: scan. instream.seekg(0); msgnum = 0; } return cachefound; } bool MimeHandlerMbox::next_document() { if (!m->instream.good()) { LOGERR("MimeHandlerMbox::next_document: not open\n"); return false; } if (!m_havedoc) { return false; } int mtarg = 0; if (!m->ipath.empty()) { sscanf(m->ipath.c_str(), "%d", &mtarg); } else if (m_forPreview) { // Can't preview an mbox. LOGDEB("MimeHandlerMbox::next_document: can't preview folders!\n"); return false; } LOGDEB0("MimeHandlerMbox::next_document: fn " << m->fn << ", msgnum " << m->msgnum << " mtarg " << mtarg << " \n"); if (mtarg == 0) mtarg = -1; // If we are called to retrieve a specific message, try to use the // offsets cache to try and position to the right header. bool storeoffsets = true; if (mtarg > 0) { storeoffsets = !m->tryUseCache(mtarg); } int64_t message_end = 0; bool iseof = false; bool hademptyline = true; string& msgtxt = m_metaData[cstr_dj_keycontent]; msgtxt.erase(); string line; for (;;) { message_end = m->instream.tellg(); getline(m->instream, line, '\n'); if (!m->instream.good()) { ifstream::iostate st = m->instream.rdstate(); if (st & std::ifstream::eofbit) { LOGDEB0("MimeHandlerMbox:next: eof at " << message_end << endl); } else { if (st & std::ifstream::failbit) { LOGDEB0("MimeHandlerMbox:next: failbit\n"); LOGSYSERR("MimeHandlerMbox:next:", "", ""); } if (st & std::ifstream::badbit) { LOGDEB0("MimeHandlerMbox:next: badbit\n"); LOGSYSERR("MimeHandlerMbox:next:", "", ""); } if (st & std::ifstream::goodbit) { LOGDEB1("MimeHandlerMbox:next: good\n"); } } iseof = true; m->msgnum++; break; } m->lineno++; rtrimstring(line, "\r\n"); int ll = line.size(); LOGDEB2("mhmbox:next: hadempty " << hademptyline << " lineno " << m->lineno << " ll " << ll << " Line: [" << line << "]\n"); if (hademptyline) { if (ll > 0) { // Non-empty line with empty line flag set, reset flag // and check regex. 
if (!(m->quirks & MBOXQUIRK_TBIRD)) { // Tbird sometimes ommits the empty line, so avoid // resetting state (initially true) and hope for // the best hademptyline = false; } /* The 'F' compare is redundant but it improves performance A LOT */ if (line[0] == 'F' && ( fromregex(line) || ((m->quirks & MBOXQUIRK_TBIRD) && minifromregex(line))) ) { LOGDEB1("MimeHandlerMbox: msgnum " << m->msgnum << ", From_ at line " << m->lineno << " foffset " << message_end << " line: [" << line << "]\n"); if (storeoffsets) { m->offsets.push_back(message_end); } m->msgnum++; if ((mtarg <= 0 && m->msgnum > 1) || (mtarg > 0 && m->msgnum > mtarg)) { // Got message, go do something with it break; } // From_ lines are not part of messages continue; } } } else if (ll <= 0) { hademptyline = true; } if (mtarg <= 0 || m->msgnum == mtarg) { // Accumulate message lines line += '\n'; msgtxt += line; if (msgtxt.size() > max_mbox_member_size) { LOGERR("mh_mbox: huge message (more than " << max_mbox_member_size/(1024*1024) << " MB) inside " << m->fn << ", giving up\n"); return false; } } } LOGDEB2("Message text length " << msgtxt.size() << "\n"); LOGDEB2("Message text: [" << msgtxt << "]\n"); char buf[20]; // m->msgnum was incremented when hitting the next From_ or eof, so the data // is for m->msgnum - 1 sprintf(buf, "%d", m->msgnum - 1); m_metaData[cstr_dj_keyipath] = buf; m_metaData[cstr_dj_keymt] = "message/rfc822"; if (iseof) { LOGDEB2("MimeHandlerMbox::next: eof hit\n"); m_havedoc = false; if (!m_udi.empty() && storeoffsets) { o_mcache.put_offsets(m_config, m_udi, m->fsize, m->offsets); } } return msgtxt.empty() ? false : true; } recoll-1.26.3/internfile/indextext.h0000644000175000017500000000230513533651561014340 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _INDEXTEXT_H_INCLUDED_ #define _INDEXTEXT_H_INCLUDED_ /* Note: this only exists to help with using myhtmlparse.cc */ #include // lets hope that the charset includes ascii values... static inline void lowercase_term(std::string &term) { std::string::iterator i = term.begin(); while (i != term.end()) { if (*i >= 'A' && *i <= 'Z') *i = *i + 'a' - 'A'; i++; } } #endif /* _INDEXTEXT_H_INCLUDED_ */ recoll-1.26.3/internfile/uncomp.cpp0000644000175000017500000001103313533651561014156 00000000000000/* Copyright (C) 2013 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "autoconfig.h" #include #include #include #include #include "uncomp.h" #include "log.h" #include "smallut.h" #include "execmd.h" #include "pathut.h" using std::map; using std::string; using std::vector; Uncomp::UncompCache Uncomp::o_cache; Uncomp::Uncomp(bool docache) : m_docache(docache) { LOGDEB0("Uncomp::Uncomp: m_docache: " << m_docache << "\n"); } bool Uncomp::uncompressfile(const string& ifn, const vector& cmdv, string& tfile) { if (m_docache) { std::unique_lock lock(o_cache.m_lock); if (!o_cache.m_srcpath.compare(ifn)) { m_dir = o_cache.m_dir; m_tfile = tfile = o_cache.m_tfile; m_srcpath = ifn; o_cache.m_dir = 0; o_cache.m_srcpath.clear(); return true; } } m_srcpath.clear(); m_tfile.clear(); if (m_dir == 0) { m_dir = new TempDir; } // Make sure tmp dir is empty. we guarantee this to filters if (!m_dir || !m_dir->ok() || !m_dir->wipe()) { LOGERR("uncompressfile: can't clear temp dir " << m_dir->dirname() << "\n"); return false; } // Check that we have enough available space to have some hope of // decompressing the file. int pc; long long availmbs; if (!fsocc(m_dir->dirname(), &pc, &availmbs)) { LOGERR("uncompressfile: can't retrieve avail space for " << m_dir->dirname() << "\n"); // Hope for the best } else { long long fsize = path_filesize(ifn); if (fsize < 0) { LOGERR("uncompressfile: stat input file " << ifn << " errno " << errno << "\n"); return false; } // We need at least twice the file size for the uncompressed // and compressed versions. Most compressors don't store the // uncompressed size, so we have no way to be sure that we // have enough space before trying. We take a little margin // use same Mb def as fsocc() long long filembs = fsize / (1024 * 1024); if (availmbs < 2 * filembs + 1) { LOGERR("uncompressfile. 
" << availmbs << " MBs available in " << m_dir->dirname() << " not enough to uncompress " << ifn << " of size " << filembs << " MBs\n"); return false; } } string cmd = cmdv.front(); // Substitute file name and temp dir in command elements vector::const_iterator it = cmdv.begin(); ++it; vector args; map subs; subs['f'] = ifn; subs['t'] = m_dir->dirname(); for (; it != cmdv.end(); it++) { string ns; pcSubst(*it, ns, subs); args.push_back(ns); } // Execute command and retrieve output file name, check that it exists ExecCmd ex; int status = ex.doexec(cmd, args, 0, &tfile); if (status || tfile.empty()) { LOGERR("uncompressfile: doexec: " << cmd << " " << stringsToString(args) << " failed for [" << ifn << "] status 0x" << status << "\n"); if (!m_dir->wipe()) { LOGERR("uncompressfile: wipedir failed\n"); } return false; } rtrimstring(tfile, "\n\r"); m_tfile = tfile; m_srcpath = ifn; return true; } Uncomp::~Uncomp() { LOGDEB0("Uncomp::~Uncomp: m_docache: " << m_docache << " m_dir " << (m_dir?m_dir->dirname():"(null)") << "\n"); if (m_docache) { std::unique_lock lock(o_cache.m_lock); delete o_cache.m_dir; o_cache.m_dir = m_dir; o_cache.m_tfile = m_tfile; o_cache.m_srcpath = m_srcpath; } else { delete m_dir; } } void Uncomp::clearcache() { LOGDEB0("Uncomp::clearcache\n"); std::unique_lock lock(o_cache.m_lock); delete o_cache.m_dir; o_cache.m_dir = 0; o_cache.m_tfile.clear(); o_cache.m_srcpath.clear(); } recoll-1.26.3/internfile/mimehandler.h0000644000175000017500000001263213533651561014615 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _MIMEHANDLER_H_INCLUDED_ #define _MIMEHANDLER_H_INCLUDED_ #include "autoconfig.h" #include #include #include #include "Filter.h" #include "cstr.h" #include "smallut.h" class RclConfig; class RecollFilter : public Dijon::Filter { public: RecollFilter(RclConfig *config, const std::string& id) : m_config(config), m_id(id) { } virtual ~RecollFilter() {} virtual void setConfig(RclConfig *config) { m_config = config; } virtual bool set_property(Properties p, const std::string &v) { switch (p) { case DJF_UDI: m_udi = v; break; case DEFAULT_CHARSET: m_dfltInputCharset = v; break; case OPERATING_MODE: if (!v.empty() && v[0] == 'v') m_forPreview = true; else m_forPreview = false; break; } return true; } // We don't use this for now virtual bool set_document_uri(const std::string& mtype, const std::string &) { m_mimeType = mtype; return false; } virtual bool set_document_file(const std::string& mtype, const std::string &file_path) { m_mimeType = mtype; return set_document_file_impl(mtype, file_path); } virtual bool set_document_string(const std::string& mtype, const std::string &contents) { m_mimeType = mtype; return set_document_string_impl(mtype, contents); } virtual bool set_document_data(const std::string& mtype, const char *cp, size_t sz) { return set_document_string(mtype, std::string(cp, sz)); } virtual void set_docsize(int64_t size) { m_docsize = size; } virtual int64_t get_docsize() const { return m_docsize; } virtual bool has_documents() const { return m_havedoc; } // Most doc types are single-doc virtual bool skip_to_document(const std::string& s) { if (s.empty()) return true; return false; } virtual bool is_data_input_ok(DataInput input) const { if (input == DOCUMENT_FILE_NAME) return true; return false; } virtual std::string get_error() const { return m_reason; } virtual const std::string& get_id() const { return m_id; } // Classes which need to do local work in clear() need // to implement clear_impl() virtual void clear() final { clear_impl(); Dijon::Filter::clear(); m_forPreview = m_havedoc = false; m_dfltInputCharset.clear(); m_reason.clear(); } virtual void clear_impl() {} // This only makes sense if the contents are currently txt/plain // It converts from keyorigcharset to UTF-8 and sets keycharset. bool txtdcode(const std::string& who); std::string metadataAsString(); protected: // We provide default implementation as not all handlers need both methods virtual bool set_document_file_impl(const std::string&, const std::string&) { return m_havedoc = true; } virtual bool set_document_string_impl(const std::string&, const std::string&) { return m_havedoc = true; } bool preview() { return m_forPreview; } RclConfig *m_config; bool m_forPreview{false}; std::string m_dfltInputCharset; std::string m_reason; bool m_havedoc{false}; std::string m_udi; // May be set by creator as a hint // m_id is and md5 of the filter definition line (from mimeconf) and // is used when fetching/returning filters to / from the cache. std::string m_id; int64_t m_docsize{0}; // Size of the top document }; /** * Return indexing handler object for the given mime type. The returned * pointer should be passed to returnMimeHandler() for recycling, after use. * @param mtyp input mime type, ie text/plain * @param cfg the recoll config object to be used * @param filtertypes decide if we should restrict to types in * indexedmimetypes (if this is set at all). 
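 *
 * A minimal usage sketch (illustrative only, not a verbatim excerpt from the
 * callers; assumes 'config' is an initialized RclConfig pointer and omits
 * error handling):
 *
 *     RecollFilter *df = getMimeHandler("text/plain", config, true);
 *     if (df && df->set_document_file("text/plain", "/tmp/somefile.txt")) {
 *         while (df->has_documents() && df->next_document()) {
 *             // The document fields (content, mime type, possible ipath...)
 *             // are now in the filter metadata; metadataAsString()
 *             // returns a printable dump.
 *         }
 *     }
 *     returnMimeHandler(df);
 *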
*/ extern RecollFilter *getMimeHandler(const std::string &mtyp, RclConfig *cfg, bool filtertypes); /// Free up filter for reuse (you can also delete it) extern void returnMimeHandler(RecollFilter *); /// Clean up cache at the end of an indexing pass. For people who use /// the GUI to index: avoid all those filter processes forever hanging /// off recoll. extern void clearMimeHandlerCache(); namespace Rcl { class Doc; } /// Can this mime type be interned ? extern bool canIntern(const std::string mimetype, RclConfig *cfg); /// Same, getting MIME from doc extern bool canIntern(Rcl::Doc *doc, RclConfig *cfg); /// Can this MIME type be opened (has viewer def) ? extern bool canOpen(Rcl::Doc *doc, RclConfig *cfg); #endif /* _MIMEHANDLER_H_INCLUDED_ */ recoll-1.26.3/internfile/mh_html.cpp0000644000175000017500000001350113533651561014307 00000000000000/* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include "cstr.h" #include "mimehandler.h" #include "log.h" #include "readfile.h" #include "transcode.h" #include "mimeparse.h" #include "myhtmlparse.h" #include "indextext.h" #include "mh_html.h" #include "smallut.h" #include "md5ut.h" #include using namespace std; bool MimeHandlerHtml::set_document_file_impl(const string& mt, const string &fn) { LOGDEB0("textHtmlToDoc: " << fn << "\n"); string otext; string reason; if (!file_to_string(fn, otext, &reason)) { LOGERR("textHtmlToDoc: cant read: " << fn << ": " << reason << "\n"); return false; } m_filename = fn; return set_document_string(mt, otext); } bool MimeHandlerHtml::set_document_string_impl(const string& mt, const string& htext) { m_html = htext; m_havedoc = true; if (!m_forPreview) { // We want to compute the md5 now because we may modify m_html later string md5, xmd5; MD5String(htext, md5); m_metaData[cstr_dj_keymd5] = MD5HexPrint(md5, xmd5); } return true; } bool MimeHandlerHtml::next_document() { if (m_havedoc == false) return false; m_havedoc = false; // If set_doc(fn), take note of file name. string fn = m_filename; m_filename.erase(); string charset = m_dfltInputCharset; LOGDEB("MHHtml::next_doc.: default supposed input charset: [" << charset << "]\n"); // Override default input charset if someone took care to set one: map::const_iterator it = m_metaData.find(cstr_dj_keycharset); if (it != m_metaData.end() && !it->second.empty()) { charset = it->second; LOGDEB("MHHtml: next_doc.: input charset from ext. metadata: [" << charset << "]\n"); } // - We first try to convert from the supposed charset // (which may depend of the current directory) to utf-8. If this // fails, we keep the original text // - During parsing, if we find a charset parameter, and it differs from // what we started with, we abort and restart with the parameter value // instead of the configuration one. 
MyHtmlParser result; for (int pass = 0; pass < 2; pass++) { string transcoded; LOGDEB("Html::mkDoc: pass " << pass << "\n"); MyHtmlParser p; // Try transcoding. If it fails, use original text. int ecnt; if (!transcode(m_html, transcoded, charset, "UTF-8", &ecnt)) { LOGDEB("textHtmlToDoc: transcode failed from cs '" << charset << "' to UTF-8 for[" << (fn.empty()?"unknown":fn) << "]"); transcoded = m_html; // We don't know the charset, at all p.reset_charsets(); charset.clear(); } else { if (ecnt) { if (pass == 0) { LOGDEB("textHtmlToDoc: init transcode had " << ecnt << " errors for ["<<(fn.empty()?"unknown":fn)<< "]\n"); } else { LOGERR("textHtmlToDoc: final transcode had " << ecnt << " errors for ["<< (fn.empty()?"unknown":fn)<< "]\n"); } } // charset has the putative source charset, transcoded is now // in utf-8 p.set_charsets(charset, "utf-8"); } try { p.parse_html(transcoded); // No exception: ok? But throw true to use the same // code path as if an exception had been thrown by parse_html throw true; break; } catch (bool diag) { result = p; if (diag == true) { // Parser throws true at end of text. ok if (m_forPreview) { // Save the html text m_html = transcoded; // In many cases, we need to change the charset decl, // because the file was transcoded. It seems that just // inserting one is enough (only the 1st one seems to // be used by browsers/qtextedit). string::size_type idx = m_html.find(""); if (idx == string::npos) idx = m_html.find(""); if (idx != string::npos) m_html.replace(idx+6, 0, ""); } break; } LOGDEB("textHtmlToDoc: charset [" << charset << "] doc charset ["<< result.get_charset() << "]\n"); if (!result.get_charset().empty() && !samecharset(result.get_charset(), result.fromcharset)) { LOGDEB("textHtmlToDoc: reparse for charsets\n"); // Set the origin charset as specified in document before // transcoding again charset = result.get_charset(); } else { LOGERR("textHtmlToDoc:: error: non charset exception\n"); return false; } } } m_metaData[cstr_dj_keyorigcharset] = result.get_charset(); m_metaData[cstr_dj_keycontent] = result.dump; m_metaData[cstr_dj_keycharset] = cstr_utf8; // Avoid setting empty values which would crush ones possibly inherited // from parent (if we're an attachment) if (!result.dmtime.empty()) m_metaData[cstr_dj_keymd] = result.dmtime; m_metaData[cstr_dj_keymt] = cstr_textplain; for (map::const_iterator it = result.meta.begin(); it != result.meta.end(); it++) { if (!it->second.empty()) m_metaData[it->first] = it->second; } return true; } recoll-1.26.3/internfile/extrameta.h0000644000175000017500000000345613533651561014326 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #ifndef _REAPXATTRS_H_INCLUDED_ #define _REAPXATTRS_H_INCLUDED_ #include "autoconfig.h" /** Extended attributes processing helper functions */ #include #include class RclConfig; namespace Rcl {class Doc;}; /** Read external attributes, possibly ignore some or change the names according to the fields configuration */ extern void reapXAttrs(const RclConfig* config, const std::string& path, std::map& xfields); /** Turn the pre-processed extended file attributes into doc fields */ extern void docFieldsFromXattrs( RclConfig *cfg, const std::map& xfields, Rcl::Doc& doc); /** Get metadata by executing commands */ extern void reapMetaCmds(RclConfig* config, const std::string& path, std::map& xfields); /** Turn the pre-processed ext cmd metadata into doc fields */ extern void docFieldsFromMetaCmds( RclConfig *cfg, const std::map& xfields, Rcl::Doc& doc); #endif /* _REAPXATTRS_H_INCLUDED_ */ recoll-1.26.3/internfile/mh_symlink.h0000644000175000017500000000420413533651561014476 00000000000000/* Copyright (C) 2004 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #ifndef _MH_SYMLINK_H_INCLUDED_ #define _MH_SYMLINK_H_INCLUDED_ #include #include "safeunistd.h" #include #include "cstr.h" #include "mimehandler.h" #include "transcode.h" #include "pathut.h" #include "log.h" /** Index symlink target * * Not sure that this is such a good idea, so it's disabled by default in * the config. Add inode/symlink = internal to the index section of mimeconf * to enable. */ class MimeHandlerSymlink : public RecollFilter { public: MimeHandlerSymlink(RclConfig *cnf, const std::string& id) : RecollFilter(cnf, id) { } virtual ~MimeHandlerSymlink() {} virtual bool next_document() { if (m_havedoc == false) return false; m_havedoc = false; m_metaData[cstr_dj_keycontent] = cstr_null; char lc[1024]; ssize_t bytes = readlink(m_fn.c_str(), lc, 1024); if (bytes != (ssize_t)-1) { string slc(lc, bytes); transcode(path_getsimple(slc), m_metaData[cstr_dj_keycontent], m_config->getDefCharset(true), "UTF-8"); } else { LOGDEB("Symlink: readlink [" << m_fn << "] failed, errno " << errno << "\n"); } m_metaData[cstr_dj_keymt] = cstr_textplain; return true; } protected: virtual bool set_document_file_impl(const string& mt, const string& fn) { m_fn = fn; return m_havedoc = true; } private: std::string m_fn; }; #endif /* _MH_SYMLINK_H_INCLUDED_ */ recoll-1.26.3/internfile/mh_execm.cpp0000644000175000017500000002630413533651561014451 00000000000000 /* Copyright (C) 2005 J.F.Dockes * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include #include #include using namespace std; #include "cstr.h" #include "mh_execm.h" #include "mh_html.h" #include "log.h" #include "cancelcheck.h" #include "smallut.h" #include "md5ut.h" #include "rclconfig.h" #include "mimetype.h" #include "idfile.h" #include #include "safesyswait.h" bool MimeHandlerExecMultiple::startCmd() { LOGDEB("MimeHandlerExecMultiple::startCmd\n"); if (params.empty()) { // Hu ho LOGERR("MHExecMultiple::startCmd: empty params\n"); m_reason = "RECFILTERROR BADCONFIG"; return false; } // Command name string cmd = params.front(); m_maxmemberkb = 50000; m_config->getConfParam("membermaxkbs", &m_maxmemberkb); ostringstream oss; oss << "RECOLL_FILTER_MAXMEMBERKB=" << m_maxmemberkb; m_cmd.putenv(oss.str()); m_cmd.putenv("RECOLL_CONFDIR", m_config->getConfDir()); m_cmd.putenv(m_forPreview ? "RECOLL_FILTER_FORPREVIEW=yes" : "RECOLL_FILTER_FORPREVIEW=no"); m_cmd.setrlimit_as(m_filtermaxmbytes); m_adv.setmaxsecs(m_filtermaxseconds); m_cmd.setAdvise(&m_adv); // Build parameter list: delete cmd name vectormyparams(params.begin() + 1, params.end()); if (m_cmd.startExec(cmd, myparams, 1, 1) < 0) { m_reason = string("RECFILTERROR HELPERNOTFOUND ") + cmd; missingHelper = true; return false; } return true; } // Note: data is not used if this is the "document:" field: it goes // directly to m_metaData[cstr_dj_keycontent] to avoid an extra copy // // Messages are made of data elements. Each element is like: // name: len\ndata // An empty line signals the end of the message, so the whole thing // would look like: // Name1: Len1\nData1Name2: Len2\nData2\n bool MimeHandlerExecMultiple::readDataElement(string& name, string &data) { string ibuf; // Read name and length if (m_cmd.getline(ibuf) <= 0) { LOGERR("MHExecMultiple: getline error\n"); return false; } LOGDEB1("MHEM:rde: line [" << ibuf << "]\n"); // Empty line (end of message) ? if (!ibuf.compare("\n")) { LOGDEB("MHExecMultiple: Got empty line\n"); name.clear(); return true; } // Filters will sometimes abort before entering the real protocol, ie if // a module can't be loaded. 
Check the special filter error first word: if (ibuf.find("RECFILTERROR ") == 0) { m_reason = ibuf; if (ibuf.find("HELPERNOTFOUND") != string::npos) missingHelper = true; return false; } // We're expecting something like Name: len\n vector tokens; stringToTokens(ibuf, tokens); if (tokens.size() != 2) { LOGERR("MHExecMultiple: bad line in filter output: [" << ibuf << "]\n"); return false; } vector::iterator it = tokens.begin(); name = *it++; string& slen = *it; int len; if (sscanf(slen.c_str(), "%d", &len) != 1) { LOGERR("MHExecMultiple: bad line in filter output: [" << ibuf << "]\n"); return false; } if (len / 1024 > m_maxmemberkb) { LOGERR("MHExecMultiple: data len > maxmemberkb\n"); return false; } // Hack: check for 'Document:' and read directly the document data // to m_metaData[cstr_dj_keycontent] to avoid an extra copy of the bulky // piece string *datap = &data; if (!stringlowercmp("document:", name)) { datap = &m_metaData[cstr_dj_keycontent]; } else { datap = &data; } // Read element data datap->erase(); if (len > 0 && m_cmd.receive(*datap, len) != len) { LOGERR("MHExecMultiple: expected " << len << " bytes of data, got " << datap->length() << "\n"); return false; } LOGDEB1("MHExecMe:rdDtElt got: name [" << name << "] len " << len << "value [" << (datap->size() > 100 ? (datap->substr(0, 100) + " ...") : datap) << endl); return true; } bool MimeHandlerExecMultiple::next_document() { LOGDEB("MimeHandlerExecMultiple::next_document(): [" << m_fn << "]\n"); if (m_havedoc == false) return false; if (missingHelper) { LOGDEB("MHExecMultiple::next_document(): helper known missing\n"); return false; } if (m_cmd.getChildPid() <= 0 && !startCmd()) { return false; } m_metaData.clear(); // Send request to child process. This maybe the first/only // request for a given file, or a continuation request. We send an // empty file name in the latter case. // We also compute the file md5 before starting the extraction: // under Windows, we may not be able to do it while the file // is opened by the filter. 
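    // Illustrative request examples (made-up values, no default charset
    // set). Indexing /tmp/a.zip starts with:
    //   FileName: 10\n/tmp/a.zipMimetype: 15\napplication/zip\n
    // Continuation requests for the following members send an empty name:
    //   Filename: 0\nMimetype: 15\napplication/zip\n
    // Retrieving a single member adds its ipath, e.g.:
    //   FileName: 10\n/tmp/a.zipIpath: 5\nf.txtMimetype: 15\napplication/zip\n
    // The trailing bare newline is the empty line terminating the message
    // (same data element format as described above readDataElement()).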
ostringstream obuf; string file_md5; if (m_filefirst) { if (!m_forPreview && !m_nomd5) { string md5, xmd5, reason; if (MD5File(m_fn, md5, &reason)) { file_md5 = MD5HexPrint(md5, xmd5); } else { LOGERR("MimeHandlerExecM: cant compute md5 for [" << m_fn << "]: " << reason << "\n"); } } obuf << "FileName: " << m_fn.length() << "\n" << m_fn; // m_filefirst is set to true by set_document_file() m_filefirst = false; } else { obuf << "Filename: " << 0 << "\n"; } if (!m_ipath.empty()) { LOGDEB("next_doc: sending ipath " << m_ipath.length() << " val [" << m_ipath << "]\n"); obuf << "Ipath: " << m_ipath.length() << "\n" << m_ipath; } if (!m_dfltInputCharset.empty()) { obuf << "DflInCS: " << m_dfltInputCharset.length() << "\n" << m_dfltInputCharset; } obuf << "Mimetype: " << m_mimeType.length() << "\n" << m_mimeType; obuf << "\n"; if (m_cmd.send(obuf.str()) < 0) { m_cmd.zapChild(); LOGERR("MHExecMultiple: send error\n"); return false; } m_adv.reset(); // Read answer (multiple elements) LOGDEB1("MHExecMultiple: reading answer\n"); bool eofnext_received = false; bool eofnow_received = false; bool fileerror_received = false; bool subdocerror_received = false; string ipath; string mtype; string charset; for (int loop=0;;loop++) { string name, data; try { if (!readDataElement(name, data)) { m_cmd.zapChild(); return false; } } catch (HandlerTimeout) { LOGINFO("MHExecMultiple: timeout\n"); m_cmd.zapChild(); return false; } catch (CancelExcept) { LOGINFO("MHExecMultiple: interrupt\n"); m_cmd.zapChild(); return false; } if (name.empty()) break; if (!stringlowercmp("eofnext:", name)) { LOGDEB("MHExecMultiple: got EOFNEXT\n"); eofnext_received = true; } else if (!stringlowercmp("eofnow:", name)) { LOGDEB("MHExecMultiple: got EOFNOW\n"); eofnow_received = true; } else if (!stringlowercmp("fileerror:", name)) { LOGDEB("MHExecMultiple: got FILEERROR\n"); fileerror_received = true; } else if (!stringlowercmp("subdocerror:", name)) { LOGDEB("MHExecMultiple: got SUBDOCERROR\n"); subdocerror_received = true; } else if (!stringlowercmp("ipath:", name)) { ipath = data; LOGDEB("MHExecMultiple: got ipath [" << data << "]\n"); } else if (!stringlowercmp("charset:", name)) { charset = data; LOGDEB("MHExecMultiple: got charset [" << data << "]\n"); } else if (!stringlowercmp("mimetype:", name)) { mtype = data; LOGDEB("MHExecMultiple: got mimetype [" << data << "]\n"); } else { string nm = stringtolower((const string&)name); trimstring(nm, ":"); LOGDEB("MHExecMultiple: got [" << nm << "] -> [" << data << "]\n"); m_metaData[nm] += data; } if (loop == 200) { // ?? LOGERR("MHExecMultiple: handler sent more than 200 attributes\n"); return false; } } if (eofnow_received || fileerror_received) { // No more docs m_havedoc = false; return false; } if (subdocerror_received) { return false; } // It used to be that eof could be signalled just by an empty document, but // this was wrong. Empty documents can be found ie in zip files and should // not be interpreted as eof. if (m_metaData[cstr_dj_keycontent].empty()) { LOGDEB0("MHExecMultiple: got empty document inside [" << m_fn << "]: [" << ipath << "]\n"); } if (!ipath.empty()) { // If this has an ipath, it is an internal doc from a // multi-document file. 
In this case, either the filter // supplies the mimetype, or the ipath MUST be a filename-like // string which we can use to compute a mime type m_metaData[cstr_dj_keyipath] = ipath; if (mtype.empty()) { LOGDEB0("MHExecMultiple: no mime type from filter, using ipath " "for a guess\n"); mtype = mimetype(ipath, 0, m_config, false); if (mtype.empty()) { // mimetype() won't call idFile when there is no file. Do it mtype = idFileMem(m_metaData[cstr_dj_keycontent]); if (mtype.empty()) { // Note this happens for example for directory zip members // We could recognize them by the end /, but wouldn't know // what to do with them anyway. LOGINFO("MHExecMultiple: cant guess mime type\n"); mtype = "application/octet-stream"; } } } m_metaData[cstr_dj_keymt] = mtype; if (!m_forPreview) { string md5, xmd5; MD5String(m_metaData[cstr_dj_keycontent], md5); m_metaData[cstr_dj_keymd5] = MD5HexPrint(md5, xmd5); } } else { // "Self" document. m_metaData[cstr_dj_keymt] = mtype.empty() ? cstr_texthtml : mtype; m_metaData.erase(cstr_dj_keyipath); if (!m_forPreview) { m_metaData[cstr_dj_keymd5] = file_md5; } } handle_cs(m_metaData[cstr_dj_keymt], charset); if (eofnext_received) m_havedoc = false; LOGDEB0("MHExecMultiple: returning " << m_metaData[cstr_dj_keycontent].size() << " bytes of content, mtype [" << m_metaData[cstr_dj_keymt] << "] charset [" << m_metaData[cstr_dj_keycharset] << "]\n"); LOGDEB2("MHExecMultiple: metadata: \n" << metadataAsString()); return true; } recoll-1.26.3/internfile/htmlparse.cpp0000644000175000017500000002465713303776060014671 00000000000000/* This file was copied/updated from xapian-omega-1.0.1 to 1.2.6 and modified */ /* htmlparse.cc: simple HTML parser for omega indexer * * Copyright 1999,2000,2001 BrightStation PLC * Copyright 2001 Ananova Ltd * Copyright 2002,2006,2007,2008,2009,2010,2011 Olly Betts * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 * USA */ #include using std::find; using std::find_if; #include "htmlparse.h" #include #include #include inline void lowercase_string(string &str) { for (string::iterator i = str.begin(); i != str.end(); ++i) { *i = tolower(static_cast(*i)); } } map HtmlParser::named_ents; inline static bool p_notdigit(char c) { return !isdigit(static_cast(c)); } inline static bool p_notxdigit(char c) { return !isxdigit(static_cast(c)); } inline static bool p_notalnum(char c) { return !isalnum(static_cast(c)); } inline static bool p_notwhitespace(char c) { return !isspace(static_cast(c)); } inline static bool p_nottag(char c) { return !isalnum(static_cast(c)) && c != '.' && c != '-' && c != ':'; // ':' for XML namespaces. 
} inline static bool p_whitespacegt(char c) { return isspace(static_cast(c)) || c == '>'; } inline static bool p_whitespaceeqgt(char c) { return isspace(static_cast(c)) || c == '=' || c == '>'; } bool HtmlParser::get_parameter(const string & param, string & value) const { map::const_iterator i = parameters.find(param); if (i == parameters.end()) return false; value = i->second; return true; } HtmlParser::HtmlParser() { // RECOLL: no need to initialize these entities, we use those from // myhtmlparse #if 0 static const struct ent { const char *n; unsigned int v; } ents[] = { #include "namedentities.h" { NULL, 0 } }; if (named_ents.empty()) { const struct ent *i = ents; while (i->n) { named_ents[string(i->n)] = i->v; ++i; } } #endif } void HtmlParser::decode_entities(string &s) { // Not used for recoll. Kept here to minimize the amount of // diffs. Almost the same code in myhtmlparse except that the // entity table directly holds the utf-8 strings instead of the // unicode positions (one less conversion). #if 0 // We need a const_iterator version of s.end() - otherwise the // find() and find_if() templates don't work... string::const_iterator amp = s.begin(), s_end = s.end(); while ((amp = find(amp, s_end, '&')) != s_end) { unsigned int val = 0; string::const_iterator end, p = amp + 1; if (p != s_end && *p == '#') { p++; if (p != s_end && (*p == 'x' || *p == 'X')) { // hex p++; end = find_if(p, s_end, p_notxdigit); sscanf(s.substr(p - s.begin(), end - p).c_str(), "%x", &val); } else { // number end = find_if(p, s_end, p_notdigit); val = atoi(s.substr(p - s.begin(), end - p).c_str()); } } else { end = find_if(p, s_end, p_notalnum); string code = s.substr(p - s.begin(), end - p); map::const_iterator i; i = named_ents.find(code); if (i != named_ents.end()) val = i->second; } if (end < s_end && *end == ';') end++; if (val) { string::size_type amp_pos = amp - s.begin(); if (val < 0x80) { s.replace(amp_pos, end - amp, 1u, char(val)); } else { // Convert unicode value val to UTF-8. char seq[4]; unsigned len = Xapian::Unicode::nonascii_to_utf8(val, seq); s.replace(amp_pos, end - amp, seq, len); } s_end = s.end(); // We've modified the string, so the iterators are no longer // valid... amp = s.begin() + amp_pos + 1; } else { amp = end; } } #endif } void HtmlParser::parse_html(const string &body) { in_script = false; parameters.clear(); string::const_iterator start = body.begin(); while (true) { // Skip through until we find an HTML tag, a comment, or the end of // document. Ignore isolated occurrences of `<' which don't start // a tag or comment. string::const_iterator p = start; while (true) { p = find(p, body.end(), '<'); if (p == body.end()) break; unsigned char ch = *(p + 1); // Tag, closing tag, or comment (or SGML declaration). if ((!in_script && isalpha(ch)) || ch == '/' || ch == '!') break; if (ch == '?') { // PHP code or XML declaration. // XML declaration is only valid at the start of the first line. // FIXME: need to deal with BOMs... if (p != body.begin() || body.size() < 20) break; // XML declaration looks something like this: // if (p[2] != 'x' || p[3] != 'm' || p[4] != 'l') break; if (strchr(" \t\r\n", p[5]) == NULL) break; string::const_iterator decl_end = find(p + 6, body.end(), '?'); if (decl_end == body.end()) break; // Default charset for XML is UTF-8. 
charset = "utf-8"; string decl(p + 6, decl_end); size_t enc = decl.find("encoding"); if (enc == string::npos) break; enc = decl.find_first_not_of(" \t\r\n", enc + 8); if (enc == string::npos || enc == decl.size()) break; if (decl[enc] != '=') break; enc = decl.find_first_not_of(" \t\r\n", enc + 1); if (enc == string::npos || enc == decl.size()) break; if (decl[enc] != '"' && decl[enc] != '\'') break; char quote = decl[enc++]; size_t enc_end = decl.find(quote, enc); if (enc != string::npos) charset = decl.substr(enc, enc_end - enc); break; } p++; } // Process text up to start of tag. if (p > start || p == body.end()) { string text = body.substr(start - body.begin(), p - start); decode_entities(text); process_text(text); } if (p == body.end()) { do_eof(); break; } start = p + 1; if (start == body.end()) break; if (*start == '!') { if (++start == body.end()) break; if (++start == body.end()) break; // comment or SGML declaration if (*(start - 1) == '-' && *start == '-') { ++start; string::const_iterator close = find(start, body.end(), '>'); // An unterminated comment swallows rest of document // (like Netscape, but unlike MSIE IIRC) if (close == body.end()) break; p = close; // look for --> while (p != body.end() && (*(p - 1) != '-' || *(p - 2) != '-')) p = find(p + 1, body.end(), '>'); if (p != body.end()) { // Check for htdig's "ignore this bit" comments. if (p - start == 15 && string(start, p - 2) == "htdig_noindex") { string::size_type i; i = body.find("", p + 1 - body.begin()); if (i == string::npos) break; start = body.begin() + i + 21; continue; } // If we found --> skip to there. start = p; } else { // Otherwise skip to the first > we found (as Netscape does). start = close; } } else { // just an SGML declaration, perhaps giving the DTD - ignore it start = find(start - 1, body.end(), '>'); if (start == body.end()) break; } ++start; } else if (*start == '?') { if (++start == body.end()) break; // PHP - swallow until ?> or EOF start = find(start + 1, body.end(), '>'); // look for ?> while (start != body.end() && *(start - 1) != '?') start = find(start + 1, body.end(), '>'); // unterminated PHP swallows rest of document (rather arbitrarily // but it avoids polluting the database when things go wrong) if (start != body.end()) ++start; } else { // opening or closing tag int closing = 0; if (*start == '/') { closing = 1; start = find_if(start + 1, body.end(), p_notwhitespace); } p = start; start = find_if(start, body.end(), p_nottag); string tag = body.substr(p - body.begin(), start - p); // convert tagname to lowercase lowercase_string(tag); if (closing) { if (!closing_tag(tag)) return; if (in_script && tag == "script") in_script = false; /* ignore any bogus parameters on closing tags */ p = find(start, body.end(), '>'); if (p == body.end()) break; start = p + 1; } else { bool empty_element = false; // FIXME: parse parameters lazily. while (start < body.end() && *start != '>') { string name, value; p = find_if(start, body.end(), p_whitespaceeqgt); size_t name_len = p - start; if (name_len == 1) { if (*start == '/' && p < body.end() && *p == '>') { // E.g. 
start = p; empty_element = true; break; } } name.assign(body, start - body.begin(), name_len); p = find_if(p, body.end(), p_notwhitespace); start = p; if (start != body.end() && *start == '=') { start = find_if(start + 1, body.end(), p_notwhitespace); p = body.end(); int quote = *start; if (quote == '"' || quote == '\'') { start++; p = find(start, body.end(), quote); } if (p == body.end()) { // unquoted or no closing quote p = find_if(start, body.end(), p_whitespacegt); } value.assign(body, start - body.begin(), p - start); start = find_if(p, body.end(), p_notwhitespace); if (!name.empty()) { // convert parameter name to lowercase lowercase_string(name); // in case of multiple entries, use the first // (as Netscape does) parameters.insert(make_pair(name, value)); } } } #if 0 cout << "<" << tag; map::const_iterator x; for (x = parameters.begin(); x != parameters.end(); x++) { cout << " " << x->first << "=\"" << x->second << "\""; } cout << ">\n"; #endif if (!opening_tag(tag)) return; parameters.clear(); if (empty_element) { if (!closing_tag(tag)) return; } // In .... 3.3. Searching on the command line There are several ways to obtain search results as a text stream, without a graphical interface: o By passing option -t to the recoll program. o By using the recollq program. o By writing a custom Python program, using the Recoll Python API. The first two methods work in the same way and accept/need the same arguments (except for the additional -t to recoll). The query to be executed is specified as command line arguments. recollq is not built by default. You can use the Makefile in the query directory to build it. This is a very simple program, and if you can program a little c++, you may find it useful to taylor its output format to your needs. Not that recollq is only really useful on systems where the Qt libraries (or even the X11 ones) are not available. Otherwise, just use recoll -t, which takes the exact same parameters and options which are described for recollq recollq has a man page (not installed by default, look in the doc/man directory). The Usage string is as follows: recollq: usage: -P: Show the date span for all the documents present in the index [-o|-a|-f] [-q] Runs a recoll query and displays result lines. Default: will interpret the argument(s) as a xesam query string query may be like: implicit AND, Exclusion, field spec: t1 -t2 title:t3 OR has priority: t1 OR t2 t3 OR t4 means (t1 OR t2) AND (t3 OR t4) Phrase: "t1 t2" (needs additional quoting on cmd line) -o Emulate the GUI simple search in ANY TERM mode -a Emulate the GUI simple search in ALL TERMS mode -f Emulate the GUI simple search in filename mode -q is just ignored (compatibility with the recoll GUI command line) Common options: -c : specify config directory, overriding $RECOLL_CONFDIR -d also dump file contents -n [first-] define the result slice. The default value for [first] is 0. Without the option, the default max count is 2000. Use n=0 for no limit -b : basic. Just output urls, no mime types or titles -Q : no result lines, just the processed query and result count -m : dump the whole document meta[] array for each result -A : output the document abstracts -S fld : sort by field -s stemlang : set stemming language to use (must exist in index...) Use -s "" to turn off stem expansion -D : sort descending -i : additional index, several can be given -e use url encoding (%xx) for urls -F : output exactly these fields for each result. 
The field values are encoded in base64, output in one line and separated by one space character. This is the recommended format for use by other programs. Use a normal query with option -m to see the field names. Sample execution: recollq 'ilur -nautique mime:text/html' Recoll query: ((((ilur:(wqf=11) OR ilurs) AND_NOT (nautique:(wqf=11) OR nautiques OR nautiqu OR nautiquement)) FILTER Ttext/html)) 4 results text/html [file:///Users/uncrypted-dockes/projets/bateaux/ilur/comptes.html] [comptes.html] 18593 bytes text/html [file:///Users/uncrypted-dockes/projets/nautique/webnautique/articles/ilur1/index.html] [Constructio... text/html [file:///Users/uncrypted-dockes/projets/pagepers/index.html] [psxtcl/writemime/recoll]... text/html [file:///Users/uncrypted-dockes/projets/bateaux/ilur/factEtCie/recu-chasse-maree.... 3.4. Path translations In some cases, the document paths stored inside the index do not match the actual ones, so that document previews and accesses will fail. This can occur in a number of circumstances: o When using multiple indexes it is a relatively common occurrence that some will actually reside on a remote volume, for exemple mounted via NFS. In this case, the paths used to access the documents on the local machine are not necessarily the same than the ones used while indexing on the remote machine. For example, /home/me may have been used as a topdirs elements while indexing, but the directory might be mounted as /net/server/home/me on the local machine. o The case may also occur with removable disks. It is perfectly possible to configure an index to live with the documents on the removable disk, but it may happen that the disk is not mounted at the same place so that the documents paths from the index are invalid. o As a last exemple, one could imagine that a big directory has been moved, but that it is currently inconvenient to run the indexer. More generally, the path translation facility may be useful whenever the documents paths seen by the indexer are not the same as the ones which should be used at query time. Recoll has a facility for rewriting access paths when extracting the data from the index. The translations can be defined for the main index and for any additional query index. In the above NFS example, Recoll could be instructed to rewrite any file:///home/me URL from the index to file:///net/server/home/me, allowing accesses from the client. The translations are defined in the ptrans configuration file, which can be edited by hand or from the GUI external indexes configuration dialog. 3.5. The query language The query language processor is activated in the GUI simple search entry when the search mode selector is set to Query Language. It can also be used with the KIO slave or the command line search. It broadly has the same capabilities as the complex search interface in the GUI. The language is based on the (seemingly defunct) Xesam user search language specification. If the results of a query language search puzzle you and you doubt what has been actually searched for, you can use the GUI Show Query link at the top of the result list to check the exact query which was finally executed by Xapian. 
Here follows a sample request that we are going to explain: author:"john doe" Beatles OR Lennon Live OR Unplugged -potatoes This would search for all documents with John Doe appearing as a phrase in the author field (exactly what this is would depend on the document type, ie: the From: header, for an email message), and containing either beatles or lennon and either live or unplugged but not potatoes (in any part of the document). An element is composed of an optional field specification, and a value, separated by a colon (the field separator is the last colon in the element). Examples: Eugenie, author:balzac, dc:title:grandet dc:title:"eugenie grandet" The colon, if present, means "contains". Xesam defines other relations, which are mostly unsupported for now (except in special cases, described further down). All elements in the search entry are normally combined with an implicit AND. It is possible to specify that elements be OR'ed instead, as in Beatles OR Lennon. The OR must be entered literally (capitals), and it has priority over the AND associations: word1 word2 OR word3 means word1 AND (word2 OR word3) not (word1 AND word2) OR word3. Explicit parenthesis are not supported. As of Recoll 1.21, you can use parentheses to group elements, which will sometimes make things clearer, and may allow expressing combinations which would have been difficult otherwise. An element preceded by a - specifies a term that should not appear. As usual, words inside quotes define a phrase (the order of words is significant), so that title:"prejudice pride" is not the same as title:prejudice title:pride, and is unlikely to find a result. Words inside phrases and capitalized words are not stem-expanded. Wildcards may be used anywhere inside a term. Specifying a wild-card on the left of a term can produce a very slow search (or even an incorrect one if the expansion is truncated because of excessive size). Also see More about wildcards. To save you some typing, recent Recoll versions (1.20 and later) interpret a comma-separated list of terms as an AND list inside the field. Use slash characters ('/') for an OR list. No white space is allowed. So author:john,lennon will search for documents with john and lennon inside the author field (in any order), and author:john/ringo would search for john or ringo. Modifiers can be set on a double-quote value, for example to specify a proximity search (unordered). See the modifier section. No space must separate the final double-quote and the modifiers value, e.g. "two one"po10 Recoll currently manages the following default fields: o title, subject or caption are synonyms which specify data to be searched for in the document title or subject. o author or from for searching the documents originators. o recipient or to for searching the documents recipients. o keyword for searching the document-specified keywords (few documents actually have any). o filename for the document's file name. This is not necessarily set for all documents: internal documents contained inside a compound one (for example an EPUB section) do not inherit the container file name any more, this was replaced by an explicit field (see next). Sub-documents can still have a specific filename, if it is implied by the document format, for example the attachment file name for an email attachment. o containerfilename. This is set for all documents, both top-level and contained sub-documents, and is always the name of the filesystem directory entry which contains the data. 
The terms from this field can only be matched by an explicit field specification (as opposed to terms from filename which are also indexed as general document content). This avoids getting matches for all the sub-documents when searching for the container file name. o ext specifies the file name extension (Ex: ext:html) Recoll 1.20 and later have a way to specify aliases for the field names, which will save typing, for example by aliasing filename to fn or containerfilename to cfn. See the section about the fields file The field syntax also supports a few field-like, but special, criteria: o dir for filtering the results on file location (Ex: dir:/home/me/somedir). -dir also works to find results not in the specified directory (release >= 1.15.8). Tilde expansion will be performed as usual (except for a bug in versions 1.19 to 1.19.11p1). Wildcards will be expanded, but please have a look at an important limitation of wildcards in path filters. Relative paths also make sense, for example, dir:share/doc would match either /usr/share/doc or /usr/local/share/doc Several dir clauses can be specified, both positive and negative. For example the following makes sense: dir:recoll dir:src -dir:utils -dir:common This would select results which have both recoll and src in the path (in any order), and which have not either utils or common. You can also use OR conjunctions with dir: clauses. A special aspect of dir clauses is that the values in the index are not transcoded to UTF-8, and never lower-cased or unaccented, but stored as binary. This means that you need to enter the values in the exact lower or upper case, and that searches for names with diacritics may sometimes be impossible because of character set conversion issues. Non-ASCII UNIX file paths are an unending source of trouble and are best avoided. You need to use double-quotes around the path value if it contains space characters. o size for filtering the results on file size. Example: size<10000. You can use <, > or = as operators. You can specify a range like the following: size>100 size<1000. The usual k/K, m/M, g/G, t/T can be used as (decimal) multipliers. Ex: size>1k to search for files bigger than 1000 bytes. o date for searching or filtering on dates. The syntax for the argument is based on the ISO8601 standard for dates and time intervals. Only dates are supported, no times. The general syntax is 2 elements separated by a / character. Each element can be a date or a period of time. Periods are specified as PnYnMnD. The n numbers are the respective numbers of years, months or days, any of which may be missing. Dates are specified as YYYY-MM-DD. The days and months parts may be missing. If the / is present but an element is missing, the missing element is interpreted as the lowest or highest date in the index. Examples: o 2001-03-01/2002-05-01 the basic syntax for an interval of dates. o 2001-03-01/P1Y2M the same specified with a period. o 2001/ from the beginning of 2001 to the latest date in the index. o 2001 the whole year of 2001 o P2D/ means 2 days ago up to now if there are no documents with dates in the future. o /2003 all documents from 2003 or older. Periods can also be specified with small letters (ie: p2y). o mime or format for specifying the MIME type. This one is quite special because you can specify several values which will be OR'ed (the normal default for the language is AND). Ex: mime:text/plain mime:text/html. 
Specifying an explicit boolean operator before a mime specification is not supported and will produce strange results. You can filter out certain types by using negation (-mime:some/type), and you can use wildcards in the value (mime:text/*). Note that mime is the ONLY field with an OR default. You do need to use OR with ext terms for example. o type or rclcat for specifying the category (as in text/media/presentation/etc.). The classification of MIME types in categories is defined in the Recoll configuration (mimeconf), and can be modified or extended. The default category names are those which permit filtering results in the main GUI screen. Categories are OR'ed like MIME types above. This can't be negated with - either. The document input handlers used while indexing have the possibility to create other fields with arbitrary names, and aliases may be defined in the configuration, so that the exact field search possibilities may be different for you if someone took care of the customisation. 3.5.1. Modifiers Some characters are recognized as search modifiers when found immediately after the closing double quote of a phrase, as in "some term"modifierchars. The actual "phrase" can be a single term of course. Supported modifiers: o l can be used to turn off stemming (mostly makes sense with p because stemming is off by default for phrases). o o can be used to specify a "slack" for phrase and proximity searches: the number of additional terms that may be found between the specified ones. If o is followed by an integer number, this is the slack, else the default is 10. o p can be used to turn the default phrase search into a proximity one (unordered). Example:"order any in"p o C will turn on case sensitivity (if the index supports it). o D will turn on diacritics sensitivity (if the index supports it). o A weight can be specified for a query element by specifying a decimal value at the start of the modifiers. Example: "Important"2.5. 3.6. Search case and diacritics sensitivity For Recoll versions 1.18 and later, and when working with a raw index (not the default), searches can be made sensitive to character case and diacritics. How this happens is controlled by configuration variables and what search data is entered. The general default is that searches are insensitive to case and diacritics. An entry of resume will match any of Resume, RESUME, resume, Resume etc. Two configuration variables can automate switching on sensitivity: autodiacsens If this is set, search sensitivity to diacritics will be turned on as soon as an accented character exists in a search term. When the variable is set to true, resume will start a diacritics-unsensitive search, but resume will be matched exactly. The default value is false. autocasesens If this is set, search sensitivity to character case will be turned on as soon as an upper-case character exists in a search term except for the first one. When the variable is set to true, us or Us will start a diacritics-unsensitive search, but US will be matched exactly. The default value is true (contrary to autodiacsens). As in the past, capitalizing the first letter of a word will turn off its stem expansion and have no effect on case-sensitivity. You can also explicitely activate case and diacritics sensitivity by using modifiers with the query language. C will make the term case-sensitive, and D will make it diacritics-sensitive. Examples: "us"C will search for the term us exactly (Us will not be a match). 
"resume"D will search for the term resume exactly (resume will not be a match). When either case or diacritics sensitivity is activated, stem expansion is turned off. Having both does not make much sense. 3.7. Anchored searches and wildcards Some special characters are interpreted by Recoll in search strings to expand or specialize the search. Wildcards expand a root term in controlled ways. Anchor characters can restrict a search to succeed only if the match is found at or near the beginning of the document or one of its fields. 3.7.1. More about wildcards All words entered in Recoll search fields will be processed for wildcard expansion before the request is finally executed. The wildcard characters are: o * which matches 0 or more characters. o ? which matches a single character. o [] which allow defining sets of characters to be matched (ex: [abc] matches a single character which may be 'a' or 'b' or 'c', [0-9] matches any number. You should be aware of a few things when using wildcards. o Using a wildcard character at the beginning of a word can make for a slow search because Recoll will have to scan the whole index term list to find the matches. However, this is much less a problem for field searches, and queries like author:*@domain.com can sometimes be very useful. o For Recoll version 18 only, when working with a raw index (preserving character case and diacritics), the literal part of a wildcard expression will be matched exactly for case and diacritics. This is not true any more for versions 19 and later. o Using a * at the end of a word can produce more matches than you would think, and strange search results. You can use the term explorer tool to check what completions exist for a given term. You can also see exactly what search was performed by clicking on the link at the top of the result list. In general, for natural language terms, stem expansion will produce better results than an ending * (stem expansion is turned off when any wildcard character appears in the term). 3.7.1.1. Wildcards and path filtering Due to the way that Recoll processes wildcards inside dir path filtering clauses, they will have a multiplicative effect on the query size. A clause containg wildcards in several paths elements, like, for example, dir:/home/me/*/*/docdir, will almost certainly fail if your indexed tree is of any realistic size. Depending on the case, you may be able to work around the issue by specifying the paths elements more narrowly, with a constant prefix, or by using 2 separate dir: clauses instead of multiple wildcards, as in dir:/home/me dir:docdir. The latter query is not equivalent to the initial one because it does not specify a number of directory levels, but that's the best we can do (and it may be actually more useful in some cases). 3.7.2. Anchored searches Two characters are used to specify that a search hit should occur at the beginning or at the end of the text. ^ at the beginning of a term or phrase constrains the search to happen at the start, $ at the end force it to happen at the end. As this function is implemented as a phrase search it is possible to specify a maximum distance at which the hit should occur, either through the controls of the advanced search panel, or using the query language, for example, as in: "^someterm"o10 which would force someterm to be found within 10 terms of the start of the text. This can be combined with a field search as in somefield:"^someterm"o10 or somefield:someterm$. 
This feature can also be used with an actual phrase search, but in this case, the distance applies to the whole phrase and anchor, so that, for example, bla bla my unexpected term at the beginning of the text would be a match for "^my term"o5. Anchored searches can be very useful for searches inside somewhat structured documents like scientific articles, in case explicit metadata has not been supplied (a most frequent case), for example for looking for matches inside the abstract or the list of authors (which occur at the top of the document). 3.8. Desktop integration Being independant of the desktop type has its drawbacks: Recoll desktop integration is minimal. However there are a few tools available: o The KDE KIO Slave was described in a previous section. o If you use a recent version of Ubuntu Linux, you may find the Ubuntu Unity Lens module useful. o There is also an independantly developed Krunner plugin. Here follow a few other things that may help. 3.8.1. Hotkeying recoll It is surprisingly convenient to be able to show or hide the Recoll GUI with a single keystroke. Recoll comes with a small Python script, based on the libwnck window manager interface library, which will allow you to do just this. The detailed instructions are on this wiki page. 3.8.2. The KDE Kicker Recoll applet This is probably obsolete now. Anyway: The Recoll source tree contains the source code to the recoll_applet, a small application derived from the find_applet. This can be used to add a small Recoll launcher to the KDE panel. The applet is not automatically built with the main Recoll programs, nor is it included with the main source distribution (because the KDE build boilerplate makes it relatively big). You can download its source from the recoll.org download page. Use the omnipotent configure;make;make install incantation to build and install. You can then add the applet to the panel by right-clicking the panel and choosing the Add applet entry. The recoll_applet has a small text window where you can type a Recoll query (in query language form), and an icon which can be used to restrict the search to certain types of files. It is quite primitive, and launches a new recoll GUI instance every time (even if it is already running). You may find it useful anyway. Chapter 4. Programming interface Recoll has an Application Programming Interface, usable both for indexing and searching, currently accessible from the Python language. Another less radical way to extend the application is to write input handlers for new types of documents. The processing of metadata attributes for documents (fields) is highly configurable. 4.1. Writing a document input handler Terminology The small programs or pieces of code which handle the processing of the different document types for Recoll used to be called filters, which is still reflected in the name of the directory which holds them and many configuration variables. They were named this way because one of their primary functions is to filter out the formatting directives and keep the text content. However these modules may have other behaviours, and the term input handler is now progressively substituted in the documentation. filter is still used in many places though. Recoll input handlers cooperate to translate from the multitude of input document formats, simple ones as opendocument, acrobat), or compound ones such as Zip or Email, into the final Recoll indexing input format, which is plain text. Most input handlers are executable programs or scripts. 
A few handlers are coded in C++ and live inside recollindex. This latter kind will not be described here. There are currently (1.18 and since 1.13) two kinds of external executable input handlers: o Simple exec handlers run once and exit. They can be bare programs like antiword, or scripts using other programs. They are very simple to write, because they just need to print the converted document to the standard output. Their output can be plain text or HTML. HTML is usually preferred because it can store metadata fields and it allows preserving some of the formatting for the GUI preview. o Multiple execm handlers can process multiple files (sparing the process startup time which can be very significant), or multiple documents per file (e.g.: for zip or chm files). They communicate with the indexer through a simple protocol, but are nevertheless a bit more complicated than the older kind. Most of new handlers are written in Python, using a common module to handle the protocol. There is an exception, rclimg which is written in Perl. The subdocuments output by these handlers can be directly indexable (text or HTML), or they can be other simple or compound documents that will need to be processed by another handler. In both cases, handlers deal with regular file system files, and can process either a single document, or a linear list of documents in each file. Recoll is responsible for performing up to date checks, deal with more complex embedding and other upper level issues. A simple handler returning a document in text/plain format, can transfer no metadata to the indexer. Generic metadata, like document size or modification date, will be gathered and stored by the indexer. Handlers that produce text/html format can return an arbitrary amount of metadata inside HTML meta tags. These will be processed according to the directives found in the fields configuration file. The handlers that can handle multiple documents per file return a single piece of data to identify each document inside the file. This piece of data, called an ipath element will be sent back by Recoll to extract the document at query time, for previewing, or for creating a temporary file to be opened by a viewer. The following section describes the simple handlers, and the next one gives a few explanations about the execm ones. You could conceivably write a simple handler with only the elements in the manual. This will not be the case for the other ones, for which you will have to look at the code. 4.1.1. Simple input handlers Recoll simple handlers are usually shell-scripts, but this is in no way necessary. Extracting the text from the native format is the difficult part. Outputting the format expected by Recoll is trivial. Happily enough, most document formats have translators or text extractors which can be called from the handler. In some cases the output of the translating program is completely appropriate, and no intermediate shell-script is needed. Input handlers are called with a single argument which is the source file name. They should output the result to stdout. When writing a handler, you should decide if it will output plain text or HTML. Plain text is simpler, but you will not be able to add metadata or vary the output character encoding (this will be defined in a configuration file). Additionally, some formatting may be easier to preserve when previewing HTML. Actually the deciding factor is metadata: Recoll has a way to extract metadata from the HTML header and use it for field searches.. 
The RECOLL_FILTER_FORPREVIEW environment variable (values yes, no) tells the handler if the operation is for indexing or previewing. Some handlers use this to output a slightly different format, for example stripping uninteresting repeated keywords (ie: Subject: for email) when indexing. This is not essential. You should look at one of the simple handlers, for example rclps for a starting point. Don't forget to make your handler executable before testing ! 4.1.2. "Multiple" handlers If you can program and want to write an execm handler, it should not be too difficult to make sense of one of the existing modules. For example, look at rclzip which uses Zip file paths as identifiers (ipath), and rclics, which uses an integer index. Also have a look at the comments inside the internfile/mh_execm.h file and possibly at the corresponding module. execm handlers sometimes need to make a choice for the nature of the ipath elements that they use in communication with the indexer. Here are a few guidelines: o Use ASCII or UTF-8 (if the identifier is an integer print it, for example, like printf %d would do). o If at all possible, the data should make some kind of sense when printed to a log file to help with debugging. o Recoll uses a colon (:) as a separator to store a complex path internally (for deeper embedding). Colons inside the ipath elements output by a handler will be escaped, but would be a bad choice as a handler-specific separator (mostly, again, for debugging issues). In any case, the main goal is that it should be easy for the handler to extract the target document, given the file name and the ipath element. execm handlers will also produce a document with a null ipath element. Depending on the type of document, this may have some associated data (e.g. the body of an email message), or none (typical for an archive file). If it is empty, this document will be useful anyway for some operations, as the parent of the actual data documents. 4.1.3. Telling Recoll about the handler There are two elements that link a file to the handler which should process it: the association of file to MIME type and the association of a MIME type with a handler. The association of files to MIME types is mostly based on name suffixes. The types are defined inside the mimemap file. Example: .doc = application/msword If no suffix association is found for the file name, Recoll will try to execute the file -i command to determine a MIME type. The association of file types to handlers is performed in the mimeconf file. A sample will probably be of better help than a long explanation: [index] application/msword = exec antiword -t -i 1 -m UTF-8;\ mimetype = text/plain ; charset=utf-8 application/ogg = exec rclogg text/rtf = exec unrtf --nopict --html; charset=iso-8859-1; mimetype=text/html application/x-chm = execm rclchm The fragment specifies that: o application/msword files are processed by executing the antiword program, which outputs text/plain encoded in utf-8. o application/ogg files are processed by the rclogg script, with default output type (text/html, with encoding specified in the header, or utf-8 by default). o text/rtf is processed by unrtf, which outputs text/html. The iso-8859-1 encoding is specified because it is not the utf-8 default, and not output by unrtf in the HTML header section. o application/x-chm is processed by a persistant handler. This is determined by the execm keyword. 4.1.4. 
Input handler HTML output The output HTML could be very minimal like the following example: Some text content You should take care to escape some characters inside the text by transforming them into appropriate entities. At the very minimum, "&" should be transformed into "&", "<" should be transformed into "<". This is not always properly done by translating programs which output HTML, and of course never by those which output plain text. When encapsulating plain text in an HTML body, the display of a preview may be improved by enclosing the text inside
   <pre> tags.

   The character set needs to be specified in the header. It does not need to
   be UTF-8 (Recoll will take care of translating it), but it must be
   accurate for good results.

    Recoll will process meta tags inside the header as possible document
    field candidates. Document fields can be processed by the indexer in
    different ways, for searching or for display inside query results. This
    is described in a following section.

   By default, the indexer will process the standard header fields if they
   are present: title, meta/description, and meta/keywords are both indexed
   and stored for query-time display.

   A predefined non-standard meta tag will also be processed by Recoll
   without further configuration: if a date tag is present and has the right
   format, it will be used as the document date (for display and sorting), in
   preference to the file modification date. The date format should be as
    follows:

 <meta name="date" content="YYYY-mm-dd HH:MM:SS">
 or
 <meta name="date" content="YYYY-mm-ddTHH:MM:SS">

    Example:

 <meta name="date" content="2014-03-17 10:35:42">

   Input handlers also have the possibility to "invent" field names. This
    should also be output as meta tags:

 <meta name="somefield" content="Some textual data">

   You can embed HTML markup inside the content of custom fields, for
   improving the display inside result lists. In this case, add a (wildly
   non-standard) markup attribute to tell Recoll that the value is HTML and
    should not be escaped for display.

 <meta name="somefield" markup="html" content="Some <i>textual</i> data">

   As written above, the processing of fields is described in a further
   section.
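
    To tie these elements together, here is a hedged sketch (not one of the
    standard Recoll handlers) of a trivial simple handler written in Python.
    It assumes that the input file already contains plain UTF-8 text, and
    just wraps it into the expected minimal HTML, with a character set
    declaration and an invented meta field:

 #!/usr/bin/env python
 # Illustrative skeleton of a simple input handler: read the file named on
 # the command line and print minimal HTML to the standard output. A real
 # handler would run a format-specific conversion instead of reading the
 # raw bytes.
 import sys

 data = open(sys.argv[1], 'rb').read()
 # Minimal escaping of HTML special characters in the text content.
 data = data.replace('&', '&amp;').replace('<', '&lt;')
 print '<html><head>'
 print '<meta http-equiv="Content-Type" content="text/html; charset=UTF-8">'
 print '<meta name="somefield" content="Some textual data">'
 print '</head><body><pre>'
 print data
 print '</pre></body></html>'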

  4.1.5. Page numbers

   The indexer will interpret ^L characters in the handler output as
   indicating page breaks, and will record them. At query time, this allows
   starting a viewer on the right page for a hit or a snippet. Currently,
   only the PDF, Postscript and DVI handlers generate page breaks.
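
    For example, in a handler written in Python, a page break is just a form
    feed character embedded in the output text (a sketch):

 import sys
 # '\f' is the ^L (form feed) character: it marks a page break in the
 # handler output.
 sys.stdout.write('Text of page one.\f')
 sys.stdout.write('Text of page two.\n')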

4.2. Field data processing

   Fields are named pieces of information in or about documents, like title,
   author, abstract.

   The field values for documents can appear in several ways during indexing:
   either output by input handlers as meta fields in the HTML header section,
   or extracted from file extended attributes, or added as attributes of the
    Doc object when using the API, or again synthesized internally by Recoll.

   The Recoll query language allows searching for text in a specific field.

   Recoll defines a number of default fields. Additional ones can be output
   by handlers, and described in the fields configuration file.

   Fields can be:

     o indexed, meaning that their terms are separately stored in inverted
       lists (with a specific prefix), and that a field-specific search is
       possible.

     o stored, meaning that their value is recorded in the index data record
       for the document, and can be returned and displayed with search
       results.

    A field can be either or both indexed and stored. This and other aspects
    of field handling are defined inside the fields configuration file.

   The sequence of events for field processing is as follows:

     o During indexing, recollindex scans all meta fields in HTML documents
       (most document types are transformed into HTML at some point). It
       compares the name for each element to the configuration defining what
        should be done with fields (the fields file).

     o If the name for the meta element matches one for a field that should
       be indexed, the contents are processed and the terms are entered into
       the index with the prefix defined in the fields file.

     o If the name for the meta element matches one for a field that should
       be stored, the content of the element is stored with the document data
       record, from which it can be extracted and displayed at query time.

     o At query time, if a field search is performed, the index prefix is
       computed and the match is only performed against appropriately
       prefixed terms in the index.

     o At query time, the field can be displayed inside the result list by
       using the appropriate directive in the definition of the result list
       paragraph format. All fields are displayed on the fields screen of the
       preview window (which you can reach through the right-click menu).
        This is independent of whether the search which produced the results
        used the field or not.

   You can find more information in the section about the fields file, or in
   comments inside the file.

   You can also have a look at the example on the Wiki, detailing how one
    could add a page count field to PDF documents for display inside result
    lists.

4.3. API

  4.3.1. Interface elements

    A few elements in the interface are specific and need an explanation.

   udi

           An udi (unique document identifier) identifies a document. Because
           of limitations inside the index engine, it is restricted in length
           (to 200 bytes), which is why a regular URI cannot be used. The
            structure and contents of the udi are defined by the application
           and opaque to the index engine. For example, the internal file
           system indexer uses the complete document path (file path +
           internal path), truncated to length, the suppressed part being
           replaced by a hash value.

   ipath

           This data value (set as a field in the Doc object) is stored,
           along with the URL, but not indexed by Recoll. Its contents are
           not interpreted, and its use is up to the application. For
           example, the Recoll internal file system indexer stores the part
           of the document access path internal to the container file (ipath
           in this case is a list of subdocument sequential numbers). url and
           ipath are returned in every search result and permit access to the
           original document.

   Stored and indexed fields

           The fields file inside the Recoll configuration defines which
           document fields are either "indexed" (searchable), "stored"
           (retrievable with search results), or both.

    Data for an external indexer should be stored in a separate index, not
    in the one used by the Recoll internal file system indexer (except if the
    latter is not used at all). The reason is that the main document indexer
    purge pass would remove all the other indexer's documents, as they were
    not seen during indexing. The main indexer's documents would also
    probably be a problem for the external indexer's purge operation.

  4.3.2. Python interface

    4.3.2.1. Introduction

   Recoll versions after 1.11 define a Python programming interface, both for
   searching and indexing. The indexing portion has seen little use, but the
   searching one is used in the Recoll Ubuntu Unity Lens and Recoll Web UI.

   The API is inspired by the Python database API specification. There were
   two major changes in recent Recoll versions:

     o The basis for the Recoll API changed from Python database API version
       1.0 (Recoll versions up to 1.18.1), to version 2.0 (Recoll 1.18.2 and
       later).
     o The recoll module became a package (with an internal recoll module) as
       of Recoll version 1.19, in order to add more functions. For existing
       code, this only changes the way the interface must be imported.

   We will mostly describe the new API and package structure here. A
   paragraph at the end of this section will explain a few differences and
   ways to write code compatible with both versions.

   The Python interface can be found in the source package, under
   python/recoll.

   The python/recoll/ directory contains the usual setup.py. After
   configuring the main Recoll code, you can use the script to build and
   install the Python module:

             cd recoll-xxx/python/recoll
             python setup.py build
             python setup.py install
          

   The normal Recoll installer installs the Python API along with the main
   code.

   When installing from a repository, and depending on the distribution, the
   Python API can sometimes be found in a separate package.

    4.3.2.2. Recoll package

   The recoll package contains two modules:

     o The recoll module contains functions and classes used to query (or
       update) the index.

     o The rclextract module contains functions and classes used to access
       document data.

    4.3.2.3. The recoll module

      Functions

   connect(confdir=None, extra_dbs=None, writable = False)
           The connect() function connects to one or several Recoll index(es)
           and returns a Db object.
              o confdir may specify a configuration directory. The usual
                defaults apply.
              o extra_dbs is a list of additional indexes (Xapian
                directories).
              o writable decides if we can index new data through this
                connection.
           This call initializes the recoll module, and it should always be
           performed before any other call or object creation.
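
    As a minimal sketch, a connection to the default index plus one
    additional index could be opened as follows (the extra Xapian directory
    path is purely illustrative):

 from recoll import recoll

 # Open the default configuration's index, plus one additional Xapian
 # index directory (illustrative path).
 db = recoll.connect(extra_dbs=['/home/me/.someotherconf/xapiandb'])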

      Classes

        The Db class

   A Db object is created by a connect() call and holds a connection to a
   Recoll index.

   Methods

   Db.close()
           Closes the connection. You can't do anything with the Db object
           after this.

   Db.query(), Db.cursor()
           These aliases return a blank Query object for this index.

   Db.setAbstractParams(maxchars, contextwords)
           Set the parameters used to build snippets (sets of keywords in
           context text fragments). maxchars defines the maximum total size
           of the abstract. contextwords defines how many terms are shown
           around the keyword.

   Db.termMatch(match_type, expr, field='', maxlen=-1, casesens=False,
   diacsens=False, lang='english')
           Expand an expression against the index term list. Performs the
           basic function from the GUI term explorer tool. match_type can be
           either of wildcard, regexp or stem. Returns a list of terms
           expanded from the input expression.
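
    For example, wildcard expansion against the index term list could be
    performed as in the following sketch (the expression is arbitrary):

 # Expand a wildcard expression against the index terms, much like the
 # GUI term explorer tool does.
 for term in db.termMatch('wildcard', 'recol*'):
     print term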

        The Query class

   A Query object (equivalent to a cursor in the Python DB API) is created by
   a Db.query() call. It is used to execute index searches.

   Methods

   Query.sortby(fieldname, ascending=True)
           Sort results by fieldname, in ascending or descending order. Must
           be called before executing the search.
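
    For example (the field name is only an illustration; any field set up
    for sorting can be used):

 query = db.query()
 # Sort by modification date, most recent first. This must be set before
 # the execute() call (described below).
 query.sortby('mtime', ascending=False)
 nres = query.execute('some search terms')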

   Query.execute(query_string, stemming=1, stemlang="english")
           Starts a search for query_string, a Recoll search language string.

   Query.executesd(SearchData)
           Starts a search for the query defined by the SearchData object.

   Query.fetchmany(size=query.arraysize)
           Fetches the next Doc objects in the current search results, and
           returns them as an array of the required size, which is by default
           the value of the arraysize data member.

   Query.fetchone()
           Fetches the next Doc object from the current search results.

   Query.close()
           Closes the query. The object is unusable after the call.

   Query.scroll(value, mode='relative')
           Adjusts the position in the current result set. mode can be
           relative or absolute.

   Query.getgroups()
           Retrieves the expanded query terms as a list of pairs. Meaningful
            only after executexx. In each pair, the first entry is a list of
           user terms (of size one for simple terms, or more for group and
           phrase clauses), the second a list of query terms as derived from
           the user terms and used in the Xapian Query.

   Query.getxquery()
           Return the Xapian query description as a Unicode string.
           Meaningful only after executexx.

   Query.highlight(text, ishtml = 0, methods = object)
            Will insert <span class="rclmatch"> and </span> tags around the match
           areas in the input text and return the modified text. ishtml can
           be set to indicate that the input text is HTML and that HTML
           special characters should not be escaped. methods if set should be
           an object with methods startMatch(i) and endMatch() which will be
            called for each match and should return a begin and end tag.

    Query.makedocabstract(doc, methods = object)
           Create a snippets abstract for doc (a Doc object) by selecting
           text around the match terms. If methods is set, will also perform
           highlighting. See the highlight method.
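
    As a sketch, assuming that a search has already been executed, a custom
    methods object could be used as follows (class and tag values are
    arbitrary):

 class Highlighter:
     # Called for each match: return the begin and end tags to insert.
     def startMatch(self, idx):
         return '<b>'
     def endMatch(self):
         return '</b>'

 doc = query.fetchone()
 abstract = query.makedocabstract(doc, methods=Highlighter())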

   Query.__iter__() and Query.next()
           So that things like for doc in query: will work.

   Data descriptors

   Query.arraysize
           Default number of records processed by fetchmany (r/w).

   Query.rowcount
           Number of records returned by the last execute.

   Query.rownumber
           Next index to be fetched from results. Normally increments after
           each fetchone() call, but can be set/reset before the call to
           effect seeking (equivalent to using scroll()). Starts at 0.
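
    As a sketch, results could be processed in batches with fetchmany(),
    assuming (as in the Python DB API) that an empty list is returned once
    the results are exhausted:

 query.arraysize = 10
 docs = query.fetchmany()
 while docs:
     for doc in docs:
         print doc.url
     docs = query.fetchmany()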

        The Doc class

   A Doc object contains index data for a given document. The data is
   extracted from the index when searching, or set by the indexer program
   when updating. The Doc object has many attributes to be read or set by its
   user. It matches exactly the Rcl::Doc C++ object. Some of the attributes
   are predefined, but, especially when indexing, others can be set, the name
   of which will be processed as field names by the indexing configuration.
   Inputs can be specified as Unicode or strings. Outputs are Unicode
   objects. All dates are specified as Unix timestamps, printed as strings.
   Please refer to the rcldb/rcldoc.h C++ file for a description of the
   predefined attributes.

   At query time, only the fields that are defined as stored either by
   default or in the fields configuration file will be meaningful in the Doc
    object. In particular, this will not be the case for the document text. See
   the rclextract module for accessing document contents.

   Methods

   get(key), [] operator
           Retrieve the named doc attribute

   getbinurl()
           Retrieve the URL in byte array format (no transcoding), for use as
           parameter to a system call.

   items()
           Return a dictionary of doc object keys/values

   keys()
            Return the list of doc object keys (attribute names).
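
    For example, the stored fields for a result document could be listed as
    in the following sketch:

 doc = query.fetchone()
 for k in doc.keys():
     print k, ':', doc.get(k)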

        The SearchData class

   A SearchData object allows building a query by combining clauses, for
   execution by Query.executesd(). It can be used in replacement of the query
   language approach. The interface is going to change a little, so no
   detailed doc for now...

   Methods

   addclause(type='and'|'or'|'excl'|'phrase'|'near'|'sub', qstring=string,
   slack=0, field='', stemming=1, subSearch=SearchData)
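
    A hedged sketch, assuming that a SearchData object can be created without
    arguments, combining two clauses with the default AND conjunction:

 sd = recoll.SearchData()
 sd.addclause(type='and', qstring='grandet', field='title')
 sd.addclause(type='and', qstring='balzac')
 query = db.query()
 nres = query.executesd(sd)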

    4.3.2.4. The rclextract module

   Index queries do not provide document content (only a partial and
    imprecise reconstruction is performed to show the snippets text). In order
   to access the actual document data, the data extraction part of the
   indexing process must be performed (subdocument access and format
   translation). This is not trivial in general. The rclextract module
   currently provides a single class which can be used to access the data
   content for result documents.

      Classes

        The Extractor class

   Methods

   Extractor(doc)
           An Extractor object is built from a Doc object, output from a
           query.

   Extractor.textextract(ipath)
           Extract document defined by ipath and return a Doc object. The
           doc.text field has the document text converted to either
           text/plain or text/html according to doc.mimetype. The typical use
           would be as follows:

 from recoll import rclextract
 qdoc = query.fetchone()
 extractor = rclextract.Extractor(qdoc)
 doc = extractor.textextract(qdoc.ipath)
 # use doc.text, e.g. for previewing

   Extractor.idoctofile(ipath, targetmtype, outfile='')
           Extracts document into an output file, which can be given
           explicitly or will be created as a temporary file to be deleted by
           the caller. Typical use:

 qdoc = query.fetchone()
 extractor = rclextract.Extractor(qdoc)
 filename = extractor.idoctofile(qdoc.ipath, qdoc.mimetype)

    4.3.2.5. Example code

   The following sample would query the index with a user language string.
   See the python/samples directory inside the Recoll source for other
   examples. The recollgui subdirectory has a very embryonic GUI which
   demonstrates the highlighting and data extraction functions.

 #!/usr/bin/env python

 from recoll import recoll

 db = recoll.connect()
 db.setAbstractParams(maxchars=80, contextwords=4)

 query = db.query()
 nres = query.execute("some user question")
 print "Result count: ", nres
 if nres > 5:
     nres = 5
 for i in range(nres):
     doc = query.fetchone()
     print "Result #%d" % (query.rownumber,)
     for k in ("title", "size"):
         print k, ":", getattr(doc, k).encode('utf-8')
     abs = db.makeDocAbstract(doc, query).encode('utf-8')
     print abs
     print



    4.3.2.6. Compatibility with the previous version

   The following code fragments can be used to ensure that code can run with
   both the old and the new API (as long as it does not use the new abilities
   of the new API of course).

   Adapting to the new package structure:


 try:
     from recoll import recoll
     from recoll import rclextract
     hasextract = True
 except:
     import recoll
     hasextract = False


   Adapting to the change of nature of the next Query member. The same test
   can be used to choose to use the scroll() method (new) or set the next
   value (old).


        rownum = query.next if type(query.next) == int else \
                  query.rownumber


Chapter 5. Installation and configuration

5.1. Installing a binary copy

   Recoll binary copies are always distributed as regular packages for your
   system. They can be obtained either through the system's normal software
   distribution framework (e.g. Debian/Ubuntu apt, FreeBSD ports, etc.), or
   from some type of "backports" repository providing versions newer than the
   standard ones, or found on the Recoll WEB site in some cases.

   There used to exist another form of binary install, as pre-compiled source
   trees, but these are just less convenient than the packages and don't
   exist any more.

    The package management tools will usually automatically deal with hard
    dependencies for packages obtained from a proper package repository. You
    will have to deal with them by hand for downloaded packages (for example,
    when dpkg complains about missing dependencies).

   In all cases, you will have to check or install supporting applications
   for the file types that you want to index beyond those that are natively
   processed by Recoll (text, HTML, email files, and a few others).

    You may also want to have a look at the configuration section (but this
   may not be necessary for a quick test with default parameters). Most
   parameters can be more conveniently set from the GUI interface.

5.2. Supporting packages

   Recoll uses external applications to index some file types. You need to
   install them for the file types that you wish to have indexed (these are
    optional run-time dependencies: none is needed for building or running
    Recoll, except for indexing their specific file type).

   After an indexing pass, the commands that were found missing can be
   displayed from the recoll File menu. The list is stored in the missing
   text file inside the configuration directory.

   A list of common file types which need external commands follows. Many of
   the handlers need the iconv command, which is not always listed as a
    dependency.

   Please note that, due to the relatively dynamic nature of this
   information, the most up to date version is now kept on
   http://www.recoll.org/features.html along with links to the home pages or
   best source/patches pages, and misc tips. The list below is not updated
   often and may be quite stale.

   For many Linux distributions, most of the commands listed can be installed
   from the package repositories. However, the packages are sometimes
   outdated, or not the best version for Recoll, so you should take a look at
   http://www.recoll.org/features.html if a file type is important to you.

   As of Recoll release 1.14, a number of XML-based formats that were handled
   by ad hoc handler code now use the xsltproc command, which usually comes
   with libxslt. These are: abiword, fb2 (ebooks), kword, openoffice, svg.

   Now for the list:

     o Openoffice files need unzip and xsltproc.

     o PDF files need pdftotext which is part of Poppler (usually comes with
       the poppler-utils package). Avoid the original one from Xpdf.

     o Postscript files need pstotext. The original version has an issue with
        shell special characters in file names, which is corrected in recent
        packages. See http://www.recoll.org/features.html for more detail.

     o MS Word needs antiword. It is also useful to have wvWare installed as
        it may be used as a fallback for some files which antiword does not
       handle.

     o MS Excel and PowerPoint are processed by internal Python handlers.

     o MS Open XML (docx) needs xsltproc.

     o Wordperfect files need wpd2html from the libwpd (or libwpd-tools on
       Ubuntu) package.

     o RTF files need unrtf, which, in its older versions, has much trouble
       with non-western character sets. Many Linux distributions carry
       outdated unrtf versions. Check http://www.recoll.org/features.html for
       details.

     o TeX files need untex or detex. Check
       http://www.recoll.org/features.html for sources if it's not packaged
       for your distribution.

     o dvi files need dvips.

     o djvu files need djvutxt and djvused from the DjVuLibre package.

     o Audio files: Recoll releases 1.14 and later use a single Python
       handler based on mutagen for all audio file types.

     o Pictures: Recoll uses the Exiftool Perl package to extract tag
       information. Most image file formats are supported. Note that there
       may not be much interest in indexing the technical tags (image size,
       aperture, etc.). This is only of interest if you store personal tags
       or textual descriptions inside the image files.

     o chm: files in Microsoft help format need Python and the pychm module
       (which needs chmlib).

     o ICS: up to Recoll 1.13, iCalendar files need Python and the icalendar
       module. icalendar is not needed for newer versions, which use internal
       code.

     o Zip archives need Python (and the standard zipfile module).

     o Rar archives need Python, the rarfile Python module and the unrar
       utility.

     o Midi karaoke files need Python and the Midi module.

     o Konqueror webarchive format needs Python (uses the Tarfile module).

     o Mimehtml web archive format (support is based on the email handler,
       which introduces some mild weirdness, but it is still usable).

   Text, HTML, email folders, and Scribus files are processed internally. The
   lyx command is used to index LyX files. Many handlers need iconv and the
   standard sed and awk.

5.3. Building from source

  5.3.1. Prerequisites

   If you can install any or all of the following through the package manager
   for your system, all the better. Qt in particular is a very big piece of
   software, but you will most probably be able to find a binary package.

   You may have to compile Xapian but this is easy.

   The shopping list:

     o C++ compiler. Up to Recoll version 1.13.04, its absence can manifest
       itself by strange messages about a missing iconv_open.

     o Development files for Xapian core.

  Important

       If you are building Xapian for an older CPU (before Pentium 4 or
       Athlon 64), you need to add the --disable-sse flag to the configure
        command. Otherwise, all Xapian applications will crash with an
        illegal instruction error.

     o Development files for Qt 4. Recoll has not been tested with Qt 5 yet.
       Recoll 1.15.9 was the last version to support Qt 3. If you do not want
       to install or build the Qt Webkit module, Recoll has a configuration
       option to disable its use (see further).

     o Development files for X11 and zlib.

     o You may also need libiconv. On Linux systems, the iconv interface is
       part of libc and you should not need to do anything special.

   Check the Recoll download page for up to date version information.

  5.3.2. Building

   Recoll has been built on Linux, FreeBSD, Mac OS X, and Solaris. Most
   versions released after 2005 should be OK, and maybe some older ones too
   (Solaris 8 is OK). If you build on another system and need to modify
   things, I would very much welcome patches.

   Configure options: 

     o --without-aspell will disable the code for phonetic matching of search
       terms.

     o --with-fam or --with-inotify will enable the code for real time
       indexing. Inotify support is enabled by default on recent Linux
       systems.

     o --with-qzeitgeist will enable sending Zeitgeist events about the
       visited search results, and needs the qzeitgeist package.

     o --disable-webkit is available from version 1.17 to implement the
       result list with a Qt QTextBrowser instead of a WebKit widget if you
       do not want to or cannot depend on the latter.

     o --disable-idxthreads is available from version 1.19 to suppress
       multithreading inside the indexing process. You can also use the
       run-time configuration to restrict recollindex to using a single
       thread, but the compile-time option may disable a few more unused
       locks. This only applies to the use of multithreading for the core
       index processing (data input). The Recoll monitor mode always uses at
       least two threads of execution.

     o --disable-python-module will avoid building the Python module.

     o --disable-xattr will prevent fetching data from file extended
       attributes. Beyond a few standard attributes, fetching extended
       attributes data is only useful if some application stores data in
       them, and it also needs some simple configuration (see the comments in
       the fields configuration file).

     o --enable-camelcase will enable splitting camelCase words. This is not
       enabled by default as it has the unfortunate side-effect of making
       some phrase searches quite confusing: e.g., "MySQL manual" would be
       matched by "MySQL manual" and "my sql manual" but not "mysql manual"
       (only inside phrase searches).

     o --with-file-command Specify the version of the 'file' command to use
       (e.g.: --with-file-command=/usr/local/bin/file). Can be useful to use
       the GNU version on systems where the native one works poorly.

     o --disable-qtgui Disable the Qt interface. Will allow building the
       indexer and the command line search program in absence of a Qt
       environment.

     o --disable-x11mon Disable X11 connection monitoring inside recollindex.
       Together with --disable-qtgui, this allows building recoll without Qt
       and X11.

     o --disable-pic will compile Recoll with position-dependent code. This
       is incompatible with building the KIO or the Python or PHP extensions,
       but might yield very marginally faster code.

     o Of course the usual autoconf configure options, like --prefix apply.
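
   As an illustration only (the options are taken from the list above, the
   prefix is arbitrary), a build without the GUI and X11 monitoring could be
   configured as follows:

          ./configure --prefix=/usr/local --disable-qtgui --disable-x11mon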

   Normal procedure:

         cd recoll-xxx
         ./configure
         make
          (practice the usual hardship-repelling invocations)
      

   There is little auto-configuration. The configure script will mainly link
   one of the system-specific files in the mk directory to mk/sysconf. If
   your system is not known yet, it will tell you as much, and you may want
   to manually copy and modify one of the existing files (the new file name
   should be the output of uname -s).

    5.3.2.1. Building on Solaris

   We did not test building the GUI on Solaris for recent versions. You will
   need at least Qt 4.4. There are some hints on an old web site page; they
   may still be valid.

   Someone did test the 1.19 indexer and Python module build; they do work,
   with a few minor glitches. Be sure to use the GNU versions of make and
   install.

  5.3.3. Installation

   Either type make install or execute recollinstall prefix, in the root of
   the source tree. This will copy the commands to prefix/bin and the sample
   configuration files, scripts and other shared data to prefix/share/recoll.

   If the installation prefix given to recollinstall is different from either
   the system default or the value which was specified when executing
   configure (as in configure --prefix /some/path), you will have to set the
   RECOLL_DATADIR environment variable to indicate where the shared data is
   to be found (e.g. for (ba)sh: export
   RECOLL_DATADIR=/some/path/share/recoll).
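
   For example (an illustrative sketch only, with /some/path standing for a
   prefix different from the one given to configure), the commands for a
   (ba)sh shell might be:

          make
          recollinstall /some/path
          export RECOLL_DATADIR=/some/path/share/recoll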

   You can then proceed to configuration.

5.4. Configuration overview

   Most of the parameters specific to the recoll GUI are set through the
   Preferences menu and stored in the standard Qt place
   ($HOME/.config/Recoll.org/recoll.conf). You probably do not want to edit
   this by hand.

   Recoll indexing options are set inside text configuration files located in
   a configuration directory. There can be several such directories, each of
   which defines the parameters for one index.

   The configuration files can be edited by hand or through the Index
   configuration dialog (Preferences menu). The GUI tool will try to respect
   your formatting and comments as much as possible, so it is quite possible
   to use both ways.

   The most accurate documentation for the configuration parameters is given
   by comments inside the default files, and we will just give a general
   overview here.

   By default, for each index, there are two sets of configuration files.
   System-wide configuration files are kept in a directory named like
   /usr/[local/]share/recoll/examples, and define default values, shared by
   all indexes. For each index, a parallel set of files defines the
   customized parameters.

   In addition (as of Recoll version 1.19.7), it is possible to specify two
   additional configuration directories which will be stacked before and
   after the user configuration directory. These are defined by the
   RECOLL_CONFTOP and RECOLL_CONFMID environment variables. Values from
   configuration files inside the top directory will override user ones, and
   values from configuration files inside the middle directory will override
   system ones and be overridden by user ones. These two variables may be of
   use to applications which augment Recoll functionality, and need to add
   configuration data without disturbing the user's files. Please note that
   the two, currently single, values will probably be interpreted as
   colon-separated lists in the future: do not use colon characters inside
   the directory paths.

   The default location of the configuration is the .recoll directory in your
   home directory. Most people will only use this directory.

   This location can be changed, or others can be added with the
   RECOLL_CONFDIR environment variable or the -c option parameter to recoll
   and recollindex.

   If the .recoll directory does not exist when recoll or recollindex are
   started, it will be created with a set of empty configuration files.
   recoll will give you a chance to edit the configuration file before
   starting indexing. recollindex will proceed immediately. To avoid
   mistakes, the automatic directory creation will only occur for the default
   location, not if -c or RECOLL_CONFDIR were used (in the latter cases, you
   will have to create the directory).
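
   For example (a minimal sketch, the directory name is arbitrary), an
   additional configuration could be set up and used as follows:

          mkdir ~/.recoll-extra
          # create or edit ~/.recoll-extra/recoll.conf as needed, then:
          recollindex -c ~/.recoll-extra
          recoll -c ~/.recoll-extra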

   All configuration files share the same format. For example, a short
   extract of the main configuration file might look as follows:

         # Space-separated list of directories to index.
         topdirs =  ~/docs /usr/share/doc

         [~/somedirectory-with-utf8-txt-files]
         defaultcharset = utf-8
        

   There are three kinds of lines:

     o Comment (starts with #) or empty.

     o Parameter assignment (name = value).

     o Section definition ([somedirname]).

   Depending on the type of configuration file, section definitions either
   separate groups of parameters or allow redefining some parameters for a
   directory sub-tree. They stay in effect until another section definition,
   or the end of file, is encountered. Some of the parameters used for
   indexing are looked up hierarchically from the current directory location
   upwards. Not all parameters can be meaningfully redefined; this is
   specified for each one in the next section.

   When found at the beginning of a file path, the tilde character (~) is
   expanded to the name of the user's home directory, as a shell would do.

   White space is used for separation inside lists. List elements with
   embedded spaces can be quoted using double-quotes.
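
   For instance (illustrative paths only), a list element containing a space
   would be written like this:

          topdirs = ~/docs "~/My Documents"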

   Encoding issues. Most of the configuration parameters are plain ASCII. Two
   particular sets of values may cause encoding issues:

     o File path parameters may contain non-ascii characters and should use
       the exact same byte values as found in the file system directory.
       Usually, this means that the configuration file should use the system
       default locale encoding.

     o The unac_except_trans parameter should be encoded in UTF-8. If your
       system locale is not UTF-8, and you need to also specify non-ascii
       file paths, this poses a difficulty because common text editors cannot
       handle multiple encodings in a single file. In this relatively
       unlikely case, you can edit the configuration file as two separate
       text files with appropriate encodings, and concatenate them to create
       the complete configuration.

  5.4.1. Environment variables

   RECOLL_CONFDIR

           Defines the main configuration directory.

   RECOLL_TMPDIR, TMPDIR

           Locations for temporary files, in this order of priority. The
           default if none of these is set is to use /tmp. Big temporary
           files may be created during indexing, mostly for decompressing,
           and also for processing, e.g. email attachments.

   RECOLL_CONFTOP, RECOLL_CONFMID

           Allow adding configuration directories with priorities below and
           above the user directory (see above the Configuration overview
           section for details).

   RECOLL_EXTRA_DBS, RECOLL_ACTIVE_EXTRA_DBS

            Help for setting up external indexes. See the section about
            external indexes for explanations.

   RECOLL_DATADIR

            Defines a replacement for the default location of Recoll data
            files (normally found in, e.g., /usr/share/recoll).

   RECOLL_FILTERSDIR

            Defines a replacement for the default location of Recoll filters
            (normally found in, e.g., /usr/share/recoll/filters).

   ASPELL_PROG

           aspell program to use for creating the spelling dictionary. The
           result has to be compatible with the libaspell which Recoll is
           using.

  5.4.2. The main configuration file, recoll.conf

   recoll.conf is the main configuration file. It defines things like what to
   index (top directories and things to ignore), and the default character
   set to use for document types which do not specify it internally.

   The default configuration will index your home directory. If this is not
   appropriate, start recoll to create a blank configuration, click Cancel,
   and edit the configuration file before restarting the command. This will
   start the initial indexing, which may take some time.

   Most of the following parameters can be changed from the Index
   Configuration menu in the recoll interface. Some can only be set by
   editing the configuration file.

    5.4.2.1. Parameters affecting what documents we index:

   topdirs

           Specifies the list of directories or files to index (recursively
           for directories). You can use symbolic links as elements of this
           list. See the followLinks option about following symbolic links
           found under the top elements (not followed by default).

   skippedNames

            A space-separated list of wildcard patterns for names of files or
           directories that should be completely ignored. The list defined in
           the default file is:

 skippedNames = #* bin CVS  Cache cache* caughtspam  tmp .thumbnails .svn \
                *~ .beagle .git .hg .bzr loop.ps .xsession-errors \
                .recoll* xapiandb recollrc recoll.conf

           The list can be redefined at any sub-directory in the indexed
           area.

           The top-level directories are not affected by this list (that is,
           a directory in topdirs might match and would still be indexed).

           The list in the default configuration does not exclude hidden
           directories (names beginning with a dot), which means that it may
           index quite a few things that you do not want. On the other hand,
           email user agents like thunderbird usually store messages in
           hidden directories, and you probably want this indexed. One
           possible solution is to have .* in skippedNames, and add things
           like ~/.thunderbird or ~/.evolution in topdirs.

           Not even the file names are indexed for patterns in this list. See
           the noContentSuffixes variable for an alternative approach which
           indexes the file names.
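
            As an illustration of the hidden-directories approach described
            above (the values are only an example), the configuration could
            contain:

  topdirs = ~/docs ~/.thunderbird
  skippedNames = .* #* bin CVS tmp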

   noContentSuffixes

           This is a list of file name endings (not wildcard expressions, nor
           dot-delimited suffixes). Only the names of matching files will be
           indexed (no attempt at MIME type identification, no decompression,
           no content indexing). This can be redefined for subdirectories,
           and edited from the GUI. The default value is:

 noContentSuffixes = .md5 .map \
        .o .lib .dll .a .sys .exe .com \
        .mpp .mpt .vsd \
            .img .img.gz .img.bz2 .img.xz .image .image.gz .image.bz2 .image.xz \
        .dat .bak .rdf .log.gz .log .db .msf .pid \
        ,v ~ #

   skippedPaths and daemSkippedPaths

           A space-separated list of patterns for paths of files or
           directories that should be skipped. There is no default in the
            sample configuration file, but the code always adds the
            configuration and database directories to the list.

           skippedPaths is used both by batch and real time indexing.
           daemSkippedPaths can be used to specify things that should be
           indexed at startup, but not monitored.

           Example of use for skipping text files only in a specific
           directory:

 skippedPaths = ~/somedir/*.txt
              

   skippedPathsFnmPathname

           The values in the *skippedPaths variables are matched by default
           with fnmatch(3), with the FNM_PATHNAME flag. This means that '/'
            characters must be matched explicitly. You can set
           skippedPathsFnmPathname to 0 to disable the use of FNM_PATHNAME
           (meaning that /*/dir3 will match /dir1/dir2/dir3).

   zipSkippedNames

           A space-separated list of patterns for names of files or
           directories that should be ignored inside zip archives. This is
           used directly by the zip handler, and has a function similar to
            skippedNames, but works independently. Can be redefined for
           filesystem subdirectories. For versions up to 1.19, you will need
           to update the Zip handler and install a supplementary Python
           module. The details are described on the Recoll wiki.

   followLinks

           Specifies if the indexer should follow symbolic links while
           walking the file tree. The default is to ignore symbolic links to
           avoid multiple indexing of linked files. No effort is made to
           avoid duplication when this option is set to true. This option can
           be set individually for each of the topdirs members by using
            sections. It cannot be changed below the topdirs level.
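
            For example (the directory name is hypothetical), to follow links
            only under one of the topdirs members:

  [~/shared]
  followLinks = 1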

   indexedmimetypes

           Recoll normally indexes any file which it knows how to read. This
           list lets you restrict the indexed MIME types to what you specify.
           If the variable is unspecified or the list empty (the default),
           all supported types are processed. Can be redefined for
           subdirectories.

   excludedmimetypes

           This list lets you exclude some MIME types from indexing. Can be
           redefined for subdirectories.
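
            For example (the values are purely illustrative), to exclude JPEG
            images under a specific subtree only:

  [~/tmp]
  excludedmimetypes = image/jpeg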

   compressedfilemaxkbs

           Size limit for compressed (.gz or .bz2) files. These need to be
           decompressed in a temporary directory for identification, which
           can be very wasteful if 'uninteresting' big compressed files are
           present. Negative means no limit, 0 means no processing of any
           compressed file. Defaults to -1.

   textfilemaxmbs

           Maximum size for text files. Very big text files are often
           uninteresting logs. Set to -1 to disable (default 20MB).

   textfilepagekbs

           If set to other than -1, text files will be indexed as multiple
           documents of the given page size. This may be useful if you do
           want to index very big text files as it will both reduce memory
           usage at index time and help with loading data to the preview
           window. A size of a few megabytes would seem reasonable (default:
           1MB).

   membermaxkbs

           This defines the maximum size in kilobytes for an archive member
           (zip, tar or rar at the moment). Bigger entries will be skipped.

   indexallfilenames

           Recoll indexes file names in a special section of the database to
           allow specific file names searches using wild cards. This
           parameter decides if file name indexing is performed only for
           files with MIME types that would qualify them for full text
           indexing, or for all files inside the selected subtrees,
           independently of MIME type.

   usesystemfilecommand

           Decide if we execute a system command (file -i by default) as a
           final step for determining the MIME type for a file (the main
           procedure uses suffix associations as defined in the mimemap
           file). This can be useful for files with suffix-less names, but it
           will also cause the indexing of many bogus "text" files.

   systemfilecommand

            Command to use for MIME type determination if usesystemfilecommand
            is set. Recent versions of xdg-mime sometimes
           work better than file.
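
            For example (assuming, as for the default file -i, that the file
            name will be appended to the given command):

  usesystemfilecommand = 1
  systemfilecommand = xdg-mime query filetype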

   processwebqueue

           If this is set, process the directory where Web browser plugins
           copy visited pages for indexing.

   webqueuedir

           The path to the web indexing queue. This is hard-coded in the
           Firefox plugin as ~/.recollweb/ToIndex so there should be no need
           to change it.

    5.4.2.2. Parameters affecting how we generate terms:

   Changing some of these parameters will imply a full reindex. Also, when
   using multiple indexes, it may not make sense to search indexes that don't
   share the values for these parameters, because they usually affect both
   search and index operations.

   indexStripChars

           Decide if we strip characters of diacritics and convert them to
           lower-case before terms are indexed. If we don't, searches
           sensitive to case and diacritics can be performed, but the index
           will be bigger, and some marginal weirdness may sometimes occur.
           The default is a stripped index (indexStripChars = 1) for now.
           When using multiple indexes for a search, this parameter must be
           defined identically for all. Changing the value implies an index
           reset.

   maxTermExpand

           Maximum expansion count for a single term (e.g.: when using
           wildcards). The default of 10000 is reasonable and will avoid
           queries that appear frozen while the engine is walking the term
           list.

   maxXapianClauses

           Maximum number of elementary clauses we can add to a single Xapian
           query. In some cases, the result of term expansion can be
           multiplicative, and we want to avoid using excessive memory. The
           default of 100 000 should be both high enough in most cases and
           compatible with current typical hardware configurations.

   nonumbers

            If this is set to true, no terms will be generated for numbers.
            For example, "123", "1.5e6", and "192.168.1.4" would not be
            indexed ("value123" would still be). Numbers are often quite
            interesting to search for, and this should probably not be set
            except for special situations, e.g. scientific documents with
            huge amounts of numbers in them. This can only be set for a whole
            index, not for a subtree.

   nocjk

            If this is set to true, the specific East Asian (Chinese,
            Japanese, Korean) character/word splitting is turned off. This
            will save a small amount of CPU if you have no CJK documents. If
            your document base
           does include such text but you are not interested in searching it,
           setting nocjk may be a significant time and space saver.

   cjkngramlen

           This lets you adjust the size of n-grams used for indexing CJK
           text. The default value of 2 is probably appropriate in most
           cases. A value of 3 would allow more precision and efficiency on
           longer words, but the index will be approximately twice as large.

   indexstemminglanguages

           A list of languages for which the stem expansion databases will be
           built. See recollindex(1) or use the recollindex -l command for
           possible values. You can add a stem expansion database for a
           different language by using recollindex -s, but it will be deleted
           during the next indexing. Only languages listed in the
           configuration file are permanent.
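
            Example (the language names must match those listed by
            recollindex -l):

  indexstemminglanguages = english french german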

   defaultcharset

           The name of the character set used for files that do not contain a
            character set definition (e.g. plain text files). This can be
            redefined for any sub-directory. If it is not set at all, the
            character set used is the one defined by the NLS environment
            (LC_ALL, LC_CTYPE, LANG), or iso8859-1 if nothing is set.

   unac_except_trans

           This is a list of characters, encoded in UTF-8, which should be
           handled specially when converting text to unaccented lowercase.
           For example, in Swedish, the letter a with diaeresis has full
           alphabet citizenship and should not be turned into an a. Each
           element in the space-separated list has the special character as
           first element and the translation following. The handling of both
           the lowercase and upper-case versions of a character should be
            specified, as membership in the list turns off both standard
            accent and case processing. Example for Swedish:

 unac_except_trans =  aaaa AAaa a:a: A:a: o:o: O:o:
            

            Note that the translation is not limited to a single character;
            you could very well have something like u:ue in the list.

           The default value set for unac_except_trans can't be listed here
           because I have trouble with SGML and UTF-8, but it only contains
            ligature decompositions: German ss, oe, ae, fi, fl.

            This parameter can't be defined for subdirectories; it is global,
            because there is no way to do otherwise when querying. If you have
           document sets which would need different values, you will have to
           index and query them separately.

   maildefcharset

           This can be used to define the default character set specifically
           for email messages which don't specify it. This is mainly useful
            for readpst (libpst) dumps, which are UTF-8 but do not say so.

   localfields

           This allows setting fields for all documents under a given
           directory. Typical usage would be to set an "rclaptg" field, to be
           used in mimeview to select a specific viewer. If several fields
           are to be set, they should be separated with a semi-colon (';')
            character, which there is currently no way to escape. Also note
            the initial semi-colon. Example:
            localfields = ;rclaptg=gnus;other=val. You would then select the
            specific viewer with a mimetype|tag=... entry in mimeview (see
            the example below).
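
            In configuration file form, the example above would look like
            this (the directory name is hypothetical):

  [~/mail/gnus]
  localfields = ;rclaptg=gnus;other=val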

   testmodifusemtime

           If true, use mtime instead of default ctime to determine if a file
           has been modified (in addition to size, which is always used).
           Setting this can reduce re-indexing on systems where extended
           attributes are modified (by some other application), but not
           indexed (changing extended attributes only affects ctime). Notes:

              o This may prevent detection of change in some marginal file
                rename cases (the target would need to have the same size and
                mtime).

              o You should probably also set noxattrfields to 1 in this case,
                except if you still prefer to perform xattr indexing, for
                example if the local file update pattern makes it of value
                (as in general, there is a risk for pure extended attributes
                updates without file modification to go undetected).

           Perform a full index reset after changing the value of this
           parameter.

   noxattrfields

           Recoll versions 1.19 and later automatically translate file
           extended attributes into document fields (to be processed
           according to the parameters from the fields file). Setting this
           variable to 1 will disable the behaviour.

   metadatacmds

           This allows executing external commands for each file and storing
           the output in Recoll document fields. This could be used for
           example to index external tag data. The value is a list of field
            names and commands; don't forget the initial semi-colon. Example:

 [/some/area/of/the/fs]
 metadatacmds = ; tags = tmsu tags %f; otherfield = somecmd -xx %f
                

           As a specially disgusting hack brought by Recoll 1.19.7, if a
           "field name" begins with rclmulti, the data returned by the
           command is expected to contain multiple field values, in
           configuration file format. This allows setting several fields by
           executing a single command. Example:

 metadatacmds = ; rclmulti1 = somecmd %f
                

           If somecmd returns data in the form of:

 field1 = value1
 field2 = value for field2
                

           field1 and field2 will be set inside the document metadata.

    5.4.2.3. Parameters affecting where and how we store things:

   dbdir

           The name of the Xapian data directory. It will be created if
           needed when the index is initialized. If this is not an absolute
           path, it will be interpreted relative to the configuration
           directory. The value can have embedded spaces but starting or
           trailing spaces will be trimmed. You cannot use quotes here.

   idxstatusfile

           The name of the scratch file where the indexer process updates its
           status. Default: idxstatus.txt inside the configuration directory.

   maxfsoccuppc

           Maximum file system occupation before we stop indexing. The value
           is a percentage, corresponding to what the "Capacity" df output
           column shows. The default value is 0, meaning no checking.

   mboxcachedir

           The directory where mbox message offsets cache files are held.
           This is normally $RECOLL_CONFDIR/mboxcache, but it may be useful
           to share a directory between different configurations.

   mboxcacheminmbs

           The minimum mbox file size over which we cache the offsets. There
           is really no sense in caching offsets for small files. The default
           is 5 MB.

   webcachedir

           This is only used by the web browser plugin indexing code, and
           defines where the cache for visited pages will live. Default:
           $RECOLL_CONFDIR/webcache

   webcachemaxmbs

           This is only used by the web browser plugin indexing code, and
           defines the maximum size for the web page cache. Default: 40 MB.
           Quite unfortunately, this is only taken into account when creating
           the cache file. You need to delete the file for a change to be
           taken into account.

   idxflushmb

            Threshold (megabytes of new text data) at which we flush the
            index from memory to disk. Setting this can help control memory
            usage. A value
           of 0 means no explicit flushing, letting Xapian use its own
           default, which is flushing every 10000 (or XAPIAN_FLUSH_THRESHOLD)
           documents, which gives little memory usage control, as memory
           usage also depends on average document size. The default value is
           10, and it is probably a bit low. If your system usually has free
           memory, you can try higher values between 20 and 80. In my
           experience, values beyond 100 are always counterproductive.
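
            Example (an arbitrary value within the range suggested above):

  idxflushmb = 50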

    5.4.2.4. Parameters affecting multithread processing

   The Recoll indexing process recollindex can use multiple threads to speed
   up indexing on multiprocessor systems. The work done to index files is
   divided in several stages and some of the stages can be executed by
   multiple threads. The stages are:

    1. File system walking: this is always performed by the main thread.
    2. File conversion and data extraction.
    3. Text processing (splitting, stemming, etc.)
    4. Xapian index update.

   You can also read a longer document about the transformation of Recoll
   indexing to multithreading.

   The threads configuration is controlled by two configuration file
   parameters.

   thrQSizes

           This variable defines the job input queues configuration. There
           are three possible queues for stages 2, 3 and 4, and this
           parameter should give the queue depth for each stage (three
           integer values). If a value of -1 is used for a given stage, no
           queue is used, and the thread will go on performing the next
            stage. In practice, deep queues have not been shown to increase
           performance. A value of 0 for the first queue tells Recoll to
           perform autoconfiguration (no need for the two other values in
           this case) - this is the default configuration.

   thrTCounts

           This defines the number of threads used for each stage. If a value
           of -1 is used for one of the queue depths, the corresponding
           thread count is ignored. It makes no sense to use a value other
           than 1 for the last stage because updating the Xapian index is
           necessarily single-threaded (and protected by a mutex).

   The following example would use three queues (of depth 2), and 4 threads
   for converting source documents, 2 for processing their text, and one to
   update the index. This was tested to be the best configuration on the test
   system (a quad-processor machine with multiple disks).

 thrQSizes = 2 2 2
 thrTCounts =  4 2 1

   The following example would use a single queue, and the complete
   processing for each document would be performed by a single thread
   (several documents will still be processed in parallel in most cases). The
   threads will use mutual exclusion when entering the index update stage. In
   practice, the performance would generally be close to the previous case,
   but worse in certain cases (e.g. a Zip archive would be processed purely
   sequentially), so the previous approach is preferred. YMMV... The last two
   values for thrTCounts are ignored.

 thrQSizes = 2 -1 -1
 thrTCounts =  6 1 1

   The following example would disable multithreading. Indexing will be
   performed by a single thread.

 thrQSizes = -1 -1 -1

    5.4.2.5. Miscellaneous parameters:

   autodiacsens

            If the index is not stripped, decide if we automatically trigger
            diacritics sensitivity if the search term has accented characters
            (not in unac_except_trans). Otherwise you need to use the query
            language and the D modifier to specify diacritics sensitivity.
            Default is no.

   autocasesens

            If the index is not stripped, decide if we automatically trigger
            character case sensitivity if the search term has upper-case
            characters in any but the first position. Otherwise you need to
            use the query language and the C modifier to specify
            character-case sensitivity. Default is yes.

   loglevel,daemloglevel

           Verbosity level for recoll and recollindex. A value of 4 lists
           quite a lot of debug/information messages. 2 only lists errors.
            The daemloglevel variant is specific to the indexing monitor
            daemon.

   logfilename, daemlogfilename

           Where the messages should go. 'stderr' can be used as a special
            value, and is the default. The daemlogfilename variant is specific
            to the indexing monitor daemon.

   checkneedretryindexscript

           This defines the name for a command executed by recollindex when
           starting indexing. If the exit status of the command is 0,
           recollindex retries to index all files which previously could not
           be indexed because of data extraction errors. The default value is
           a script which checks if any of the common bin directories have
           changed (indicating that a helper program may have been
           installed).

   mondelaypatterns

            This allows specifying wildcard path patterns (processed with
            fnmatch(3), with no flag set) to match files which change too
            often and for which a delay should be observed before
            re-indexing. This is a
           space-separated list, each entry being a pattern and a time in
           seconds, separated by a colon. You can use double quotes if a path
           entry contains white space. Example:

 mondelaypatterns = *.log:20 "this one has spaces*:10"
              

   monixinterval

           Minimum interval (seconds) for processing the indexing queue. The
           real time monitor does not process each event when it comes in,
           but will wait this time for the queue to accumulate to diminish
           overhead and in order to aggregate multiple events to the same
            file. Default: 30 seconds.

   monauxinterval

           Period (in seconds) at which the real time monitor will regenerate
           the auxiliary databases (spelling, stemming) if needed. The
           default is one hour.

   monioniceclass, monioniceclassdata

           These allow defining the ionice class and data used by the indexer
           (default class 3, no data).

   filtermaxseconds

           Maximum handler execution time, after which it is aborted. Some
           postscript programs just loop...

   filtermaxmbytes

           Recoll 1.20.7 and later. Maximum handler memory utilisation. This
           uses setrlimit(RLIMIT_AS) on most systems (total virtual memory
           space size limit). Some programs may start with 500 MBytes of
           mapped shared libraries, so take this into account when choosing a
           value. The default is a liberal 2000MB.

   filtersdir

           A directory to search for the external input handler scripts used
           to index some types of files. The value should not be changed,
           except if you want to modify one of the default scripts. The value
           can be redefined for any sub-directory.

   iconsdir

           The name of the directory where recoll result list icons are
           stored. You can change this if you want different images.

   idxabsmlen

           Recoll stores an abstract for each indexed file inside the
           database. The text can come from an actual 'abstract' section in
           the document or will just be the beginning of the document. It is
           stored in the index so that it can be displayed inside the result
           lists without decoding the original file. The idxabsmlen parameter
           defines the size of the stored abstract. The default value is 250
           bytes. The search interface gives you the choice to display this
           stored text or a synthetic abstract built by extracting text
           around the search terms. If you always prefer the synthetic
           abstract, you can reduce this value and save a little space.

   idxmetastoredlen

           Maximum stored length for metadata fields. This does not affect
           indexing (the whole field is processed anyway), just the amount of
           data stored in the index for the purpose of displaying fields
           inside result lists or previews. The default value is 150 bytes
           which may be too low if you have custom fields.

   aspellLanguage

           Language definitions to use when creating the aspell dictionary.
           The value must match a set of aspell language definition files.
           You can type "aspell config" to see where these are installed
           (look for data-dir). The default if the variable is not set is to
           use your desktop national language environment to guess the value.

   noaspell

           If this is set, the aspell dictionary generation is turned off.
           Useful for cases where you don't need the functionality or when it
           is unusable because aspell crashes during dictionary generation.

   mhmboxquirks

            This allows defining location-related quirks for the mailbox
           handler. Currently only the tbird flag is defined, and it should
           be set for directories which hold Thunderbird data, as their
           folder format is weird.

  5.4.3. The fields file

   This file contains information about dynamic fields handling in Recoll.
   Some very basic fields have hard-wired behaviour, and, mostly, you should
   not change the original data inside the fields file. But you can create
   custom fields fitting your data and handle them just as if they were
   native ones.

   The fields file has several sections, which each define an aspect of
   fields processing. Quite often, you'll have to modify several sections to
   obtain the desired behaviour.

   We will only give a short description here; you should refer to the
   comments inside the default file for more detailed information.

   Field names should be lowercase alphabetic ASCII.

   [prefixes]

           A field becomes indexed (searchable) by having a prefix defined in
           this section.

   [stored]

           A field becomes stored (displayable inside results) by having its
           name listed in this section (typically with an empty value).

   [aliases]

           This section defines lists of synonyms for the canonical names
            used inside the [prefixes] and [stored] sections.

   [queryaliases]

            This section also defines aliases for the canonical field names,
            with the difference that the substitution will only be used at
            query time, avoiding any possibility that the value would pick up
            random metadata from documents.

   handler-specific sections

           Some input handlers may need specific configuration for handling
           fields. Only the email message handler currently has such a
           section (named [mail]). It allows indexing arbitrary email headers
           in addition to the ones indexed by default. Other such sections
           may appear in the future.

   Here follows a small example of a personal fields file. This would extract
   a specific email header and use it as a searchable field, with data
   displayable inside result lists. (Side note: as the email handler does no
   decoding on the values, only plain ascii headers can be indexed, and only
   the first occurrence will be used for headers that occur several times).

 [prefixes]
 # Index mailmytag contents (with the given prefix)
 mailmytag = XMTAG

 [stored]
 # Store mailmytag inside the document data record (so that it can be
 # displayed - as %(mailmytag) - in result lists).
 mailmytag =

 [queryaliases]
 filename = fn
 containerfilename = cfn

 [mail]
 # Extract the X-My-Tag mail header, and use it internally with the
 # mailmytag field name
 x-my-tag = mailmytag

    5.4.3.1. Extended attributes in the fields file

   Recoll versions 1.19 and later process user extended file attributes as
   document fields by default.

   Attributes are processed as fields of the same name, after removing the
   user prefix on Linux.

   The [xattrtofields] section of the fields file allows specifying
   translations from extended attributes names to Recoll field names. An
   empty translation disables use of the corresponding attribute data.
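
   A hypothetical example (both the attribute and the field names are made up
   for the illustration): the first line maps an extended attribute to the
   Recoll keywords field, the second one disables use of another attribute.

  [xattrtofields]
  mytag = keywords
  uselessattr =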

  5.4.4. The mimemap file

   mimemap specifies the file name extension to MIME type mappings.

   For file names without an extension, or with an unknown one, the system's
   file -i command will be executed to determine the MIME type (this can be
   switched off inside the main configuration file).

   The mappings can be specified on a per-subtree basis, which may be useful
   in some cases. Example: gaim logs have a .txt extension but should be
   handled specially, which is possible because they are usually all located
   in one place.
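
   A sketch of such a per-subtree mapping (the directory and the second MIME
   type name are made up for the example; as shown in the examples section
   below, MIME type names can be chosen freely as long as mimeconf and
   mimeview know about them):

  .txt = text/plain
  [~/gaim-logs]
  .txt = application/x-gaimlog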

   The recoll_noindex mimemap variable has been moved to recoll.conf and
   renamed to noContentSuffixes, while keeping the same function, as of
   Recoll version 1.21. For older Recoll versions, see the documentation for
   noContentSuffixes but use recoll_noindex in mimemap.

  5.4.5. The mimeconf file

   mimeconf specifies how the different MIME types are handled for indexing,
   and which icons are displayed in the recoll result lists.

   Changing the parameters in the [index] section is probably not a good idea
   except if you are a Recoll developer.

   The [icons] section allows you to change the icons which are displayed by
   recoll in the result lists. The values are the basenames of the PNG images
   inside the iconsdir directory (specified in recoll.conf).

  5.4.6. The mimeview file

   mimeview specifies which programs are started when you click on an Open
   link in a result list. For example: HTML is normally displayed using
   firefox, but you may prefer Konqueror; or your openoffice.org program
   might be named oofice instead of openoffice, etc.

   Changes to this file can be done by direct editing, or through the recoll
   GUI preferences dialog.

   If Use desktop preferences to choose document editor is checked in the
   Recoll GUI preferences, all mimeview entries will be ignored except the
   one labelled application/x-all (which is set to use xdg-open by default).

   In this case, the xallexcepts top level variable defines a list of MIME
   type exceptions which will be processed according to the local entries
   instead of being passed to the desktop. This is so that specific Recoll
   options such as a page number or a search string can be passed to
   applications that support them, such as the evince viewer.

   As for the other configuration files, the normal usage is to have a
   mimeview inside your own configuration directory, with just the
   non-default entries, which will override those from the central
   configuration file.

   All viewer definition entries must be placed under a [view] section.

   The keys in the file are normally MIME types. You can add an application
   tag to specialize the choice for an area of the filesystem (using a
   localfields specification in recoll.conf). The syntax for the key is
   mimetype|tag, as in the example below.
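
   For example (the viewer commands are hypothetical, and the gnus tag reuses
   the localfields example from the recoll.conf section):

  [view]
  text/html = firefox %u
  text/html|gnus = some-other-viewer %u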

   The nouncompforviewmts entry, (placed at the top level, outside of the
   [view] section), holds a list of MIME types that should not be
   uncompressed before starting the viewer (if they are found compressed, ie:
   mydoc.doc.gz).

   The right side of each assignment holds a command to be executed for
   opening the file. The following substitutions are performed:

     o %D. Document date

     o %f. File name. This may be the name of a temporary file if it was
       necessary to create one (ie: to extract a subdocument from a
       container).

     o %i. Internal path, for subdocuments of containers. The format depends
       on the container type. If this appears in the command line, Recoll
       will not create a temporary file to extract the subdocument, expecting
       the called application (possibly a script) to be able to handle it.

     o %M. MIME type

     o %p. Page index. Only significant for a subset of document types,
       currently only PDF, Postscript and DVI files. Can be used to start the
       editor at the right page for a match or snippet.

     o %s. Search term. The value will only be set for documents with indexed
       page numbers (ie: PDF). The value will be one of the matched search
       terms. It would allow pre-setting the value in the "Find" entry inside
       Evince for example, for easy highlighting of the term.

     o %u. Url.

   In addition to the predefined values above, all strings like %(fieldname)
   will be replaced by the value of the field named fieldname for the
   document. This could be used in combination with field customisation to
   help with opening the document.
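
   For example (a hypothetical viewer accepting page and search-term
   options), an entry in the [view] section could look like this:

  application/pdf = myviewer --page %p --find %s %f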

  5.4.7. The ptrans file

   ptrans specifies query-time path translations. These can be useful in
   multiple cases.

   The file has a section for any index which needs translations, either the
   main one or additional query indexes. The sections are named with the
   Xapian index directory names. No slash character should exist at the end
   of the paths (all comparisons are textual). An example should make things
   sufficiently clear:

           [/home/me/.recoll/xapiandb]
           /this/directory/moved = /to/this/place

           [/path/to/additional/xapiandb]
           /server/volume1/docdir = /net/server/volume1/docdir
           /server/volume2/docdir = /net/server/volume2/docdir
        

  5.4.8. Examples of configuration adjustments

     5.4.8.1. Adding an external viewer for a non-indexed type

   Imagine that you have some kind of file which does not have indexable
   content, but for which you would like to have a functional Open link in
   the result list (when found by file name). The file names end in .blob and
   can be displayed by application blobviewer.

   You need two entries in the configuration files for this to work:

     o In $RECOLL_CONFDIR/mimemap (typically ~/.recoll/mimemap), add the
       following line:

 .blob = application/x-blobapp

       Note that the MIME type is made up here, and you could call it
       diesel/oil just the same.

     o In $RECOLL_CONFDIR/mimeview under the [view] section, add:

 application/x-blobapp = blobviewer %f

        We are supposing that blobviewer wants a file name parameter here;
        you would use %u if it liked URLs better.

   If you just wanted to change the application used by Recoll to display a
   MIME type which it already knows, you would just need to edit mimeview.
   The entries you add in your personal file override those in the central
   configuration, which you do not need to alter. mimeview can also be
   modified from the GUI.

    5.4.8.2. Adding indexing support for a new file type

   Let us now imagine that the above .blob files actually contain indexable
   text and that you know how to extract it with a command line program.
   Getting Recoll to index the files is easy. You need to perform the above
   alteration, and also to add data to the mimeconf file (typically in
   ~/.recoll/mimeconf):

     o Under the [index] section, add the following line (more about the
       rclblob indexing script later):

 application/x-blobapp = exec rclblob

     o Under the [icons] section, you should choose an icon to be displayed
       for the files inside the result lists. Icons are normally 64x64 pixels
       PNG files which live in /usr/[local/]share/recoll/images.

     o Under the [categories] section, you should add the MIME type where it
       makes sense (you can also create a category). Categories may be used
       for filtering in advanced search.

   The rclblob handler should be an executable program or script which exists
   inside /usr/[local/]share/recoll/filters. It will be given a file name as
   an argument and should output the text or HTML contents on the standard
   output.
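
   Here is a minimal sketch of what rclblob could look like, assuming a
   hypothetical blobtotext command which prints the text content of a .blob
   file on its standard output:

  #!/bin/sh
  # Minimal sketch of a Recoll input handler for .blob files.
  # blobtotext is a hypothetical text extractor for the format.
  infile="$1"
  echo '<html><head>'
  echo '<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">'
  echo '</head><body><pre>'
  # Escape HTML special characters in the extracted text
  blobtotext "$infile" | sed -e 's/&/\&amp;/g' -e 's/</\&lt;/g' -e 's/>/\&gt;/g'
  echo '</pre></body></html>'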

   The filter programming section describes in more detail how to write an
   input handler.
recoll-1.26.3/config.sub0000755000175000017500000010676313570165161012011 00000000000000#! /bin/sh
# Configuration validation subroutine script.
#   Copyright 1992-2016 Free Software Foundation, Inc.

timestamp='2016-11-04'

# This file is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
#
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that
# program.  This Exception is an additional permission under section 7
# of the GNU General Public License, version 3 ("GPLv3").


# Please send patches to <config-patches@gnu.org>.
#
# Configuration subroutine to validate and canonicalize a configuration type.
# Supply the specified configuration type as an argument.
# If it is invalid, we print an error message on stderr and exit with code 1.
# Otherwise, we print the canonical config type on stdout and succeed.

# You can get the latest version of this script from:
# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub

# This file is supposed to be the same for all GNU packages
# and recognize all the CPU types, system types and aliases
# that are meaningful with *any* GNU software.
# Each package is responsible for reporting which valid configurations
# it does not support.  The user should be able to distinguish
# a failure to support a valid configuration from a meaningless
# configuration.

# The goal of this file is to map all the various variations of a given
# machine specification into a single specification in the form:
#	CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
# or in some cases, the newer four-part form:
#	CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
# It is wrong to echo any other type of specification.

me=`echo "$0" | sed -e 's,.*/,,'`

usage="\
Usage: $0 [OPTION] CPU-MFR-OPSYS or ALIAS

Canonicalize a configuration name.

Operation modes:
  -h, --help         print this help, then exit
  -t, --time-stamp   print date of last modification, then exit
  -v, --version      print version number, then exit

Report bugs and patches to <config-patches@gnu.org>."

version="\
GNU config.sub ($timestamp)

Copyright 1992-2016 Free Software Foundation, Inc.

This is free software; see the source for copying conditions.  There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."

help="
Try \`$me --help' for more information."

# Parse command line
while test $# -gt 0 ; do
  case $1 in
    --time-stamp | --time* | -t )
       echo "$timestamp" ; exit ;;
    --version | -v )
       echo "$version" ; exit ;;
    --help | --h* | -h )
       echo "$usage"; exit ;;
    -- )     # Stop option processing
       shift; break ;;
    - )	# Use stdin as input.
       break ;;
    -* )
       echo "$me: invalid option $1$help"
       exit 1 ;;

    *local*)
       # First pass through any local machine types.
       echo $1
       exit ;;

    * )
       break ;;
  esac
done

case $# in
 0) echo "$me: missing argument$help" >&2
    exit 1;;
 1) ;;
 *) echo "$me: too many arguments$help" >&2
    exit 1;;
esac

# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
# Here we must recognize all the valid KERNEL-OS combinations.
maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
case $maybe_os in
  nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \
  linux-musl* | linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \
  knetbsd*-gnu* | netbsd*-gnu* | netbsd*-eabi* | \
  kopensolaris*-gnu* | cloudabi*-eabi* | \
  storm-chaos* | os2-emx* | rtmk-nova*)
    os=-$maybe_os
    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
    ;;
  android-linux)
    os=-linux-android
    basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown
    ;;
  *)
    basic_machine=`echo $1 | sed 's/-[^-]*$//'`
    if [ $basic_machine != $1 ]
    then os=`echo $1 | sed 's/.*-/-/'`
    else os=; fi
    ;;
esac

### Let's recognize common machines as not being operating systems so
### that things like config.sub decstation-3100 work.  We also
### recognize some manufacturers as not being operating systems, so we
### can provide default operating systems below.
case $os in
	-sun*os*)
		# Prevent following clause from handling this invalid input.
		;;
	-dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
	-att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
	-unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
	-convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
	-c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
	-harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
	-apple | -axis | -knuth | -cray | -microblaze*)
		os=
		basic_machine=$1
		;;
	-bluegene*)
		os=-cnk
		;;
	-sim | -cisco | -oki | -wec | -winbond)
		os=
		basic_machine=$1
		;;
	-scout)
		;;
	-wrs)
		os=-vxworks
		basic_machine=$1
		;;
	-chorusos*)
		os=-chorusos
		basic_machine=$1
		;;
	-chorusrdb)
		os=-chorusrdb
		basic_machine=$1
		;;
	-hiux*)
		os=-hiuxwe2
		;;
	-sco6)
		os=-sco5v6
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco5)
		os=-sco3.2v5
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco4)
		os=-sco3.2v4
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco3.2.[4-9]*)
		os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco3.2v[4-9]*)
		# Don't forget version if it is 3.2v4 or newer.
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco5v6*)
		# Don't forget version if it is 3.2v4 or newer.
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-sco*)
		os=-sco3.2v2
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-udk*)
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-isc)
		os=-isc2.2
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-clix*)
		basic_machine=clipper-intergraph
		;;
	-isc*)
		basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
		;;
	-lynx*178)
		os=-lynxos178
		;;
	-lynx*5)
		os=-lynxos5
		;;
	-lynx*)
		os=-lynxos
		;;
	-ptx*)
		basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
		;;
	-windowsnt*)
		os=`echo $os | sed -e 's/windowsnt/winnt/'`
		;;
	-psos*)
		os=-psos
		;;
	-mint | -mint[0-9]*)
		basic_machine=m68k-atari
		os=-mint
		;;
esac

# Decode aliases for certain CPU-COMPANY combinations.
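# For example, "amd64" is rewritten to "x86_64-pc" and "ppc64le" to
# "powerpc64le-unknown" by the entries below.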
case $basic_machine in
	# Recognize the basic CPU types without company name.
	# Some are omitted here because they have special meanings below.
	1750a | 580 \
	| a29k \
	| aarch64 | aarch64_be \
	| alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
	| alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
	| am33_2.0 \
	| arc | arceb \
	| arm | arm[bl]e | arme[lb] | armv[2-8] | armv[3-8][lb] | armv7[arm] \
	| avr | avr32 \
	| ba \
	| be32 | be64 \
	| bfin \
	| c4x | c8051 | clipper \
	| d10v | d30v | dlx | dsp16xx \
	| e2k | epiphany \
	| fido | fr30 | frv | ft32 \
	| h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
	| hexagon \
	| i370 | i860 | i960 | ia64 \
	| ip2k | iq2000 \
	| k1om \
	| le32 | le64 \
	| lm32 \
	| m32c | m32r | m32rle | m68000 | m68k | m88k \
	| maxq | mb | microblaze | microblazeel | mcore | mep | metag \
	| mips | mipsbe | mipseb | mipsel | mipsle \
	| mips16 \
	| mips64 | mips64el \
	| mips64octeon | mips64octeonel \
	| mips64orion | mips64orionel \
	| mips64r5900 | mips64r5900el \
	| mips64vr | mips64vrel \
	| mips64vr4100 | mips64vr4100el \
	| mips64vr4300 | mips64vr4300el \
	| mips64vr5000 | mips64vr5000el \
	| mips64vr5900 | mips64vr5900el \
	| mipsisa32 | mipsisa32el \
	| mipsisa32r2 | mipsisa32r2el \
	| mipsisa32r6 | mipsisa32r6el \
	| mipsisa64 | mipsisa64el \
	| mipsisa64r2 | mipsisa64r2el \
	| mipsisa64r6 | mipsisa64r6el \
	| mipsisa64sb1 | mipsisa64sb1el \
	| mipsisa64sr71k | mipsisa64sr71kel \
	| mipsr5900 | mipsr5900el \
	| mipstx39 | mipstx39el \
	| mn10200 | mn10300 \
	| moxie \
	| mt \
	| msp430 \
	| nds32 | nds32le | nds32be \
	| nios | nios2 | nios2eb | nios2el \
	| ns16k | ns32k \
	| open8 | or1k | or1knd | or32 \
	| pdp10 | pdp11 | pj | pjl \
	| powerpc | powerpc64 | powerpc64le | powerpcle \
	| pru \
	| pyramid \
	| riscv32 | riscv64 \
	| rl78 | rx \
	| score \
	| sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[234]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
	| sh64 | sh64le \
	| sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
	| sparcv8 | sparcv9 | sparcv9b | sparcv9v \
	| spu \
	| tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \
	| ubicom32 \
	| v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \
	| visium \
	| we32k \
	| x86 | xc16x | xstormy16 | xtensa \
	| z8k | z80)
		basic_machine=$basic_machine-unknown
		;;
	c54x)
		basic_machine=tic54x-unknown
		;;
	c55x)
		basic_machine=tic55x-unknown
		;;
	c6x)
		basic_machine=tic6x-unknown
		;;
	leon|leon[3-9])
		basic_machine=sparc-$basic_machine
		;;
	m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | nvptx | picochip)
		basic_machine=$basic_machine-unknown
		os=-none
		;;
	m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
		;;
	ms1)
		basic_machine=mt-unknown
		;;

	strongarm | thumb | xscale)
		basic_machine=arm-unknown
		;;
	xgate)
		basic_machine=$basic_machine-unknown
		os=-none
		;;
	xscaleeb)
		basic_machine=armeb-unknown
		;;

	xscaleel)
		basic_machine=armel-unknown
		;;

	# We use `pc' rather than `unknown'
	# because (1) that's what they normally are, and
	# (2) the word "unknown" tends to confuse beginning users.
	i*86 | x86_64)
	  basic_machine=$basic_machine-pc
	  ;;
	# Object if more than one company name word.
	*-*-*)
		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
		exit 1
		;;
	# Recognize the basic CPU types with company name.
	580-* \
	| a29k-* \
	| aarch64-* | aarch64_be-* \
	| alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
	| alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
	| alphapca5[67]-* | alpha64pca5[67]-* | arc-* | arceb-* \
	| arm-*  | armbe-* | armle-* | armeb-* | armv*-* \
	| avr-* | avr32-* \
	| ba-* \
	| be32-* | be64-* \
	| bfin-* | bs2000-* \
	| c[123]* | c30-* | [cjt]90-* | c4x-* \
	| c8051-* | clipper-* | craynv-* | cydra-* \
	| d10v-* | d30v-* | dlx-* \
	| e2k-* | elxsi-* \
	| f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
	| h8300-* | h8500-* \
	| hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
	| hexagon-* \
	| i*86-* | i860-* | i960-* | ia64-* \
	| ip2k-* | iq2000-* \
	| k1om-* \
	| le32-* | le64-* \
	| lm32-* \
	| m32c-* | m32r-* | m32rle-* \
	| m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
	| m88110-* | m88k-* | maxq-* | mcore-* | metag-* \
	| microblaze-* | microblazeel-* \
	| mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
	| mips16-* \
	| mips64-* | mips64el-* \
	| mips64octeon-* | mips64octeonel-* \
	| mips64orion-* | mips64orionel-* \
	| mips64r5900-* | mips64r5900el-* \
	| mips64vr-* | mips64vrel-* \
	| mips64vr4100-* | mips64vr4100el-* \
	| mips64vr4300-* | mips64vr4300el-* \
	| mips64vr5000-* | mips64vr5000el-* \
	| mips64vr5900-* | mips64vr5900el-* \
	| mipsisa32-* | mipsisa32el-* \
	| mipsisa32r2-* | mipsisa32r2el-* \
	| mipsisa32r6-* | mipsisa32r6el-* \
	| mipsisa64-* | mipsisa64el-* \
	| mipsisa64r2-* | mipsisa64r2el-* \
	| mipsisa64r6-* | mipsisa64r6el-* \
	| mipsisa64sb1-* | mipsisa64sb1el-* \
	| mipsisa64sr71k-* | mipsisa64sr71kel-* \
	| mipsr5900-* | mipsr5900el-* \
	| mipstx39-* | mipstx39el-* \
	| mmix-* \
	| mt-* \
	| msp430-* \
	| nds32-* | nds32le-* | nds32be-* \
	| nios-* | nios2-* | nios2eb-* | nios2el-* \
	| none-* | np1-* | ns16k-* | ns32k-* \
	| open8-* \
	| or1k*-* \
	| orion-* \
	| pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
	| powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \
	| pru-* \
	| pyramid-* \
	| riscv32-* | riscv64-* \
	| rl78-* | romp-* | rs6000-* | rx-* \
	| sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
	| shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
	| sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
	| sparclite-* \
	| sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx*-* \
	| tahoe-* \
	| tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
	| tile*-* \
	| tron-* \
	| ubicom32-* \
	| v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \
	| vax-* \
	| visium-* \
	| we32k-* \
	| x86-* | x86_64-* | xc16x-* | xps100-* \
	| xstormy16-* | xtensa*-* \
	| ymp-* \
	| z8k-* | z80-*)
		;;
	# Recognize the basic CPU types without company name, with glob match.
	xtensa*)
		basic_machine=$basic_machine-unknown
		;;
	# Recognize the various machine names and aliases which stand
	# for a CPU type and a company and sometimes even an OS.
	386bsd)
		basic_machine=i386-unknown
		os=-bsd
		;;
	3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
		basic_machine=m68000-att
		;;
	3b*)
		basic_machine=we32k-att
		;;
	a29khif)
		basic_machine=a29k-amd
		os=-udi
		;;
	abacus)
		basic_machine=abacus-unknown
		;;
	adobe68k)
		basic_machine=m68010-adobe
		os=-scout
		;;
	alliant | fx80)
		basic_machine=fx80-alliant
		;;
	altos | altos3068)
		basic_machine=m68k-altos
		;;
	am29k)
		basic_machine=a29k-none
		os=-bsd
		;;
	amd64)
		basic_machine=x86_64-pc
		;;
	amd64-*)
		basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	amdahl)
		basic_machine=580-amdahl
		os=-sysv
		;;
	amiga | amiga-*)
		basic_machine=m68k-unknown
		;;
	amigaos | amigados)
		basic_machine=m68k-unknown
		os=-amigaos
		;;
	amigaunix | amix)
		basic_machine=m68k-unknown
		os=-sysv4
		;;
	apollo68)
		basic_machine=m68k-apollo
		os=-sysv
		;;
	apollo68bsd)
		basic_machine=m68k-apollo
		os=-bsd
		;;
	aros)
		basic_machine=i386-pc
		os=-aros
		;;
	asmjs)
		basic_machine=asmjs-unknown
		;;
	aux)
		basic_machine=m68k-apple
		os=-aux
		;;
	balance)
		basic_machine=ns32k-sequent
		os=-dynix
		;;
	blackfin)
		basic_machine=bfin-unknown
		os=-linux
		;;
	blackfin-*)
		basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'`
		os=-linux
		;;
	bluegene*)
		basic_machine=powerpc-ibm
		os=-cnk
		;;
	c54x-*)
		basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	c55x-*)
		basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	c6x-*)
		basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	c90)
		basic_machine=c90-cray
		os=-unicos
		;;
	cegcc)
		basic_machine=arm-unknown
		os=-cegcc
		;;
	convex-c1)
		basic_machine=c1-convex
		os=-bsd
		;;
	convex-c2)
		basic_machine=c2-convex
		os=-bsd
		;;
	convex-c32)
		basic_machine=c32-convex
		os=-bsd
		;;
	convex-c34)
		basic_machine=c34-convex
		os=-bsd
		;;
	convex-c38)
		basic_machine=c38-convex
		os=-bsd
		;;
	cray | j90)
		basic_machine=j90-cray
		os=-unicos
		;;
	craynv)
		basic_machine=craynv-cray
		os=-unicosmp
		;;
	cr16 | cr16-*)
		basic_machine=cr16-unknown
		os=-elf
		;;
	crds | unos)
		basic_machine=m68k-crds
		;;
	crisv32 | crisv32-* | etraxfs*)
		basic_machine=crisv32-axis
		;;
	cris | cris-* | etrax*)
		basic_machine=cris-axis
		;;
	crx)
		basic_machine=crx-unknown
		os=-elf
		;;
	da30 | da30-*)
		basic_machine=m68k-da30
		;;
	decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
		basic_machine=mips-dec
		;;
	decsystem10* | dec10*)
		basic_machine=pdp10-dec
		os=-tops10
		;;
	decsystem20* | dec20*)
		basic_machine=pdp10-dec
		os=-tops20
		;;
	delta | 3300 | motorola-3300 | motorola-delta \
	      | 3300-motorola | delta-motorola)
		basic_machine=m68k-motorola
		;;
	delta88)
		basic_machine=m88k-motorola
		os=-sysv3
		;;
	dicos)
		basic_machine=i686-pc
		os=-dicos
		;;
	djgpp)
		basic_machine=i586-pc
		os=-msdosdjgpp
		;;
	dpx20 | dpx20-*)
		basic_machine=rs6000-bull
		os=-bosx
		;;
	dpx2* | dpx2*-bull)
		basic_machine=m68k-bull
		os=-sysv3
		;;
	e500v[12])
		basic_machine=powerpc-unknown
		os=$os"spe"
		;;
	e500v[12]-*)
		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
		os=$os"spe"
		;;
	ebmon29k)
		basic_machine=a29k-amd
		os=-ebmon
		;;
	elxsi)
		basic_machine=elxsi-elxsi
		os=-bsd
		;;
	encore | umax | mmax)
		basic_machine=ns32k-encore
		;;
	es1800 | OSE68k | ose68k | ose | OSE)
		basic_machine=m68k-ericsson
		os=-ose
		;;
	fx2800)
		basic_machine=i860-alliant
		;;
	genix)
		basic_machine=ns32k-ns
		;;
	gmicro)
		basic_machine=tron-gmicro
		os=-sysv
		;;
	go32)
		basic_machine=i386-pc
		os=-go32
		;;
	h3050r* | hiux*)
		basic_machine=hppa1.1-hitachi
		os=-hiuxwe2
		;;
	h8300hms)
		basic_machine=h8300-hitachi
		os=-hms
		;;
	h8300xray)
		basic_machine=h8300-hitachi
		os=-xray
		;;
	h8500hms)
		basic_machine=h8500-hitachi
		os=-hms
		;;
	harris)
		basic_machine=m88k-harris
		os=-sysv3
		;;
	hp300-*)
		basic_machine=m68k-hp
		;;
	hp300bsd)
		basic_machine=m68k-hp
		os=-bsd
		;;
	hp300hpux)
		basic_machine=m68k-hp
		os=-hpux
		;;
	hp3k9[0-9][0-9] | hp9[0-9][0-9])
		basic_machine=hppa1.0-hp
		;;
	hp9k2[0-9][0-9] | hp9k31[0-9])
		basic_machine=m68000-hp
		;;
	hp9k3[2-9][0-9])
		basic_machine=m68k-hp
		;;
	hp9k6[0-9][0-9] | hp6[0-9][0-9])
		basic_machine=hppa1.0-hp
		;;
	hp9k7[0-79][0-9] | hp7[0-79][0-9])
		basic_machine=hppa1.1-hp
		;;
	hp9k78[0-9] | hp78[0-9])
		# FIXME: really hppa2.0-hp
		basic_machine=hppa1.1-hp
		;;
	hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
		# FIXME: really hppa2.0-hp
		basic_machine=hppa1.1-hp
		;;
	hp9k8[0-9][13679] | hp8[0-9][13679])
		basic_machine=hppa1.1-hp
		;;
	hp9k8[0-9][0-9] | hp8[0-9][0-9])
		basic_machine=hppa1.0-hp
		;;
	hppa-next)
		os=-nextstep3
		;;
	hppaosf)
		basic_machine=hppa1.1-hp
		os=-osf
		;;
	hppro)
		basic_machine=hppa1.1-hp
		os=-proelf
		;;
	i370-ibm* | ibm*)
		basic_machine=i370-ibm
		;;
	i*86v32)
		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
		os=-sysv32
		;;
	i*86v4*)
		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
		os=-sysv4
		;;
	i*86v)
		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
		os=-sysv
		;;
	i*86sol2)
		basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
		os=-solaris2
		;;
	i386mach)
		basic_machine=i386-mach
		os=-mach
		;;
	i386-vsta | vsta)
		basic_machine=i386-unknown
		os=-vsta
		;;
	iris | iris4d)
		basic_machine=mips-sgi
		case $os in
		    -irix*)
			;;
		    *)
			os=-irix4
			;;
		esac
		;;
	isi68 | isi)
		basic_machine=m68k-isi
		os=-sysv
		;;
	leon-*|leon[3-9]-*)
		basic_machine=sparc-`echo $basic_machine | sed 's/-.*//'`
		;;
	m68knommu)
		basic_machine=m68k-unknown
		os=-linux
		;;
	m68knommu-*)
		basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'`
		os=-linux
		;;
	m88k-omron*)
		basic_machine=m88k-omron
		;;
	magnum | m3230)
		basic_machine=mips-mips
		os=-sysv
		;;
	merlin)
		basic_machine=ns32k-utek
		os=-sysv
		;;
	microblaze*)
		basic_machine=microblaze-xilinx
		;;
	mingw64)
		basic_machine=x86_64-pc
		os=-mingw64
		;;
	mingw32)
		basic_machine=i686-pc
		os=-mingw32
		;;
	mingw32ce)
		basic_machine=arm-unknown
		os=-mingw32ce
		;;
	miniframe)
		basic_machine=m68000-convergent
		;;
	*mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
		basic_machine=m68k-atari
		os=-mint
		;;
	mips3*-*)
		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
		;;
	mips3*)
		basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
		;;
	monitor)
		basic_machine=m68k-rom68k
		os=-coff
		;;
	morphos)
		basic_machine=powerpc-unknown
		os=-morphos
		;;
	moxiebox)
		basic_machine=moxie-unknown
		os=-moxiebox
		;;
	msdos)
		basic_machine=i386-pc
		os=-msdos
		;;
	ms1-*)
		basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
		;;
	msys)
		basic_machine=i686-pc
		os=-msys
		;;
	mvs)
		basic_machine=i370-ibm
		os=-mvs
		;;
	nacl)
		basic_machine=le32-unknown
		os=-nacl
		;;
	ncr3000)
		basic_machine=i486-ncr
		os=-sysv4
		;;
	netbsd386)
		basic_machine=i386-unknown
		os=-netbsd
		;;
	netwinder)
		basic_machine=armv4l-rebel
		os=-linux
		;;
	news | news700 | news800 | news900)
		basic_machine=m68k-sony
		os=-newsos
		;;
	news1000)
		basic_machine=m68030-sony
		os=-newsos
		;;
	news-3600 | risc-news)
		basic_machine=mips-sony
		os=-newsos
		;;
	necv70)
		basic_machine=v70-nec
		os=-sysv
		;;
	next | m*-next )
		basic_machine=m68k-next
		case $os in
		    -nextstep* )
			;;
		    -ns2*)
		      os=-nextstep2
			;;
		    *)
		      os=-nextstep3
			;;
		esac
		;;
	nh3000)
		basic_machine=m68k-harris
		os=-cxux
		;;
	nh[45]000)
		basic_machine=m88k-harris
		os=-cxux
		;;
	nindy960)
		basic_machine=i960-intel
		os=-nindy
		;;
	mon960)
		basic_machine=i960-intel
		os=-mon960
		;;
	nonstopux)
		basic_machine=mips-compaq
		os=-nonstopux
		;;
	np1)
		basic_machine=np1-gould
		;;
	neo-tandem)
		basic_machine=neo-tandem
		;;
	nse-tandem)
		basic_machine=nse-tandem
		;;
	nsr-tandem)
		basic_machine=nsr-tandem
		;;
	op50n-* | op60c-*)
		basic_machine=hppa1.1-oki
		os=-proelf
		;;
	openrisc | openrisc-*)
		basic_machine=or32-unknown
		;;
	os400)
		basic_machine=powerpc-ibm
		os=-os400
		;;
	OSE68000 | ose68000)
		basic_machine=m68000-ericsson
		os=-ose
		;;
	os68k)
		basic_machine=m68k-none
		os=-os68k
		;;
	pa-hitachi)
		basic_machine=hppa1.1-hitachi
		os=-hiuxwe2
		;;
	paragon)
		basic_machine=i860-intel
		os=-osf
		;;
	parisc)
		basic_machine=hppa-unknown
		os=-linux
		;;
	parisc-*)
		basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'`
		os=-linux
		;;
	pbd)
		basic_machine=sparc-tti
		;;
	pbb)
		basic_machine=m68k-tti
		;;
	pc532 | pc532-*)
		basic_machine=ns32k-pc532
		;;
	pc98)
		basic_machine=i386-pc
		;;
	pc98-*)
		basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	pentium | p5 | k5 | k6 | nexgen | viac3)
		basic_machine=i586-pc
		;;
	pentiumpro | p6 | 6x86 | athlon | athlon_*)
		basic_machine=i686-pc
		;;
	pentiumii | pentium2 | pentiumiii | pentium3)
		basic_machine=i686-pc
		;;
	pentium4)
		basic_machine=i786-pc
		;;
	pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
		basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	pentiumpro-* | p6-* | 6x86-* | athlon-*)
		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
		basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	pentium4-*)
		basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	pn)
		basic_machine=pn-gould
		;;
	power)	basic_machine=power-ibm
		;;
	ppc | ppcbe)	basic_machine=powerpc-unknown
		;;
	ppc-* | ppcbe-*)
		basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	ppcle | powerpclittle)
		basic_machine=powerpcle-unknown
		;;
	ppcle-* | powerpclittle-*)
		basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	ppc64)	basic_machine=powerpc64-unknown
		;;
	ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	ppc64le | powerpc64little)
		basic_machine=powerpc64le-unknown
		;;
	ppc64le-* | powerpc64little-*)
		basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	ps2)
		basic_machine=i386-ibm
		;;
	pw32)
		basic_machine=i586-unknown
		os=-pw32
		;;
	rdos | rdos64)
		basic_machine=x86_64-pc
		os=-rdos
		;;
	rdos32)
		basic_machine=i386-pc
		os=-rdos
		;;
	rom68k)
		basic_machine=m68k-rom68k
		os=-coff
		;;
	rm[46]00)
		basic_machine=mips-siemens
		;;
	rtpc | rtpc-*)
		basic_machine=romp-ibm
		;;
	s390 | s390-*)
		basic_machine=s390-ibm
		;;
	s390x | s390x-*)
		basic_machine=s390x-ibm
		;;
	sa29200)
		basic_machine=a29k-amd
		os=-udi
		;;
	sb1)
		basic_machine=mipsisa64sb1-unknown
		;;
	sb1el)
		basic_machine=mipsisa64sb1el-unknown
		;;
	sde)
		basic_machine=mipsisa32-sde
		os=-elf
		;;
	sei)
		basic_machine=mips-sei
		os=-seiux
		;;
	sequent)
		basic_machine=i386-sequent
		;;
	sh)
		basic_machine=sh-hitachi
		os=-hms
		;;
	sh5el)
		basic_machine=sh5le-unknown
		;;
	sh64)
		basic_machine=sh64-unknown
		;;
	sparclite-wrs | simso-wrs)
		basic_machine=sparclite-wrs
		os=-vxworks
		;;
	sps7)
		basic_machine=m68k-bull
		os=-sysv2
		;;
	spur)
		basic_machine=spur-unknown
		;;
	st2000)
		basic_machine=m68k-tandem
		;;
	stratus)
		basic_machine=i860-stratus
		os=-sysv4
		;;
	strongarm-* | thumb-*)
		basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'`
		;;
	sun2)
		basic_machine=m68000-sun
		;;
	sun2os3)
		basic_machine=m68000-sun
		os=-sunos3
		;;
	sun2os4)
		basic_machine=m68000-sun
		os=-sunos4
		;;
	sun3os3)
		basic_machine=m68k-sun
		os=-sunos3
		;;
	sun3os4)
		basic_machine=m68k-sun
		os=-sunos4
		;;
	sun4os3)
		basic_machine=sparc-sun
		os=-sunos3
		;;
	sun4os4)
		basic_machine=sparc-sun
		os=-sunos4
		;;
	sun4sol2)
		basic_machine=sparc-sun
		os=-solaris2
		;;
	sun3 | sun3-*)
		basic_machine=m68k-sun
		;;
	sun4)
		basic_machine=sparc-sun
		;;
	sun386 | sun386i | roadrunner)
		basic_machine=i386-sun
		;;
	sv1)
		basic_machine=sv1-cray
		os=-unicos
		;;
	symmetry)
		basic_machine=i386-sequent
		os=-dynix
		;;
	t3e)
		basic_machine=alphaev5-cray
		os=-unicos
		;;
	t90)
		basic_machine=t90-cray
		os=-unicos
		;;
	tile*)
		basic_machine=$basic_machine-unknown
		os=-linux-gnu
		;;
	tx39)
		basic_machine=mipstx39-unknown
		;;
	tx39el)
		basic_machine=mipstx39el-unknown
		;;
	toad1)
		basic_machine=pdp10-xkl
		os=-tops20
		;;
	tower | tower-32)
		basic_machine=m68k-ncr
		;;
	tpf)
		basic_machine=s390x-ibm
		os=-tpf
		;;
	udi29k)
		basic_machine=a29k-amd
		os=-udi
		;;
	ultra3)
		basic_machine=a29k-nyu
		os=-sym1
		;;
	v810 | necv810)
		basic_machine=v810-nec
		os=-none
		;;
	vaxv)
		basic_machine=vax-dec
		os=-sysv
		;;
	vms)
		basic_machine=vax-dec
		os=-vms
		;;
	vpp*|vx|vx-*)
		basic_machine=f301-fujitsu
		;;
	vxworks960)
		basic_machine=i960-wrs
		os=-vxworks
		;;
	vxworks68)
		basic_machine=m68k-wrs
		os=-vxworks
		;;
	vxworks29k)
		basic_machine=a29k-wrs
		os=-vxworks
		;;
	w65*)
		basic_machine=w65-wdc
		os=-none
		;;
	w89k-*)
		basic_machine=hppa1.1-winbond
		os=-proelf
		;;
	xbox)
		basic_machine=i686-pc
		os=-mingw32
		;;
	xps | xps100)
		basic_machine=xps100-honeywell
		;;
	xscale-* | xscalee[bl]-*)
		basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'`
		;;
	ymp)
		basic_machine=ymp-cray
		os=-unicos
		;;
	z8k-*-coff)
		basic_machine=z8k-unknown
		os=-sim
		;;
	z80-*-coff)
		basic_machine=z80-unknown
		os=-sim
		;;
	none)
		basic_machine=none-none
		os=-none
		;;

# Here we handle the default manufacturer of certain CPU types.  It is in
# some cases the only manufacturer, in others, it is the most popular.
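# For example, a bare "mmix" becomes "mmix-knuth" and a bare "rs6000"
# becomes "rs6000-ibm".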
	w89k)
		basic_machine=hppa1.1-winbond
		;;
	op50n)
		basic_machine=hppa1.1-oki
		;;
	op60c)
		basic_machine=hppa1.1-oki
		;;
	romp)
		basic_machine=romp-ibm
		;;
	mmix)
		basic_machine=mmix-knuth
		;;
	rs6000)
		basic_machine=rs6000-ibm
		;;
	vax)
		basic_machine=vax-dec
		;;
	pdp10)
		# there are many clones, so DEC is not a safe bet
		basic_machine=pdp10-unknown
		;;
	pdp11)
		basic_machine=pdp11-dec
		;;
	we32k)
		basic_machine=we32k-att
		;;
	sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele)
		basic_machine=sh-unknown
		;;
	sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
		basic_machine=sparc-sun
		;;
	cydra)
		basic_machine=cydra-cydrome
		;;
	orion)
		basic_machine=orion-highlevel
		;;
	orion105)
		basic_machine=clipper-highlevel
		;;
	mac | mpw | mac-mpw)
		basic_machine=m68k-apple
		;;
	pmac | pmac-mpw)
		basic_machine=powerpc-apple
		;;
	*-unknown)
		# Make sure to match an already-canonicalized machine name.
		;;
	*)
		echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
		exit 1
		;;
esac

# Here we canonicalize certain aliases for manufacturers.
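# For example, "vax-digital" is rewritten to "vax-dec".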
case $basic_machine in
	*-digital*)
		basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
		;;
	*-commodore*)
		basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
		;;
	*)
		;;
esac

# Decode manufacturer-specific aliases for certain operating systems.
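# For example, a bare "-solaris" becomes "-solaris2" and "-gnu/linux"
# becomes "-linux-gnu" below.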

if [ x"$os" != x"" ]
then
case $os in
	# First match some system type aliases
	# that might get confused with valid system types.
	# -solaris* is a basic system type, with this one exception.
	-auroraux)
		os=-auroraux
		;;
	-solaris1 | -solaris1.*)
		os=`echo $os | sed -e 's|solaris1|sunos4|'`
		;;
	-solaris)
		os=-solaris2
		;;
	-svr4*)
		os=-sysv4
		;;
	-unixware*)
		os=-sysv4.2uw
		;;
	-gnu/linux*)
		os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
		;;
	# First accept the basic system types.
	# The portable systems come first.
	# Each alternative MUST END IN A *, to match a version number.
	# -sysv* is not here because it comes later, after sysvr4.
	-gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
	      | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\
	      | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \
	      | -sym* | -kopensolaris* | -plan9* \
	      | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
	      | -aos* | -aros* | -cloudabi* | -sortix* \
	      | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
	      | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
	      | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
	      | -bitrig* | -openbsd* | -solidbsd* | -libertybsd* \
	      | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
	      | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
	      | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
	      | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
	      | -chorusos* | -chorusrdb* | -cegcc* \
	      | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
	      | -midipix* | -mingw32* | -mingw64* | -linux-gnu* | -linux-android* \
	      | -linux-newlib* | -linux-musl* | -linux-uclibc* \
	      | -uxpv* | -beos* | -mpeix* | -udk* | -moxiebox* \
	      | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
	      | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
	      | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
	      | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
	      | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
	      | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
	      | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es* \
	      | -onefs* | -tirtos* | -phoenix* | -fuchsia*)
	# Remember, each alternative MUST END IN *, to match a version number.
		;;
	-qnx*)
		case $basic_machine in
		    x86-* | i*86-*)
			;;
		    *)
			os=-nto$os
			;;
		esac
		;;
	-nto-qnx*)
		;;
	-nto*)
		os=`echo $os | sed -e 's|nto|nto-qnx|'`
		;;
	-sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
	      | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
	      | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
		;;
	-mac*)
		os=`echo $os | sed -e 's|mac|macos|'`
		;;
	-linux-dietlibc)
		os=-linux-dietlibc
		;;
	-linux*)
		os=`echo $os | sed -e 's|linux|linux-gnu|'`
		;;
	-sunos5*)
		os=`echo $os | sed -e 's|sunos5|solaris2|'`
		;;
	-sunos6*)
		os=`echo $os | sed -e 's|sunos6|solaris3|'`
		;;
	-opened*)
		os=-openedition
		;;
	-os400*)
		os=-os400
		;;
	-wince*)
		os=-wince
		;;
	-osfrose*)
		os=-osfrose
		;;
	-osf*)
		os=-osf
		;;
	-utek*)
		os=-bsd
		;;
	-dynix*)
		os=-bsd
		;;
	-acis*)
		os=-aos
		;;
	-atheos*)
		os=-atheos
		;;
	-syllable*)
		os=-syllable
		;;
	-386bsd)
		os=-bsd
		;;
	-ctix* | -uts*)
		os=-sysv
		;;
	-nova*)
		os=-rtmk-nova
		;;
	-ns2 )
		os=-nextstep2
		;;
	-nsk*)
		os=-nsk
		;;
	# Preserve the version number of sinix5.
	-sinix5.*)
		os=`echo $os | sed -e 's|sinix|sysv|'`
		;;
	-sinix*)
		os=-sysv4
		;;
	-tpf*)
		os=-tpf
		;;
	-triton*)
		os=-sysv3
		;;
	-oss*)
		os=-sysv3
		;;
	-svr4)
		os=-sysv4
		;;
	-svr3)
		os=-sysv3
		;;
	-sysvr4)
		os=-sysv4
		;;
	# This must come after -sysvr4.
	-sysv*)
		;;
	-ose*)
		os=-ose
		;;
	-es1800*)
		os=-ose
		;;
	-xenix)
		os=-xenix
		;;
	-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
		os=-mint
		;;
	-aros*)
		os=-aros
		;;
	-zvmoe)
		os=-zvmoe
		;;
	-dicos*)
		os=-dicos
		;;
	-nacl*)
		;;
	-ios)
		;;
	-none)
		;;
	*)
		# Get rid of the `-' at the beginning of $os.
		os=`echo $os | sed 's/[^-]*-//'`
		echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
		exit 1
		;;
esac
else

# Here we handle the default operating systems that come with various machines.
# The value should be what the vendor currently ships out the door with their
# machine or put another way, the most popular os provided with the machine.

# Note that if you're going to try to match "-MANUFACTURER" here (say,
# "-sun"), then you have to tell the case statement up towards the top
# that MANUFACTURER isn't an operating system.  Otherwise, code above
# will signal an error saying that MANUFACTURER isn't an operating
# system, and we'll never get to this point.
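# For example, "sparc-sun" gets "-sunos4.1.1" and "vax-dec" gets
# "-ultrix4.2" here, while machines with no specific entry fall through
# to "-none".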

case $basic_machine in
	score-*)
		os=-elf
		;;
	spu-*)
		os=-elf
		;;
	*-acorn)
		os=-riscix1.2
		;;
	arm*-rebel)
		os=-linux
		;;
	arm*-semi)
		os=-aout
		;;
	c4x-* | tic4x-*)
		os=-coff
		;;
	c8051-*)
		os=-elf
		;;
	hexagon-*)
		os=-elf
		;;
	tic54x-*)
		os=-coff
		;;
	tic55x-*)
		os=-coff
		;;
	tic6x-*)
		os=-coff
		;;
	# This must come before the *-dec entry.
	pdp10-*)
		os=-tops20
		;;
	pdp11-*)
		os=-none
		;;
	*-dec | vax-*)
		os=-ultrix4.2
		;;
	m68*-apollo)
		os=-domain
		;;
	i386-sun)
		os=-sunos4.0.2
		;;
	m68000-sun)
		os=-sunos3
		;;
	m68*-cisco)
		os=-aout
		;;
	mep-*)
		os=-elf
		;;
	mips*-cisco)
		os=-elf
		;;
	mips*-*)
		os=-elf
		;;
	or32-*)
		os=-coff
		;;
	*-tti)	# must be before sparc entry or we get the wrong os.
		os=-sysv3
		;;
	sparc-* | *-sun)
		os=-sunos4.1.1
		;;
	*-be)
		os=-beos
		;;
	*-haiku)
		os=-haiku
		;;
	*-ibm)
		os=-aix
		;;
	*-knuth)
		os=-mmixware
		;;
	*-wec)
		os=-proelf
		;;
	*-winbond)
		os=-proelf
		;;
	*-oki)
		os=-proelf
		;;
	*-hp)
		os=-hpux
		;;
	*-hitachi)
		os=-hiux
		;;
	i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
		os=-sysv
		;;
	*-cbm)
		os=-amigaos
		;;
	*-dg)
		os=-dgux
		;;
	*-dolphin)
		os=-sysv3
		;;
	m68k-ccur)
		os=-rtu
		;;
	m88k-omron*)
		os=-luna
		;;
	*-next )
		os=-nextstep
		;;
	*-sequent)
		os=-ptx
		;;
	*-crds)
		os=-unos
		;;
	*-ns)
		os=-genix
		;;
	i370-*)
		os=-mvs
		;;
	*-next)
		os=-nextstep3
		;;
	*-gould)
		os=-sysv
		;;
	*-highlevel)
		os=-bsd
		;;
	*-encore)
		os=-bsd
		;;
	*-sgi)
		os=-irix
		;;
	*-siemens)
		os=-sysv4
		;;
	*-masscomp)
		os=-rtu
		;;
	f30[01]-fujitsu | f700-fujitsu)
		os=-uxpv
		;;
	*-rom68k)
		os=-coff
		;;
	*-*bug)
		os=-coff
		;;
	*-apple)
		os=-macos
		;;
	*-atari*)
		os=-mint
		;;
	*)
		os=-none
		;;
esac
fi

# Here we handle the case where we know the os, and the CPU type, but not the
# manufacturer.  We pick the logical manufacturer.
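# For example, "m88k-luna" (canonicalized above to "m88k-unknown" with os
# "-luna") comes out of this step as "m88k-omron-luna".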
vendor=unknown
case $basic_machine in
	*-unknown)
		case $os in
			-riscix*)
				vendor=acorn
				;;
			-sunos*)
				vendor=sun
				;;
			-cnk*|-aix*)
				vendor=ibm
				;;
			-beos*)
				vendor=be
				;;
			-hpux*)
				vendor=hp
				;;
			-mpeix*)
				vendor=hp
				;;
			-hiux*)
				vendor=hitachi
				;;
			-unos*)
				vendor=crds
				;;
			-dgux*)
				vendor=dg
				;;
			-luna*)
				vendor=omron
				;;
			-genix*)
				vendor=ns
				;;
			-mvs* | -opened*)
				vendor=ibm
				;;
			-os400*)
				vendor=ibm
				;;
			-ptx*)
				vendor=sequent
				;;
			-tpf*)
				vendor=ibm
				;;
			-vxsim* | -vxworks* | -windiss*)
				vendor=wrs
				;;
			-aux*)
				vendor=apple
				;;
			-hms*)
				vendor=hitachi
				;;
			-mpw* | -macos*)
				vendor=apple
				;;
			-*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
				vendor=atari
				;;
			-vos*)
				vendor=stratus
				;;
		esac
		basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
		;;
esac

echo $basic_machine$os
exit

# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "timestamp='"
# time-stamp-format: "%:y-%02m-%02d"
# time-stamp-end: "'"
# End:
recoll-1.26.3/missing
#! /bin/sh
# Common wrapper for a few potentially missing GNU programs.

scriptversion=2013-10-28.13; # UTC

# Copyright (C) 1996-2014 Free Software Foundation, Inc.
# Originally written by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.

if test $# -eq 0; then
  echo 1>&2 "Try '$0 --help' for more information"
  exit 1
fi

case $1 in

  --is-lightweight)
    # Used by our autoconf macros to check whether the available missing
    # script is modern enough.
    exit 0
    ;;

  --run)
    # Back-compat with the calling convention used by older automake.
    shift
    ;;

  -h|--h|--he|--hel|--help)
    echo "\
$0 [OPTION]... PROGRAM [ARGUMENT]...

Run 'PROGRAM [ARGUMENT]...', returning a proper advice when this fails due
to PROGRAM being missing or too old.

Options:
  -h, --help      display this help and exit
  -v, --version   output version information and exit

Supported PROGRAM values:
  aclocal   autoconf  autoheader   autom4te  automake  makeinfo
  bison     yacc      flex         lex       help2man

Version suffixes to PROGRAM as well as the prefixes 'gnu-', 'gnu', and
'g' are ignored when checking the name.

Send bug reports to <bug-automake@gnu.org>."
    exit $?
    ;;

  -v|--v|--ve|--ver|--vers|--versi|--versio|--version)
    echo "missing $scriptversion (GNU Automake)"
    exit $?
    ;;

  -*)
    echo 1>&2 "$0: unknown '$1' option"
    echo 1>&2 "Try '$0 --help' for more information"
    exit 1
    ;;

esac

# Run the given program, remember its exit status.
"$@"; st=$?

# If it succeeded, we are done.
test $st -eq 0 && exit 0

# Also exit now if it failed (or wasn't found), and '--version' was
# passed; such an option is passed most likely to detect whether the
# program is present and works.
case $2 in --version|--help) exit $st;; esac

# Exit code 63 means version mismatch.  This often happens when the user
# tries to use an ancient version of a tool on a file that requires a
# minimum version.
if test $st -eq 63; then
  msg="probably too old"
elif test $st -eq 127; then
  # Program was missing.
  msg="missing on your system"
else
  # Program was found and executed, but failed.  Give up.
  exit $st
fi

perl_URL=http://www.perl.org/
flex_URL=http://flex.sourceforge.net/
gnu_software_URL=http://www.gnu.org/software

program_details ()
{
  case $1 in
    aclocal|automake)
      echo "The '$1' program is part of the GNU Automake package:"
      echo "<$gnu_software_URL/automake>"
      echo "It also requires GNU Autoconf, GNU m4 and Perl in order to run:"
      echo "<$gnu_software_URL/autoconf>"
      echo "<$gnu_software_URL/m4/>"
      echo "<$perl_URL>"
      ;;
    autoconf|autom4te|autoheader)
      echo "The '$1' program is part of the GNU Autoconf package:"
      echo "<$gnu_software_URL/autoconf/>"
      echo "It also requires GNU m4 and Perl in order to run:"
      echo "<$gnu_software_URL/m4/>"
      echo "<$perl_URL>"
      ;;
  esac
}

give_advice ()
{
  # Normalize program name to check for.
  normalized_program=`echo "$1" | sed '
    s/^gnu-//; t
    s/^gnu//; t
    s/^g//; t'`
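  # For example, "gnu-automake", "gnuautomake" and "gautomake" all
  # normalize to "automake".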

  printf '%s\n' "'$1' is $msg."

  configure_deps="'configure.ac' or m4 files included by 'configure.ac'"
  case $normalized_program in
    autoconf*)
      echo "You should only need it if you modified 'configure.ac',"
      echo "or m4 files included by it."
      program_details 'autoconf'
      ;;
    autoheader*)
      echo "You should only need it if you modified 'acconfig.h' or"
      echo "$configure_deps."
      program_details 'autoheader'
      ;;
    automake*)
      echo "You should only need it if you modified 'Makefile.am' or"
      echo "$configure_deps."
      program_details 'automake'
      ;;
    aclocal*)
      echo "You should only need it if you modified 'acinclude.m4' or"
      echo "$configure_deps."
      program_details 'aclocal'
      ;;
   autom4te*)
      echo "You might have modified some maintainer files that require"
      echo "the 'autom4te' program to be rebuilt."
      program_details 'autom4te'
      ;;
    bison*|yacc*)
      echo "You should only need it if you modified a '.y' file."
      echo "You may want to install the GNU Bison package:"
      echo "<$gnu_software_URL/bison/>"
      ;;
    lex*|flex*)
      echo "You should only need it if you modified a '.l' file."
      echo "You may want to install the Fast Lexical Analyzer package:"
      echo "<$flex_URL>"
      ;;
    help2man*)
      echo "You should only need it if you modified a dependency" \
           "of a man page."
      echo "You may want to install the GNU Help2man package:"
      echo "<$gnu_software_URL/help2man/>"
    ;;
    makeinfo*)
      echo "You should only need it if you modified a '.texi' file, or"
      echo "any other file indirectly affecting the aspect of the manual."
      echo "You might want to install the Texinfo package:"
      echo "<$gnu_software_URL/texinfo/>"
      echo "The spurious makeinfo call might also be the consequence of"
      echo "using a buggy 'make' (AIX, DU, IRIX), in which case you might"
      echo "want to install GNU make:"
      echo "<$gnu_software_URL/make/>"
      ;;
    *)
      echo "You might have modified some files without having the proper"
      echo "tools for further handling them.  Check the 'README' file, it"
      echo "often tells you about the needed prerequisites for installing"
      echo "this package.  You may also peek at any GNU archive site, in"
      echo "case some other package contains this missing '$1' program."
      ;;
  esac
}

give_advice "$1" | sed -e '1s/^/WARNING: /' \
                       -e '2,$s/^/         /' >&2

# Propagate the correct exit status (expected to be 127 for a program
# not found, 63 for a program that failed due to version mismatch).
exit $st

# Local variables:
# eval: (add-hook 'write-file-hooks 'time-stamp)
# time-stamp-start: "scriptversion="
# time-stamp-format: "%:y-%02m-%02d.%02H"
# time-stamp-time-zone: "UTC"
# time-stamp-end: "; # UTC"
# End:
recoll-1.26.3/utils/netcon.cpp
/* Copyright (C) 2002 J.F. Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

// Wrapper classes for the socket interface

#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include "netcon.h"

#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>

#ifdef _AIX
#include <strings.h>
#endif // _AIX

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/select.h>
#include <sys/time.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <netdb.h>
#include <fcntl.h>
#ifdef HAVE_KQUEUE
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#endif

#include <map>
#include <vector>

#ifdef MDU_INCLUDE_LOG
#include MDU_INCLUDE_LOG
#else
#include "log.h"
#endif

using namespace std;

#ifndef SOCKLEN_T
#define SOCKLEN_T socklen_t
#endif

// Size of path buffer in sockaddr_un (AF_UNIX socket
// addr). Mysteriously it is 108 (explicit value) under linux, no
// define accessible. Let's take a little margin as it appears that
// some systems use 92. I believe we could also malloc a variable size
// struct but why bother.
#ifndef UNIX_PATH_MAX
#define UNIX_PATH_MAX 90
#endif

// Need &one, &zero for setsockopt...
static const int one = 1;
static const int zero = 0;

#ifndef LOGSYSERR
#define LOGSYSERR(who, call, spar)                                \
    LOGERR(who << ": "  << call << "("  << spar << ") errno " <<  \
           errno << " ("  << strerror(errno) << ")\n")
#endif
#ifndef MIN
#define MIN(a,b) ((a)<(b)?(a):(b))
#endif
#ifndef MAX
#define MAX(a,b) ((a)>(b)?(a):(b))
#endif
#ifndef freeZ
#define freeZ(X) if (X) {free(X);X=0;}
#endif

#define MILLIS(OLD, NEW) ( (uint64_t((NEW).tv_sec) - (OLD).tv_sec) * 1000 + \
                            ((NEW).tv_usec - (OLD).tv_usec) / 1000 )

// Static method
// Simplified interface to 'select()'. Only use one fd, for either
// reading or writing. This is only used when not using the
// selectloop() style of network i/o.
// Note that timeo == 0 does NOT mean wait forever but no wait at all.
int Netcon::select1(int fd, int timeo, int write)
{
    int ret;
    struct timeval tv;
    fd_set rd;
    tv.tv_sec = timeo;
    tv.tv_usec =  0;
    FD_ZERO(&rd);
    FD_SET(fd, &rd);
    if (write) {
        ret = select(fd + 1, 0, &rd, 0, &tv);
    } else {
        ret = select(fd + 1, &rd, 0, 0, &tv);
    }
    if (!FD_ISSET(fd, &rd)) {
        LOGDEB2("Netcon::select1: fd " << fd << " timeout\n");
    }
    return ret;
}
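
// Hypothetical usage sketch, assuming 'fd' is a connected socket on
// which we want to wait at most 5 seconds for data before reading:
//
//     if (Netcon::select1(fd, 5, 0) > 0) {
//         char buf[1024];
//         ssize_t n = read(fd, buf, sizeof(buf));
//         // ... process the n bytes read
//     }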


///////////////////////////////////////////
// SelectLoop

class SelectLoop::Internal {
public:
    Internal() {
#ifdef HAVE_KQUEUE
        if ((kq = kqueue()) == -1) {
            LOGSYSERR("Netcon::selectloop", "kqueue", "");
        }
#endif
    }

    ~Internal() {
#ifdef HAVE_KQUEUE
        if (kq >= 0)
            close(kq);
#endif
    }
    
    // Set by client callback to tell selectloop to return.
    bool selectloopDoReturn{false};
    int  selectloopReturnValue{0};
    int  placetostart{0};

    // Map of NetconP indexed by fd
    map<int, NetconP> polldata;
#ifdef HAVE_KQUEUE
    int kq{-1};
#endif
    // The last time we did the periodic thing. Initialized by setperiodic()
    struct timeval lasthdlcall;

    // The call back function and its parameter
    int (*periodichandler)(void *){0};
    void *periodicparam{0};
    // The periodic interval
    int periodicmillis{0};

    void periodictimeout(struct timeval *tv);
    void periodictimeout(struct timespec *ts);
    int maybecallperiodic();
    int setselevents(int fd, int events);
    int setselevents(NetconP& con, int events);
};

SelectLoop::SelectLoop()
{
    m = new Internal;
}

SelectLoop::~SelectLoop()
{
    delete m;
}

void SelectLoop::loopReturn(int value)
{
    m->selectloopDoReturn = true;
    m->selectloopReturnValue = value;
}
        
void SelectLoop::setperiodichandler(int (*handler)(void *), void *p, int ms)
{
    m->periodichandler = handler;
    m->periodicparam = p;
    m->periodicmillis = ms;
    if (m->periodicmillis > 0) {
        gettimeofday(&m->lasthdlcall, 0);
    }
}

// Compute the appropriate timeout so that the select call returns in
// time to call the periodic routine.
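// For example, with periodicmillis == 1000 and 400 ms elapsed since the
// last handler call, the computed timeout is 600 ms (tv_sec 0, tv_usec
// 600000).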
void SelectLoop::Internal::periodictimeout(struct timeval *tv)
{
    // If periodic not set, the select call times out and we loop
    // after a very long time (we'd need to pass NULL to select for an
    // infinite wait, and I'm too lazy to handle it)
    if (periodicmillis <= 0) {
        tv->tv_sec = 10000;
        tv->tv_usec = 0;
        return;
    }

    struct timeval mtv;
    gettimeofday(&mtv, 0);
    int millis = periodicmillis - MILLIS(lasthdlcall, mtv);
    
    // millis <= 0 means we should have already done the thing. *dont* set the
    // tv to 0, which means no timeout at all !
    if (millis <= 0) {
        millis = 1;
    }
    tv->tv_sec = millis / 1000;
    tv->tv_usec = (millis % 1000) * 1000;
}

void SelectLoop::Internal::periodictimeout(struct timespec *ts)
{
    struct timeval tv;
    periodictimeout(&tv);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
}


// Check if it's time to call the handler. selectloop will return to
// caller if either we or the handler return 0
int SelectLoop::Internal::maybecallperiodic()
{
    if (periodicmillis <= 0) {
        return 1;
    }

    struct timeval mtv;
    gettimeofday(&mtv, 0);
    int millis = periodicmillis - MILLIS(lasthdlcall, mtv);

    if (millis <= 0) {
        lasthdlcall = mtv;
        if (periodichandler) {
            return periodichandler(periodicparam);
        } else {
            return 0;
        }
    }
    return 1;
}

#ifndef HAVE_KQUEUE

int SelectLoop::doLoop()
{
    for (;;) {
        if (m->selectloopDoReturn) {
            m->selectloopDoReturn = false;
            LOGDEB("Netcon::selectloop: returning on request\n");
            return m->selectloopReturnValue;
        }

        int nfds;
        fd_set rd, wd;
        FD_ZERO(&rd);
        FD_ZERO(&wd);

        // Walk the netcon map and set up the read and write fd_sets
        // for select()
        nfds = 0;
        for (auto& entry : m->polldata) {
            NetconP& pll = entry.second;
            int fd  = entry.first;
            LOGDEB2("Selectloop: fd " << fd << " flags 0x"  <<
                    pll->m_wantedEvents << "\n");
            if (pll->m_wantedEvents & Netcon::NETCONPOLL_READ) {
                FD_SET(fd, &rd);
                nfds = MAX(nfds, fd + 1);
            }
            if (pll->m_wantedEvents & Netcon::NETCONPOLL_WRITE) {
                FD_SET(fd, &wd);
                nfds = MAX(nfds, fd + 1);
            }
        }

        if (nfds == 0) {
            // This should never happen in a server as we should at least
            // always monitor the main listening server socket. For a
            // client, it's up to client code to avoid or process this
            // condition.

            // Just in case there would still be open fds in there
            // (with no r/w flags set). Should not be needed, but safer
            m->polldata.clear();
            LOGDEB1("Netcon::selectloop: no fds\n");
            return 0;
        }

        LOGDEB2("Netcon::selectloop: selecting, nfds = " << nfds << "\n");

        // Compute the next timeout according to what might need to be
        // done apart from waiting for data
        struct timeval tv;
        m->periodictimeout(&tv);
        // Wait for something to happen
        int ret = select(nfds, &rd, &wd, 0, &tv);
        LOGDEB2("Netcon::selectloop: nfds " << nfds <<
                " select returns " << ret << "\n");
        if (ret < 0) {
            LOGSYSERR("Netcon::selectloop", "select", "");
            return -1;
        }
        if (m->periodicmillis > 0 && m->maybecallperiodic() <= 0) {
            return 1;
        }

        // Timeout, do it again.
        if (ret == 0) {
            continue;
        }

        // Select returned > 0: at least one fd must be ready. Sweep the fd
        // table and act on the ready ones.
        // We don't start the fd sweep at 0, else some fds would be advantaged.
        // Note that we do an fd sweep, not a map sweep. This is
        // inefficient because the fd array may be very sparse. Otoh, the
        // map may change between 2 sweeps, so that we'd have to be smart
        // with the iterator. As the cost per unused fd is low (just 2 bit
        // flag tests), we keep it like this for now
        if (m->placetostart >= nfds) {
            m->placetostart = 0;
        }
        int i, fd;
        int activefds = 0;
        for (i = 0, fd = m->placetostart; i < nfds; i++, fd++) {
            if (fd >= nfds) {
                fd = 0;
            }

            int canread = FD_ISSET(fd, &rd);
            int canwrite = FD_ISSET(fd, &wd);
            bool none = !canread && !canwrite;
            LOGDEB2("Netcon::selectloop: fd " << fd << " "  << 
                    (none ? "blocked" : "can") << " "  << 
                    (canread ? "read" : "") << " "  << 
                    (canwrite ? "write" : "") << "\n");
            if (none) {
                continue;
            }

            auto it = m->polldata.find(fd);
            if (it == m->polldata.end()) {
                // This should never happen, because we only set our
                // own fds in the mask !
                LOGERR("Netcon::selectloop: fd "  << fd << " not found\n");
                continue;
            }
            activefds++;
            // Next start will be one beyond last serviced (modulo nfds)
            m->placetostart = fd + 1;

            NetconP& pll = it->second;
            if (canread && pll->cando(Netcon::NETCONPOLL_READ) <= 0) {
                pll->m_wantedEvents &= ~Netcon::NETCONPOLL_READ;
            }
            if (canwrite && pll->cando(Netcon::NETCONPOLL_WRITE) <= 0) {
                pll->m_wantedEvents &= ~Netcon::NETCONPOLL_WRITE;
            }
            if (!(pll->m_wantedEvents &
                  (Netcon::NETCONPOLL_WRITE | Netcon::NETCONPOLL_READ))) {
                LOGDEB0("Netcon::selectloop: fd " << it->first << " has 0x"
                        << it->second->m_wantedEvents << " mask, erasing\n");
                m->polldata.erase(it);
            }
        } // fd sweep

        if (ret > 0 && activefds != ret) {
            LOGERR("Select returned " << ret << " not equal to " <<
                   activefds << " active fd found\n");
            return -1;
        }
    } // forever loop
    LOGERR("SelectLoop::doLoop: got out of loop !\n");
    return -1;
}

#else // -> Using kqueue: use select()

int SelectLoop::doLoop()
{
    for (;;) {
        if (m->selectloopDoReturn) {
            m->selectloopDoReturn = false;
            LOGDEB("Netcon::selectloop: returning on request\n");
            return m->selectloopReturnValue;
        }

        // Check that we do have something to wait for.
        int nfds = 0;
        for (auto& entry : m->polldata) {
            NetconP& pll = entry.second;
            if (pll->m_wantedEvents & Netcon::NETCONPOLL_READ) {
                nfds++;
            } else if (pll->m_wantedEvents & Netcon::NETCONPOLL_WRITE) {
                nfds++;
            }
        }
        if (nfds == 0) {
            // This should never happen in a server as we should at least
            // always monitor the main listening server socket. For a
            // client, it's up to client code to avoid or process this
            // condition.

            // Just in case there would still be open fds in there
            // (with no r/w flags set). Should not be needed, but safer
            m->polldata.clear();
            LOGDEB1("Netcon::selectloop: no fds\n");
            return 0;
        }

        // Compute the next timeout according to what might need to be
        // done apart from waiting for data
        struct timespec ts;
        m->periodictimeout(&ts);
        // Wait for something to happen
        vector<struct kevent> events;
        events.resize(nfds);
        LOGDEB1("Netcon::selectloop: kevent(), nfds = " << nfds << "\n");
        int ret = kevent(m->kq, 0, 0, &events[0], events.size(), &ts);
        LOGDEB1("Netcon::selectloop: nfds " << nfds <<
                " kevent returns " << ret << "\n");
        if (ret < 0) {
            LOGSYSERR("Netcon::selectloop", "kevent", "");
            return -1;
        }
        if (m->periodicmillis > 0 && m->maybecallperiodic() <= 0) {
            return 1;
        }
        if (ret == 0) {
            // Timeout, do it again.
            continue;
        }
 
        for (int i = 0; i < ret; i++) {
            struct kevent& ev = events[i];
            if (ev.flags & EV_ERROR) {
                LOGSYSERR("Netcon::selectLoop", "kevent", "");
                LOGERR("Netcon::selectLoop: event error: " <<
                       strerror(ev.data));
                return -1;
            }
            int canread = ev.filter == EVFILT_READ;
            int canwrite = ev.filter == EVFILT_WRITE; 
            bool none = !canread && !canwrite;
            LOGDEB1("Netcon::selectloop: fd " << int(ev.ident) << " "  << 
                    (none ? "blocked" : "can") << " "  << 
                    (canread ? "read" : "") << " "  << 
                    (canwrite ? "write" : "") << "\n");
            if (none) {
                LOGERR("Kevent returned unknown filter " << ev.filter <polldata.find(int(ev.ident));
            if (it == m->polldata.end()) {
                LOGERR("Netcon::selectloop: fd " << int(ev.ident) <<
                       " not found\n");
                continue;
            }
            NetconP& pll = it->second;
            if (canread && pll->cando(Netcon::NETCONPOLL_READ) <= 0) {
                pll->setselevents(pll->getselevents() &
                                  ~Netcon::NETCONPOLL_READ);
            }
            if (canwrite && pll->cando(Netcon::NETCONPOLL_WRITE) <= 0) {
                pll->setselevents(pll->getselevents() &
                                  ~Netcon::NETCONPOLL_WRITE);
            }
            if (!(pll->getselevents() &
                  (Netcon::NETCONPOLL_WRITE | Netcon::NETCONPOLL_READ))) {
                LOGDEB0("Netcon::selectloop: fd " << it->first << " has 0x"
                        << it->second->getselevents() << " mask, erasing\n");
                m->polldata.erase(it);
            }
        } // fd sweep

    } // forever loop
    LOGERR("SelectLoop::doLoop: got out of loop !\n");
    return -1;
}

#endif // kqueue version

int SelectLoop::Internal::setselevents(int fd, int events)
{
#ifdef HAVE_KQUEUE
    auto it = polldata.find(fd);
    if (it == polldata.end()) {
        return -1;
    }
    return setselevents(it->second, events);
#endif
    return 0;
}

int SelectLoop::Internal::setselevents(NetconP& con, int events)
{
#ifdef HAVE_KQUEUE
    struct kevent event;
    if (events & Netcon::NETCONPOLL_READ) {
        EV_SET(&event, con->m_fd, EVFILT_READ, EV_ADD, 0, 0, 0);
        if(kevent(kq, &event, 1, 0, 0, 0) < 0) {
            LOGSYSERR("SelectLoop::addselcon", "kevent", "");
            return -1;
        }
    } else {
        EV_SET(&event, con->m_fd, EVFILT_READ, EV_DELETE, 0, 0, 0);
        kevent(kq, &event, 1, 0, 0, 0);
    }
    if (events & Netcon::NETCONPOLL_WRITE) {
        EV_SET(&event, con->m_fd, EVFILT_WRITE, EV_ADD, 0, 0, 0);
        if(kevent(kq, &event, 1, 0, 0, 0) < 0) {
            LOGSYSERR("SelectLoop::addselcon", "kevent", "");
            return -1;
        }
    } else {
        EV_SET(&event, con->m_fd, EVFILT_WRITE, EV_DELETE, 0, 0, 0);
        kevent(kq, &event, 1, 0, 0, 0);
    }
#endif
    return 0;
}

// Add a connection to the monitored set. This can be used to change
// the event flags too (won't add duplicates)
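// Hypothetical usage sketch (connection setup elided): monitor one
// connection for readability and run the event loop:
//
//     SelectLoop myloop;
//     NetconP con = ...;  // some established connection
//     myloop.addselcon(con, Netcon::NETCONPOLL_READ);
//     myloop.doLoop();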
int SelectLoop::addselcon(NetconP con, int events)
{
    if (!con) {
        return -1;
    }
    LOGDEB1("Netcon::addselcon: fd " << con->m_fd << "\n");
    con->set_nonblock(1);
    con->m_wantedEvents = events;
    m->polldata[con->m_fd] = con;
    con->setloop(this);
    return m->setselevents(con, events);
}

// Remove a connection from the monitored set.
int SelectLoop::remselcon(NetconP con)
{
    if (!con) {
        return -1;
    }
    LOGDEB1("Netcon::remselcon: fd " << con->m_fd << "\n");
    m->setselevents(con, 0);
    auto it = m->polldata.find(con->m_fd);
    if (it == m->polldata.end()) {
        LOGDEB1("Netcon::remselcon: con not found for fd " << 
                con->m_fd << "\n");
        return -1;
    }
    con->setloop(0);
    m->polldata.erase(it);
    return 0;
}

//////////////////////////////////////////////////////////
// Base class (Netcon) methods
Netcon::~Netcon()
{
    closeconn();
    if (m_peer) {
        free(m_peer);
        m_peer = 0;
    }
}

void Netcon::closeconn()
{
    if (m_ownfd && m_fd >= 0) {
        close(m_fd);
    }
    m_fd = -1;
    m_ownfd = true;
}

char *Netcon::sterror()
{
    return strerror(errno);
}

void Netcon::setpeer(const char *hostname)
{
    if (m_peer) {
        free(m_peer);
    }
    m_peer = strdup(hostname);
}

int Netcon::settcpnodelay(int on)
{
    LOGDEB2("Netcon::settcpnodelay\n");
    if (m_fd < 0) {
        LOGERR("Netcon::settcpnodelay: connection not opened\n");
        return -1;
    }
    char *cp = on ? (char *)&one : (char *)&zero;
    if (setsockopt(m_fd, IPPROTO_TCP, TCP_NODELAY, cp, sizeof(one)) < 0) {
        LOGSYSERR("NetconCli::settcpnodelay", "setsockopt", "TCP_NODELAY");
        return -1;
    }
    return 0;
}


// Set/reset non-blocking flag on fd
int Netcon::set_nonblock(int onoff)
{
    int  flags = fcntl(m_fd, F_GETFL, 0);
    if (flags != -1)   {
        int newflags = onoff ? flags | O_NONBLOCK : flags & ~O_NONBLOCK;
        if (newflags != flags)
            if (fcntl(m_fd, F_SETFL, newflags) < 0) {
                return -1;
            }
    }
    return flags;
}

int Netcon::setselevents(int events)
{
    m_wantedEvents = events;
    if (m_loop) {
        m_loop->m->setselevents(m_fd, events);
    }
    return m_wantedEvents;
}
        
/////////////////////////////////////////////////////////////////////
// Data socket (NetconData) methods

NetconData::NetconData(bool cancellable)
    : m_buf(0), m_bufbase(0), m_bufbytes(0), m_bufsize(0), m_wkfds{-1,-1}
{
    if (cancellable) {
        if (pipe(m_wkfds) < 0) {
            LOGSYSERR("NetconData::NetconData", "pipe", "");
            m_wkfds[0] = m_wkfds[1] = -1;
        }
        LOGDEB2("NetconData:: m_wkfds[0] " << m_wkfds[0] << " m_wkfds[1] " <<
               m_wkfds[1] << endl);
        for (int i = 0; i < 2; i++) {
            int flags = fcntl(m_wkfds[i], F_GETFL, 0);
            fcntl(m_wkfds[i], F_SETFL, flags | O_NONBLOCK);
        }
    }
}

NetconData::~NetconData()
{
    freeZ(m_buf);
    m_bufbase = 0;
    m_bufbytes = m_bufsize = 0;
    for (int i = 0; i < 2; i++) {
        if (m_wkfds[i] >= 0) {
            close(m_wkfds[i]);
        }
    }
}

int NetconData::send(const char *buf, int cnt, int expedited)
{
    LOGDEB2("NetconData::send: fd " << m_fd << " cnt " << cnt <<
            " expe " << expedited << "\n");
    int flag = 0;
    if (m_fd < 0) {
        LOGERR("NetconData::send: connection not opened\n");
        return -1;
    }
    if (expedited) {
        LOGDEB2("NetconData::send: expedited data, count " <= 0) {
        LOGDEB2("NetconData::cancelReceive: writing to " << m_wkfds[1] << endl);
        // We can't do a thing about the ::write return value, the
        // following nonsense is for cancelling warnings
        int ret = ::write(m_wkfds[1], "!", 1);
        ret = ret;
    }
}

// Receive at most cnt bytes (maybe less)
int NetconData::receive(char *buf, int cnt, int timeo)
{
    LOGDEB2("NetconData::receive: cnt " << cnt << " timeo "  << timeo <<
            " m_buf 0x" << m_buf << " m_bufbytes " << m_bufbytes << "\n");

    if (m_fd < 0) {
        LOGERR("NetconData::receive: connection not opened\n");
        return -1;
    }

    int fromibuf = 0;
    // Get whatever might have been left in the buffer by a previous
    // getline, except if we're called to fill the buffer of course
    if (m_buf && m_bufbytes > 0 && (buf < m_buf || buf > m_buf + m_bufsize)) {
        fromibuf = MIN(m_bufbytes, cnt);
        memcpy(buf, m_bufbase, fromibuf);
        m_bufbytes -= fromibuf;
        m_bufbase += fromibuf;
        cnt -= fromibuf;
        LOGDEB2("NetconData::receive: got " << fromibuf << " from mbuf\n");
        if (cnt <= 0) {
            return fromibuf;
        }
    }

    if (timeo > 0) {
        struct timeval tv;
        tv.tv_sec = timeo;
        tv.tv_usec =  0;
        fd_set rd;
        FD_ZERO(&rd);
        FD_SET(m_fd, &rd);
        bool cancellable = (m_wkfds[0] >= 0);
        if (cancellable) {
            LOGDEB2("NetconData::receive: cancel fd " << m_wkfds[0] << endl);
            FD_SET(m_wkfds[0], &rd);
        }
        int nfds = MAX(m_fd, m_wkfds[0]) + 1;

        int ret = select(nfds, &rd, 0, 0, &tv);
        LOGDEB2("NetconData::receive: select returned " << ret << endl);
        
        if (cancellable && FD_ISSET(m_wkfds[0], &rd)) {
            char b[100];
            // We can't do a thing about the return value, the
            // following nonsense is for cancelling warnings
            int ret = ::read(m_wkfds[0], b, 100);
            ret = ret;
            return Cancelled;
        }

        if (!FD_ISSET(m_fd, &rd)) {
            m_didtimo = 1;
            return TimeoutOrError;
        }

        if (ret < 0) {
            LOGSYSERR("NetconData::receive", "select", "");
            m_didtimo = 0;
            return TimeoutOrError;
        }
    }

    m_didtimo = 0;
    if ((cnt = read(m_fd, buf + fromibuf, cnt)) < 0) {
        LOGSYSERR("NetconData::receive", "read", m_fd);
        return -1;
    }
    LOGDEB2("NetconData::receive: normal return, fromibuf " << fromibuf <<
            " cnt "  << cnt << "\n");
    return fromibuf + cnt;
}

// Receive exactly cnt bytes (except for timeout)
int NetconData::doreceive(char *buf, int cnt, int timeo)
{
    int got, cur;
    LOGDEB2("Netcon::doreceive: cnt " << cnt << ", timeo " << timeo << "\n");
    cur = 0;
    while (cnt > cur) {
        got = receive(buf, cnt - cur, timeo);
        LOGDEB2("Netcon::doreceive: got " << got << "\n");
        if (got < 0) {
            return got;
        }
        if (got == 0) {
            return cur;
        }
        cur += got;
        buf += got;
    }
    return cur;
}

// Read data until cnt-1 characters are read or a newline is found. Add
// null char at end of buffer and return.
// As we don't know where the newline will be and it would be inefficient to
// read a character at a time, we use a buffer
// Unlike fgets, we return an integer status:
// >0: number of characters returned, not including the final 0
//  0: EOF reached, no chars transferred
// -1: error
static const int defbufsize = 200;
int NetconData::getline(char *buf, int cnt, int timeo)
{
    LOGDEB2("NetconData::getline: cnt " << cnt << ", timeo " << 
            timeo << "\n");
    if (m_buf == 0) {
        if ((m_buf = (char *)malloc(defbufsize)) == 0) {
            LOGSYSERR("NetconData::getline: Out of mem", "malloc", "");
            return -1;
        }
        m_bufsize = defbufsize;
        m_bufbase = m_buf;
        m_bufbytes = 0;
    }

    char *cp = buf;
    for (;;) {
        // Transfer from buffer. Have to take a lot of care to keep counts and
        // pointers consistent in all end cases
        int maxtransf = MIN(m_bufbytes, cnt - 1);
        int nn = maxtransf;
        LOGDEB2("Before loop, bufbytes " << m_bufbytes << ", maxtransf " <<
                maxtransf << ", nn: " << nn << "\n");
        for (nn = maxtransf; nn > 0;) {
            // This is not pretty but we want nn to be decremented for
            // each byte copied (even newline), and not become -1 if
            // we go to the end. Better ways welcome!
            nn--;
            if ((*cp++ = *m_bufbase++) == '\n') {
                break;
            }
        }
        // Update counts
        maxtransf -= nn; // Actual count transferred
        m_bufbytes -= maxtransf;
        cnt -= maxtransf;
        LOGDEB2("After transfer: actual transf " << maxtransf << " cnt " << 
                cnt << ", m_bufbytes " << m_bufbytes << "\n");

        // Finished ?
        if (cnt <= 1 || (cp > buf && cp[-1] == '\n')) {
            *cp = 0;
            return cp - buf;
        }

        // Transfer from net
        m_bufbase = m_buf;
        m_bufbytes = receive(m_buf, m_bufsize, timeo);
        if (m_bufbytes == 0) {
            // EOF
            *cp = 0;
            return cp - buf;
        }
        if (m_bufbytes < 0) {
            m_bufbytes = 0;
            *cp = 0;
            return -1;
        }
    }
}
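
// Illustrative sketch (not part of the original sources): reading protocol
// lines from a connected NetconCli with a per-call timeout. The host and
// port values are hypothetical; getline() and timedout() are the real calls.
#if 0
static void getline_sketch()
{
    NetconCli con;
    if (con.openconn("localhost", 1234u, 5) < 0)
        return;
    char line[1024];
    for (;;) {
        int n = con.getline(line, sizeof(line), 10);
        if (n == 0)
            break;                      // EOF
        if (n < 0) {
            if (con.timedout())
                continue;               // just a timeout, retry
            break;                      // real error
        }
        fputs(line, stdout);
    }
}
#endif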

// Called when selectloop detects that data can be read or written on
// the connection. The user callback would normally have been set
// up. If it is, call it and return. Else, perform housecleaning: read
// and discard.
int NetconData::cando(Netcon::Event reason)
{
    LOGDEB2("NetconData::cando\n");
    if (m_user) {
        return m_user->data(this, reason);
    }

    // No user callback. Clean up by ourselves
    if (reason & NETCONPOLL_READ) {
#define BS 200
        char buf[BS];
        int n;
        if ((n = receive(buf, BS)) < 0) {
            LOGSYSERR("NetconData::cando", "receive", "");
            return -1;
        }
        if (n == 0) {
            // EOF
            return 0;
        }
    }
    m_wantedEvents &= ~NETCONPOLL_WRITE;
    return 1;
}

///////////////////////////////////////////////////////////////////////
// Methods for a client connection (NetconCli)
int NetconCli::openconn(const char *host, unsigned int port, int timeo)
{
    int ret = -1;
    LOGDEB2("Netconcli::openconn: host " << host << ", port "  << port << "\n");

    closeconn();

    struct sockaddr *saddr;
    socklen_t addrsize;

    struct sockaddr_in ip_addr;
    struct sockaddr_un unix_addr;
    if (host[0] != '/') {
        memset(&ip_addr, 0, sizeof(ip_addr));
        ip_addr.sin_family = AF_INET;
        ip_addr.sin_port = htons(port);

        // Server name may be host name or IP address
        int addr;
        if ((addr = inet_addr(host)) != -1) {
            memcpy(&ip_addr.sin_addr, &addr, sizeof(addr));
        } else {
            struct hostent *hp;
            if ((hp = gethostbyname(host)) == 0) {
                LOGERR("NetconCli::openconn: gethostbyname(" << host << 
                       ") failed\n");
                return -1;
            }
            memcpy(&ip_addr.sin_addr, hp->h_addr, hp->h_length);
        }

        if ((m_fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
            LOGSYSERR("NetconCli::openconn", "socket", "");
            return -1;
        }
        addrsize = sizeof(ip_addr);
        saddr = (sockaddr*)&ip_addr;
    } else {
        memset(&unix_addr, 0, sizeof(unix_addr));
        unix_addr.sun_family = AF_UNIX;
        if (strlen(host) > UNIX_PATH_MAX - 1) {
            LOGERR("NetconCli::openconn: name too long: " << host << "\n");
            return -1;
        }
        strcpy(unix_addr.sun_path, host);

        if ((m_fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
            LOGSYSERR("NetconCli::openconn", "socket", "");
            return -1;
        }
        addrsize = sizeof(unix_addr);
        saddr = (sockaddr*)&unix_addr;
    }
    if (timeo > 0) {
        set_nonblock(1);
    }

    if (connect(m_fd, saddr, addrsize) < 0) {
        if (timeo > 0) {
            if (errno != EINPROGRESS) {
                goto out;
            }
            if (select1(m_fd, timeo, 1) == 1) {
                goto connectok;
            }
        }
        if (m_silentconnectfailure == 0) {
            LOGSYSERR("NetconCli", "connect", "");
        }
        goto out;
    }
connectok:
    if (timeo > 0) {
        set_nonblock(0);
    }

    LOGDEB2("NetconCli::connect: setting keepalive\n");
    if (setsockopt(m_fd, SOL_SOCKET, SO_KEEPALIVE,
                   (char *)&one, sizeof(one)) < 0) {
        LOGSYSERR("NetconCli::connect", "setsockopt", "KEEPALIVE");
    }
    setpeer(host);
    LOGDEB2("NetconCli::openconn: connection opened ok\n");
    ret = 0;
out:
    if (ret < 0) {
        closeconn();
    }
    return ret;
}

// Same as previous, but get the port number from services
int NetconCli::openconn(const char *host, const char *serv, int timeo)
{
    LOGDEB2("Netconcli::openconn: host " << host << ", serv " << serv << "\n");

    if (host[0]  != '/') {
        struct servent *sp;
        if ((sp = getservbyname(serv, "tcp")) == 0) {
            LOGERR("NetconCli::openconn: getservbyname failed for " << serv 
                   << "\n");
            return -1;
        }
        // Callee expects the port number in host byte order
        return openconn(host, ntohs(sp->s_port), timeo);
    } else {
        return openconn(host, (unsigned int)0, timeo);
    }
}
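
// Illustrative sketch (not part of the original sources): opening a client
// connection by service name and doing a simple request/response exchange.
// The service name and the protocol bytes are hypothetical.
#if 0
static int request_sketch(std::string& answer)
{
    NetconCli con;
    // The service name is looked up with getservbyname(); an AF_UNIX
    // socket path (starting with '/') would bypass the lookup.
    if (con.openconn("localhost", "myservice", 10) < 0)
        return -1;
    const char *req = "STATUS\n";
    if (con.send(req, strlen(req)) < 0)
        return -1;
    char buf[256];
    int n = con.receive(buf, sizeof(buf), 10);  // at most sizeof(buf) bytes
    if (n < 0)
        return -1;
    answer.assign(buf, n);
    return 0;
}
#endif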


int NetconCli::setconn(int fd)
{
    LOGDEB2("Netconcli::setconn: fd " << fd << "\n");
    closeconn();

    m_fd = fd;
    m_ownfd = false;
    setpeer("");

    return 0;
}

///////////////////////////////////////////////////////////////////////
// Methods for the main (listening) server connection

NetconServLis::~NetconServLis()
{
#ifdef NETCON_ACCESSCONTROL
    freeZ(okaddrs.intarray);
    freeZ(okmasks.intarray);
#endif
}

#if 0
// code for dumping a struct servent
static void dump_servent(struct servent *servp)
{
    fprintf(stderr, "Official name %s\n", servp->s_name);
    for (char **cpp = servp->s_aliases; *cpp; cpp++) {
        fprintf(stderr, "Nickname %s\n", *cpp);
    }
    fprintf(stderr, "Port %d\n", (int)ntohs((short)servp->s_port));
    fprintf(stderr, "Proto %s\n", servp->s_proto);
}
#endif

// Set up service.
int NetconServLis::openservice(const char *serv, int backlog)
{
    int port;
    struct servent  *servp;
    if (!serv) {
        LOGERR("NetconServLis::openservice: null serv??\n");
        return -1;
    }
    LOGDEB1("NetconServLis::openservice: serv " << serv << "\n");
#ifdef NETCON_ACCESSCONTROL
    if (initperms(serv) < 0) {
        return -1;
    }
#endif

    m_serv = serv;
    if (serv[0] != '/') {
        if ((servp = getservbyname(serv, "tcp")) == 0) {
            LOGERR("NetconServLis::openservice: getservbyname failed for " << 
                   serv << "\n");
            return -1;
        }
        port = (int)ntohs((short)servp->s_port);
        return openservice(port, backlog);
    } else {
        if (strlen(serv) > UNIX_PATH_MAX - 1) {
            LOGERR("NetconServLis::openservice: too long for AF_UNIX: " << 
                   serv << "\n");
            return -1;
        }
        int ret = -1;
        struct sockaddr_un  addr;
        if ((m_fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
            LOGSYSERR("NetconServLis", "socket", "");
            return -1;
        }
        memset(&addr, 0, sizeof(addr));
        addr.sun_family = AF_UNIX;
        strcpy(addr.sun_path, serv);

        if (::bind(m_fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
            LOGSYSERR("NetconServLis", "bind", "");
            goto out;
        }
        if (listen(m_fd, backlog) < 0) {
            LOGSYSERR("NetconServLis", "listen", "");
            goto out;
        }

        LOGDEB1("NetconServLis::openservice: service opened ok\n");
        ret = 0;
out:
        if (ret < 0 && m_fd >= 0) {
            close(m_fd);
            m_fd = -1;
        }
        return ret;
    }
}

// Port is a natural host integer value
int NetconServLis::openservice(int port, int backlog)
{
    LOGDEB1("NetconServLis::openservice: port " << port << "\n");
#ifdef NETCON_ACCESSCONTROL
    if (initperms(port) < 0) {
        return -1;
    }
#endif
    int ret = -1;
    struct sockaddr_in  ipaddr;
    if ((m_fd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
        LOGSYSERR("NetconServLis", "socket", "");
        return -1;
    }
    (void) setsockopt(m_fd, SOL_SOCKET, SO_REUSEADDR, (char *)&one, sizeof(one));
#ifdef SO_REUSEPORT
    (void) setsockopt(m_fd, SOL_SOCKET, SO_REUSEPORT, (char *)&one, sizeof(one));
#endif /*SO_REUSEPORT*/
    memset(&ipaddr, 0, sizeof(ipaddr));
    ipaddr.sin_family = AF_INET;
    ipaddr.sin_addr.s_addr = htonl(INADDR_ANY);
    ipaddr.sin_port = htons((short)port);
    if (::bind(m_fd, (struct sockaddr *)&ipaddr, sizeof(ipaddr)) < 0) {
        LOGSYSERR("NetconServLis", "bind", "");
        goto out;
    }
    if (listen(m_fd, backlog) < 0) {
        LOGSYSERR("NetconServLis", "listen", "");
        goto out;
    }

    LOGDEB1("NetconServLis::openservice: service opened ok\n");
    ret = 0;
out:
    if (ret < 0 && m_fd >= 0) {
        close(m_fd);
        m_fd = -1;
    }
    return ret;
}

#ifdef NETCON_ACCESSCONTROL
int NetconServLis::initperms(int port)
{
    if (permsinit) {
        return 0;
    }

    char sport[30];
    sprintf(sport, "%d", port);
    return initperms(sport);
}

// Get authorized address lists from parameter file. This is disabled for now
int NetconServLis::initperms(const char *serv)
{
    if (permsinit) {
        return 0;
    }

    if (serv == 0 || *serv == 0 || strlen(serv) > 80) {
        LOGERR("NetconServLis::initperms: bad service name " << serv << "\n");
        return -1;
    }

    char keyname[100];
    sprintf(keyname, "%s_okaddrs", serv);
    if (genparams->getparam(keyname, &okaddrs, 1) < 0) {
        serv = "default";
        sprintf(keyname, "%s_okaddrs", serv);
        if (genparams->getparam(keyname, &okaddrs) < 0) {
            LOGERR("NetconServLis::initperms: no okaddrs found in config file\n");
            return -1;
        }
    }
    sprintf(keyname, "%s_okmasks", serv);
    if (genparams->getparam(keyname, &okmasks)) {
        LOGERR("NetconServLis::initperms: okmasks not found\n");
        return -1;
    }
    if (okaddrs.len == 0 || okmasks.len == 0) {
        LOGERR("NetconServLis::initperms: len 0 for okmasks or okaddrs\n");
        return -1;
    }

    permsinit = 1;
    return 0;
}
#endif /* NETCON_ACCESSCONTROL */

// Sample cando routine for the server master connection: delete the newly
// accepted connection. What else?
// This is to be overridden by a derived class method in an application
// using the select loop.
int  NetconServLis::cando(Netcon::Event reason)
{
    delete accept();
    return 1;
}

NetconServCon *
NetconServLis::accept(int timeo)
{
    LOGDEB("NetconServLis::accept\n");

    if (timeo > 0) {
        int ret = select1(m_fd, timeo);
        if (ret == 0) {
            LOGDEB2("NetconServLis::accept timed out\n");
            m_didtimo = 1;
            return 0;
        }
        if (ret < 0) {
            LOGSYSERR("NetconServLis::accept", "select", "");
            return 0;
        }
    }
    m_didtimo = 0;

    NetconServCon *con = 0;
    int newfd = -1;
    struct sockaddr_in who;
    struct sockaddr_un uwho;
    if (m_serv.empty() || m_serv[0] != '/') {
        SOCKLEN_T clilen = (SOCKLEN_T)sizeof(who);
        if ((newfd = ::accept(m_fd, (struct sockaddr *)&who, &clilen)) < 0) {
            LOGSYSERR("NetconServCon::accept", "accept", "");
            goto out;
        }
#ifdef NETCON_ACCESSCONTROL
        if (checkperms(&who, clilen) < 0) {
            goto out;
        }
#endif
    } else {
        SOCKLEN_T clilen = (SOCKLEN_T)sizeof(uwho);
        if ((newfd = ::accept(m_fd, (struct sockaddr *)&uwho, &clilen)) < 0) {
            LOGSYSERR("NetconServCon::accept", "accept", "");
            goto out;
        }
    }

    con = new NetconServCon(newfd);
    if (con == 0) {
        LOGERR("NetconServLis::accept: new NetconServCon failed\n");
        goto out;
    }

    // Retrieve peer's host name. Errors are non fatal
    if (m_serv.empty() || m_serv[0] != '/') {
        struct hostent *hp;
        if ((hp = gethostbyaddr((char *) & (who.sin_addr),
                                sizeof(struct in_addr), AF_INET)) == 0) {
            LOGERR("NetconServLis::accept: gethostbyaddr failed for addr 0x" <<
                   who.sin_addr.s_addr << "\n");
            con->setpeer(inet_ntoa(who.sin_addr));
        } else {
            con->setpeer(hp->h_name);
        }
    } else {
        con->setpeer(m_serv.c_str());
    }

    LOGDEB2("NetconServLis::accept: setting keepalive\n");
    if (setsockopt(newfd, SOL_SOCKET, SO_KEEPALIVE,
                   (char *)&one, sizeof(one)) < 0) {
        LOGSYSERR("NetconServLis::accept", "setsockopt", "KEEPALIVE");
    }
    LOGDEB2("NetconServLis::accept: got connect from " << con->getpeer() << 
            "\n");

out:
    if (con == 0 && newfd >= 0) {
        close(newfd);
    }
    return con;
}
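
// Illustrative sketch (not part of the original sources): a minimal blocking
// server built on NetconServLis. The port number and the one-line answer are
// hypothetical; openservice() and accept() are the real entry points.
#if 0
static void server_sketch()
{
    NetconServLis lis;
    if (lis.openservice(1234, 5) < 0)
        return;
    for (;;) {
        NetconServCon *con = lis.accept();
        if (con == 0)
            continue;                   // accept failed or was refused
        char line[1024];
        if (con->getline(line, sizeof(line), 10) > 0) {
            const char *answer = "OK\n";
            con->send(answer, strlen(answer));
        }
        delete con;                     // closes the accepted descriptor
    }
}
#endif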

#ifdef NETCON_ACCESSCONTROL
int
NetconServLis::checkperms(void *cl, int)
{
    // If okmasks and addrs were not initialized, the default is allow to all
    if (okmasks.len <= 0 || okaddrs.len <= 0) {
        return 0;
    }

    struct sockaddr *addr = (struct sockaddr *)cl;
    unsigned long ip_addr;

    if (addr->sa_family != AF_INET) {
        LOGERR("NetconServLis::checkperms: connection from non-INET addr !\n");
        return -1;
    }

    ip_addr = ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr);
    LOGDEB2("checkperms: ip_addr: 0x" << ip_addr << "\n");
    for (int i = 0; i < okaddrs.len; i++) {
        unsigned int mask;
        if (i < okmasks.len) {
            mask = okmasks.intarray[i];
        } else {
            mask = okmasks.intarray[okmasks.len - 1];
        }
        LOGDEB2("checkperms: trying okaddr 0x" << okaddrs.intarray[i] <<
                ", mask 0x" << mask << "\n");
        if ((ip_addr & mask) == (okaddrs.intarray[i] & mask)) {
            return (0);
        }
    }
    LOGERR("NetconServLis::checkperm: connection from bad address 0x" <<
           ip_addr << "\n");
    return -1;
}
#endif /* NETCON_ACCESSCONTROL */
recoll-1.26.3/utils/md5ut.h0000644000175000017500000000176013533651561012367 00000000000000/* Copyright (C) 2014 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _MD5UT_H_
#define _MD5UT_H_

#include "md5.h"

/** md5 utility: compute file md5 */

extern bool MD5File(const std::string& filename, std::string& digest,
                    std::string *reason);
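
/* Illustrative sketch (not part of the interface): the expected call
   pattern. The path is hypothetical; the exact digest encoding is defined
   by the implementation in md5ut.cpp. */
#if 0
static bool file_md5_sketch(const std::string& path, std::string& digest)
{
    std::string reason;
    if (!MD5File(path, digest, &reason)) {
        // reason carries a short explanation of the failure
        return false;
    }
    return true;
}
#endif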

#endif /* _MD5UT_H_ */
recoll-1.26.3/utils/execmd.cpp0000644000175000017500000010376113533651561013135 00000000000000/* Copyright (C) 2004-2018 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/select.h>

#include <string>
#include <vector>
#include <map>
#include <stdexcept>
#ifdef HAVE_SPAWN_H
#ifndef __USE_GNU
#define __USE_GNU
#define undef__USE_GNU
#endif
#include <spawn.h>
#ifdef undef__USE_GNU
#undef __USE_GNU
#endif
#endif

#include "execmd.h"
#include "netcon.h"
#include "closefrom.h"
#include "smallut.h"
#ifdef MDU_INCLUDE_LOG
#include MDU_INCLUDE_LOG
#else
#include "log.h"
#endif

using namespace std;

extern char **environ;

class ExecCmd::Internal {
public:
    Internal() {
        sigemptyset(&m_blkcld);
    }

    static bool      o_useVfork;

    vector<string>   m_env;
    ExecCmdAdvise   *m_advise{0};
    ExecCmdProvide  *m_provide{0};
    bool             m_killRequest{false};
    int              m_timeoutMs{1000};
    int              m_killTimeoutMs{2000};
    int              m_rlimit_as_mbytes{0};
    string           m_stderrFile;
    // Pipe for data going to the command
    int              m_pipein[2]{-1,-1};
    std::shared_ptr<NetconCli> m_tocmd;
    // Pipe for data coming out
    int              m_pipeout[2]{-1,-1};
    std::shared_ptr<NetconCli> m_fromcmd;
    // Subprocess id
    pid_t            m_pid{-1};
    // Saved sigmask
    sigset_t         m_blkcld;

    // Reset internal state indicators. Any resources should have been
    // previously freed
    void reset() {
        m_killRequest = false;
        m_pipein[0] = m_pipein[1] = m_pipeout[0] = m_pipeout[1] = -1;
        m_pid = -1;
        sigemptyset(&m_blkcld);
    }
    // Child process code
    inline void dochild(const std::string& cmd, const char **argv,
                        const char **envv, bool has_input, bool has_output);
};
bool ExecCmd::Internal::o_useVfork{false};

ExecCmd::ExecCmd(int)
{
    m = new Internal();
    if (m) {
        m->reset();
    }
}
void ExecCmd::setAdvise(ExecCmdAdvise *adv)
{
    m->m_advise = adv;
}
void ExecCmd::setProvide(ExecCmdProvide *p)
{
    m->m_provide = p;
}
void ExecCmd::setTimeout(int mS)
{
    if (mS > 30) {
        m->m_timeoutMs = mS;
    }
}
void ExecCmd::setKillTimeout(int mS)
{
    m->m_killTimeoutMs = mS;
}
void ExecCmd::setStderr(const std::string& stderrFile)
{
    m->m_stderrFile = stderrFile;
}
pid_t ExecCmd::getChildPid()
{
    return m->m_pid;
}
void ExecCmd::setKill()
{
    m->m_killRequest = true;
}
void ExecCmd::zapChild()
{
    setKill();
    (void)wait();
}

bool ExecCmd::requestChildExit()
{
    if (m->m_pid > 0) {
        if (kill(m->m_pid, SIGTERM) == 0) {
            return true;
        }
    }
    return false;
}

/* From FreeBSD's which command */
static bool exec_is_there(const char *candidate)
{
    struct stat fin;

    /* XXX work around access(2) false positives for superuser */
    if (access(candidate, X_OK) == 0 &&
            stat(candidate, &fin) == 0 &&
            S_ISREG(fin.st_mode) &&
            (getuid() != 0 ||
             (fin.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) != 0)) {
        return true;
    }
    return false;
}

bool ExecCmd::which(const string& cmd, string& exepath, const char* path)
{
    if (cmd.empty()) {
        return false;
    }
    if (cmd[0] == '/') {
        if (exec_is_there(cmd.c_str())) {
            exepath = cmd;
            return true;
        } else {
            return false;
        }
    }

    const char *pp;
    if (path) {
        pp = path;
    } else {
        pp = getenv("PATH");
    }
    if (pp == 0) {
        return false;
    }

    vector<string> pels;
    stringToTokens(pp, pels, ":");
    for (vector<string>::iterator it = pels.begin(); it != pels.end(); it++) {
        if (it->empty()) {
            *it = ".";
        }
        string candidate = (it->empty() ? string(".") : *it) + "/" + cmd;
        if (exec_is_there(candidate.c_str())) {
            exepath = candidate;
            return true;
        }
    }
    return false;
}
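
// Illustrative sketch (not part of the original sources): resolving a command
// name against $PATH before execution. "somefilter" is a hypothetical command.
#if 0
static bool which_sketch()
{
    std::string exepath;
    if (!ExecCmd::which("somefilter", exepath)) {
        // Not found in the default PATH; try an explicit one.
        if (!ExecCmd::which("somefilter", exepath, "/usr/local/bin:/usr/bin"))
            return false;
    }
    // exepath now holds an absolute path usable with execve()/startExec().
    return true;
}
#endif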

void ExecCmd::useVfork(bool on)
{
    // Just in case: there are competent people who believe that the
    // dynamic linker can sometimes deadlock if execve() is resolved
    // inside the vfork/exec window. Make sure it's done now. If "/" is
    // an executable file, we have a problem.
    const char *argv[] = {"/", 0};
    execve("/", (char *const *)argv, environ);
    Internal::o_useVfork  = on;
}

void ExecCmd::putenv(const string& ea)
{
    m->m_env.push_back(ea);
}

void  ExecCmd::putenv(const string& name, const string& value)
{
    string ea = name + "=" + value;
    putenv(ea);
}

static void msleep(int millis)
{
    struct timespec spec;
    spec.tv_sec = millis / 1000;
    spec.tv_nsec = (millis % 1000) * 1000000;
    nanosleep(&spec, 0);
}

/** A resource manager to ensure that execcmd cleans up if an exception is
 *  raised in the callback, or at different places on errors occurring
 *  during method executions */
class ExecCmdRsrc {
public:
    ExecCmdRsrc(ExecCmd::Internal *parent)
        : m_parent(parent), m_active(true) {
    }
    void inactivate() {
        m_active = false;
    }
    ~ExecCmdRsrc() {
        if (!m_active || !m_parent) {
            return;
        }
        LOGDEB1("~ExecCmdRsrc: working. mypid: " << getpid() << "\n");

        // Better to close the descs first in case the child is waiting in read
        if (m_parent->m_pipein[0] >= 0) {
            close(m_parent->m_pipein[0]);
        }
        if (m_parent->m_pipein[1] >= 0) {
            close(m_parent->m_pipein[1]);
        }
        if (m_parent->m_pipeout[0] >= 0) {
            close(m_parent->m_pipeout[0]);
        }
        if (m_parent->m_pipeout[1] >= 0) {
            close(m_parent->m_pipeout[1]);
        }

        // It's apparently possible for m_pid to be > 0 and getpgid to fail. In
        // this case, we have to conclude that the child process does
        // not exist. Not too sure what causes this, but the previous code
        // definitely tried to call killpg(-1,) from time to time.
        pid_t grp;
        if (m_parent->m_pid > 0 && (grp = getpgid(m_parent->m_pid)) > 0) {
            LOGDEB("ExecCmd: pid " << m_parent->m_pid << " killpg(" << grp <<
                   ", SIGTERM)\n");
            int ret = killpg(grp, SIGTERM);
            if (ret == 0) {
                int ms_slept{0};
                for (int i = 0; ; i++) {
                    int tosleep = i == 0 ? 5 : (i == 1 ? 100 : 1000);
                    msleep(tosleep);
                    ms_slept += tosleep;
                    int status;
                    (void)waitpid(m_parent->m_pid, &status, WNOHANG);
                    if (kill(m_parent->m_pid, 0) != 0) {
                        break;
                    }
                    // killtimeout == -1 -> never KILL
                    if (m_parent->m_killTimeoutMs >= 0 &&
                        ms_slept >= m_parent->m_killTimeoutMs) {
                        LOGDEB("ExecCmd: killpg(" << grp << ", SIGKILL)\n");
                        killpg(grp, SIGKILL);
                        (void)waitpid(m_parent->m_pid, &status, WNOHANG);
                        break;
                    }
                }
            } else {
                LOGERR("ExecCmd: error killing process group " << (grp) <<
                       ": " << errno << "\n");
            }
        }
        m_parent->m_tocmd.reset();
        m_parent->m_fromcmd.reset();
        pthread_sigmask(SIG_UNBLOCK, &m_parent->m_blkcld, 0);
        m_parent->reset();
    }
private:
    ExecCmd::Internal *m_parent{nullptr};
    bool    m_active{false};
};

ExecCmd::~ExecCmd()
{
    if (m) {
        ExecCmdRsrc r(m);
    }
    if (m) {
        delete m;
        m = nullptr;
    }
}

// In child process. Set up pipes and exec command.
// This must not return. _exit() on error.
// *** This can be called after a vfork, so no modification of the
//     process memory at all is allowed ***
// The LOGXX calls should not be there, but they occur only after "impossible"
// errors, which we would most definitely want to have a hint about.
//
// Note that any of the LOGXX calls could block on a mutex set in the
// father process, so only absolutely exceptional conditions
// should be logged, for debugging and post-mortem purposes.
// If one of the calls blocks, the problem manifests itself as 20 min
// (filter timeout) of looping on "ExecCmd::doexec: selectloop
// returned 1", because the father is waiting on the read descriptor
inline void ExecCmd::Internal::dochild(const string& cmd, const char **argv,
                                       const char **envv,
                                       bool has_input, bool has_output)
{
    // Start our own process group
    if (setpgid(0, 0)) {
        LOGINFO("ExecCmd::DOCHILD: setpgid(0, 0) failed: errno " << errno <<
                "\n");
    }

    // Restore SIGTERM to default. Really, signal handling should be
    // specified when creating the execmd, there might be other
    // signals to reset. Resetting SIGTERM helps Recoll get rid of its
    // filter children for now though. To be fixed one day...
    // Note that resetting to SIG_DFL is a portable use of
    // signal(). No need for sigaction() here.

    // There is supposedly a risk of problems if another thread was
    // calling a signal-affecting function when vfork was called. This
    // seems acceptable though as no self-respecting thread is going
    // to mess with the global process signal disposition.

    if (signal(SIGTERM, SIG_DFL) == SIG_ERR) {
        //LOGERR("ExecCmd::DOCHILD: signal() failed, errno " << errno << "\n");
    }
    sigset_t sset;
    sigfillset(&sset);
    pthread_sigmask(SIG_UNBLOCK, &sset, 0);
    sigprocmask(SIG_UNBLOCK, &sset, 0);

#ifdef HAVE_SETRLIMIT
#if defined RLIMIT_AS || defined RLIMIT_VMEM || defined RLIMIT_DATA
    if (m_rlimit_as_mbytes > 2000 && sizeof(rlim_t) < 8) {
        // Impossible limit, don't use it
        m_rlimit_as_mbytes = 0;
    }
    if (m_rlimit_as_mbytes > 0) {
        struct rlimit ram_limit = {
            static_cast<rlim_t>(m_rlimit_as_mbytes) * 1024 * 1024,
            RLIM_INFINITY
        };
        int resource;

        // RLIMIT_AS and RLIMIT_VMEM are usually synonyms when VMEM is
        // defined. RLIMIT_AS is Posix. Both don't really do what we
        // want, because they count e.g. shared lib mappings, which we
        // don't really care about.
        // RLIMIT_DATA only limits the data segment. Modern mallocs
        // use mmap and will not be bound. (Otoh if we only have this,
        // we're probably not modern).
        // So we're unsatisfied either way.
#ifdef RLIMIT_AS
        resource = RLIMIT_AS;
#elif defined RLIMIT_VMEM
        resource = RLIMIT_VMEM;
#else
        resource = RLIMIT_DATA;
#endif
        setrlimit(resource, &ram_limit);
    }
#endif
#endif // have_setrlimit

    if (has_input) {
        close(m_pipein[1]);
        if (m_pipein[0] != 0) {
            dup2(m_pipein[0], 0);
            close(m_pipein[0]);
        }
    }
    if (has_output) {
        close(m_pipeout[0]);
        if (m_pipeout[1] != 1) {
            if (dup2(m_pipeout[1], 1) < 0) {
                LOGERR("ExecCmd::DOCHILD: dup2() failed. errno " <<
                       errno << "\n");
            }
            if (close(m_pipeout[1]) < 0) {
                LOGERR("ExecCmd::DOCHILD: close() failed. errno " <<
                       errno << "\n");
            }
        }
    }
    // Do we need to redirect stderr ?
    if (!m_stderrFile.empty()) {
        int fd = open(m_stderrFile.c_str(), O_WRONLY | O_CREAT
#ifdef O_APPEND
                      | O_APPEND
#endif
                      , 0600);
        if (fd < 0) {
            close(2);
        } else {
            if (fd != 2) {
                dup2(fd, 2);
            }
            lseek(2, 0, 2);
        }
    }

    // Close all descriptors except 0,1,2
    libclf_closefrom(3);

    execve(cmd.c_str(), (char *const*)argv, (char *const*)envv);
    // Hu ho. This should never have happened as we checked the
    // existence of the executable before calling dochild... Until we
    // did this check, this was the chief cause of LOG mutex deadlock
    LOGERR("ExecCmd::DOCHILD: execve(" << cmd << ") failed. errno " <<
           errno << "\n");
    _exit(127);
}

void ExecCmd::setrlimit_as(int mbytes)
{
    m->m_rlimit_as_mbytes = mbytes;
}

int ExecCmd::startExec(const string& cmd, const vector& args,
                       bool has_input, bool has_output)
{
    {
        // Debug and logging
        string command = cmd + " ";
        for (vector<string>::const_iterator it = args.begin();
                it != args.end(); it++) {
            command += "{" + *it + "} ";
        }
        LOGDEB("ExecCmd::startExec: (" << has_input << "|" << has_output <<
               ") " << command << "\n");
    }

    // The resource manager ensures resources are freed if we return early
    ExecCmdRsrc e(m);

    if (has_input && pipe(m->m_pipein) < 0) {
        LOGERR("ExecCmd::startExec: pipe(2) failed. errno " << errno << "\n" );
        return -1;
    }
    if (has_output && pipe(m->m_pipeout) < 0) {
        LOGERR("ExecCmd::startExec: pipe(2) failed. errno " << errno << "\n");
        return -1;
    }


//////////// vfork setup section
    // We do here things that we could/should do after a fork(), but
    // not a vfork(). Does no harm to do it here in both cases, except
    // that it needs cleanup (as compared to doing it just before
    // exec()).

    // Allocate arg vector (2 more for arg0 + final 0)
    typedef const char *Ccharp;
    Ccharp *argv;
    argv = (Ccharp *)malloc((args.size() + 2) * sizeof(char *));
    if (argv == 0) {
        LOGERR("ExecCmd::doexec: malloc() failed. errno " << errno << "\n");
        return -1;
    }
    // Fill up argv
    argv[0] = cmd.c_str();
    int i = 1;
    vector<string>::const_iterator it;
    for (it = args.begin(); it != args.end(); it++) {
        argv[i++] = it->c_str();
    }
    argv[i] = 0;

    // Environment. We first merge our environment and the specified
    // variables in a map, overriding existing values,
    // then generate an appropriate char*[]
    Ccharp *envv;
    map<string, string> envmap;
    for (int i = 0; environ[i] != 0; i++) {
        string entry(environ[i]);
        string::size_type eqpos = entry.find_first_of("=");
        if (eqpos == string::npos) {
            continue;
        }
        envmap[entry.substr(0, eqpos)] = entry.substr(eqpos+1);
    }
    for (const auto& entry : m->m_env) {
        string::size_type eqpos = entry.find_first_of("=");
        if (eqpos == string::npos) {
            continue;
        }
        envmap[entry.substr(0, eqpos)] = entry.substr(eqpos+1);
    }        

    // Allocate space for the array + string storage in one block.
    unsigned int allocsize = (envmap.size() + 2) * sizeof(char *);
    for (const auto& it : envmap) {
        allocsize += it.first.size() + 1 + it.second.size() + 1;
    }
    envv = (Ccharp *)malloc(allocsize);
    if (envv == 0) {
        LOGERR("ExecCmd::doexec: malloc() failed. errno " << errno << "\n");
        free(argv);
        return -1;
    }
    // Copy to new env array
    i = 0;
    char *cp = ((char *)envv) + (envmap.size() + 2) * sizeof(char *);
    for (const auto& it : envmap) {
        strcpy(cp, (it.first + "=" + it.second).c_str());
        envv[i++] = cp;
        cp += it.first.size() + 1 + it.second.size() + 1;
    }
    envv[i++] = 0;

    // As we are going to use execve, not execvp, do the PATH thing.
    string exe;
    if (!which(cmd, exe)) {
        LOGERR("ExecCmd::startExec: " << cmd << " not found\n");
        free(argv);
        free(envv);
        return 127 << 8;
    }
//////////////////////////////// End vfork child prepare section.

#if HAVE_POSIX_SPAWN && USE_POSIX_SPAWN
    // Note that posix_spawn provides no way to setrlimit() the child.
    {
        posix_spawnattr_t attrs;
        posix_spawnattr_init(&attrs);
        short flags;
        posix_spawnattr_getflags(&attrs, &flags);

        flags |=  POSIX_SPAWN_USEVFORK;

        posix_spawnattr_setpgroup(&attrs, 0);
        flags |= POSIX_SPAWN_SETPGROUP;

        sigset_t sset;
        sigemptyset(&sset);
        posix_spawnattr_setsigmask(&attrs, &sset);
        flags |= POSIX_SPAWN_SETSIGMASK;

        sigemptyset(&sset);
        sigaddset(&sset, SIGTERM);
        posix_spawnattr_setsigdefault(&attrs, &sset);
        flags |= POSIX_SPAWN_SETSIGDEF;

        posix_spawnattr_setflags(&attrs, flags);

        posix_spawn_file_actions_t facts;
        posix_spawn_file_actions_init(&facts);

        if (has_input) {
            posix_spawn_file_actions_addclose(&facts, m->m_pipein[1]);
            if (m->m_pipein[0] != 0) {
                posix_spawn_file_actions_adddup2(&facts, m->m_pipein[0], 0);
                posix_spawn_file_actions_addclose(&facts, m->m_pipein[0]);
            }
        }
        if (has_output) {
            posix_spawn_file_actions_addclose(&facts, m->m_pipeout[0]);
            if (m->m_pipeout[1] != 1) {
                posix_spawn_file_actions_adddup2(&facts, m->m_pipeout[1], 1);
                posix_spawn_file_actions_addclose(&facts, m->m_pipeout[1]);
            }
        }

        // Do we need to redirect stderr ?
        if (!m->m_stderrFile.empty()) {
            int oflags = O_WRONLY | O_CREAT;
#ifdef O_APPEND
            oflags |= O_APPEND;
#endif
            posix_spawn_file_actions_addopen(&facts, 2, m->m_stderrFile.c_str(),
                                             oflags, 0600);
        }
        LOGDEB1("using SPAWN\n");

        // posix_spawn() does not have any standard way to ask for
        // calling closefrom(). Afaik there is a solaris extension for this,
        // but let's just add all fds
        for (int i = 3; i < libclf_maxfd(); i++) {
            posix_spawn_file_actions_addclose(&facts, i);
        }

        int ret = posix_spawn(&m->m_pid, exe.c_str(), &facts, &attrs,
                              (char *const *)argv, (char *const *)envv);
        posix_spawnattr_destroy(&attrs);
        posix_spawn_file_actions_destroy(&facts);
        if (ret) {
            LOGERR("ExecCmd::startExec: posix_spawn() failed. errno " << ret <<
                   "\n");
            return -1;
        }
    }

#else
    if (Internal::o_useVfork) {
        LOGDEB1("using VFORK\n");
        m->m_pid = vfork();
    } else {
        LOGDEB1("using FORK\n");
        m->m_pid = fork();
    }
    if (m->m_pid < 0) {
        LOGERR("ExecCmd::startExec: fork(2) failed. errno " << errno << "\n");
        return -1;
    }
    if (m->m_pid == 0) {
        // e.inactivate() is not needed. As we do not return, the call
        // stack won't be unwound and destructors of local objects
        // won't be called.
        m->dochild(exe, argv, envv, has_input, has_output);
        // dochild does not return. Just in case...
        _exit(1);
    }
#endif

    // Father process

////////////////////
    // Vfork cleanup section
    free(argv);
    free(envv);
///////////////////

    // Set the process group for the child. This is also done in the
    // child process see wikipedia(Process_group)
    if (setpgid(m->m_pid, m->m_pid)) {
        // This can fail with EACCES if the son has already done execve
        // (linux at least)
        LOGDEB2("ExecCmd: father setpgid(son)(" << m->m_pid << "," <<
                m->m_pid << ") errno " << errno << " (ok)\n");
    }

    sigemptyset(&m->m_blkcld);
    sigaddset(&m->m_blkcld, SIGCHLD);
    pthread_sigmask(SIG_BLOCK, &m->m_blkcld, 0);

    if (has_input) {
        close(m->m_pipein[0]);
        m->m_pipein[0] = -1;
        NetconCli *iclicon = new NetconCli();
        iclicon->setconn(m->m_pipein[1]);
        m->m_tocmd = std::shared_ptr<NetconCli>(iclicon);
    }
    if (has_output) {
        close(m->m_pipeout[1]);
        m->m_pipeout[1] = -1;
        NetconCli *oclicon = new NetconCli();
        oclicon->setconn(m->m_pipeout[0]);
        m->m_fromcmd = std::shared_ptr<NetconCli>(oclicon);
    }

    /* Don't want to undo what we just did ! */
    e.inactivate();

    return 0;
}

// Netcon callback. Send data to the command's input
class ExecWriter : public NetconWorker {
public:
    ExecWriter(const string *input, ExecCmdProvide *provide,
               ExecCmd::Internal *parent)
        : m_cmd(parent), m_input(input), m_cnt(0), m_provide(provide) {
    }
    void shutdown() {
        close(m_cmd->m_pipein[1]);
        m_cmd->m_pipein[1] = -1;
        m_cmd->m_tocmd.reset();
    }
    virtual int data(NetconData *con, Netcon::Event reason) {
        if (!m_input) {
            return -1;
        }
        LOGDEB1("ExecWriter: input m_cnt " << m_cnt << " input length " <<
                m_input->length() << "\n");
        if (m_cnt >= m_input->length()) {
            // Fd ready for more but we got none. Try to get data, else
            // shutdown;
            if (!m_provide) {
                shutdown();
                return 0;
            }
            m_provide->newData();
            if (m_input->empty()) {
                shutdown();
                return 0;
            } else {
                // Ready with new buffer, reset use count
                m_cnt = 0;
            }
            LOGDEB2("ExecWriter: provide m_cnt " << m_cnt <<
                    " input length " << m_input->length() << "\n");
        }
        int ret = con->send(m_input->c_str() + m_cnt,
                            m_input->length() - m_cnt);
        LOGDEB2("ExecWriter: wrote " << (ret) << " to command\n");
        if (ret <= 0) {
            LOGERR("ExecWriter: data: can't write\n");
            return -1;
        }
        m_cnt += ret;
        return ret;
    }
private:
    ExecCmd::Internal *m_cmd;
    const string   *m_input;
    unsigned int    m_cnt; // Current offset inside m_input
    ExecCmdProvide *m_provide;
};

// Netcon callback. Get data from the command output.
class ExecReader : public NetconWorker {
public:
    ExecReader(string *output, ExecCmdAdvise *advise)
        : m_output(output), m_advise(advise) {
    }
    virtual int data(NetconData *con, Netcon::Event reason) {
        char buf[8192];
        int n = con->receive(buf, 8192);
        LOGDEB1("ExecReader: got " << (n) << " from command\n");
        if (n < 0) {
            LOGERR("ExecCmd::doexec: receive failed. errno " << errno << "\n");
        } else if (n > 0) {
            m_output->append(buf, n);
            if (m_advise) {
                m_advise->newData(n);
            }
        } // else n == 0, just return
        return n;
    }
private:
    string        *m_output;
    ExecCmdAdvise *m_advise;
};


int ExecCmd::doexec(const string& cmd, const vector& args,
                    const string *input, string *output)
{
    int status = startExec(cmd, args, input != 0, output != 0);
    if (status) {
        return status;
    }

    // Cleanup in case we return early
    ExecCmdRsrc e(m);
    SelectLoop myloop;
    int ret = 0;
    if (input || output) {
        // Setup output
        if (output) {
            NetconCli *oclicon = m->m_fromcmd.get();
            if (!oclicon) {
                LOGERR("ExecCmd::doexec: no connection from command\n");
                return -1;
            }
            oclicon->setcallback(std::shared_ptr<NetconWorker>(
                                     new ExecReader(output, m->m_advise)));
            myloop.addselcon(m->m_fromcmd, Netcon::NETCONPOLL_READ);
            // Give up ownership
            m->m_fromcmd.reset();
        }
        // Setup input
        if (input) {
            NetconCli *iclicon = m->m_tocmd.get();
            if (!iclicon) {
                LOGERR("ExecCmd::doexec: no connection from command\n");
                return -1;
            }
            iclicon->setcallback(std::shared_ptr<NetconWorker>(
                                     new ExecWriter(input, m->m_provide, m)));
            myloop.addselcon(m->m_tocmd, Netcon::NETCONPOLL_WRITE);
            // Give up ownership
            m->m_tocmd.reset();
        }

        // Do the actual reading/writing/waiting
        myloop.setperiodichandler(0, 0, m->m_timeoutMs);
        while ((ret = myloop.doLoop()) > 0) {
            LOGDEB("ExecCmd::doexec: selectloop returned " << (ret) << "\n");
            if (m->m_advise) {
                m->m_advise->newData(0);
            }
            if (m->m_killRequest) {
                LOGINFO("ExecCmd::doexec: cancel request\n");
                break;
            }
        }
        LOGDEB0("ExecCmd::doexec: selectloop returned " << (ret) << "\n");
        // Check for interrupt request: we won't want to waitpid()
        if (m->m_advise) {
            m->m_advise->newData(0);
        }

        // The netcons don't take ownership of the fds: we have to close them
        // (have to do it before wait, this may be the signal the child is
        // waiting for exiting).
        if (input) {
            close(m->m_pipein[1]);
            m->m_pipein[1] = -1;
        }
        if (output) {
            close(m->m_pipeout[0]);
            m->m_pipeout[0] = -1;
        }
    }

    // Normal return: deactivate cleaner, wait() will do the cleanup
    e.inactivate();

    int ret1 = ExecCmd::wait();
    if (ret) {
        return -1;
    }
    return ret1;
}
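
// Illustrative sketch (not part of the original sources): the common "run a
// filter, feed it data, collect its output" pattern built on doexec(). The
// command name and its arguments are hypothetical.
#if 0
static int filter_sketch(const std::string& inputdata, std::string& outputdata)
{
    ExecCmd cmd;
    cmd.setTimeout(5000);                 // ms between periodic activity checks
    cmd.putenv("LC_ALL", "C");            // environment add/override
    std::vector<std::string> args{"-", "-o", "-"};
    // Waits for completion; returns the wait() status (0 for success).
    return cmd.doexec("somefilter", args, &inputdata, &outputdata);
}
#endif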

int ExecCmd::send(const string& data)
{
    NetconCli *con = m->m_tocmd.get();
    if (con == 0) {
        LOGERR("ExecCmd::send: outpipe is closed\n");
        return -1;
    }
    unsigned int nwritten = 0;
    while (nwritten < data.length()) {
        if (m->m_killRequest) {
            break;
        }
        int n = con->send(data.c_str() + nwritten, data.length() - nwritten);
        if (n < 0) {
            LOGERR("ExecCmd::send: send failed\n");
            return -1;
        }
        nwritten += n;
    }
    return nwritten;
}

int ExecCmd::receive(string& data, int cnt)
{
    NetconCli *con = m->m_fromcmd.get();
    if (con == 0) {
        LOGERR("ExecCmd::receive: inpipe is closed\n");
        return -1;
    }
    const int BS = 4096;
    char buf[BS];
    int ntot = 0;
    do {
        int toread = cnt > 0 ? MIN(cnt - ntot, BS) : BS;
        int n = con->receive(buf, toread);
        if (n < 0) {
            LOGERR("ExecCmd::receive: error\n");
            return -1;
        } else if (n > 0) {
            ntot += n;
            data.append(buf, n);
        } else {
            LOGDEB("ExecCmd::receive: got 0\n");
            break;
        }
    } while (cnt > 0 && ntot < cnt);
    return ntot;
}

int ExecCmd::getline(string& data)
{
    NetconCli *con = m->m_fromcmd.get();
    if (con == 0) {
        LOGERR("ExecCmd::receive: inpipe is closed\n");
        return -1;
    }
    const int BS = 1024;
    char buf[BS];
    int timeosecs = m->m_timeoutMs / 1000;
    if (timeosecs == 0) {
        timeosecs = 1;
    }

    // Note that we only go once through here, except in case of
    // timeout, which is why I think that the goto is more expressive
    // than a loop
again:
    int n = con->getline(buf, BS, timeosecs);
    if (n < 0) {
        if (con->timedout()) {
            LOGDEB0("ExecCmd::getline: select timeout, report and retry\n");
            if (m->m_advise) {
                m->m_advise->newData(0);
            }
            goto again;
        }
        LOGERR("ExecCmd::getline: error\n");
    } else if (n > 0) {
        data.append(buf, n);
    } else {
        LOGDEB("ExecCmd::getline: got 0\n");
    }
    return n;
}

class GetlineWatchdog : public ExecCmdAdvise {
public:
    GetlineWatchdog(int secs) : m_secs(secs), tstart(time(0)) {}
    void newData(int cnt) {
        if (time(0) - tstart >= m_secs) {
            throw std::runtime_error("getline timeout");
        }
    }
    int m_secs;
    time_t tstart;
};

int ExecCmd::getline(string& data, int timeosecs)
{
    GetlineWatchdog gwd(timeosecs);
    setAdvise(&gwd);
    try {
        return getline(data);
    } catch (...) {
        return -1;
    }
}
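
// Illustrative sketch (not part of the original sources): driving a
// long-running helper process line by line with startExec()/send()/getline().
// The helper name and its protocol are hypothetical.
#if 0
static bool helper_dialog_sketch()
{
    ExecCmd cmd;
    std::vector<std::string> noargs;
    if (cmd.startExec("somehelper", noargs, true, true) != 0)
        return false;
    if (cmd.send("version\n") < 0)
        return false;
    std::string answer;
    if (cmd.getline(answer, 10) <= 0)     // 10 second watchdog
        return false;
    // ... more exchanges, then let the destructor or zapChild() clean up.
    return true;
}
#endif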


// Wait for command status and clean up all resources.
// We would like to avoid blocking here too, but there is no simple
// way to do this. The 2 possible approaches would be to:
//  - Use signals (alarm), waitpid() is interruptible. but signals and
//    threads... This would need a specialized thread, inter-thread comms etc.
//  - Use an intermediary process when starting the command. The
//    process forks a timer process, and the real command, then calls
//    a blocking waitpid on all at the end, and is guaranteed to get
//    at least the timer process status, thus yielding a select()
//    equivalent. This is bad too, because the timeout is on the whole
//    exec, not just the wait
// Just calling waitpid() with WNOHANG with a sleep() between tries
// does not work: the first waitpid() usually comes too early and
// reaps nothing, resulting in almost always one sleep() or more.
//
// So no timeout here. This has not been a problem in practice inside recoll.
// In case of need, using a semi-busy loop with short sleeps
// increasing from a few mS might work without creating too much
// overhead.
int ExecCmd::wait()
{
    ExecCmdRsrc e(m);
    int status = -1;
    if (!m->m_killRequest && m->m_pid > 0) {
        if (waitpid(m->m_pid, &status, 0) < 0) {
            LOGERR("ExecCmd::waitpid: returned -1 errno " << errno << "\n");
            status = -1;
        }
        LOGDEB("ExecCmd::wait: got status 0x" << (status) << "\n");
        m->m_pid = -1;
    }
    // Let the ExecCmdRsrc cleanup, it will do the killing/waiting if needed
    return status;
}

bool ExecCmd::maybereap(int *status)
{
    ExecCmdRsrc e(m);
    *status = -1;

    if (m->m_pid <= 0) {
        // Already waited for ??
        return true;
    }

    pid_t pid = waitpid(m->m_pid, status, WNOHANG);
    if (pid < 0) {
        LOGERR("ExecCmd::maybereap: returned -1 errno " << errno << "\n");
        m->m_pid = -1;
        return true;
    } else if (pid == 0) {
        LOGDEB1("ExecCmd::maybereap: not exited yet\n");
        e.inactivate();
        return false;
    } else {
        LOGDEB("ExecCmd::maybereap: got status 0x" << (status) << "\n");
        m->m_pid = -1;
        return true;
    }
}

// Static
bool ExecCmd::backtick(const vector cmd, string& out)
{
    if (cmd.empty()) {
        LOGERR("ExecCmd::backtick: empty command\n");
        return false;
    }
    vector<string>::const_iterator it = cmd.begin();
    it++;
    vector<string> args(it, cmd.end());
    ExecCmd mexec;
    int status = mexec.doexec(*cmd.begin(), args, 0, &out);
    return status == 0;
}
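
// Illustrative sketch (not part of the original sources): backtick() mimics
// shell `command` substitution. The command shown is arbitrary.
#if 0
static std::string uname_sketch()
{
    std::string out;
    if (!ExecCmd::backtick({"uname", "-s"}, out))
        out.clear();
    return out;                 // e.g. "Linux\n" on success
}
#endif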

/// ReExec class methods ///////////////////////////////////////////////////
ReExec::ReExec(int argc, char *args[])
{
    init(argc, args);
}

void ReExec::init(int argc, char *args[])
{
    for (int i = 0; i < argc; i++) {
        m_argv.push_back(args[i]);
    }
    m_cfd = open(".", 0);
    char *cd = getcwd(0, 0);
    if (cd) {
        m_curdir = cd;
    }
    free(cd);
}

void ReExec::insertArgs(const vector& args, int idx)
{
    vector<string>::iterator it, cit;
    unsigned int cmpoffset = (unsigned int) - 1;

    if (idx == -1 || string::size_type(idx) >= m_argv.size()) {
        it = m_argv.end();
        if (m_argv.size() >= args.size()) {
            cmpoffset = m_argv.size() - args.size();
        }
    } else {
        it = m_argv.begin() + idx;
        if (idx + args.size() <= m_argv.size()) {
            cmpoffset = idx;
        }
    }

    // Check that the option is not already there
    if (cmpoffset != (unsigned int) - 1) {
        bool allsame = true;
        for (unsigned int i = 0; i < args.size(); i++) {
            if (m_argv[cmpoffset + i] != args[i]) {
                allsame = false;
                break;
            }
        }
        if (allsame) {
            return;
        }
    }

    m_argv.insert(it, args.begin(), args.end());
}

void ReExec::removeArg(const string& arg)
{
    for (vector<string>::iterator it = m_argv.begin(); it != m_argv.end();) {
        if (*it == arg) {
            // erase() already moves us to the next element: don't increment
            it = m_argv.erase(it);
        } else {
            it++;
        }
    }
}

// Reexecute myself, as close as possible to the initial exec
void ReExec::reexec()
{

#if 0
    char *cwd;
    cwd = getcwd(0, 0);
    FILE *fp = stdout; //fopen("/tmp/exectrace", "w");
    if (fp) {
        fprintf(fp, "reexec: pwd: [%s] args: ", cwd ? cwd : "getcwd failed");
        for (vector<string>::const_iterator it = m_argv.begin();
                it != m_argv.end(); it++) {
            fprintf(fp, "[%s] ", it->c_str());
        }
        fprintf(fp, "\n");
    }
#endif

    // Execute the atexit funcs
    while (!m_atexitfuncs.empty()) {
        (m_atexitfuncs.top())();
        m_atexitfuncs.pop();
    }

    // Try to get back to the initial working directory
    if (m_cfd < 0 || fchdir(m_cfd) < 0) {
        LOGINFO("ReExec::reexec: fchdir failed, trying chdir\n");
        if (!m_curdir.empty() && chdir(m_curdir.c_str())) {
            LOGERR("ReExec::reexec: chdir failed\n");
        }
    }

    // Close all descriptors except 0,1,2
    libclf_closefrom(3);

    // Allocate arg vector (1 more for final 0)
    typedef const char *Ccharp;
    Ccharp *argv;
    argv = (Ccharp *)malloc((m_argv.size() + 1) * sizeof(char *));
    if (argv == 0) {
        LOGERR("ExecCmd::doexec: malloc() failed. errno " << errno << "\n");
        return;
    }

    // Fill up argv
    int i = 0;
    vector<string>::const_iterator it;
    for (it = m_argv.begin(); it != m_argv.end(); it++) {
        argv[i++] = it->c_str();
    }
    argv[i] = 0;
    execvp(m_argv[0].c_str(), (char *const*)argv);
}
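
// Illustrative sketch (not part of the original sources): typical ReExec use.
// The program saves its initial command line, possibly adjusts it, then
// restarts itself in place (e.g. after a configuration change). The option
// names are hypothetical.
#if 0
static ReExec *o_reexec;

int main(int argc, char **argv)
{
    o_reexec = new ReExec(argc, argv);
    // ... run normally; later, when a restart is needed:
    o_reexec->removeArg("--firstrun");
    o_reexec->insertArgs(std::vector<std::string>{"--restarted"}, -1);
    o_reexec->reexec();         // only returns if execvp() failed
    return 1;
}
#endif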
recoll-1.26.3/utils/fileudi.cpp0000644000175000017500000000701613533651561013305 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_FILEUDI
#include "autoconfig.h"

#include <stdio.h>
#include <stdlib.h>

#include "fileudi.h"
#include "md5.h"
#include "base64.h"

using std::string;

// Size of the hashed result (base64 of 16 bytes of md5, minus 2 pad chars)
#define HASHLEN 22

// Convert longish paths by truncating and appending hash of path
// The full length of the base64-encoded (minus pad) of the md5 is 22 chars
// We append this to the truncated path
void pathHash(const std::string &path, std::string &phash, unsigned int maxlen)
{
    if (maxlen < HASHLEN) {
	fprintf(stderr, "pathHash: internal error: requested len too small\n");
	abort();
    }

    if (path.length() <= maxlen) {
	phash = path;
	return;
    }

    // Compute the md5
    unsigned char chash[16];
    MD5_CTX ctx;
    MD5Init(&ctx);
    MD5Update(&ctx, (const unsigned char *)(path.c_str()+maxlen-HASHLEN), 
	      path.length() - (maxlen - HASHLEN));
    MD5Final(chash, &ctx);

    // Encode it to ascii. This shouldn't be strictly necessary as
    // xapian terms can be binary
    string hash;
    base64_encode(string((char *)chash, 16), hash);
    // We happen to know there will be 2 pad chars in there, that we
    // don't need as this won't ever be decoded. Resulting length is 22
    hash.resize(hash.length() - 2);

    // Truncate path and append hash
    phash = path.substr(0, maxlen - HASHLEN) + hash;
}


// Maximum length for path/unique terms stored for each document. We truncate
// longer paths and uniquize them by appending a hashed value. This
// is done to avoid xapian max term length limitations, not
// to gain space (we gain very little even with very short maxlens
// like 30). The xapian max key length seems to be around 250.
// The value for PATHHASHLEN includes the length of the hash part.
#define PATHHASHLEN 150

// Compute the unique term used to link documents to their file-system source:
// Hashed path + possible internal path
void make_udi(const string& fn, const string& ipath, string &udi)
{
    string s(fn);
    // Note that we append a "|" in all cases. Historical, could be removed
    s.append("|");
    s.append(ipath);
    pathHash(s, udi, PATHHASHLEN);
    return;
}
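
// Illustrative usage sketch (the file names are hypothetical): short paths
// are kept verbatim (plus the "|" + ipath suffix), long ones are truncated
// and uniquized, so the resulting udi never exceeds PATHHASHLEN bytes and
// stays usable as a Xapian term.
#if 0
static void example_make_udi()
{
    std::string udi;
    make_udi("/home/me/docs/report.odt", "", udi);   // short: kept as is
    make_udi(std::string(500, 'x'), "3:2:1", udi);   // long: truncated + hashed
    // In both cases udi.size() <= PATHHASHLEN.
}
#endif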

#else // TEST_FILEUDI
#include <stdio.h>
#include <string>
#include "fileudi.h"

using namespace std;

int main(int argc, char **argv)
{
    string path="/usr/lib/toto.cpp";
    string ipath = "1:2:3:4:5:10";
    string udi;
    make_udi(path, ipath, udi);
    printf("udi [%s]\n", udi.c_str());
    path = "/some/much/too/looooooooooooooong/path/bla/bla/bla"
	"/looooooooooooooong/path/bla/bla/bla/llllllllllllllllll"
	"/looooooooooooooong/path/bla/bla/bla/llllllllllllllllll";
    ipath = "1:2:3:4:5:10"
	"1:2:3:4:5:10"
	"1:2:3:4:5:10";
    make_udi(path, ipath, udi);
    printf("udi [%s]\n", udi.c_str());
}
#endif // TEST_FILEUDI
recoll-1.26.3/utils/readfile.cpp0000644000175000017500000004300113533651561013431 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include "readfile.h"

#include <errno.h>
#include <stdint.h>

#ifdef _WIN32
#include "safefcntl.h"
#include "safesysstat.h"
#include "safeunistd.h"
#include "transcode.h"
#define OPEN _wopen

#else
#define O_BINARY 0
#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#define OPEN open

#endif

#include <string>

#include "smallut.h"
#include "pathut.h"

#ifdef READFILE_ENABLE_MD5
#include "md5.h"
#endif

#ifdef MDU_INCLUDE_LOG
#include MDU_INCLUDE_LOG
#else
#include "log.h"
#endif

using namespace std;

///////////////
// Implementation of basic interface: read whole file to memory buffer
class FileToString : public FileScanDo {
public:
    FileToString(string& data) : m_data(data) {}

    // Note: the fstat() + reserve() (in init()) calls divide CPU usage
    // almost by 2 on both Linux i586 and macOS, compared to just using
    // append(). A version using mmap was also tried, but it is actually
    // slower on the Mac and not faster on Linux.
    virtual bool init(int64_t size, string *reason) {
        if (size > 0) {
            m_data.reserve(size);
        }
        return true;
    }
    virtual bool data(const char *buf, int cnt, string *reason) {
        try {
            m_data.append(buf, cnt);
        } catch (...) {
            catstrerror(reason, "append", errno);
            return false;
        }
        return true;
    }

    string& m_data;
};

bool file_to_string(const string& fn, string& data, int64_t offs, size_t cnt,
                    string *reason)
{
    FileToString accum(data);
    return file_scan(fn, &accum, offs, cnt, reason
#ifdef READFILE_ENABLE_MD5
                     , nullptr
#endif
        );
}

bool file_to_string(const string& fn, string& data, string *reason)
{
    return file_to_string(fn, data, 0, size_t(-1), reason);
}
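
// Usage sketch (illustrative only; the file name is hypothetical): the
// convenience entry points above read a whole file, or a byte range, into a
// string, and report failures through the reason string.
#if 0
static void example_file_to_string()
{
    std::string data, reason;
    if (!file_to_string("/tmp/somefile.txt", data, &reason)) {
        // reason now holds a short explanation built by catstrerror()
    }
    // Read 1024 bytes starting at offset 4096 from the same file.
    file_to_string("/tmp/somefile.txt", data, 4096, 1024, &reason);
}
#endif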


/////////////
//  Callback/filtering interface

// Abstract class base for both source (origin) and filter
// (midstream). Both have a downstream
class FileScanUpstream {
public:
    virtual void setDownstream(FileScanDo *down) {
        m_down = down;
    }
    virtual FileScanDo *out() {
        return m_down;
    }
protected:        
    FileScanDo *m_down{nullptr};
};

// Source element.
class FileScanSource : public FileScanUpstream {
public:
    FileScanSource(FileScanDo *down) {
        setDownstream(down);
    }
    virtual bool scan() = 0;
};

// Inside element of a transformation pipe. The idea is that elements
// which don't recognize the data get themselves out of the pipe
// (pop()). Typically, only one of the decompression modules
// (e.g. gzip/bzip2/xz...) would remain. For now there is only gzip;
// it pops itself if the data does not have the right magic number.
class FileScanFilter : public FileScanDo, public FileScanUpstream {
public:
    virtual void insertAtSink(FileScanDo *sink, FileScanUpstream *upstream) {
        setDownstream(sink);
        if (m_down) {
            m_down->setUpstream(this);
        }
        setUpstream(upstream);
        if (m_up) {
            m_up->setDownstream(this);
        }
    }

    // Remove myself from the pipe. 
    virtual void pop() {
        if (m_down) {
            m_down->setUpstream(m_up);
        }
        if (m_up) {
            m_up->setDownstream(m_down);
        }
    }

    virtual void setUpstream(FileScanUpstream *up) override {
        m_up = up;
    }

private:
    FileScanUpstream *m_up{nullptr};
};
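
// Illustrative sketch (assumption: a sink only needs to implement the
// FileScanDo methods used above, init() and data()). A pipeline is built by
// handing the sink to a source, then letting filters splice themselves in
// with insertAtSink(); a filter which does not recognize the data drops out
// with pop(), as GzFilter does below.
#if 0
class ByteCounter : public FileScanDo {
public:
    virtual bool init(int64_t size, std::string *reason) { return true; }
    virtual bool data(const char *buf, int cnt, std::string *reason) {
        m_total += cnt;
        return true;
    }
    int64_t m_total{0};
};
// ByteCounter counter;
// FileScanSourceFile source(&counter, "/tmp/afile", 0, -1, &reason);
// GzFilter gz;
// gz.insertAtSink(&counter, &source);
// source.scan();   // counter.m_total: size of the (uncompressed) content
#endif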


#if defined(READFILE_ENABLE_ZLIB)
#include <zlib.h>

class GzFilter : public FileScanFilter {
public:
    virtual ~GzFilter() {
        if (m_initdone) {
            inflateEnd(&m_stream);
        }
    }

    virtual bool init(int64_t size, string *reason) override {
        LOGDEB1("GzFilter::init\n");
        if (out()) {
            return out()->init(size, reason);
        }
        return true;
    }

    virtual bool data(const char *buf, int cnt, string *reason) override {
        LOGDEB1("GzFilter::data: cnt " << cnt << endl);

        int error;
        m_stream.next_in = (Bytef*)buf;
        m_stream.avail_in = cnt;
        
        if (m_initdone == false) {
            // We do not support a first read with cnt < 2. This quite
            // probably can't happen with a compressed file (size > 2),
            // except when reading from a tty, which is improbable, so
            // assume this is a regular file.
            const unsigned char *ubuf = (const unsigned char *)buf;
            if ((cnt < 2) || ubuf[0] != 0x1f || ubuf[1] != 0x8b) {
                LOGDEB1("GzFilter::data: not gzip. out() is " << out() << "\n");
                pop();
                if (out()) {
                    return out()->data(buf, cnt, reason);
                } else {
                    return false;
                }
            }
            m_stream.opaque = nullptr;
            m_stream.zalloc = alloc_func;
            m_stream.zfree = free_func;
            m_stream.next_out = (Bytef*)m_obuf;
            m_stream.avail_out = m_obs;
            if ((error = inflateInit2(&m_stream, 15+32)) != Z_OK) {
                LOGERR("inflateInit2 error: " << error << endl);
                if (reason) {
                    *reason += " Zlib inflateinit failed";
                    if (m_stream.msg && *m_stream.msg) {
                        *reason += string(": ") + m_stream.msg;
                    }
                }
                return false;
            }
            m_initdone = true;
        }
        
        while (m_stream.avail_in != 0) {
            m_stream.next_out = (Bytef*)m_obuf;
            m_stream.avail_out = m_obs;
            if ((error = inflate(&m_stream, Z_SYNC_FLUSH)) < Z_OK) {
                LOGERR("inflate error: " << error << endl);
                if (reason) {
                    *reason += " Zlib inflate failed";
                    if (m_stream.msg && *m_stream.msg) {
                        *reason += string(": ") + m_stream.msg;
                    }
                }
                return false;
            }
            if (out() &&
                !out()->data(m_obuf, m_obs - m_stream.avail_out, reason)) {
                return false;
            }
        }
        return true;
    }
    
    static voidpf alloc_func(voidpf opaque, uInt items, uInt size) {
        return malloc(items * size);
    }
    static void free_func(voidpf opaque, voidpf address) {
        free(address);
    }

    bool m_initdone{false};
    z_stream m_stream;
    char m_obuf[10000];
    const int m_obs{10000};
};
#endif // GZ

#ifdef READFILE_ENABLE_MD5

class FileScanMd5 : public FileScanFilter {
public:
    FileScanMd5(string& d) : digest(d) {}
    virtual bool init(int64_t size, string *reason) override {
        LOGDEB1("FileScanMd5: init\n");
	MD5Init(&ctx);
        if (out()) {
            return out()->init(size, reason);
        }
	return true;
    }
    virtual bool data(const char *buf, int cnt, string *reason) override {
        LOGDEB1("FileScanMd5: data. cnt " << cnt << endl);
	MD5Update(&ctx, (const unsigned char*)buf, cnt);
        if (out() && !out()->data(buf, cnt, reason)) {
            return false;
        }
	return true;
    }
    bool finish() {
        LOGDEB1("FileScanMd5: finish\n");
        MD5Final(digest, &ctx);
        return true;
    }
    string &digest;
    MD5_CTX ctx;
};
#endif // MD5

// Source taking data from a regular file
class FileScanSourceFile : public FileScanSource {
public:
    FileScanSourceFile(FileScanDo *next, const string& fn, int64_t startoffs,
                       int64_t cnttoread, string *reason)
        : FileScanSource(next), m_fn(fn), m_startoffs(startoffs),
          m_cnttoread(cnttoread), m_reason(reason) { }

    virtual bool scan() {
        LOGDEB1("FileScanSourceFile: reading " << m_fn << " offs " <<
               m_startoffs<< " cnt " << m_cnttoread << " out " << out() << endl);
        const int RDBUFSZ = 8192;
        bool ret = false;
        bool noclosing = true;
        int fd = 0;
        struct stat st;
        // Initialize st_size: if fn.empty(), the fstat() call won't happen.
        st.st_size = 0;

        // If we have a file name, open it, else use stdin.
        if (!m_fn.empty()) {
            SYSPATH(m_fn, realpath);
            fd = OPEN(realpath, O_RDONLY | O_BINARY);
            if (fd < 0 || fstat(fd, &st) < 0) {
                catstrerror(m_reason, "open/stat", errno);
                return false;
            }
            noclosing = false;
        }

#if defined O_NOATIME && O_NOATIME != 0
        if (fcntl(fd, F_SETFL, O_NOATIME) < 0) {
            // perror("fcntl");
        }
#endif
        if (out()) {
            if (m_cnttoread != -1 && m_cnttoread) {
                out()->init(m_cnttoread + 1, m_reason);
            } else if (st.st_size > 0) {
                out()->init(st.st_size + 1, m_reason);
            } else {
                out()->init(0, m_reason);
            }
        }

        int64_t curoffs = 0;
        if (m_startoffs > 0 && !m_fn.empty()) {
            if (lseek(fd, m_startoffs, SEEK_SET) != m_startoffs) {
                catstrerror(m_reason, "lseek", errno);
                return false;
            }
            curoffs = m_startoffs;
        }

        char buf[RDBUFSZ];
        int64_t totread = 0;
        for (;;) {
            size_t toread = RDBUFSZ;
            if (m_startoffs > 0 && curoffs < m_startoffs) {
                toread = size_t(MIN(RDBUFSZ, m_startoffs - curoffs));
            }

            if (m_cnttoread != -1) {
                toread = MIN(toread, (uint64_t)(m_cnttoread - totread));
            }
            ssize_t n = static_cast<ssize_t>(read(fd, buf, toread));
            if (n < 0) {
                catstrerror(m_reason, "read", errno);
                goto out;
            }
            if (n == 0) {
                break;
            }
            curoffs += n;
            if (curoffs - n < m_startoffs) {
                continue;
            }
            if (!out()->data(buf, n, m_reason)) {
                goto out;
            }
            totread += n;
            if (m_cnttoread > 0 && totread >= m_cnttoread) {
                break;
            }
        }

        ret = true;
    out:
        if (fd >= 0 && !noclosing) {
            close(fd);
        }
        return ret;
    }
    
protected:
    string m_fn;
    int64_t m_startoffs;
    int64_t m_cnttoread;
    string *m_reason;
};


#if defined(READFILE_ENABLE_MINIZ)
#include "miniz.h"

// Source taking data from a ZIP archive member
class FileScanSourceZip : public FileScanSource {
public:
    FileScanSourceZip(FileScanDo *next, const string& fn,
                      const string& member, string *reason)
        : FileScanSource(next), m_fn(fn), m_member(member),
          m_reason(reason) {}

    FileScanSourceZip(const char *data, size_t cnt, FileScanDo *next,
                      const string& member, string *reason)
        : FileScanSource(next), m_data(data), m_cnt(cnt), m_member(member),
          m_reason(reason) {}

    virtual bool scan() {
        bool ret = false;
        mz_zip_archive zip;
        mz_zip_zero_struct(&zip);
        void *opaque = this;

        bool ret1;
        if (m_fn.empty()) {
            ret1 = mz_zip_reader_init_mem(&zip, m_data, m_cnt, 0);
        } else {
            SYSPATH(m_fn, realpath);
            ret1 = mz_zip_reader_init_file(&zip, realpath, 0);
        }
        if (!ret1) {
            if (m_reason) {
                *m_reason += "mz_zip_reader_init_xx() failed: ";
                *m_reason +=
                    string(mz_zip_get_error_string(zip.m_last_error));
            }
            return false;
        }

        mz_uint32 file_index;
        if (!mz_zip_reader_locate_file_v2(&zip, m_member.c_str(), NULL, 0,
                                          &file_index)) {
            if (m_reason) {
                *m_reason += "mz_zip_reader_locate_file() failed: ";
                *m_reason += string(mz_zip_get_error_string(zip.m_last_error));
            }
            goto out;
        }

        mz_zip_archive_file_stat zstat;
        if (!mz_zip_reader_file_stat(&zip, file_index, &zstat)) {
            if (m_reason) {
                *m_reason += "mz_zip_reader_file_stat() failed: ";
                *m_reason += string(mz_zip_get_error_string(zip.m_last_error));
            }
            goto out;
        }
        if (out()) {
            if (!out()->init(zstat.m_uncomp_size, m_reason)) {
                goto out;
            }
        }
                
        if (!mz_zip_reader_extract_to_callback(
                &zip, file_index, write_cb, opaque, 0)) {
            if (m_reason) {
                *m_reason += "mz_zip_reader_extract_to_callback() failed: ";
                *m_reason += string(mz_zip_get_error_string(zip.m_last_error));
            }
            goto out;
        }
        
        ret = true;
    out:
        mz_zip_reader_end(&zip);
        return ret;
    }

    static size_t write_cb(void *pOpaque, mz_uint64 file_ofs,
                           const void *pBuf, size_t n) {
        const char *cp = (const char*)pBuf;
        LOGDEB1("write_cb: ofs " << file_ofs << " cnt " << n << " data: " <<
                string(cp, n) << endl);
        FileScanSourceZip *ths = (FileScanSourceZip *)pOpaque;
        if (ths->out()) {
            if (!ths->out()->data(cp, n, ths->m_reason)) {
                return (size_t)-1;
            }
        }
        return n;
    }
    
protected:
    const char *m_data;
    size_t m_cnt;
    string m_fn;
    string m_member;
    string *m_reason;
};

bool file_scan(const std::string& filename, const std::string& membername,
               FileScanDo* doer, std::string *reason)
{
    if (membername.empty()) {
        return file_scan(filename, doer, 0, -1, reason
#ifdef READFILE_ENABLE_MD5
, nullptr
#endif
            );
    } else {
        FileScanSourceZip source(doer, filename, membername, reason);
        return source.scan();
    }
}

bool string_scan(const char *data, size_t cnt, const std::string& membername,
                 FileScanDo* doer, std::string *reason)
{
    if (membername.empty()) {
        return string_scan(data, cnt, doer, reason
#ifdef READFILE_ENABLE_MD5
, nullptr
#endif
            );                           
    } else {
        FileScanSourceZip source(data, cnt, doer, membername, reason);
        return source.scan();
    }
}

#endif // READFILE_ENABLE_MINIZ

bool file_scan(const string& fn, FileScanDo* doer, int64_t startoffs,
               int64_t cnttoread, string *reason
#ifdef READFILE_ENABLE_MD5
               , string *md5p
#endif
    )
{
    LOGDEB1("file_scan: doer " << doer << endl);
#if defined(READFILE_ENABLE_ZLIB)
    bool nodecomp = startoffs != 0;
#endif
    if (startoffs < 0) {
        startoffs = 0;
    }
    
    FileScanSourceFile source(doer, fn, startoffs, cnttoread, reason);
    FileScanUpstream *up = &source;
    // The self-assignment below only silences an "unused variable" warning
    // when none of the optional filters further down is compiled in.
    up = up;
    
#if defined(READFILE_ENABLE_ZLIB)
    GzFilter gzfilter;
    if (!nodecomp) {
        gzfilter.insertAtSink(doer, up);
        up = &gzfilter;
    }
#endif

#ifdef READFILE_ENABLE_MD5
    // We compute the MD5 on the uncompressed data, so insert this
    // right at the source (after the decompressor).
    string digest;
    FileScanMd5 md5filter(digest);
    if (md5p) {
        md5filter.insertAtSink(doer, up);
        up = &md5filter;
    }
#endif
    
    bool ret = source.scan();

#ifdef READFILE_ENABLE_MD5
    if (md5p) {
        md5filter.finish();
        MD5HexPrint(digest, *md5p);
    }
#endif
    return ret;
}

bool file_scan(const string& fn, FileScanDo* doer, string *reason)
{
    return file_scan(fn, doer, 0, -1, reason
#ifdef READFILE_ENABLE_MD5
, nullptr
#endif
        );                           
}
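
// Usage sketch (illustrative; the path is hypothetical and both
// READFILE_ENABLE_ZLIB and READFILE_ENABLE_MD5 are assumed to be defined):
// file_scan() feeds the sink with uncompressed data, gunzipping
// transparently, and can return the MD5 of that uncompressed stream at the
// same time.
#if 0
static void example_file_scan_md5()
{
    std::string data, md5, reason;
    FileToString sink(data);
    if (!file_scan("/tmp/archive.gz", &sink, 0, -1, &reason, &md5)) {
        // reason describes the failure (open, read, zlib...)
    }
    // data holds the uncompressed content, md5 its hex digest.
}
#endif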


class FileScanSourceBuffer : public FileScanSource {
public:
    FileScanSourceBuffer(FileScanDo *next, const char *data, size_t cnt,
                         string *reason)
        : FileScanSource(next), m_data(data), m_cnt(cnt), m_reason(reason) {}

    virtual bool scan() {
        if (out()) {
            if (!out()->init(m_cnt, m_reason)) {
                return false;
            }
            return out()->data(m_data, m_cnt, m_reason);
        } else {
            return true;
        }
    }
    
protected:
    const char *m_data{nullptr};
    size_t m_cnt{0};
    string *m_reason{nullptr};
};

bool string_scan(const char *data, size_t cnt, FileScanDo* doer,
                 std::string *reason
#ifdef READFILE_ENABLE_MD5
                 , std::string *md5p
#endif
    )
{
    FileScanSourceBuffer source(doer, data, cnt, reason);
    FileScanUpstream *up = &source;
    up = up;
    
#ifdef READFILE_ENABLE_MD5
    string digest;
    FileScanMd5 md5filter(digest);
    if (md5p) {
        md5filter.insertAtSink(doer, up);
        up = &md5filter;
    }
#endif
    
    bool ret = source.scan();

#ifdef READFILE_ENABLE_MD5
    if (md5p) {
        md5filter.finish();
        MD5HexPrint(digest, *md5p);
    }
#endif
    return ret;
}

recoll-1.26.3/utils/miniz.cpp0000644000175000017500000114447113533651561013022 00000000000000/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/

#include "miniz.h"

typedef unsigned char mz_validate_uint16[sizeof(mz_uint16) == 2 ? 1 : -1];
typedef unsigned char mz_validate_uint32[sizeof(mz_uint32) == 4 ? 1 : -1];
typedef unsigned char mz_validate_uint64[sizeof(mz_uint64) == 8 ? 1 : -1];

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- zlib-style API's */

mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len)
{
    mz_uint32 i, s1 = (mz_uint32)(adler & 0xffff), s2 = (mz_uint32)(adler >> 16);
    size_t block_len = buf_len % 5552;
    if (!ptr)
        return MZ_ADLER32_INIT;
    while (buf_len)
    {
        for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
        {
            s1 += ptr[0], s2 += s1;
            s1 += ptr[1], s2 += s1;
            s1 += ptr[2], s2 += s1;
            s1 += ptr[3], s2 += s1;
            s1 += ptr[4], s2 += s1;
            s1 += ptr[5], s2 += s1;
            s1 += ptr[6], s2 += s1;
            s1 += ptr[7], s2 += s1;
        }
        for (; i < block_len; ++i)
            s1 += *ptr++, s2 += s1;
        s1 %= 65521U, s2 %= 65521U;
        buf_len -= block_len;
        block_len = 5552;
    }
    return (s2 << 16) + s1;
}
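
/* Usage sketch (illustrative): as with zlib's adler32(), the checksum is
 * seeded with MZ_ADLER32_INIT (which is what a NULL ptr returns) and can be
 * updated incrementally over successive buffers. */
#if 0
static mz_ulong example_adler32(const unsigned char *p1, size_t n1,
                                const unsigned char *p2, size_t n2)
{
    mz_ulong a = mz_adler32(0, NULL, 0); /* == MZ_ADLER32_INIT */
    a = mz_adler32(a, p1, n1);
    a = mz_adler32(a, p2, n2);
    return a;
}
#endif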

/* Karl Malbrain's compact CRC-32. See "A compact CCITT crc16 and crc32 C implementation that balances processor cache usage against speed": http://www.geocities.com/malbrain/ */
#if 0
    mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
    {
        static const mz_uint32 s_crc32[16] = { 0, 0x1db71064, 0x3b6e20c8, 0x26d930ac, 0x76dc4190, 0x6b6b51f4, 0x4db26158, 0x5005713c,
                                               0xedb88320, 0xf00f9344, 0xd6d6a3e8, 0xcb61b38c, 0x9b64c2b0, 0x86d3d2d4, 0xa00ae278, 0xbdbdf21c };
        mz_uint32 crcu32 = (mz_uint32)crc;
        if (!ptr)
            return MZ_CRC32_INIT;
        crcu32 = ~crcu32;
        while (buf_len--)
        {
            mz_uint8 b = *ptr++;
            crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b & 0xF)];
            crcu32 = (crcu32 >> 4) ^ s_crc32[(crcu32 & 0xF) ^ (b >> 4)];
        }
        return ~crcu32;
    }
#else
/* Faster, but larger CPU cache footprint.
 */
mz_ulong mz_crc32(mz_ulong crc, const mz_uint8 *ptr, size_t buf_len)
{
    static const mz_uint32 s_crc_table[256] =
        {
          0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535,
          0x9E6495A3, 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD,
          0xE7B82D07, 0x90BF1D91, 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D,
          0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
          0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, 0x3B6E20C8, 0x4C69105E, 0xD56041E4,
          0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, 0x35B5A8FA, 0x42B2986C,
          0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, 0x26D930AC,
          0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
          0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB,
          0xB6662D3D, 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F,
          0x9FBFE4A5, 0xE8B8D433, 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB,
          0x086D3D2D, 0x91646C97, 0xE6635C01, 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
          0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA,
          0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, 0x4DB26158, 0x3AB551CE,
          0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, 0x4369E96A,
          0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
          0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409,
          0xCE61E49F, 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81,
          0xB7BD5C3B, 0xC0BA6CAD, 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739,
          0x9DD277AF, 0x04DB2615, 0x73DC1683, 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
          0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, 0xF00F9344, 0x8708A3D2, 0x1E01F268,
          0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, 0xFED41B76, 0x89D32BE0,
          0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, 0xD6D6A3E8,
          0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
          0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF,
          0x4669BE79, 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703,
          0x220216B9, 0x5505262F, 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7,
          0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
          0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE,
          0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, 0x86D3D2D4, 0xF1D4E242,
          0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, 0x88085AE6,
          0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
          0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D,
          0x3E6E77DB, 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5,
          0x47B2CF7F, 0x30B5FFE9, 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605,
          0xCDD70693, 0x54DE5729, 0x23D967BF, 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
          0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
        };

    mz_uint32 crc32 = (mz_uint32)crc ^ 0xFFFFFFFF;
    const mz_uint8 *pByte_buf = (const mz_uint8 *)ptr;

    while (buf_len >= 4)
    {
        crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
        crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[1]) & 0xFF];
        crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[2]) & 0xFF];
        crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[3]) & 0xFF];
        pByte_buf += 4;
        buf_len -= 4;
    }

    while (buf_len)
    {
        crc32 = (crc32 >> 8) ^ s_crc_table[(crc32 ^ pByte_buf[0]) & 0xFF];
        ++pByte_buf;
        --buf_len;
    }

    return ~crc32;
}
#endif

void mz_free(void *p)
{
    MZ_FREE(p);
}

void *miniz_def_alloc_func(void *opaque, size_t items, size_t size)
{
    (void)opaque, (void)items, (void)size;
    return MZ_MALLOC(items * size);
}
void miniz_def_free_func(void *opaque, void *address)
{
    (void)opaque, (void)address;
    MZ_FREE(address);
}
void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size)
{
    (void)opaque, (void)address, (void)items, (void)size;
    return MZ_REALLOC(address, items * size);
}

const char *mz_version(void)
{
    return MZ_VERSION;
}

#ifndef MINIZ_NO_ZLIB_APIS

int mz_deflateInit(mz_streamp pStream, int level)
{
    return mz_deflateInit2(pStream, level, MZ_DEFLATED, MZ_DEFAULT_WINDOW_BITS, 9, MZ_DEFAULT_STRATEGY);
}

int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy)
{
    tdefl_compressor *pComp;
    mz_uint comp_flags = TDEFL_COMPUTE_ADLER32 | tdefl_create_comp_flags_from_zip_params(level, window_bits, strategy);

    if (!pStream)
        return MZ_STREAM_ERROR;
    if ((method != MZ_DEFLATED) || ((mem_level < 1) || (mem_level > 9)) || ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS)))
        return MZ_PARAM_ERROR;

    pStream->data_type = 0;
    pStream->adler = MZ_ADLER32_INIT;
    pStream->msg = NULL;
    pStream->reserved = 0;
    pStream->total_in = 0;
    pStream->total_out = 0;
    if (!pStream->zalloc)
        pStream->zalloc = miniz_def_alloc_func;
    if (!pStream->zfree)
        pStream->zfree = miniz_def_free_func;

    pComp = (tdefl_compressor *)pStream->zalloc(pStream->opaque, 1, sizeof(tdefl_compressor));
    if (!pComp)
        return MZ_MEM_ERROR;

    pStream->state = (struct mz_internal_state *)pComp;

    if (tdefl_init(pComp, NULL, NULL, comp_flags) != TDEFL_STATUS_OKAY)
    {
        mz_deflateEnd(pStream);
        return MZ_PARAM_ERROR;
    }

    return MZ_OK;
}

int mz_deflateReset(mz_streamp pStream)
{
    if ((!pStream) || (!pStream->state) || (!pStream->zalloc) || (!pStream->zfree))
        return MZ_STREAM_ERROR;
    pStream->total_in = pStream->total_out = 0;
    tdefl_init((tdefl_compressor *)pStream->state, NULL, NULL, ((tdefl_compressor *)pStream->state)->m_flags);
    return MZ_OK;
}

int mz_deflate(mz_streamp pStream, int flush)
{
    size_t in_bytes, out_bytes;
    mz_ulong orig_total_in, orig_total_out;
    int mz_status = MZ_OK;

    if ((!pStream) || (!pStream->state) || (flush < 0) || (flush > MZ_FINISH) || (!pStream->next_out))
        return MZ_STREAM_ERROR;
    if (!pStream->avail_out)
        return MZ_BUF_ERROR;

    if (flush == MZ_PARTIAL_FLUSH)
        flush = MZ_SYNC_FLUSH;

    if (((tdefl_compressor *)pStream->state)->m_prev_return_status == TDEFL_STATUS_DONE)
        return (flush == MZ_FINISH) ? MZ_STREAM_END : MZ_BUF_ERROR;

    orig_total_in = pStream->total_in;
    orig_total_out = pStream->total_out;
    for (;;)
    {
        tdefl_status defl_status;
        in_bytes = pStream->avail_in;
        out_bytes = pStream->avail_out;

        defl_status = tdefl_compress((tdefl_compressor *)pStream->state, pStream->next_in, &in_bytes, pStream->next_out, &out_bytes, (tdefl_flush)flush);
        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tdefl_get_adler32((tdefl_compressor *)pStream->state);

        pStream->next_out += (mz_uint)out_bytes;
        pStream->avail_out -= (mz_uint)out_bytes;
        pStream->total_out += (mz_uint)out_bytes;

        if (defl_status < 0)
        {
            mz_status = MZ_STREAM_ERROR;
            break;
        }
        else if (defl_status == TDEFL_STATUS_DONE)
        {
            mz_status = MZ_STREAM_END;
            break;
        }
        else if (!pStream->avail_out)
            break;
        else if ((!pStream->avail_in) && (flush != MZ_FINISH))
        {
            if ((flush) || (pStream->total_in != orig_total_in) || (pStream->total_out != orig_total_out))
                break;
            return MZ_BUF_ERROR; /* Can't make forward progress without some input.
 */
        }
    }
    return mz_status;
}

int mz_deflateEnd(mz_streamp pStream)
{
    if (!pStream)
        return MZ_STREAM_ERROR;
    if (pStream->state)
    {
        pStream->zfree(pStream->opaque, pStream->state);
        pStream->state = NULL;
    }
    return MZ_OK;
}

mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len)
{
    (void)pStream;
    /* This is really over conservative. (And lame, but it's actually pretty tricky to compute a true upper bound given the way tdefl's blocking works.) */
    return MZ_MAX(128 + (source_len * 110) / 100, 128 + source_len + ((source_len / (31 * 1024)) + 1) * 5);
}

int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level)
{
    int status;
    mz_stream stream;
    memset(&stream, 0, sizeof(stream));

    /* In case mz_ulong is 64-bits (argh I hate longs). */
    if ((source_len | *pDest_len) > 0xFFFFFFFFU)
        return MZ_PARAM_ERROR;

    stream.next_in = pSource;
    stream.avail_in = (mz_uint32)source_len;
    stream.next_out = pDest;
    stream.avail_out = (mz_uint32)*pDest_len;

    status = mz_deflateInit(&stream, level);
    if (status != MZ_OK)
        return status;

    status = mz_deflate(&stream, MZ_FINISH);
    if (status != MZ_STREAM_END)
    {
        mz_deflateEnd(&stream);
        return (status == MZ_OK) ? MZ_BUF_ERROR : status;
    }

    *pDest_len = stream.total_out;
    return mz_deflateEnd(&stream);
}

int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
{
    return mz_compress2(pDest, pDest_len, pSource, source_len, MZ_DEFAULT_COMPRESSION);
}

mz_ulong mz_compressBound(mz_ulong source_len)
{
    return mz_deflateBound(NULL, source_len);
}

typedef struct
{
    tinfl_decompressor m_decomp;
    mz_uint m_dict_ofs, m_dict_avail, m_first_call, m_has_flushed;
    int m_window_bits;
    mz_uint8 m_dict[TINFL_LZ_DICT_SIZE];
    tinfl_status m_last_status;
} inflate_state;

int mz_inflateInit2(mz_streamp pStream, int window_bits)
{
    inflate_state *pDecomp;
    if (!pStream)
        return MZ_STREAM_ERROR;
    if ((window_bits != MZ_DEFAULT_WINDOW_BITS) && (-window_bits != MZ_DEFAULT_WINDOW_BITS))
        return MZ_PARAM_ERROR;

    pStream->data_type = 0;
    pStream->adler = 0;
    pStream->msg = NULL;
    pStream->total_in = 0;
    pStream->total_out = 0;
    pStream->reserved = 0;
    if (!pStream->zalloc)
        pStream->zalloc = miniz_def_alloc_func;
    if (!pStream->zfree)
        pStream->zfree = miniz_def_free_func;

    pDecomp = (inflate_state *)pStream->zalloc(pStream->opaque, 1, sizeof(inflate_state));
    if (!pDecomp)
        return MZ_MEM_ERROR;

    pStream->state = (struct mz_internal_state *)pDecomp;

    tinfl_init(&pDecomp->m_decomp);
    pDecomp->m_dict_ofs = 0;
    pDecomp->m_dict_avail = 0;
    pDecomp->m_last_status = TINFL_STATUS_NEEDS_MORE_INPUT;
    pDecomp->m_first_call = 1;
    pDecomp->m_has_flushed = 0;
    pDecomp->m_window_bits = window_bits;

    return MZ_OK;
}

int mz_inflateInit(mz_streamp pStream)
{
    return mz_inflateInit2(pStream, MZ_DEFAULT_WINDOW_BITS);
}

int mz_inflate(mz_streamp pStream, int flush)
{
    inflate_state *pState;
    mz_uint n, first_call, decomp_flags = TINFL_FLAG_COMPUTE_ADLER32;
    size_t in_bytes, out_bytes, orig_avail_in;
    tinfl_status status;

    if ((!pStream) || (!pStream->state))
        return MZ_STREAM_ERROR;
    if (flush == MZ_PARTIAL_FLUSH)
        flush = MZ_SYNC_FLUSH;
    if ((flush) && (flush != MZ_SYNC_FLUSH) && (flush != MZ_FINISH))
        return MZ_STREAM_ERROR;

    pState = (inflate_state *)pStream->state;
    if (pState->m_window_bits > 0)
        decomp_flags |= TINFL_FLAG_PARSE_ZLIB_HEADER;
    orig_avail_in = pStream->avail_in;

    first_call = pState->m_first_call;
    pState->m_first_call = 0;
    if (pState->m_last_status < 0)
        return MZ_DATA_ERROR;

    if (pState->m_has_flushed && (flush != MZ_FINISH))
        return MZ_STREAM_ERROR;
    pState->m_has_flushed |= (flush == MZ_FINISH);

    if ((flush == MZ_FINISH) && (first_call))
    {
        /* MZ_FINISH on the first call implies that the input and output buffers are large enough to hold the entire compressed/decompressed file. */
        decomp_flags |= TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF;
        in_bytes = pStream->avail_in;
        out_bytes = pStream->avail_out;
        status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pStream->next_out, pStream->next_out, &out_bytes, decomp_flags);
        pState->m_last_status = status;
        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tinfl_get_adler32(&pState->m_decomp);
        pStream->next_out += (mz_uint)out_bytes;
        pStream->avail_out -= (mz_uint)out_bytes;
        pStream->total_out += (mz_uint)out_bytes;

        if (status < 0)
            return MZ_DATA_ERROR;
        else if (status != TINFL_STATUS_DONE)
        {
            pState->m_last_status = TINFL_STATUS_FAILED;
            return MZ_BUF_ERROR;
        }
        return MZ_STREAM_END;
    }
    /* flush != MZ_FINISH then we must assume there's more input. */
    if (flush != MZ_FINISH)
        decomp_flags |= TINFL_FLAG_HAS_MORE_INPUT;

    if (pState->m_dict_avail)
    {
        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);
        return ((pState->m_last_status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
    }

    for (;;)
    {
        in_bytes = pStream->avail_in;
        out_bytes = TINFL_LZ_DICT_SIZE - pState->m_dict_ofs;

        status = tinfl_decompress(&pState->m_decomp, pStream->next_in, &in_bytes, pState->m_dict, pState->m_dict + pState->m_dict_ofs, &out_bytes, decomp_flags);
        pState->m_last_status = status;

        pStream->next_in += (mz_uint)in_bytes;
        pStream->avail_in -= (mz_uint)in_bytes;
        pStream->total_in += (mz_uint)in_bytes;
        pStream->adler = tinfl_get_adler32(&pState->m_decomp);

        pState->m_dict_avail = (mz_uint)out_bytes;

        n = MZ_MIN(pState->m_dict_avail, pStream->avail_out);
        memcpy(pStream->next_out, pState->m_dict + pState->m_dict_ofs, n);
        pStream->next_out += n;
        pStream->avail_out -= n;
        pStream->total_out += n;
        pState->m_dict_avail -= n;
        pState->m_dict_ofs = (pState->m_dict_ofs + n) & (TINFL_LZ_DICT_SIZE - 1);

        if (status < 0)
            return MZ_DATA_ERROR; /* Stream is corrupted (there could be some uncompressed data left in the output dictionary - oh well). */
        else if ((status == TINFL_STATUS_NEEDS_MORE_INPUT) && (!orig_avail_in))
            return MZ_BUF_ERROR; /* Signal caller that we can't make forward progress without supplying more input or by setting flush to MZ_FINISH. */
        else if (flush == MZ_FINISH)
        {
            /* The output buffer MUST be large enough to hold the remaining uncompressed data when flush==MZ_FINISH. */
            if (status == TINFL_STATUS_DONE)
                return pState->m_dict_avail ? MZ_BUF_ERROR : MZ_STREAM_END;
            /* status here must be TINFL_STATUS_HAS_MORE_OUTPUT, which means there's at least 1 more byte on the way. If there's no more room left in the output buffer then something is wrong. */
            else if (!pStream->avail_out)
                return MZ_BUF_ERROR;
        }
        else if ((status == TINFL_STATUS_DONE) || (!pStream->avail_in) || (!pStream->avail_out) || (pState->m_dict_avail))
            break;
    }

    return ((status == TINFL_STATUS_DONE) && (!pState->m_dict_avail)) ? MZ_STREAM_END : MZ_OK;
}

int mz_inflateEnd(mz_streamp pStream)
{
    if (!pStream)
        return MZ_STREAM_ERROR;
    if (pStream->state)
    {
        pStream->zfree(pStream->opaque, pStream->state);
        pStream->state = NULL;
    }
    return MZ_OK;
}

int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len)
{
    mz_stream stream;
    int status;
    memset(&stream, 0, sizeof(stream));

    /* In case mz_ulong is 64-bits (argh I hate longs). */
    if ((source_len | *pDest_len) > 0xFFFFFFFFU)
        return MZ_PARAM_ERROR;

    stream.next_in = pSource;
    stream.avail_in = (mz_uint32)source_len;
    stream.next_out = pDest;
    stream.avail_out = (mz_uint32)*pDest_len;

    status = mz_inflateInit(&stream);
    if (status != MZ_OK)
        return status;

    status = mz_inflate(&stream, MZ_FINISH);
    if (status != MZ_STREAM_END)
    {
        mz_inflateEnd(&stream);
        return ((status == MZ_BUF_ERROR) && (!stream.avail_in)) ? MZ_DATA_ERROR : status;
    }
    *pDest_len = stream.total_out;

    return mz_inflateEnd(&stream);
}
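
/* Usage sketch (illustrative): one-shot round trip with the zlib-style
 * helpers above. mz_compressBound() gives a safe worst-case output size for
 * mz_compress(); mz_uncompress() requires the caller to know, or bound, the
 * original uncompressed size. */
#if 0
static int example_roundtrip(const unsigned char *src, mz_ulong src_len)
{
    mz_ulong comp_len = mz_compressBound(src_len);
    unsigned char *comp = (unsigned char *)malloc(comp_len);
    if (!comp || mz_compress(comp, &comp_len, src, src_len) != MZ_OK) {
        free(comp);
        return -1;
    }
    mz_ulong out_len = src_len;
    unsigned char *out = (unsigned char *)malloc(out_len);
    int status = out ? mz_uncompress(out, &out_len, comp, comp_len) : MZ_MEM_ERROR;
    free(comp);
    free(out);
    return status == MZ_OK ? 0 : -1;
}
#endif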

const char *mz_error(int err)
{
    static struct
    {
        int m_err;
        const char *m_pDesc;
    } s_error_descs[] =
        {
          { MZ_OK, "" }, { MZ_STREAM_END, "stream end" }, { MZ_NEED_DICT, "need dictionary" }, { MZ_ERRNO, "file error" }, { MZ_STREAM_ERROR, "stream error" }, { MZ_DATA_ERROR, "data error" }, { MZ_MEM_ERROR, "out of memory" }, { MZ_BUF_ERROR, "buf error" }, { MZ_VERSION_ERROR, "version error" }, { MZ_PARAM_ERROR, "parameter error" }
        };
    mz_uint i;
    for (i = 0; i < sizeof(s_error_descs) / sizeof(s_error_descs[0]); ++i)
        if (s_error_descs[i].m_err == err)
            return s_error_descs[i].m_pDesc;
    return NULL;
}

#endif /*MINIZ_NO_ZLIB_APIS */

#ifdef __cplusplus
}
#endif

/*
  This is free and unencumbered software released into the public domain.

  Anyone is free to copy, modify, publish, use, compile, sell, or
  distribute this software, either in source code form or as a compiled
  binary, for any purpose, commercial or non-commercial, and by any
  means.

  In jurisdictions that recognize copyright laws, the author or authors
  of this software dedicate any and all copyright interest in the
  software to the public domain. We make this dedication for the benefit
  of the public at large and to the detriment of our heirs and
  successors. We intend this dedication to be an overt act of
  relinquishment in perpetuity of all present and future rights to this
  software under copyright law.

  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
  OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  OTHER DEALINGS IN THE SOFTWARE.

  For more information, please refer to <http://unlicense.org/>
*/
/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/




#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- Low-level Compression (independent from all decompression API's) */

/* Purposely making these tables static for faster init and thread safety. */
static const mz_uint16 s_tdefl_len_sym[256] =
    {
      257, 258, 259, 260, 261, 262, 263, 264, 265, 265, 266, 266, 267, 267, 268, 268, 269, 269, 269, 269, 270, 270, 270, 270, 271, 271, 271, 271, 272, 272, 272, 272,
      273, 273, 273, 273, 273, 273, 273, 273, 274, 274, 274, 274, 274, 274, 274, 274, 275, 275, 275, 275, 275, 275, 275, 275, 276, 276, 276, 276, 276, 276, 276, 276,
      277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 277, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278, 278,
      279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 279, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280, 280,
      281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281, 281,
      282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282, 282,
      283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283, 283,
      284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 284, 285
    };

static const mz_uint8 s_tdefl_len_extra[256] =
    {
      0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
      4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 0
    };

static const mz_uint8 s_tdefl_small_dist_sym[512] =
    {
      0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11,
      11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
      14, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
      15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
      16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17,
      17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17, 17
    };

static const mz_uint8 s_tdefl_small_dist_extra[512] =
    {
      0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
      5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
      6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
      7, 7, 7, 7, 7, 7, 7, 7
    };

static const mz_uint8 s_tdefl_large_dist_sym[128] =
    {
      0, 0, 18, 19, 20, 20, 21, 21, 22, 22, 22, 22, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
      26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28,
      28, 28, 28, 28, 28, 28, 28, 28, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29
    };

static const mz_uint8 s_tdefl_large_dist_extra[128] =
    {
      0, 0, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
      12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
      13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
    };

/* Radix sorts tdefl_sym_freq[] array by 16-bit key m_key. Returns ptr to sorted values. */
typedef struct
{
    mz_uint16 m_key, m_sym_index;
} tdefl_sym_freq;
static tdefl_sym_freq *tdefl_radix_sort_syms(mz_uint num_syms, tdefl_sym_freq *pSyms0, tdefl_sym_freq *pSyms1)
{
    mz_uint32 total_passes = 2, pass_shift, pass, i, hist[256 * 2];
    tdefl_sym_freq *pCur_syms = pSyms0, *pNew_syms = pSyms1;
    MZ_CLEAR_OBJ(hist);
    for (i = 0; i < num_syms; i++)
    {
        mz_uint freq = pSyms0[i].m_key;
        hist[freq & 0xFF]++;
        hist[256 + ((freq >> 8) & 0xFF)]++;
    }
    while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256]))
        total_passes--;
    for (pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
    {
        const mz_uint32 *pHist = &hist[pass << 8];
        mz_uint offsets[256], cur_ofs = 0;
        for (i = 0; i < 256; i++)
        {
            offsets[i] = cur_ofs;
            cur_ofs += pHist[i];
        }
        for (i = 0; i < num_syms; i++)
            pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
        {
            tdefl_sym_freq *t = pCur_syms;
            pCur_syms = pNew_syms;
            pNew_syms = t;
        }
    }
    return pCur_syms;
}

/* tdefl_calculate_minimum_redundancy() originally written by: Alistair Moffat, alistair@cs.mu.oz.au, Jyrki Katajainen, jyrki@diku.dk, November 1996. */
static void tdefl_calculate_minimum_redundancy(tdefl_sym_freq *A, int n)
{
    int root, leaf, next, avbl, used, dpth;
    if (n == 0)
        return;
    else if (n == 1)
    {
        A[0].m_key = 1;
        return;
    }
    A[0].m_key += A[1].m_key;
    root = 0;
    leaf = 2;
    for (next = 1; next < n - 1; next++)
    {
        if (leaf >= n || A[root].m_key < A[leaf].m_key)
        {
            A[next].m_key = A[root].m_key;
            A[root++].m_key = (mz_uint16)next;
        }
        else
            A[next].m_key = A[leaf++].m_key;
        if (leaf >= n || (root < next && A[root].m_key < A[leaf].m_key))
        {
            A[next].m_key = (mz_uint16)(A[next].m_key + A[root].m_key);
            A[root++].m_key = (mz_uint16)next;
        }
        else
            A[next].m_key = (mz_uint16)(A[next].m_key + A[leaf++].m_key);
    }
    A[n - 2].m_key = 0;
    for (next = n - 3; next >= 0; next--)
        A[next].m_key = A[A[next].m_key].m_key + 1;
    avbl = 1;
    used = dpth = 0;
    root = n - 2;
    next = n - 1;
    while (avbl > 0)
    {
        while (root >= 0 && (int)A[root].m_key == dpth)
        {
            used++;
            root--;
        }
        while (avbl > used)
        {
            A[next--].m_key = (mz_uint16)(dpth);
            avbl--;
        }
        avbl = 2 * used;
        dpth++;
        used = 0;
    }
}

/* Limits canonical Huffman code table's max code size. */
enum
{
    TDEFL_MAX_SUPPORTED_HUFF_CODESIZE = 32
};
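
/* Note on the code-size adjustment below: after the first loop folds every
 * code longer than max_code_size down to max_code_size, the scaled Kraft sum
 *     total = sum over i of (pNum_codes[i] << (max_code_size - i))
 * may exceed 1 << max_code_size, meaning the lengths no longer describe a
 * valid prefix code. Each pass of the while loop drops one code of the
 * maximum length and splits one shorter code of some length i into two
 * codes of length i + 1: the number of codes is unchanged and the sum drops
 * by exactly one, until the Kraft equality holds again. */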
static void tdefl_huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
{
    int i;
    mz_uint32 total = 0;
    if (code_list_len <= 1)
        return;
    for (i = max_code_size + 1; i <= TDEFL_MAX_SUPPORTED_HUFF_CODESIZE; i++)
        pNum_codes[max_code_size] += pNum_codes[i];
    for (i = max_code_size; i > 0; i--)
        total += (((mz_uint32)pNum_codes[i]) << (max_code_size - i));
    while (total != (1UL << max_code_size))
    {
        pNum_codes[max_code_size]--;
        for (i = max_code_size - 1; i > 0; i--)
            if (pNum_codes[i])
            {
                pNum_codes[i]--;
                pNum_codes[i + 1] += 2;
                break;
            }
        total--;
    }
}

static void tdefl_optimize_huffman_table(tdefl_compressor *d, int table_num, int table_len, int code_size_limit, int static_table)
{
    int i, j, l, num_codes[1 + TDEFL_MAX_SUPPORTED_HUFF_CODESIZE];
    mz_uint next_code[TDEFL_MAX_SUPPORTED_HUFF_CODESIZE + 1];
    MZ_CLEAR_OBJ(num_codes);
    if (static_table)
    {
        for (i = 0; i < table_len; i++)
            num_codes[d->m_huff_code_sizes[table_num][i]]++;
    }
    else
    {
        tdefl_sym_freq syms0[TDEFL_MAX_HUFF_SYMBOLS], syms1[TDEFL_MAX_HUFF_SYMBOLS], *pSyms;
        int num_used_syms = 0;
        const mz_uint16 *pSym_count = &d->m_huff_count[table_num][0];
        for (i = 0; i < table_len; i++)
            if (pSym_count[i])
            {
                syms0[num_used_syms].m_key = (mz_uint16)pSym_count[i];
                syms0[num_used_syms++].m_sym_index = (mz_uint16)i;
            }

        pSyms = tdefl_radix_sort_syms(num_used_syms, syms0, syms1);
        tdefl_calculate_minimum_redundancy(pSyms, num_used_syms);

        for (i = 0; i < num_used_syms; i++)
            num_codes[pSyms[i].m_key]++;

        tdefl_huffman_enforce_max_code_size(num_codes, num_used_syms, code_size_limit);

        MZ_CLEAR_OBJ(d->m_huff_code_sizes[table_num]);
        MZ_CLEAR_OBJ(d->m_huff_codes[table_num]);
        for (i = 1, j = num_used_syms; i <= code_size_limit; i++)
            for (l = num_codes[i]; l > 0; l--)
                d->m_huff_code_sizes[table_num][pSyms[--j].m_sym_index] = (mz_uint8)(i);
    }

    next_code[1] = 0;
    for (j = 0, i = 2; i <= code_size_limit; i++)
        next_code[i] = j = ((j + num_codes[i - 1]) << 1);

    for (i = 0; i < table_len; i++)
    {
        mz_uint rev_code = 0, code, code_size;
        if ((code_size = d->m_huff_code_sizes[table_num][i]) == 0)
            continue;
        code = next_code[code_size]++;
        for (l = code_size; l > 0; l--, code >>= 1)
            rev_code = (rev_code << 1) | (code & 1);
        d->m_huff_codes[table_num][i] = (mz_uint16)rev_code;
    }
}

#define TDEFL_PUT_BITS(b, l)                                       \
    do                                                             \
    {                                                              \
        mz_uint bits = b;                                          \
        mz_uint len = l;                                           \
        MZ_ASSERT(bits <= ((1U << len) - 1U));                     \
        d->m_bit_buffer |= (bits << d->m_bits_in);                 \
        d->m_bits_in += len;                                       \
        while (d->m_bits_in >= 8)                                  \
        {                                                          \
            if (d->m_pOutput_buf < d->m_pOutput_buf_end)           \
                *d->m_pOutput_buf++ = (mz_uint8)(d->m_bit_buffer); \
            d->m_bit_buffer >>= 8;                                 \
            d->m_bits_in -= 8;                                     \
        }                                                          \
    }                                                              \
    MZ_MACRO_END
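
/* TDEFL_PUT_BITS() queues the low 'l' bits of 'b' LSB-first into d->m_bit_buffer and
   flushes whole bytes into d->m_pOutput_buf as they become available. It never fails
   outright: once the cursor reaches d->m_pOutput_buf_end further bytes are dropped,
   and callers detect the overflow afterwards by comparing d->m_pOutput_buf against
   the buffer end (see the return value of tdefl_compress_lz_codes()). */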

#define TDEFL_RLE_PREV_CODE_SIZE()                                                                                       \
    {                                                                                                                    \
        if (rle_repeat_count)                                                                                            \
        {                                                                                                                \
            if (rle_repeat_count < 3)                                                                                    \
            {                                                                                                            \
                d->m_huff_count[2][prev_code_size] = (mz_uint16)(d->m_huff_count[2][prev_code_size] + rle_repeat_count); \
                while (rle_repeat_count--)                                                                               \
                    packed_code_sizes[num_packed_code_sizes++] = prev_code_size;                                         \
            }                                                                                                            \
            else                                                                                                         \
            {                                                                                                            \
                d->m_huff_count[2][16] = (mz_uint16)(d->m_huff_count[2][16] + 1);                                        \
                packed_code_sizes[num_packed_code_sizes++] = 16;                                                         \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_repeat_count - 3);                           \
            }                                                                                                            \
            rle_repeat_count = 0;                                                                                        \
        }                                                                                                                \
    }

#define TDEFL_RLE_ZERO_CODE_SIZE()                                                         \
    {                                                                                      \
        if (rle_z_count)                                                                   \
        {                                                                                  \
            if (rle_z_count < 3)                                                           \
            {                                                                              \
                d->m_huff_count[2][0] = (mz_uint16)(d->m_huff_count[2][0] + rle_z_count);  \
                while (rle_z_count--)                                                      \
                    packed_code_sizes[num_packed_code_sizes++] = 0;                        \
            }                                                                              \
            else if (rle_z_count <= 10)                                                    \
            {                                                                              \
                d->m_huff_count[2][17] = (mz_uint16)(d->m_huff_count[2][17] + 1);          \
                packed_code_sizes[num_packed_code_sizes++] = 17;                           \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 3);  \
            }                                                                              \
            else                                                                           \
            {                                                                              \
                d->m_huff_count[2][18] = (mz_uint16)(d->m_huff_count[2][18] + 1);          \
                packed_code_sizes[num_packed_code_sizes++] = 18;                           \
                packed_code_sizes[num_packed_code_sizes++] = (mz_uint8)(rle_z_count - 11); \
            }                                                                              \
            rle_z_count = 0;                                                               \
        }                                                                                  \
    }

static mz_uint8 s_tdefl_packed_code_size_syms_swizzle[] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
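
/* The swizzle table above is the fixed order in which DEFLATE (RFC 1951, section
   3.2.7) transmits the code lengths of the code-length alphabet; trailing zero
   entries in this order may be omitted, which is why tdefl_start_dynamic_block()
   scans it backwards to determine num_bit_lengths. */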

static void tdefl_start_dynamic_block(tdefl_compressor *d)
{
    int num_lit_codes, num_dist_codes, num_bit_lengths;
    mz_uint i, total_code_sizes_to_pack, num_packed_code_sizes, rle_z_count, rle_repeat_count, packed_code_sizes_index;
    mz_uint8 code_sizes_to_pack[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], packed_code_sizes[TDEFL_MAX_HUFF_SYMBOLS_0 + TDEFL_MAX_HUFF_SYMBOLS_1], prev_code_size = 0xFF;

    d->m_huff_count[0][256] = 1;

    tdefl_optimize_huffman_table(d, 0, TDEFL_MAX_HUFF_SYMBOLS_0, 15, MZ_FALSE);
    tdefl_optimize_huffman_table(d, 1, TDEFL_MAX_HUFF_SYMBOLS_1, 15, MZ_FALSE);

    for (num_lit_codes = 286; num_lit_codes > 257; num_lit_codes--)
        if (d->m_huff_code_sizes[0][num_lit_codes - 1])
            break;
    for (num_dist_codes = 30; num_dist_codes > 1; num_dist_codes--)
        if (d->m_huff_code_sizes[1][num_dist_codes - 1])
            break;

    memcpy(code_sizes_to_pack, &d->m_huff_code_sizes[0][0], num_lit_codes);
    memcpy(code_sizes_to_pack + num_lit_codes, &d->m_huff_code_sizes[1][0], num_dist_codes);
    total_code_sizes_to_pack = num_lit_codes + num_dist_codes;
    num_packed_code_sizes = 0;
    rle_z_count = 0;
    rle_repeat_count = 0;

    memset(&d->m_huff_count[2][0], 0, sizeof(d->m_huff_count[2][0]) * TDEFL_MAX_HUFF_SYMBOLS_2);
    for (i = 0; i < total_code_sizes_to_pack; i++)
    {
        mz_uint8 code_size = code_sizes_to_pack[i];
        if (!code_size)
        {
            TDEFL_RLE_PREV_CODE_SIZE();
            if (++rle_z_count == 138)
            {
                TDEFL_RLE_ZERO_CODE_SIZE();
            }
        }
        else
        {
            TDEFL_RLE_ZERO_CODE_SIZE();
            if (code_size != prev_code_size)
            {
                TDEFL_RLE_PREV_CODE_SIZE();
                d->m_huff_count[2][code_size] = (mz_uint16)(d->m_huff_count[2][code_size] + 1);
                packed_code_sizes[num_packed_code_sizes++] = code_size;
            }
            else if (++rle_repeat_count == 6)
            {
                TDEFL_RLE_PREV_CODE_SIZE();
            }
        }
        prev_code_size = code_size;
    }
    if (rle_repeat_count)
    {
        TDEFL_RLE_PREV_CODE_SIZE();
    }
    else
    {
        TDEFL_RLE_ZERO_CODE_SIZE();
    }

    tdefl_optimize_huffman_table(d, 2, TDEFL_MAX_HUFF_SYMBOLS_2, 7, MZ_FALSE);

    TDEFL_PUT_BITS(2, 2);

    TDEFL_PUT_BITS(num_lit_codes - 257, 5);
    TDEFL_PUT_BITS(num_dist_codes - 1, 5);

    for (num_bit_lengths = 18; num_bit_lengths >= 0; num_bit_lengths--)
        if (d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[num_bit_lengths]])
            break;
    num_bit_lengths = MZ_MAX(4, (num_bit_lengths + 1));
    TDEFL_PUT_BITS(num_bit_lengths - 4, 4);
    for (i = 0; (int)i < num_bit_lengths; i++)
        TDEFL_PUT_BITS(d->m_huff_code_sizes[2][s_tdefl_packed_code_size_syms_swizzle[i]], 3);

    for (packed_code_sizes_index = 0; packed_code_sizes_index < num_packed_code_sizes;)
    {
        mz_uint code = packed_code_sizes[packed_code_sizes_index++];
        MZ_ASSERT(code < TDEFL_MAX_HUFF_SYMBOLS_2);
        TDEFL_PUT_BITS(d->m_huff_codes[2][code], d->m_huff_code_sizes[2][code]);
        if (code >= 16)
            TDEFL_PUT_BITS(packed_code_sizes[packed_code_sizes_index++], "\02\03\07"[code - 16]);
    }
}

static void tdefl_start_static_block(tdefl_compressor *d)
{
    mz_uint i;
    mz_uint8 *p = &d->m_huff_code_sizes[0][0];

    for (i = 0; i <= 143; ++i)
        *p++ = 8;
    for (; i <= 255; ++i)
        *p++ = 9;
    for (; i <= 279; ++i)
        *p++ = 7;
    for (; i <= 287; ++i)
        *p++ = 8;

    memset(d->m_huff_code_sizes[1], 5, 32);

    tdefl_optimize_huffman_table(d, 0, 288, 15, MZ_TRUE);
    tdefl_optimize_huffman_table(d, 1, 32, 15, MZ_TRUE);

    TDEFL_PUT_BITS(1, 2);
}

static const mz_uint mz_bitmasks[17] = { 0x0000, 0x0001, 0x0003, 0x0007, 0x000F, 0x001F, 0x003F, 0x007F, 0x00FF, 0x01FF, 0x03FF, 0x07FF, 0x0FFF, 0x1FFF, 0x3FFF, 0x7FFF, 0xFFFF };

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
{
    mz_uint flags;
    mz_uint8 *pLZ_codes;
    mz_uint8 *pOutput_buf = d->m_pOutput_buf;
    mz_uint8 *pLZ_code_buf_end = d->m_pLZ_code_buf;
    mz_uint64 bit_buffer = d->m_bit_buffer;
    mz_uint bits_in = d->m_bits_in;

#define TDEFL_PUT_BITS_FAST(b, l)                    \
    {                                                \
        bit_buffer |= (((mz_uint64)(b)) << bits_in); \
        bits_in += (l);                              \
    }

    flags = 1;
    for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < pLZ_code_buf_end; flags >>= 1)
    {
        if (flags == 1)
            flags = *pLZ_codes++ | 0x100;

        if (flags & 1)
        {
            mz_uint s0, s1, n0, n1, sym, num_extra_bits;
            mz_uint match_len = pLZ_codes[0], match_dist = *(const mz_uint16 *)(pLZ_codes + 1);
            pLZ_codes += 3;

            MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS_FAST(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);

            /* This sequence coaxes MSVC into using cmov's vs. jmp's. */
            s0 = s_tdefl_small_dist_sym[match_dist & 511];
            n0 = s_tdefl_small_dist_extra[match_dist & 511];
            s1 = s_tdefl_large_dist_sym[match_dist >> 8];
            n1 = s_tdefl_large_dist_extra[match_dist >> 8];
            sym = (match_dist < 512) ? s0 : s1;
            num_extra_bits = (match_dist < 512) ? n0 : n1;

            MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS_FAST(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
        }
        else
        {
            mz_uint lit = *pLZ_codes++;
            MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
            TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);

            if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
            {
                flags >>= 1;
                lit = *pLZ_codes++;
                MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
                TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);

                if (((flags & 2) == 0) && (pLZ_codes < pLZ_code_buf_end))
                {
                    flags >>= 1;
                    lit = *pLZ_codes++;
                    MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
                    TDEFL_PUT_BITS_FAST(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
                }
            }
        }

        if (pOutput_buf >= d->m_pOutput_buf_end)
            return MZ_FALSE;

        *(mz_uint64 *)pOutput_buf = bit_buffer;
        pOutput_buf += (bits_in >> 3);
        bit_buffer >>= (bits_in & ~7);
        bits_in &= 7;
    }

#undef TDEFL_PUT_BITS_FAST

    d->m_pOutput_buf = pOutput_buf;
    d->m_bits_in = 0;
    d->m_bit_buffer = 0;

    while (bits_in)
    {
        mz_uint32 n = MZ_MIN(bits_in, 16);
        TDEFL_PUT_BITS((mz_uint)bit_buffer & mz_bitmasks[n], n);
        bit_buffer >>= n;
        bits_in -= n;
    }

    TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

    return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#else
static mz_bool tdefl_compress_lz_codes(tdefl_compressor *d)
{
    mz_uint flags;
    mz_uint8 *pLZ_codes;

    flags = 1;
    for (pLZ_codes = d->m_lz_code_buf; pLZ_codes < d->m_pLZ_code_buf; flags >>= 1)
    {
        if (flags == 1)
            flags = *pLZ_codes++ | 0x100;
        if (flags & 1)
        {
            mz_uint sym, num_extra_bits;
            mz_uint match_len = pLZ_codes[0], match_dist = (pLZ_codes[1] | (pLZ_codes[2] << 8));
            pLZ_codes += 3;

            MZ_ASSERT(d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS(d->m_huff_codes[0][s_tdefl_len_sym[match_len]], d->m_huff_code_sizes[0][s_tdefl_len_sym[match_len]]);
            TDEFL_PUT_BITS(match_len & mz_bitmasks[s_tdefl_len_extra[match_len]], s_tdefl_len_extra[match_len]);

            if (match_dist < 512)
            {
                sym = s_tdefl_small_dist_sym[match_dist];
                num_extra_bits = s_tdefl_small_dist_extra[match_dist];
            }
            else
            {
                sym = s_tdefl_large_dist_sym[match_dist >> 8];
                num_extra_bits = s_tdefl_large_dist_extra[match_dist >> 8];
            }
            MZ_ASSERT(d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS(d->m_huff_codes[1][sym], d->m_huff_code_sizes[1][sym]);
            TDEFL_PUT_BITS(match_dist & mz_bitmasks[num_extra_bits], num_extra_bits);
        }
        else
        {
            mz_uint lit = *pLZ_codes++;
            MZ_ASSERT(d->m_huff_code_sizes[0][lit]);
            TDEFL_PUT_BITS(d->m_huff_codes[0][lit], d->m_huff_code_sizes[0][lit]);
        }
    }

    TDEFL_PUT_BITS(d->m_huff_codes[0][256], d->m_huff_code_sizes[0][256]);

    return (d->m_pOutput_buf < d->m_pOutput_buf_end);
}
#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN && MINIZ_HAS_64BIT_REGISTERS */

static mz_bool tdefl_compress_block(tdefl_compressor *d, mz_bool static_block)
{
    if (static_block)
        tdefl_start_static_block(d);
    else
        tdefl_start_dynamic_block(d);
    return tdefl_compress_lz_codes(d);
}

static int tdefl_flush_block(tdefl_compressor *d, int flush)
{
    mz_uint saved_bit_buf, saved_bits_in;
    mz_uint8 *pSaved_output_buf;
    mz_bool comp_block_succeeded = MZ_FALSE;
    int n, use_raw_block = ((d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS) != 0) && (d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size;
    mz_uint8 *pOutput_buf_start = ((d->m_pPut_buf_func == NULL) && ((*d->m_pOut_buf_size - d->m_out_buf_ofs) >= TDEFL_OUT_BUF_SIZE)) ? ((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs) : d->m_output_buf;

    d->m_pOutput_buf = pOutput_buf_start;
    d->m_pOutput_buf_end = d->m_pOutput_buf + TDEFL_OUT_BUF_SIZE - 16;

    MZ_ASSERT(!d->m_output_flush_remaining);
    d->m_output_flush_ofs = 0;
    d->m_output_flush_remaining = 0;

    *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> d->m_num_flags_left);
    d->m_pLZ_code_buf -= (d->m_num_flags_left == 8);

    if ((d->m_flags & TDEFL_WRITE_ZLIB_HEADER) && (!d->m_block_index))
    {
        TDEFL_PUT_BITS(0x78, 8);
        TDEFL_PUT_BITS(0x01, 8);
    }

    TDEFL_PUT_BITS(flush == TDEFL_FINISH, 1);

    pSaved_output_buf = d->m_pOutput_buf;
    saved_bit_buf = d->m_bit_buffer;
    saved_bits_in = d->m_bits_in;

    if (!use_raw_block)
        comp_block_succeeded = tdefl_compress_block(d, (d->m_flags & TDEFL_FORCE_ALL_STATIC_BLOCKS) || (d->m_total_lz_bytes < 48));

    /* If the block gets expanded, forget the current contents of the output buffer and send a raw block instead. */
    if (((use_raw_block) || ((d->m_total_lz_bytes) && ((d->m_pOutput_buf - pSaved_output_buf + 1U) >= d->m_total_lz_bytes))) &&
        ((d->m_lookahead_pos - d->m_lz_code_buf_dict_pos) <= d->m_dict_size))
    {
        mz_uint i;
        d->m_pOutput_buf = pSaved_output_buf;
        d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
        TDEFL_PUT_BITS(0, 2);
        if (d->m_bits_in)
        {
            TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
        }
        for (i = 2; i; --i, d->m_total_lz_bytes ^= 0xFFFF)
        {
            TDEFL_PUT_BITS(d->m_total_lz_bytes & 0xFFFF, 16);
        }
        for (i = 0; i < d->m_total_lz_bytes; ++i)
        {
            TDEFL_PUT_BITS(d->m_dict[(d->m_lz_code_buf_dict_pos + i) & TDEFL_LZ_DICT_SIZE_MASK], 8);
        }
    }
    /* Check for the extremely unlikely (if not impossible) case of the compressed block not fitting into the output buffer when using dynamic codes. */
    else if (!comp_block_succeeded)
    {
        d->m_pOutput_buf = pSaved_output_buf;
        d->m_bit_buffer = saved_bit_buf, d->m_bits_in = saved_bits_in;
        tdefl_compress_block(d, MZ_TRUE);
    }

    if (flush)
    {
        if (flush == TDEFL_FINISH)
        {
            if (d->m_bits_in)
            {
                TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
            }
            if (d->m_flags & TDEFL_WRITE_ZLIB_HEADER)
            {
                mz_uint i, a = d->m_adler32;
                for (i = 0; i < 4; i++)
                {
                    TDEFL_PUT_BITS((a >> 24) & 0xFF, 8);
                    a <<= 8;
                }
            }
        }
        else
        {
            mz_uint i, z = 0;
            TDEFL_PUT_BITS(0, 3);
            if (d->m_bits_in)
            {
                TDEFL_PUT_BITS(0, 8 - d->m_bits_in);
            }
            for (i = 2; i; --i, z ^= 0xFFFF)
            {
                TDEFL_PUT_BITS(z & 0xFFFF, 16);
            }
        }
    }

    MZ_ASSERT(d->m_pOutput_buf < d->m_pOutput_buf_end);

    memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
    memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);

    d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
    d->m_pLZ_flags = d->m_lz_code_buf;
    d->m_num_flags_left = 8;
    d->m_lz_code_buf_dict_pos += d->m_total_lz_bytes;
    d->m_total_lz_bytes = 0;
    d->m_block_index++;

    if ((n = (int)(d->m_pOutput_buf - pOutput_buf_start)) != 0)
    {
        if (d->m_pPut_buf_func)
        {
            *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
            if (!(*d->m_pPut_buf_func)(d->m_output_buf, n, d->m_pPut_buf_user))
                return (d->m_prev_return_status = TDEFL_STATUS_PUT_BUF_FAILED);
        }
        else if (pOutput_buf_start == d->m_output_buf)
        {
            int bytes_to_copy = (int)MZ_MIN((size_t)n, (size_t)(*d->m_pOut_buf_size - d->m_out_buf_ofs));
            memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf, bytes_to_copy);
            d->m_out_buf_ofs += bytes_to_copy;
            if ((n -= bytes_to_copy) != 0)
            {
                d->m_output_flush_ofs = bytes_to_copy;
                d->m_output_flush_remaining = n;
            }
        }
        else
        {
            d->m_out_buf_ofs += n;
        }
    }

    return d->m_output_flush_remaining;
}

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
#ifdef MINIZ_UNALIGNED_USE_MEMCPY
static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD(const mz_uint8* p)
{
	mz_uint16 ret;
	memcpy(&ret, p, sizeof(mz_uint16));
	return ret;
}
static inline mz_uint16 TDEFL_READ_UNALIGNED_WORD2(const mz_uint16* p)
{
	mz_uint16 ret;
	memcpy(&ret, p, sizeof(mz_uint16));
	return ret;
}
#else
#define TDEFL_READ_UNALIGNED_WORD(p) *(const mz_uint16 *)(p)
#define TDEFL_READ_UNALIGNED_WORD2(p) *(const mz_uint16 *)(p)
#endif
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
{
    mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
    mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
    const mz_uint16 *s = (const mz_uint16 *)(d->m_dict + pos), *p, *q;
    mz_uint16 c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]), s01 = TDEFL_READ_UNALIGNED_WORD2(s);
    MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
    if (max_match_len <= match_len)
        return;
    for (;;)
    {
        for (;;)
        {
            if (--num_probes_left == 0)
                return;
#define TDEFL_PROBE                                                                             \
    next_probe_pos = d->m_next[probe_pos];                                                      \
    if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist)) \
        return;                                                                                 \
    probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                                       \
    if (TDEFL_READ_UNALIGNED_WORD(&d->m_dict[probe_pos + match_len - 1]) == c01)                \
        break;
            TDEFL_PROBE;
            TDEFL_PROBE;
            TDEFL_PROBE;
        }
        if (!dist)
            break;
        q = (const mz_uint16 *)(d->m_dict + probe_pos);
        if (TDEFL_READ_UNALIGNED_WORD2(q) != s01)
            continue;
        p = s;
        probe_len = 32;
        do
        {
        } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
                 (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
        if (!probe_len)
        {
            *pMatch_dist = dist;
            *pMatch_len = MZ_MIN(max_match_len, (mz_uint)TDEFL_MAX_MATCH_LEN);
            break;
        }
        else if ((probe_len = ((mz_uint)(p - s) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q)) > match_len)
        {
            *pMatch_dist = dist;
            if ((*pMatch_len = match_len = MZ_MIN(max_match_len, probe_len)) == max_match_len)
                break;
            c01 = TDEFL_READ_UNALIGNED_WORD(&d->m_dict[pos + match_len - 1]);
        }
    }
}
#else
static MZ_FORCEINLINE void tdefl_find_match(tdefl_compressor *d, mz_uint lookahead_pos, mz_uint max_dist, mz_uint max_match_len, mz_uint *pMatch_dist, mz_uint *pMatch_len)
{
    mz_uint dist, pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK, match_len = *pMatch_len, probe_pos = pos, next_probe_pos, probe_len;
    mz_uint num_probes_left = d->m_max_probes[match_len >= 32];
    const mz_uint8 *s = d->m_dict + pos, *p, *q;
    mz_uint8 c0 = d->m_dict[pos + match_len], c1 = d->m_dict[pos + match_len - 1];
    MZ_ASSERT(max_match_len <= TDEFL_MAX_MATCH_LEN);
    if (max_match_len <= match_len)
        return;
    for (;;)
    {
        for (;;)
        {
            if (--num_probes_left == 0)
                return;
#define TDEFL_PROBE                                                                               \
    next_probe_pos = d->m_next[probe_pos];                                                        \
    if ((!next_probe_pos) || ((dist = (mz_uint16)(lookahead_pos - next_probe_pos)) > max_dist))   \
        return;                                                                                   \
    probe_pos = next_probe_pos & TDEFL_LZ_DICT_SIZE_MASK;                                         \
    if ((d->m_dict[probe_pos + match_len] == c0) && (d->m_dict[probe_pos + match_len - 1] == c1)) \
        break;
            TDEFL_PROBE;
            TDEFL_PROBE;
            TDEFL_PROBE;
        }
        if (!dist)
            break;
        p = s;
        q = d->m_dict + probe_pos;
        for (probe_len = 0; probe_len < max_match_len; probe_len++)
            if (*p++ != *q++)
                break;
        if (probe_len > match_len)
        {
            *pMatch_dist = dist;
            if ((*pMatch_len = match_len = probe_len) == max_match_len)
                return;
            c0 = d->m_dict[pos + match_len];
            c1 = d->m_dict[pos + match_len - 1];
        }
    }
}
#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES */

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
static mz_bool tdefl_compress_fast(tdefl_compressor *d)
{
    /* Faster, minimally featured LZRW1-style match+parse loop with better register utilization. Intended for applications where raw throughput is valued more highly than ratio. */
    mz_uint lookahead_pos = d->m_lookahead_pos, lookahead_size = d->m_lookahead_size, dict_size = d->m_dict_size, total_lz_bytes = d->m_total_lz_bytes, num_flags_left = d->m_num_flags_left;
    mz_uint8 *pLZ_code_buf = d->m_pLZ_code_buf, *pLZ_flags = d->m_pLZ_flags;
    mz_uint cur_pos = lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;

    while ((d->m_src_buf_left) || ((d->m_flush) && (lookahead_size)))
    {
        const mz_uint TDEFL_COMP_FAST_LOOKAHEAD_SIZE = 4096;
        mz_uint dst_pos = (lookahead_pos + lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
        mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(d->m_src_buf_left, TDEFL_COMP_FAST_LOOKAHEAD_SIZE - lookahead_size);
        d->m_src_buf_left -= num_bytes_to_process;
        lookahead_size += num_bytes_to_process;

        while (num_bytes_to_process)
        {
            mz_uint32 n = MZ_MIN(TDEFL_LZ_DICT_SIZE - dst_pos, num_bytes_to_process);
            memcpy(d->m_dict + dst_pos, d->m_pSrc, n);
            if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                memcpy(d->m_dict + TDEFL_LZ_DICT_SIZE + dst_pos, d->m_pSrc, MZ_MIN(n, (TDEFL_MAX_MATCH_LEN - 1) - dst_pos));
            d->m_pSrc += n;
            dst_pos = (dst_pos + n) & TDEFL_LZ_DICT_SIZE_MASK;
            num_bytes_to_process -= n;
        }

        dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - lookahead_size, dict_size);
        if ((!d->m_flush) && (lookahead_size < TDEFL_COMP_FAST_LOOKAHEAD_SIZE))
            break;

        while (lookahead_size >= 4)
        {
            mz_uint cur_match_dist, cur_match_len = 1;
            mz_uint8 *pCur_dict = d->m_dict + cur_pos;
            mz_uint first_trigram = (*(const mz_uint32 *)pCur_dict) & 0xFFFFFF;
            mz_uint hash = (first_trigram ^ (first_trigram >> (24 - (TDEFL_LZ_HASH_BITS - 8)))) & TDEFL_LEVEL1_HASH_SIZE_MASK;
            mz_uint probe_pos = d->m_hash[hash];
            d->m_hash[hash] = (mz_uint16)lookahead_pos;

            if (((cur_match_dist = (mz_uint16)(lookahead_pos - probe_pos)) <= dict_size) && ((*(const mz_uint32 *)(d->m_dict + (probe_pos &= TDEFL_LZ_DICT_SIZE_MASK)) & 0xFFFFFF) == first_trigram))
            {
                const mz_uint16 *p = (const mz_uint16 *)pCur_dict;
                const mz_uint16 *q = (const mz_uint16 *)(d->m_dict + probe_pos);
                mz_uint32 probe_len = 32;
                do
                {
                } while ((TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) &&
                         (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (TDEFL_READ_UNALIGNED_WORD2(++p) == TDEFL_READ_UNALIGNED_WORD2(++q)) && (--probe_len > 0));
                cur_match_len = ((mz_uint)(p - (const mz_uint16 *)pCur_dict) * 2) + (mz_uint)(*(const mz_uint8 *)p == *(const mz_uint8 *)q);
                if (!probe_len)
                    cur_match_len = cur_match_dist ? TDEFL_MAX_MATCH_LEN : 0;

                if ((cur_match_len < TDEFL_MIN_MATCH_LEN) || ((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)))
                {
                    cur_match_len = 1;
                    *pLZ_code_buf++ = (mz_uint8)first_trigram;
                    *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
                    d->m_huff_count[0][(mz_uint8)first_trigram]++;
                }
                else
                {
                    mz_uint32 s0, s1;
                    cur_match_len = MZ_MIN(cur_match_len, lookahead_size);

                    MZ_ASSERT((cur_match_len >= TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 1) && (cur_match_dist <= TDEFL_LZ_DICT_SIZE));

                    cur_match_dist--;

                    pLZ_code_buf[0] = (mz_uint8)(cur_match_len - TDEFL_MIN_MATCH_LEN);
                    *(mz_uint16 *)(&pLZ_code_buf[1]) = (mz_uint16)cur_match_dist;
                    pLZ_code_buf += 3;
                    *pLZ_flags = (mz_uint8)((*pLZ_flags >> 1) | 0x80);

                    s0 = s_tdefl_small_dist_sym[cur_match_dist & 511];
                    s1 = s_tdefl_large_dist_sym[cur_match_dist >> 8];
                    d->m_huff_count[1][(cur_match_dist < 512) ? s0 : s1]++;

                    d->m_huff_count[0][s_tdefl_len_sym[cur_match_len - TDEFL_MIN_MATCH_LEN]]++;
                }
            }
            else
            {
                *pLZ_code_buf++ = (mz_uint8)first_trigram;
                *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
                d->m_huff_count[0][(mz_uint8)first_trigram]++;
            }

            if (--num_flags_left == 0)
            {
                num_flags_left = 8;
                pLZ_flags = pLZ_code_buf++;
            }

            total_lz_bytes += cur_match_len;
            lookahead_pos += cur_match_len;
            dict_size = MZ_MIN(dict_size + cur_match_len, (mz_uint)TDEFL_LZ_DICT_SIZE);
            cur_pos = (cur_pos + cur_match_len) & TDEFL_LZ_DICT_SIZE_MASK;
            MZ_ASSERT(lookahead_size >= cur_match_len);
            lookahead_size -= cur_match_len;

            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
            {
                int n;
                d->m_lookahead_pos = lookahead_pos;
                d->m_lookahead_size = lookahead_size;
                d->m_dict_size = dict_size;
                d->m_total_lz_bytes = total_lz_bytes;
                d->m_pLZ_code_buf = pLZ_code_buf;
                d->m_pLZ_flags = pLZ_flags;
                d->m_num_flags_left = num_flags_left;
                if ((n = tdefl_flush_block(d, 0)) != 0)
                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
                total_lz_bytes = d->m_total_lz_bytes;
                pLZ_code_buf = d->m_pLZ_code_buf;
                pLZ_flags = d->m_pLZ_flags;
                num_flags_left = d->m_num_flags_left;
            }
        }

        while (lookahead_size)
        {
            mz_uint8 lit = d->m_dict[cur_pos];

            total_lz_bytes++;
            *pLZ_code_buf++ = lit;
            *pLZ_flags = (mz_uint8)(*pLZ_flags >> 1);
            if (--num_flags_left == 0)
            {
                num_flags_left = 8;
                pLZ_flags = pLZ_code_buf++;
            }

            d->m_huff_count[0][lit]++;

            lookahead_pos++;
            dict_size = MZ_MIN(dict_size + 1, (mz_uint)TDEFL_LZ_DICT_SIZE);
            cur_pos = (cur_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
            lookahead_size--;

            if (pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8])
            {
                int n;
                d->m_lookahead_pos = lookahead_pos;
                d->m_lookahead_size = lookahead_size;
                d->m_dict_size = dict_size;
                d->m_total_lz_bytes = total_lz_bytes;
                d->m_pLZ_code_buf = pLZ_code_buf;
                d->m_pLZ_flags = pLZ_flags;
                d->m_num_flags_left = num_flags_left;
                if ((n = tdefl_flush_block(d, 0)) != 0)
                    return (n < 0) ? MZ_FALSE : MZ_TRUE;
                total_lz_bytes = d->m_total_lz_bytes;
                pLZ_code_buf = d->m_pLZ_code_buf;
                pLZ_flags = d->m_pLZ_flags;
                num_flags_left = d->m_num_flags_left;
            }
        }
    }

    d->m_lookahead_pos = lookahead_pos;
    d->m_lookahead_size = lookahead_size;
    d->m_dict_size = dict_size;
    d->m_total_lz_bytes = total_lz_bytes;
    d->m_pLZ_code_buf = pLZ_code_buf;
    d->m_pLZ_flags = pLZ_flags;
    d->m_num_flags_left = num_flags_left;
    return MZ_TRUE;
}
#endif /* MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */

static MZ_FORCEINLINE void tdefl_record_literal(tdefl_compressor *d, mz_uint8 lit)
{
    d->m_total_lz_bytes++;
    *d->m_pLZ_code_buf++ = lit;
    *d->m_pLZ_flags = (mz_uint8)(*d->m_pLZ_flags >> 1);
    if (--d->m_num_flags_left == 0)
    {
        d->m_num_flags_left = 8;
        d->m_pLZ_flags = d->m_pLZ_code_buf++;
    }
    d->m_huff_count[0][lit]++;
}

static MZ_FORCEINLINE void tdefl_record_match(tdefl_compressor *d, mz_uint match_len, mz_uint match_dist)
{
    mz_uint32 s0, s1;

    MZ_ASSERT((match_len >= TDEFL_MIN_MATCH_LEN) && (match_dist >= 1) && (match_dist <= TDEFL_LZ_DICT_SIZE));

    d->m_total_lz_bytes += match_len;

    d->m_pLZ_code_buf[0] = (mz_uint8)(match_len - TDEFL_MIN_MATCH_LEN);

    match_dist -= 1;
    d->m_pLZ_code_buf[1] = (mz_uint8)(match_dist & 0xFF);
    d->m_pLZ_code_buf[2] = (mz_uint8)(match_dist >> 8);
    d->m_pLZ_code_buf += 3;

    *d->m_pLZ_flags = (mz_uint8)((*d->m_pLZ_flags >> 1) | 0x80);
    if (--d->m_num_flags_left == 0)
    {
        d->m_num_flags_left = 8;
        d->m_pLZ_flags = d->m_pLZ_code_buf++;
    }

    s0 = s_tdefl_small_dist_sym[match_dist & 511];
    s1 = s_tdefl_large_dist_sym[(match_dist >> 8) & 127];
    d->m_huff_count[1][(match_dist < 512) ? s0 : s1]++;

    if (match_len >= TDEFL_MIN_MATCH_LEN)
        d->m_huff_count[0][s_tdefl_len_sym[match_len - TDEFL_MIN_MATCH_LEN]]++;
}
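
/* tdefl_record_literal()/tdefl_record_match() append codes to the intermediate LZ
   buffer in the format later consumed by tdefl_compress_lz_codes(): a shared flags
   byte covers up to eight codes (bits are shifted in at the MSB, so the earliest
   code's flag ends up in bit 0; 0 = literal, 1 = match), each literal takes one
   byte, and each match takes three bytes (match_len - TDEFL_MIN_MATCH_LEN followed
   by the 16-bit match_dist - 1, little-endian). */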

static mz_bool tdefl_compress_normal(tdefl_compressor *d)
{
    const mz_uint8 *pSrc = d->m_pSrc;
    size_t src_buf_left = d->m_src_buf_left;
    tdefl_flush flush = d->m_flush;

    while ((src_buf_left) || ((flush) && (d->m_lookahead_size)))
    {
        mz_uint len_to_move, cur_match_dist, cur_match_len, cur_pos;
        /* Update dictionary and hash chains. Keeps the lookahead size equal to TDEFL_MAX_MATCH_LEN. */
        if ((d->m_lookahead_size + d->m_dict_size) >= (TDEFL_MIN_MATCH_LEN - 1))
        {
            mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK, ins_pos = d->m_lookahead_pos + d->m_lookahead_size - 2;
            mz_uint hash = (d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK];
            mz_uint num_bytes_to_process = (mz_uint)MZ_MIN(src_buf_left, TDEFL_MAX_MATCH_LEN - d->m_lookahead_size);
            const mz_uint8 *pSrc_end = pSrc + num_bytes_to_process;
            src_buf_left -= num_bytes_to_process;
            d->m_lookahead_size += num_bytes_to_process;
            while (pSrc != pSrc_end)
            {
                mz_uint8 c = *pSrc++;
                d->m_dict[dst_pos] = c;
                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
                hash = ((hash << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
                d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
                d->m_hash[hash] = (mz_uint16)(ins_pos);
                dst_pos = (dst_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK;
                ins_pos++;
            }
        }
        else
        {
            while ((src_buf_left) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
            {
                mz_uint8 c = *pSrc++;
                mz_uint dst_pos = (d->m_lookahead_pos + d->m_lookahead_size) & TDEFL_LZ_DICT_SIZE_MASK;
                src_buf_left--;
                d->m_dict[dst_pos] = c;
                if (dst_pos < (TDEFL_MAX_MATCH_LEN - 1))
                    d->m_dict[TDEFL_LZ_DICT_SIZE + dst_pos] = c;
                if ((++d->m_lookahead_size + d->m_dict_size) >= TDEFL_MIN_MATCH_LEN)
                {
                    mz_uint ins_pos = d->m_lookahead_pos + (d->m_lookahead_size - 1) - 2;
                    mz_uint hash = ((d->m_dict[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] << (TDEFL_LZ_HASH_SHIFT * 2)) ^ (d->m_dict[(ins_pos + 1) & TDEFL_LZ_DICT_SIZE_MASK] << TDEFL_LZ_HASH_SHIFT) ^ c) & (TDEFL_LZ_HASH_SIZE - 1);
                    d->m_next[ins_pos & TDEFL_LZ_DICT_SIZE_MASK] = d->m_hash[hash];
                    d->m_hash[hash] = (mz_uint16)(ins_pos);
                }
            }
        }
        d->m_dict_size = MZ_MIN(TDEFL_LZ_DICT_SIZE - d->m_lookahead_size, d->m_dict_size);
        if ((!flush) && (d->m_lookahead_size < TDEFL_MAX_MATCH_LEN))
            break;

        /* Simple lazy/greedy parsing state machine. */
        len_to_move = 1;
        cur_match_dist = 0;
        cur_match_len = d->m_saved_match_len ? d->m_saved_match_len : (TDEFL_MIN_MATCH_LEN - 1);
        cur_pos = d->m_lookahead_pos & TDEFL_LZ_DICT_SIZE_MASK;
        if (d->m_flags & (TDEFL_RLE_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS))
        {
            if ((d->m_dict_size) && (!(d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS)))
            {
                mz_uint8 c = d->m_dict[(cur_pos - 1) & TDEFL_LZ_DICT_SIZE_MASK];
                cur_match_len = 0;
                while (cur_match_len < d->m_lookahead_size)
                {
                    if (d->m_dict[cur_pos + cur_match_len] != c)
                        break;
                    cur_match_len++;
                }
                if (cur_match_len < TDEFL_MIN_MATCH_LEN)
                    cur_match_len = 0;
                else
                    cur_match_dist = 1;
            }
        }
        else
        {
            tdefl_find_match(d, d->m_lookahead_pos, d->m_dict_size, d->m_lookahead_size, &cur_match_dist, &cur_match_len);
        }
        if (((cur_match_len == TDEFL_MIN_MATCH_LEN) && (cur_match_dist >= 8U * 1024U)) || (cur_pos == cur_match_dist) || ((d->m_flags & TDEFL_FILTER_MATCHES) && (cur_match_len <= 5)))
        {
            cur_match_dist = cur_match_len = 0;
        }
        if (d->m_saved_match_len)
        {
            if (cur_match_len > d->m_saved_match_len)
            {
                tdefl_record_literal(d, (mz_uint8)d->m_saved_lit);
                if (cur_match_len >= 128)
                {
                    tdefl_record_match(d, cur_match_len, cur_match_dist);
                    d->m_saved_match_len = 0;
                    len_to_move = cur_match_len;
                }
                else
                {
                    d->m_saved_lit = d->m_dict[cur_pos];
                    d->m_saved_match_dist = cur_match_dist;
                    d->m_saved_match_len = cur_match_len;
                }
            }
            else
            {
                tdefl_record_match(d, d->m_saved_match_len, d->m_saved_match_dist);
                len_to_move = d->m_saved_match_len - 1;
                d->m_saved_match_len = 0;
            }
        }
        else if (!cur_match_dist)
            tdefl_record_literal(d, d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)]);
        else if ((d->m_greedy_parsing) || (d->m_flags & TDEFL_RLE_MATCHES) || (cur_match_len >= 128))
        {
            tdefl_record_match(d, cur_match_len, cur_match_dist);
            len_to_move = cur_match_len;
        }
        else
        {
            d->m_saved_lit = d->m_dict[MZ_MIN(cur_pos, sizeof(d->m_dict) - 1)];
            d->m_saved_match_dist = cur_match_dist;
            d->m_saved_match_len = cur_match_len;
        }
        /* Move the lookahead forward by len_to_move bytes. */
        d->m_lookahead_pos += len_to_move;
        MZ_ASSERT(d->m_lookahead_size >= len_to_move);
        d->m_lookahead_size -= len_to_move;
        d->m_dict_size = MZ_MIN(d->m_dict_size + len_to_move, (mz_uint)TDEFL_LZ_DICT_SIZE);
        /* Check if it's time to flush the current LZ codes to the internal output buffer. */
        if ((d->m_pLZ_code_buf > &d->m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE - 8]) ||
            ((d->m_total_lz_bytes > 31 * 1024) && (((((mz_uint)(d->m_pLZ_code_buf - d->m_lz_code_buf) * 115) >> 7) >= d->m_total_lz_bytes) || (d->m_flags & TDEFL_FORCE_ALL_RAW_BLOCKS))))
        {
            int n;
            d->m_pSrc = pSrc;
            d->m_src_buf_left = src_buf_left;
            if ((n = tdefl_flush_block(d, 0)) != 0)
                return (n < 0) ? MZ_FALSE : MZ_TRUE;
        }
    }

    d->m_pSrc = pSrc;
    d->m_src_buf_left = src_buf_left;
    return MZ_TRUE;
}

static tdefl_status tdefl_flush_output_buffer(tdefl_compressor *d)
{
    if (d->m_pIn_buf_size)
    {
        *d->m_pIn_buf_size = d->m_pSrc - (const mz_uint8 *)d->m_pIn_buf;
    }

    if (d->m_pOut_buf_size)
    {
        size_t n = MZ_MIN(*d->m_pOut_buf_size - d->m_out_buf_ofs, d->m_output_flush_remaining);
        memcpy((mz_uint8 *)d->m_pOut_buf + d->m_out_buf_ofs, d->m_output_buf + d->m_output_flush_ofs, n);
        d->m_output_flush_ofs += (mz_uint)n;
        d->m_output_flush_remaining -= (mz_uint)n;
        d->m_out_buf_ofs += n;

        *d->m_pOut_buf_size = d->m_out_buf_ofs;
    }

    return (d->m_finished && !d->m_output_flush_remaining) ? TDEFL_STATUS_DONE : TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush)
{
    if (!d)
    {
        if (pIn_buf_size)
            *pIn_buf_size = 0;
        if (pOut_buf_size)
            *pOut_buf_size = 0;
        return TDEFL_STATUS_BAD_PARAM;
    }

    d->m_pIn_buf = pIn_buf;
    d->m_pIn_buf_size = pIn_buf_size;
    d->m_pOut_buf = pOut_buf;
    d->m_pOut_buf_size = pOut_buf_size;
    d->m_pSrc = (const mz_uint8 *)(pIn_buf);
    d->m_src_buf_left = pIn_buf_size ? *pIn_buf_size : 0;
    d->m_out_buf_ofs = 0;
    d->m_flush = flush;

    if (((d->m_pPut_buf_func != NULL) == ((pOut_buf != NULL) || (pOut_buf_size != NULL))) || (d->m_prev_return_status != TDEFL_STATUS_OKAY) ||
        (d->m_wants_to_finish && (flush != TDEFL_FINISH)) || (pIn_buf_size && *pIn_buf_size && !pIn_buf) || (pOut_buf_size && *pOut_buf_size && !pOut_buf))
    {
        if (pIn_buf_size)
            *pIn_buf_size = 0;
        if (pOut_buf_size)
            *pOut_buf_size = 0;
        return (d->m_prev_return_status = TDEFL_STATUS_BAD_PARAM);
    }
    d->m_wants_to_finish |= (flush == TDEFL_FINISH);

    if ((d->m_output_flush_remaining) || (d->m_finished))
        return (d->m_prev_return_status = tdefl_flush_output_buffer(d));

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
    if (((d->m_flags & TDEFL_MAX_PROBES_MASK) == 1) &&
        ((d->m_flags & TDEFL_GREEDY_PARSING_FLAG) != 0) &&
        ((d->m_flags & (TDEFL_FILTER_MATCHES | TDEFL_FORCE_ALL_RAW_BLOCKS | TDEFL_RLE_MATCHES)) == 0))
    {
        if (!tdefl_compress_fast(d))
            return d->m_prev_return_status;
    }
    else
#endif /* #if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN */
    {
        if (!tdefl_compress_normal(d))
            return d->m_prev_return_status;
    }

    if ((d->m_flags & (TDEFL_WRITE_ZLIB_HEADER | TDEFL_COMPUTE_ADLER32)) && (pIn_buf))
        d->m_adler32 = (mz_uint32)mz_adler32(d->m_adler32, (const mz_uint8 *)pIn_buf, d->m_pSrc - (const mz_uint8 *)pIn_buf);

    if ((flush) && (!d->m_lookahead_size) && (!d->m_src_buf_left) && (!d->m_output_flush_remaining))
    {
        if (tdefl_flush_block(d, flush) < 0)
            return d->m_prev_return_status;
        d->m_finished = (flush == TDEFL_FINISH);
        if (flush == TDEFL_FULL_FLUSH)
        {
            MZ_CLEAR_OBJ(d->m_hash);
            MZ_CLEAR_OBJ(d->m_next);
            d->m_dict_size = 0;
        }
    }

    return (d->m_prev_return_status = tdefl_flush_output_buffer(d));
}

tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush)
{
    MZ_ASSERT(d->m_pPut_buf_func);
    return tdefl_compress(d, pIn_buf, &in_buf_size, NULL, NULL, flush);
}

tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    d->m_pPut_buf_func = pPut_buf_func;
    d->m_pPut_buf_user = pPut_buf_user;
    d->m_flags = (mz_uint)(flags);
    d->m_max_probes[0] = 1 + ((flags & 0xFFF) + 2) / 3;
    d->m_greedy_parsing = (flags & TDEFL_GREEDY_PARSING_FLAG) != 0;
    d->m_max_probes[1] = 1 + (((flags & 0xFFF) >> 2) + 2) / 3;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
        MZ_CLEAR_OBJ(d->m_hash);
    d->m_lookahead_pos = d->m_lookahead_size = d->m_dict_size = d->m_total_lz_bytes = d->m_lz_code_buf_dict_pos = d->m_bits_in = 0;
    d->m_output_flush_ofs = d->m_output_flush_remaining = d->m_finished = d->m_block_index = d->m_bit_buffer = d->m_wants_to_finish = 0;
    d->m_pLZ_code_buf = d->m_lz_code_buf + 1;
    d->m_pLZ_flags = d->m_lz_code_buf;
    d->m_num_flags_left = 8;
    d->m_pOutput_buf = d->m_output_buf;
    d->m_pOutput_buf_end = d->m_output_buf;
    d->m_prev_return_status = TDEFL_STATUS_OKAY;
    d->m_saved_match_dist = d->m_saved_match_len = d->m_saved_lit = 0;
    d->m_adler32 = 1;
    d->m_pIn_buf = NULL;
    d->m_pOut_buf = NULL;
    d->m_pIn_buf_size = NULL;
    d->m_pOut_buf_size = NULL;
    d->m_flush = TDEFL_NO_FLUSH;
    d->m_pSrc = NULL;
    d->m_src_buf_left = 0;
    d->m_out_buf_ofs = 0;
    if (!(flags & TDEFL_NONDETERMINISTIC_PARSING_FLAG))
        MZ_CLEAR_OBJ(d->m_dict);
    memset(&d->m_huff_count[0][0], 0, sizeof(d->m_huff_count[0][0]) * TDEFL_MAX_HUFF_SYMBOLS_0);
    memset(&d->m_huff_count[1][0], 0, sizeof(d->m_huff_count[1][0]) * TDEFL_MAX_HUFF_SYMBOLS_1);
    return TDEFL_STATUS_OKAY;
}

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d)
{
    return d->m_prev_return_status;
}

mz_uint32 tdefl_get_adler32(tdefl_compressor *d)
{
    return d->m_adler32;
}

mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    tdefl_compressor *pComp;
    mz_bool succeeded;
    if (((buf_len) && (!pBuf)) || (!pPut_buf_func))
        return MZ_FALSE;
    pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
    if (!pComp)
        return MZ_FALSE;
    succeeded = (tdefl_init(pComp, pPut_buf_func, pPut_buf_user, flags) == TDEFL_STATUS_OKAY);
    succeeded = succeeded && (tdefl_compress_buffer(pComp, pBuf, buf_len, TDEFL_FINISH) == TDEFL_STATUS_DONE);
    MZ_FREE(pComp);
    return succeeded;
}

typedef struct
{
    size_t m_size, m_capacity;
    mz_uint8 *m_pBuf;
    mz_bool m_expandable;
} tdefl_output_buffer;

static mz_bool tdefl_output_buffer_putter(const void *pBuf, int len, void *pUser)
{
    tdefl_output_buffer *p = (tdefl_output_buffer *)pUser;
    size_t new_size = p->m_size + len;
    if (new_size > p->m_capacity)
    {
        size_t new_capacity = p->m_capacity;
        mz_uint8 *pNew_buf;
        if (!p->m_expandable)
            return MZ_FALSE;
        do
        {
            new_capacity = MZ_MAX(128U, new_capacity << 1U);
        } while (new_size > new_capacity);
        pNew_buf = (mz_uint8 *)MZ_REALLOC(p->m_pBuf, new_capacity);
        if (!pNew_buf)
            return MZ_FALSE;
        p->m_pBuf = pNew_buf;
        p->m_capacity = new_capacity;
    }
    memcpy((mz_uint8 *)p->m_pBuf + p->m_size, pBuf, len);
    p->m_size = new_size;
    return MZ_TRUE;
}

void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
{
    tdefl_output_buffer out_buf;
    MZ_CLEAR_OBJ(out_buf);
    if (!pOut_len)
        return MZ_FALSE;
    else
        *pOut_len = 0;
    out_buf.m_expandable = MZ_TRUE;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
        return NULL;
    *pOut_len = out_buf.m_size;
    return out_buf.m_pBuf;
}

size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
{
    tdefl_output_buffer out_buf;
    MZ_CLEAR_OBJ(out_buf);
    if (!pOut_buf)
        return 0;
    out_buf.m_pBuf = (mz_uint8 *)pOut_buf;
    out_buf.m_capacity = out_buf_len;
    if (!tdefl_compress_mem_to_output(pSrc_buf, src_buf_len, tdefl_output_buffer_putter, &out_buf, flags))
        return 0;
    return out_buf.m_size;
}
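
/* A minimal usage sketch for the heap helpers above, left disabled: it assumes the
   decompression half of this file (tinfl_decompress_mem_to_heap()) and mz_free()
   are compiled in, and simply round-trips a buffer through compress/decompress. */
#if 0
static mz_bool example_tdefl_roundtrip(const void *pData, size_t data_len)
{
    size_t comp_len = 0, decomp_len = 0;
    mz_bool ok = MZ_FALSE;
    /* Low 12 bits of the flags select the number of dictionary probes (128 here);
       TDEFL_WRITE_ZLIB_HEADER wraps the deflate stream in a zlib header/trailer. */
    void *pComp = tdefl_compress_mem_to_heap(pData, data_len, &comp_len, 128 | TDEFL_WRITE_ZLIB_HEADER);
    if (pComp)
    {
        void *pDecomp = tinfl_decompress_mem_to_heap(pComp, comp_len, &decomp_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
        ok = (pDecomp != NULL) && (decomp_len == data_len) && (memcmp(pDecomp, pData, data_len) == 0);
        mz_free(pDecomp);
    }
    mz_free(pComp);
    return ok;
}
#endif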

static const mz_uint s_tdefl_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };

/* level may actually range from [0,10] (10 is a "hidden" max level, where we want a bit more compression and it's fine if throughput falls off a cliff on some files). */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy)
{
    mz_uint comp_flags = s_tdefl_num_probes[(level >= 0) ? MZ_MIN(10, level) : MZ_DEFAULT_LEVEL] | ((level <= 3) ? TDEFL_GREEDY_PARSING_FLAG : 0);
    if (window_bits > 0)
        comp_flags |= TDEFL_WRITE_ZLIB_HEADER;

    if (!level)
        comp_flags |= TDEFL_FORCE_ALL_RAW_BLOCKS;
    else if (strategy == MZ_FILTERED)
        comp_flags |= TDEFL_FILTER_MATCHES;
    else if (strategy == MZ_HUFFMAN_ONLY)
        comp_flags &= ~TDEFL_MAX_PROBES_MASK;
    else if (strategy == MZ_FIXED)
        comp_flags |= TDEFL_FORCE_ALL_STATIC_BLOCKS;
    else if (strategy == MZ_RLE)
        comp_flags |= TDEFL_RLE_MATCHES;

    return comp_flags;
}
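
/* A minimal sketch of the intended call pattern, left disabled: map zlib-style
   parameters to tdefl flags, then drive the compressor through a caller-supplied
   output callback. It assumes MZ_DEFAULT_WINDOW_BITS/MZ_DEFAULT_STRATEGY from the
   zlib-compatible part of miniz are compiled in; my_put_buf_cb is a hypothetical
   tdefl_put_buf_func_ptr provided by the caller. */
#if 0
static mz_bool example_compress_at_level(const void *pData, size_t data_len,
                                         tdefl_put_buf_func_ptr my_put_buf_cb, void *pUser)
{
    /* Static only to keep the sketch short: tdefl_compressor is large, so real code
       usually heap-allocates it (see tdefl_compressor_alloc() below). */
    static tdefl_compressor s_comp;
    mz_uint comp_flags = tdefl_create_comp_flags_from_zip_params(6, MZ_DEFAULT_WINDOW_BITS, MZ_DEFAULT_STRATEGY);
    if (tdefl_init(&s_comp, my_put_buf_cb, pUser, (int)comp_flags) != TDEFL_STATUS_OKAY)
        return MZ_FALSE;
    /* TDEFL_FINISH marks this as the one and only chunk of input. */
    return tdefl_compress_buffer(&s_comp, pData, data_len, TDEFL_FINISH) == TDEFL_STATUS_DONE;
}
#endif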

#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable : 4204) /* nonstandard extension used : non-constant aggregate initializer (also supported by GNU C and C99, so no big deal) */
#endif

/* Simple PNG writer function by Alex Evans, 2011. Released into the public domain: https://gist.github.com/908299, more context at
 http://altdevblogaday.org/2011/04/06/a-smaller-jpg-encoder/.
 This is actually a modification of Alex's original code so PNG files generated by this function pass pngcheck. */
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip)
{
    /* Using a local copy of this array here in case MINIZ_NO_ZLIB_APIS was defined. */
    static const mz_uint s_tdefl_png_num_probes[11] = { 0, 1, 6, 32, 16, 32, 128, 256, 512, 768, 1500 };
    tdefl_compressor *pComp = (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
    tdefl_output_buffer out_buf;
    int i, bpl = w * num_chans, y, z;
    mz_uint32 c;
    *pLen_out = 0;
    if (!pComp)
        return NULL;
    MZ_CLEAR_OBJ(out_buf);
    out_buf.m_expandable = MZ_TRUE;
    out_buf.m_capacity = 57 + MZ_MAX(64, (1 + bpl) * h);
    if (NULL == (out_buf.m_pBuf = (mz_uint8 *)MZ_MALLOC(out_buf.m_capacity)))
    {
        MZ_FREE(pComp);
        return NULL;
    }
    /* write dummy header */
    for (z = 41; z; --z)
        tdefl_output_buffer_putter(&z, 1, &out_buf);
    /* compress image data */
    tdefl_init(pComp, tdefl_output_buffer_putter, &out_buf, s_tdefl_png_num_probes[MZ_MIN(10, level)] | TDEFL_WRITE_ZLIB_HEADER);
    for (y = 0; y < h; ++y)
    {
        tdefl_compress_buffer(pComp, &z, 1, TDEFL_NO_FLUSH);
        tdefl_compress_buffer(pComp, (mz_uint8 *)pImage + (flip ? (h - 1 - y) : y) * bpl, bpl, TDEFL_NO_FLUSH);
    }
    if (tdefl_compress_buffer(pComp, NULL, 0, TDEFL_FINISH) != TDEFL_STATUS_DONE)
    {
        MZ_FREE(pComp);
        MZ_FREE(out_buf.m_pBuf);
        return NULL;
    }
    /* write real header */
    *pLen_out = out_buf.m_size - 41;
    {
        static const mz_uint8 chans[] = { 0x00, 0x00, 0x04, 0x02, 0x06 };
        mz_uint8 pnghdr[41] = { 0x89, 0x50, 0x4e, 0x47, 0x0d,
                                0x0a, 0x1a, 0x0a, 0x00, 0x00,
                                0x00, 0x0d, 0x49, 0x48, 0x44,
                                0x52, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0x08,
                                0x00, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x49, 0x44, 0x41,
                                0x54 };
        pnghdr[18] = (mz_uint8)(w >> 8);
        pnghdr[19] = (mz_uint8)w;
        pnghdr[22] = (mz_uint8)(h >> 8);
        pnghdr[23] = (mz_uint8)h;
        pnghdr[25] = chans[num_chans];
        pnghdr[33] = (mz_uint8)(*pLen_out >> 24);
        pnghdr[34] = (mz_uint8)(*pLen_out >> 16);
        pnghdr[35] = (mz_uint8)(*pLen_out >> 8);
        pnghdr[36] = (mz_uint8)*pLen_out;
        c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, pnghdr + 12, 17);
        for (i = 0; i < 4; ++i, c <<= 8)
            ((mz_uint8 *)(pnghdr + 29))[i] = (mz_uint8)(c >> 24);
        memcpy(out_buf.m_pBuf, pnghdr, 41);
    }
    /* write footer (IDAT CRC-32, followed by IEND chunk) */
    if (!tdefl_output_buffer_putter("\0\0\0\0\0\0\0\0\x49\x45\x4e\x44\xae\x42\x60\x82", 16, &out_buf))
    {
        *pLen_out = 0;
        MZ_FREE(pComp);
        MZ_FREE(out_buf.m_pBuf);
        return NULL;
    }
    c = (mz_uint32)mz_crc32(MZ_CRC32_INIT, out_buf.m_pBuf + 41 - 4, *pLen_out + 4);
    for (i = 0; i < 4; ++i, c <<= 8)
        (out_buf.m_pBuf + out_buf.m_size - 16)[i] = (mz_uint8)(c >> 24);
    /* compute final size of file, grab compressed data buffer and return */
    *pLen_out += 57;
    MZ_FREE(pComp);
    return out_buf.m_pBuf;
}
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out)
{
    /* Level 6 corresponds to TDEFL_DEFAULT_MAX_PROBES or MZ_DEFAULT_LEVEL (but we can't depend on MZ_DEFAULT_LEVEL being available in case the zlib APIs were #defined out) */
    return tdefl_write_image_to_png_file_in_memory_ex(pImage, w, h, num_chans, pLen_out, 6, MZ_FALSE);
}
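
/* A minimal sketch of saving a tightly packed 8-bit RGB image through the writer
   above, left disabled: it assumes <stdio.h> file I/O is available and that the
   returned buffer (allocated with MZ_MALLOC) can be released with mz_free(), as in
   the default build. */
#if 0
static mz_bool example_save_png(const char *pFilename, const void *pRGB_pixels, int w, int h)
{
    mz_bool ok = MZ_FALSE;
    size_t png_len = 0;
    /* 3 channels -> PNG color type 2 (truecolor), see the chans[] table above. */
    void *pPNG = tdefl_write_image_to_png_file_in_memory(pRGB_pixels, w, h, 3, &png_len);
    if (pPNG)
    {
        FILE *pFile = fopen(pFilename, "wb");
        if (pFile)
        {
            ok = (fwrite(pPNG, 1, png_len, pFile) == png_len);
            fclose(pFile);
        }
        mz_free(pPNG);
    }
    return ok;
}
#endif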

/* Allocate the tdefl_compressor and tinfl_decompressor structures in C so that */
/* non-C language bindings to the tdefl_ and tinfl_ APIs don't need to worry about */
/* structure size and allocation mechanism. */
tdefl_compressor *tdefl_compressor_alloc()
{
    return (tdefl_compressor *)MZ_MALLOC(sizeof(tdefl_compressor));
}

void tdefl_compressor_free(tdefl_compressor *pComp)
{
    MZ_FREE(pComp);
}
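
/* A minimal sketch of the allocate/init/compress/free sequence these helpers are
   meant to support (e.g. from a binding that cannot see sizeof(tdefl_compressor)),
   left disabled. With a NULL put-buf callback, tdefl_compress() writes straight
   into the caller's output buffer; the sketch assumes that buffer is large enough
   for a single TDEFL_FINISH call. */
#if 0
static mz_bool example_compress_single_shot(const void *pIn, size_t in_len, void *pOut, size_t *pOut_len)
{
    tdefl_status status;
    tdefl_compressor *pComp = tdefl_compressor_alloc();
    if (!pComp)
        return MZ_FALSE;
    tdefl_init(pComp, NULL, NULL, TDEFL_WRITE_ZLIB_HEADER | 128);
    status = tdefl_compress(pComp, pIn, &in_len, pOut, pOut_len, TDEFL_FINISH);
    tdefl_compressor_free(pComp);
    /* TDEFL_STATUS_OKAY here would mean pOut was too small to hold the whole stream. */
    return status == TDEFL_STATUS_DONE;
}
#endif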

#ifdef _MSC_VER
#pragma warning(pop)
#endif

#ifdef __cplusplus
}
#endif
/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/



#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- Low-level Decompression (completely independent from all compression API's) */

#define TINFL_MEMCPY(d, s, l) memcpy(d, s, l)
#define TINFL_MEMSET(p, c, l) memset(p, c, l)

#define TINFL_CR_BEGIN  \
    switch (r->m_state) \
    {                   \
        case 0:
#define TINFL_CR_RETURN(state_index, result) \
    do                                       \
    {                                        \
        status = result;                     \
        r->m_state = state_index;            \
        goto common_exit;                    \
        case state_index:;                   \
    }                                        \
    MZ_MACRO_END
#define TINFL_CR_RETURN_FOREVER(state_index, result) \
    do                                               \
    {                                                \
        for (;;)                                     \
        {                                            \
            TINFL_CR_RETURN(state_index, result);    \
        }                                            \
    }                                                \
    MZ_MACRO_END
#define TINFL_CR_FINISH }

#define TINFL_GET_BYTE(state_index, c)                                                                                                                           \
    do                                                                                                                                                           \
    {                                                                                                                                                            \
        while (pIn_buf_cur >= pIn_buf_end)                                                                                                                       \
        {                                                                                                                                                        \
            TINFL_CR_RETURN(state_index, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS); \
        }                                                                                                                                                        \
        c = *pIn_buf_cur++;                                                                                                                                      \
    }                                                                                                                                                            \
    MZ_MACRO_END

#define TINFL_NEED_BITS(state_index, n)                \
    do                                                 \
    {                                                  \
        mz_uint c;                                     \
        TINFL_GET_BYTE(state_index, c);                \
        bit_buf |= (((tinfl_bit_buf_t)c) << num_bits); \
        num_bits += 8;                                 \
    } while (num_bits < (mz_uint)(n))
#define TINFL_SKIP_BITS(state_index, n)      \
    do                                       \
    {                                        \
        if (num_bits < (mz_uint)(n))         \
        {                                    \
            TINFL_NEED_BITS(state_index, n); \
        }                                    \
        bit_buf >>= (n);                     \
        num_bits -= (n);                     \
    }                                        \
    MZ_MACRO_END
#define TINFL_GET_BITS(state_index, b, n)    \
    do                                       \
    {                                        \
        if (num_bits < (mz_uint)(n))         \
        {                                    \
            TINFL_NEED_BITS(state_index, n); \
        }                                    \
        b = bit_buf & ((1 << (n)) - 1);      \
        bit_buf >>= (n);                     \
        num_bits -= (n);                     \
    }                                        \
    MZ_MACRO_END
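
/* Illustrative sketch (not part of miniz): the macros above implement an LSB-first
   bit reader -- whole bytes are appended at the top of bit_buf and the low n bits
   are peeled off with a mask. A plain-function equivalent, ignoring the
   coroutine/continuation machinery, would look like this (kept under "#if 0"). */
#if 0
typedef struct
{
    const mz_uint8 *pIn, *pIn_end;
    tinfl_bit_buf_t bit_buf;
    mz_uint num_bits;
} example_bit_reader;

static int example_get_bits(example_bit_reader *br, mz_uint n, mz_uint *pOut)
{
    while (br->num_bits < n)
    {
        if (br->pIn >= br->pIn_end)
            return 0; /* would need more input; the real macros yield to the caller here */
        br->bit_buf |= ((tinfl_bit_buf_t)*br->pIn++) << br->num_bits;
        br->num_bits += 8;
    }
    *pOut = (mz_uint)(br->bit_buf & ((1 << n) - 1));
    br->bit_buf >>= n;
    br->num_bits -= n;
    return 1;
}
#endif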

/* TINFL_HUFF_BITBUF_FILL() is only used rarely, when the number of bytes remaining in the input buffer falls below 2. */
/* It reads just enough bytes from the input stream that are needed to decode the next Huffman code (and absolutely no more). It works by trying to fully decode a */
/* Huffman code by using whatever bits are currently present in the bit buffer. If this fails, it reads another byte, and tries again until it succeeds or until the */
/* bit buffer contains >=15 bits (deflate's max. Huffman code size). */
#define TINFL_HUFF_BITBUF_FILL(state_index, pHuff)                             \
    do                                                                         \
    {                                                                          \
        temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)];     \
        if (temp >= 0)                                                         \
        {                                                                      \
            code_len = temp >> 9;                                              \
            if ((code_len) && (num_bits >= code_len))                          \
                break;                                                         \
        }                                                                      \
        else if (num_bits > TINFL_FAST_LOOKUP_BITS)                            \
        {                                                                      \
            code_len = TINFL_FAST_LOOKUP_BITS;                                 \
            do                                                                 \
            {                                                                  \
                temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)]; \
            } while ((temp < 0) && (num_bits >= (code_len + 1)));              \
            if (temp >= 0)                                                     \
                break;                                                         \
        }                                                                      \
        TINFL_GET_BYTE(state_index, c);                                        \
        bit_buf |= (((tinfl_bit_buf_t)c) << num_bits);                         \
        num_bits += 8;                                                         \
    } while (num_bits < 15);

/* TINFL_HUFF_DECODE() decodes the next Huffman coded symbol. It's more complex than you would initially expect because the zlib API expects the decompressor to never read */
/* beyond the final byte of the deflate stream. (In other words, when this macro wants to read another byte from the input, it REALLY needs another byte in order to fully */
/* decode the next Huffman code.) Handling this properly is particularly important on raw deflate (non-zlib) streams, which aren't followed by a byte aligned adler-32. */
/* The slow path is only executed at the very end of the input buffer. */
/* v1.16: The original macro handled the case at the very end of the passed-in input buffer, but we also need to handle the case where the user passes in 1+zillion bytes */
/* following the deflate data and our non-conservative read-ahead path won't kick in here on this code. This is much trickier. */
#define TINFL_HUFF_DECODE(state_index, sym, pHuff)                                                                                  \
    do                                                                                                                              \
    {                                                                                                                               \
        int temp;                                                                                                                   \
        mz_uint code_len, c;                                                                                                        \
        if (num_bits < 15)                                                                                                          \
        {                                                                                                                           \
            if ((pIn_buf_end - pIn_buf_cur) < 2)                                                                                    \
            {                                                                                                                       \
                TINFL_HUFF_BITBUF_FILL(state_index, pHuff);                                                                         \
            }                                                                                                                       \
            else                                                                                                                    \
            {                                                                                                                       \
                bit_buf |= (((tinfl_bit_buf_t)pIn_buf_cur[0]) << num_bits) | (((tinfl_bit_buf_t)pIn_buf_cur[1]) << (num_bits + 8)); \
                pIn_buf_cur += 2;                                                                                                   \
                num_bits += 16;                                                                                                     \
            }                                                                                                                       \
        }                                                                                                                           \
        if ((temp = (pHuff)->m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)                                               \
            code_len = temp >> 9, temp &= 511;                                                                                      \
        else                                                                                                                        \
        {                                                                                                                           \
            code_len = TINFL_FAST_LOOKUP_BITS;                                                                                      \
            do                                                                                                                      \
            {                                                                                                                       \
                temp = (pHuff)->m_tree[~temp + ((bit_buf >> code_len++) & 1)];                                                      \
            } while (temp < 0);                                                                                                     \
        }                                                                                                                           \
        sym = temp;                                                                                                                 \
        bit_buf >>= code_len;                                                                                                       \
        num_bits -= code_len;                                                                                                       \
    }                                                                                                                               \
    MZ_MACRO_END

tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags)
{
    static const int s_length_base[31] = { 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31, 35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0 };
    static const int s_length_extra[31] = { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 0, 0, 0 };
    static const int s_dist_base[32] = { 1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193, 257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145, 8193, 12289, 16385, 24577, 0, 0 };
    static const int s_dist_extra[32] = { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13 };
    static const mz_uint8 s_length_dezigzag[19] = { 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 };
    static const int s_min_table_sizes[3] = { 257, 1, 4 };

    tinfl_status status = TINFL_STATUS_FAILED;
    mz_uint32 num_bits, dist, counter, num_extra;
    tinfl_bit_buf_t bit_buf;
    const mz_uint8 *pIn_buf_cur = pIn_buf_next, *const pIn_buf_end = pIn_buf_next + *pIn_buf_size;
    mz_uint8 *pOut_buf_cur = pOut_buf_next, *const pOut_buf_end = pOut_buf_next + *pOut_buf_size;
    size_t out_buf_size_mask = (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF) ? (size_t)-1 : ((pOut_buf_next - pOut_buf_start) + *pOut_buf_size) - 1, dist_from_out_buf_start;

    /* Ensure the output buffer's size is a power of 2, unless the output buffer is large enough to hold the entire output file (in which case it doesn't matter). */
    if (((out_buf_size_mask + 1) & out_buf_size_mask) || (pOut_buf_next < pOut_buf_start))
    {
        *pIn_buf_size = *pOut_buf_size = 0;
        return TINFL_STATUS_BAD_PARAM;
    }

    num_bits = r->m_num_bits;
    bit_buf = r->m_bit_buf;
    dist = r->m_dist;
    counter = r->m_counter;
    num_extra = r->m_num_extra;
    dist_from_out_buf_start = r->m_dist_from_out_buf_start;
    TINFL_CR_BEGIN

    bit_buf = num_bits = dist = counter = num_extra = r->m_zhdr0 = r->m_zhdr1 = 0;
    r->m_z_adler32 = r->m_check_adler32 = 1;
    if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
    {
        TINFL_GET_BYTE(1, r->m_zhdr0);
        TINFL_GET_BYTE(2, r->m_zhdr1);
        counter = (((r->m_zhdr0 * 256 + r->m_zhdr1) % 31 != 0) || (r->m_zhdr1 & 32) || ((r->m_zhdr0 & 15) != 8));
        if (!(decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
            counter |= (((1U << (8U + (r->m_zhdr0 >> 4))) > 32768U) || ((out_buf_size_mask + 1) < (size_t)(1U << (8U + (r->m_zhdr0 >> 4)))));
        if (counter)
        {
            TINFL_CR_RETURN_FOREVER(36, TINFL_STATUS_FAILED);
        }
    }

    do
    {
        TINFL_GET_BITS(3, r->m_final, 3);
        r->m_type = r->m_final >> 1;
        if (r->m_type == 0)
        {
            TINFL_SKIP_BITS(5, num_bits & 7);
            for (counter = 0; counter < 4; ++counter)
            {
                if (num_bits)
                    TINFL_GET_BITS(6, r->m_raw_header[counter], 8);
                else
                    TINFL_GET_BYTE(7, r->m_raw_header[counter]);
            }
            if ((counter = (r->m_raw_header[0] | (r->m_raw_header[1] << 8))) != (mz_uint)(0xFFFF ^ (r->m_raw_header[2] | (r->m_raw_header[3] << 8))))
            {
                TINFL_CR_RETURN_FOREVER(39, TINFL_STATUS_FAILED);
            }
            while ((counter) && (num_bits))
            {
                TINFL_GET_BITS(51, dist, 8);
                while (pOut_buf_cur >= pOut_buf_end)
                {
                    TINFL_CR_RETURN(52, TINFL_STATUS_HAS_MORE_OUTPUT);
                }
                *pOut_buf_cur++ = (mz_uint8)dist;
                counter--;
            }
            while (counter)
            {
                size_t n;
                while (pOut_buf_cur >= pOut_buf_end)
                {
                    TINFL_CR_RETURN(9, TINFL_STATUS_HAS_MORE_OUTPUT);
                }
                while (pIn_buf_cur >= pIn_buf_end)
                {
                    TINFL_CR_RETURN(38, (decomp_flags & TINFL_FLAG_HAS_MORE_INPUT) ? TINFL_STATUS_NEEDS_MORE_INPUT : TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS);
                }
                n = MZ_MIN(MZ_MIN((size_t)(pOut_buf_end - pOut_buf_cur), (size_t)(pIn_buf_end - pIn_buf_cur)), counter);
                TINFL_MEMCPY(pOut_buf_cur, pIn_buf_cur, n);
                pIn_buf_cur += n;
                pOut_buf_cur += n;
                counter -= (mz_uint)n;
            }
        }
        else if (r->m_type == 3)
        {
            TINFL_CR_RETURN_FOREVER(10, TINFL_STATUS_FAILED);
        }
        else
        {
            if (r->m_type == 1)
            {
                mz_uint8 *p = r->m_tables[0].m_code_size;
                mz_uint i;
                r->m_table_sizes[0] = 288;
                r->m_table_sizes[1] = 32;
                TINFL_MEMSET(r->m_tables[1].m_code_size, 5, 32);
                for (i = 0; i <= 143; ++i)
                    *p++ = 8;
                for (; i <= 255; ++i)
                    *p++ = 9;
                for (; i <= 279; ++i)
                    *p++ = 7;
                for (; i <= 287; ++i)
                    *p++ = 8;
            }
            else
            {
                for (counter = 0; counter < 3; counter++)
                {
                    TINFL_GET_BITS(11, r->m_table_sizes[counter], "\05\05\04"[counter]);
                    r->m_table_sizes[counter] += s_min_table_sizes[counter];
                }
                MZ_CLEAR_OBJ(r->m_tables[2].m_code_size);
                for (counter = 0; counter < r->m_table_sizes[2]; counter++)
                {
                    mz_uint s;
                    TINFL_GET_BITS(14, s, 3);
                    r->m_tables[2].m_code_size[s_length_dezigzag[counter]] = (mz_uint8)s;
                }
                r->m_table_sizes[2] = 19;
            }
            for (; (int)r->m_type >= 0; r->m_type--)
            {
                int tree_next, tree_cur;
                tinfl_huff_table *pTable;
                mz_uint i, j, used_syms, total, sym_index, next_code[17], total_syms[16];
                pTable = &r->m_tables[r->m_type];
                MZ_CLEAR_OBJ(total_syms);
                MZ_CLEAR_OBJ(pTable->m_look_up);
                MZ_CLEAR_OBJ(pTable->m_tree);
                for (i = 0; i < r->m_table_sizes[r->m_type]; ++i)
                    total_syms[pTable->m_code_size[i]]++;
                used_syms = 0, total = 0;
                next_code[0] = next_code[1] = 0;
                for (i = 1; i <= 15; ++i)
                {
                    used_syms += total_syms[i];
                    next_code[i + 1] = (total = ((total + total_syms[i]) << 1));
                }
                if ((65536 != total) && (used_syms > 1))
                {
                    TINFL_CR_RETURN_FOREVER(35, TINFL_STATUS_FAILED);
                }
                for (tree_next = -1, sym_index = 0; sym_index < r->m_table_sizes[r->m_type]; ++sym_index)
                {
                    mz_uint rev_code = 0, l, cur_code, code_size = pTable->m_code_size[sym_index];
                    if (!code_size)
                        continue;
                    cur_code = next_code[code_size]++;
                    for (l = code_size; l > 0; l--, cur_code >>= 1)
                        rev_code = (rev_code << 1) | (cur_code & 1);
                    if (code_size <= TINFL_FAST_LOOKUP_BITS)
                    {
                        mz_int16 k = (mz_int16)((code_size << 9) | sym_index);
                        while (rev_code < TINFL_FAST_LOOKUP_SIZE)
                        {
                            pTable->m_look_up[rev_code] = k;
                            rev_code += (1 << code_size);
                        }
                        continue;
                    }
                    if (0 == (tree_cur = pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)]))
                    {
                        pTable->m_look_up[rev_code & (TINFL_FAST_LOOKUP_SIZE - 1)] = (mz_int16)tree_next;
                        tree_cur = tree_next;
                        tree_next -= 2;
                    }
                    rev_code >>= (TINFL_FAST_LOOKUP_BITS - 1);
                    for (j = code_size; j > (TINFL_FAST_LOOKUP_BITS + 1); j--)
                    {
                        tree_cur -= ((rev_code >>= 1) & 1);
                        if (!pTable->m_tree[-tree_cur - 1])
                        {
                            pTable->m_tree[-tree_cur - 1] = (mz_int16)tree_next;
                            tree_cur = tree_next;
                            tree_next -= 2;
                        }
                        else
                            tree_cur = pTable->m_tree[-tree_cur - 1];
                    }
                    tree_cur -= ((rev_code >>= 1) & 1);
                    pTable->m_tree[-tree_cur - 1] = (mz_int16)sym_index;
                }
                if (r->m_type == 2)
                {
                    for (counter = 0; counter < (r->m_table_sizes[0] + r->m_table_sizes[1]);)
                    {
                        mz_uint s;
                        TINFL_HUFF_DECODE(16, dist, &r->m_tables[2]);
                        if (dist < 16)
                        {
                            r->m_len_codes[counter++] = (mz_uint8)dist;
                            continue;
                        }
                        if ((dist == 16) && (!counter))
                        {
                            TINFL_CR_RETURN_FOREVER(17, TINFL_STATUS_FAILED);
                        }
                        num_extra = "\02\03\07"[dist - 16];
                        TINFL_GET_BITS(18, s, num_extra);
                        s += "\03\03\013"[dist - 16];
                        TINFL_MEMSET(r->m_len_codes + counter, (dist == 16) ? r->m_len_codes[counter - 1] : 0, s);
                        counter += s;
                    }
                    if ((r->m_table_sizes[0] + r->m_table_sizes[1]) != counter)
                    {
                        TINFL_CR_RETURN_FOREVER(21, TINFL_STATUS_FAILED);
                    }
                    TINFL_MEMCPY(r->m_tables[0].m_code_size, r->m_len_codes, r->m_table_sizes[0]);
                    TINFL_MEMCPY(r->m_tables[1].m_code_size, r->m_len_codes + r->m_table_sizes[0], r->m_table_sizes[1]);
                }
            }
            for (;;)
            {
                mz_uint8 *pSrc;
                for (;;)
                {
                    if (((pIn_buf_end - pIn_buf_cur) < 4) || ((pOut_buf_end - pOut_buf_cur) < 2))
                    {
                        TINFL_HUFF_DECODE(23, counter, &r->m_tables[0]);
                        if (counter >= 256)
                            break;
                        while (pOut_buf_cur >= pOut_buf_end)
                        {
                            TINFL_CR_RETURN(24, TINFL_STATUS_HAS_MORE_OUTPUT);
                        }
                        *pOut_buf_cur++ = (mz_uint8)counter;
                    }
                    else
                    {
                        int sym2;
                        mz_uint code_len;
#if TINFL_USE_64BIT_BITBUF
                        if (num_bits < 30)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE32(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 4;
                            num_bits += 32;
                        }
#else
                        if (num_bits < 15)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 2;
                            num_bits += 16;
                        }
#endif
                        if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
                            code_len = sym2 >> 9;
                        else
                        {
                            code_len = TINFL_FAST_LOOKUP_BITS;
                            do
                            {
                                sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
                            } while (sym2 < 0);
                        }
                        counter = sym2;
                        bit_buf >>= code_len;
                        num_bits -= code_len;
                        if (counter & 256)
                            break;

#if !TINFL_USE_64BIT_BITBUF
                        if (num_bits < 15)
                        {
                            bit_buf |= (((tinfl_bit_buf_t)MZ_READ_LE16(pIn_buf_cur)) << num_bits);
                            pIn_buf_cur += 2;
                            num_bits += 16;
                        }
#endif
                        if ((sym2 = r->m_tables[0].m_look_up[bit_buf & (TINFL_FAST_LOOKUP_SIZE - 1)]) >= 0)
                            code_len = sym2 >> 9;
                        else
                        {
                            code_len = TINFL_FAST_LOOKUP_BITS;
                            do
                            {
                                sym2 = r->m_tables[0].m_tree[~sym2 + ((bit_buf >> code_len++) & 1)];
                            } while (sym2 < 0);
                        }
                        bit_buf >>= code_len;
                        num_bits -= code_len;

                        pOut_buf_cur[0] = (mz_uint8)counter;
                        if (sym2 & 256)
                        {
                            pOut_buf_cur++;
                            counter = sym2;
                            break;
                        }
                        pOut_buf_cur[1] = (mz_uint8)sym2;
                        pOut_buf_cur += 2;
                    }
                }
                if ((counter &= 511) == 256)
                    break;

                num_extra = s_length_extra[counter - 257];
                counter = s_length_base[counter - 257];
                if (num_extra)
                {
                    mz_uint extra_bits;
                    TINFL_GET_BITS(25, extra_bits, num_extra);
                    counter += extra_bits;
                }

                TINFL_HUFF_DECODE(26, dist, &r->m_tables[1]);
                num_extra = s_dist_extra[dist];
                dist = s_dist_base[dist];
                if (num_extra)
                {
                    mz_uint extra_bits;
                    TINFL_GET_BITS(27, extra_bits, num_extra);
                    dist += extra_bits;
                }

                dist_from_out_buf_start = pOut_buf_cur - pOut_buf_start;
                if ((dist > dist_from_out_buf_start) && (decomp_flags & TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF))
                {
                    TINFL_CR_RETURN_FOREVER(37, TINFL_STATUS_FAILED);
                }

                pSrc = pOut_buf_start + ((dist_from_out_buf_start - dist) & out_buf_size_mask);

                if ((MZ_MAX(pOut_buf_cur, pSrc) + counter) > pOut_buf_end)
                {
                    while (counter--)
                    {
                        while (pOut_buf_cur >= pOut_buf_end)
                        {
                            TINFL_CR_RETURN(53, TINFL_STATUS_HAS_MORE_OUTPUT);
                        }
                        *pOut_buf_cur++ = pOut_buf_start[(dist_from_out_buf_start++ - dist) & out_buf_size_mask];
                    }
                    continue;
                }
#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES
                else if ((counter >= 9) && (counter <= dist))
                {
                    const mz_uint8 *pSrc_end = pSrc + (counter & ~7);
                    do
                    {
                        ((mz_uint32 *)pOut_buf_cur)[0] = ((const mz_uint32 *)pSrc)[0];
                        ((mz_uint32 *)pOut_buf_cur)[1] = ((const mz_uint32 *)pSrc)[1];
                        pOut_buf_cur += 8;
                    } while ((pSrc += 8) < pSrc_end);
                    if ((counter &= 7) < 3)
                    {
                        if (counter)
                        {
                            pOut_buf_cur[0] = pSrc[0];
                            if (counter > 1)
                                pOut_buf_cur[1] = pSrc[1];
                            pOut_buf_cur += counter;
                        }
                        continue;
                    }
                }
#endif
                while (counter > 2)
                {
                    pOut_buf_cur[0] = pSrc[0];
                    pOut_buf_cur[1] = pSrc[1];
                    pOut_buf_cur[2] = pSrc[2];
                    pOut_buf_cur += 3;
                    pSrc += 3;
                    counter -= 3;
                }
                if (counter > 0)
                {
                    pOut_buf_cur[0] = pSrc[0];
                    if (counter > 1)
                        pOut_buf_cur[1] = pSrc[1];
                    pOut_buf_cur += counter;
                }
            }
        }
    } while (!(r->m_final & 1));

    /* Ensure byte alignment and put back any bytes from the bitbuf if we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
    /* I'm being super conservative here. A number of simplifications can be made to the byte alignment part, and the Adler32 check shouldn't ever need to worry about reading from the bitbuf now. */
    TINFL_SKIP_BITS(32, num_bits & 7);
    while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
    {
        --pIn_buf_cur;
        num_bits -= 8;
    }
    bit_buf &= (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
    MZ_ASSERT(!num_bits); /* if this assert fires then we've read beyond the end of non-deflate/zlib streams with following data (such as gzip streams). */

    if (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER)
    {
        for (counter = 0; counter < 4; ++counter)
        {
            mz_uint s;
            if (num_bits)
                TINFL_GET_BITS(41, s, 8);
            else
                TINFL_GET_BYTE(42, s);
            r->m_z_adler32 = (r->m_z_adler32 << 8) | s;
        }
    }
    TINFL_CR_RETURN_FOREVER(34, TINFL_STATUS_DONE);

    TINFL_CR_FINISH

common_exit:
    /* As long as we aren't telling the caller that we NEED more input to make forward progress: */
    /* Put back any bytes from the bitbuf in case we've looked ahead too far on gzip, or other Deflate streams followed by arbitrary data. */
    /* We need to be very careful here to NOT push back any bytes we definitely know we need to make forward progress, though, or we'll lock the caller up into an inf loop. */
    if ((status != TINFL_STATUS_NEEDS_MORE_INPUT) && (status != TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS))
    {
        while ((pIn_buf_cur > pIn_buf_next) && (num_bits >= 8))
        {
            --pIn_buf_cur;
            num_bits -= 8;
        }
    }
    r->m_num_bits = num_bits;
    r->m_bit_buf = bit_buf & (tinfl_bit_buf_t)((((mz_uint64)1) << num_bits) - (mz_uint64)1);
    r->m_dist = dist;
    r->m_counter = counter;
    r->m_num_extra = num_extra;
    r->m_dist_from_out_buf_start = dist_from_out_buf_start;
    *pIn_buf_size = pIn_buf_cur - pIn_buf_next;
    *pOut_buf_size = pOut_buf_cur - pOut_buf_next;
    if ((decomp_flags & (TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_COMPUTE_ADLER32)) && (status >= 0))
    {
        const mz_uint8 *ptr = pOut_buf_next;
        size_t buf_len = *pOut_buf_size;
        mz_uint32 i, s1 = r->m_check_adler32 & 0xffff, s2 = r->m_check_adler32 >> 16;
        size_t block_len = buf_len % 5552;
        while (buf_len)
        {
            for (i = 0; i + 7 < block_len; i += 8, ptr += 8)
            {
                s1 += ptr[0], s2 += s1;
                s1 += ptr[1], s2 += s1;
                s1 += ptr[2], s2 += s1;
                s1 += ptr[3], s2 += s1;
                s1 += ptr[4], s2 += s1;
                s1 += ptr[5], s2 += s1;
                s1 += ptr[6], s2 += s1;
                s1 += ptr[7], s2 += s1;
            }
            for (; i < block_len; ++i)
                s1 += *ptr++, s2 += s1;
            s1 %= 65521U, s2 %= 65521U;
            buf_len -= block_len;
            block_len = 5552;
        }
        r->m_check_adler32 = (s2 << 16) + s1;
        if ((status == TINFL_STATUS_DONE) && (decomp_flags & TINFL_FLAG_PARSE_ZLIB_HEADER) && (r->m_check_adler32 != r->m_z_adler32))
            status = TINFL_STATUS_ADLER32_MISMATCH;
    }
    return status;
}

/* Higher level helper functions. */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags)
{
    tinfl_decompressor decomp;
    void *pBuf = NULL, *pNew_buf;
    size_t src_buf_ofs = 0, out_buf_capacity = 0;
    *pOut_len = 0;
    tinfl_init(&decomp);
    for (;;)
    {
        size_t src_buf_size = src_buf_len - src_buf_ofs, dst_buf_size = out_buf_capacity - *pOut_len, new_out_buf_capacity;
        tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf + src_buf_ofs, &src_buf_size, (mz_uint8 *)pBuf, pBuf ? (mz_uint8 *)pBuf + *pOut_len : NULL, &dst_buf_size,
                                               (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
        if ((status < 0) || (status == TINFL_STATUS_NEEDS_MORE_INPUT))
        {
            MZ_FREE(pBuf);
            *pOut_len = 0;
            return NULL;
        }
        src_buf_ofs += src_buf_size;
        *pOut_len += dst_buf_size;
        if (status == TINFL_STATUS_DONE)
            break;
        new_out_buf_capacity = out_buf_capacity * 2;
        if (new_out_buf_capacity < 128)
            new_out_buf_capacity = 128;
        pNew_buf = MZ_REALLOC(pBuf, new_out_buf_capacity);
        if (!pNew_buf)
        {
            MZ_FREE(pBuf);
            *pOut_len = 0;
            return NULL;
        }
        pBuf = pNew_buf;
        out_buf_capacity = new_out_buf_capacity;
    }
    return pBuf;
}
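
/* Illustrative usage sketch (not part of miniz): decompressing a complete zlib
   stream held in memory with the helper above. The returned buffer is allocated
   with the MZ_* allocator and should be released with mz_free(). Kept under
   "#if 0" so it does not affect the library build. */
#if 0
static int example_inflate_to_heap(const void *pZlib_data, size_t zlib_size)
{
    size_t out_len = 0;
    void *pOut = tinfl_decompress_mem_to_heap(pZlib_data, zlib_size, &out_len,
                                              TINFL_FLAG_PARSE_ZLIB_HEADER);
    if (!pOut)
        return -1; /* truncated or corrupt stream */
    /* ... use out_len bytes of decompressed data at pOut ... */
    mz_free(pOut);
    return 0;
}
#endif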

size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags)
{
    tinfl_decompressor decomp;
    tinfl_status status;
    tinfl_init(&decomp);
    status = tinfl_decompress(&decomp, (const mz_uint8 *)pSrc_buf, &src_buf_len, (mz_uint8 *)pOut_buf, (mz_uint8 *)pOut_buf, &out_buf_len, (flags & ~TINFL_FLAG_HAS_MORE_INPUT) | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    return (status != TINFL_STATUS_DONE) ? TINFL_DECOMPRESS_MEM_TO_MEM_FAILED : out_buf_len;
}

int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags)
{
    int result = 0;
    tinfl_decompressor decomp;
    mz_uint8 *pDict = (mz_uint8 *)MZ_MALLOC(TINFL_LZ_DICT_SIZE);
    size_t in_buf_ofs = 0, dict_ofs = 0;
    if (!pDict)
        return TINFL_STATUS_FAILED;
    tinfl_init(&decomp);
    for (;;)
    {
        size_t in_buf_size = *pIn_buf_size - in_buf_ofs, dst_buf_size = TINFL_LZ_DICT_SIZE - dict_ofs;
        tinfl_status status = tinfl_decompress(&decomp, (const mz_uint8 *)pIn_buf + in_buf_ofs, &in_buf_size, pDict, pDict + dict_ofs, &dst_buf_size,
                                               (flags & ~(TINFL_FLAG_HAS_MORE_INPUT | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF)));
        in_buf_ofs += in_buf_size;
        if ((dst_buf_size) && (!(*pPut_buf_func)(pDict + dict_ofs, (int)dst_buf_size, pPut_buf_user)))
            break;
        if (status != TINFL_STATUS_HAS_MORE_OUTPUT)
        {
            result = (status == TINFL_STATUS_DONE);
            break;
        }
        dict_ofs = (dict_ofs + dst_buf_size) & (TINFL_LZ_DICT_SIZE - 1);
    }
    MZ_FREE(pDict);
    *pIn_buf_size = in_buf_ofs;
    return result;
}
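
/* Illustrative usage sketch (not part of miniz): streaming decompressed output
   through a caller-supplied sink with the helper above. The callback returns
   non-zero to keep going, zero to abort. Assumes <stdio.h> when compiled
   standalone; kept under "#if 0". */
#if 0
static int example_put_buf(const void *pBuf, int len, void *pUser)
{
    return fwrite(pBuf, 1, (size_t)len, (FILE *)pUser) == (size_t)len;
}

static int example_inflate_to_file(const void *pSrc, size_t src_len, FILE *pOut)
{
    size_t in_size = src_len;
    /* Returns 1 on success; in_size is updated to the number of input bytes consumed. */
    return tinfl_decompress_mem_to_callback(pSrc, &in_size, example_put_buf, pOut,
                                            TINFL_FLAG_PARSE_ZLIB_HEADER);
}
#endif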

tinfl_decompressor *tinfl_decompressor_alloc()
{
    tinfl_decompressor *pDecomp = (tinfl_decompressor *)MZ_MALLOC(sizeof(tinfl_decompressor));
    if (pDecomp)
        tinfl_init(pDecomp);
    return pDecomp;
}

void tinfl_decompressor_free(tinfl_decompressor *pDecomp)
{
    MZ_FREE(pDecomp);
}
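
/* Illustrative usage sketch (not part of miniz): like the tdefl_ pair, these two
   functions let bindings treat tinfl_decompressor as an opaque handle. A one-shot
   decompress into a caller buffer (no wrapping window) might look like this,
   kept under "#if 0". */
#if 0
static int example_one_shot_inflate(const void *pSrc, size_t src_len,
                                    void *pDst, size_t *pDst_len)
{
    tinfl_status status;
    tinfl_decompressor *pDecomp = tinfl_decompressor_alloc(); /* already tinfl_init()ed */
    if (!pDecomp)
        return -1;
    status = tinfl_decompress(pDecomp, (const mz_uint8 *)pSrc, &src_len,
                              (mz_uint8 *)pDst, (mz_uint8 *)pDst, pDst_len,
                              TINFL_FLAG_PARSE_ZLIB_HEADER |
                              TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
    tinfl_decompressor_free(pDecomp);
    return (status == TINFL_STATUS_DONE) ? 0 : -1;
}
#endif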

#ifdef __cplusplus
}
#endif
/**************************************************************************
 *
 * Copyright 2013-2014 RAD Game Tools and Valve Software
 * Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC
 * Copyright 2016 Martin Raiber
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 **************************************************************************/


#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- .ZIP archive reading */

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>

#if defined(_MSC_VER) || defined(__MINGW64__)
static FILE *mz_fopen(const char *pFilename, const char *pMode)
{
    FILE *pFile = NULL;
    fopen_s(&pFile, pFilename, pMode);
    return pFile;
}
static FILE *mz_freopen(const char *pPath, const char *pMode, FILE *pStream)
{
    FILE *pFile = NULL;
    if (freopen_s(&pFile, pPath, pMode, pStream))
        return NULL;
    return pFile;
}
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPENREAD mz_fopen
#define MZ_FOPEN mz_fopen
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 _ftelli64
#define MZ_FSEEK64 _fseeki64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN mz_freopen
#define MZ_DELETE_FILE remove
#elif defined(__MINGW32__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPENREAD(f, m) _wfopen(f, m)
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT _stat
#define MZ_FILE_STAT _stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__TINYC__)
#ifndef MINIZ_NO_TIME
#include <sys/utime.h>
#endif
#define MZ_FOPENREAD(f, m) fopen(f, m)
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__GNUC__) && _LARGEFILE64_SOURCE
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPEN(f, m) fopen64(f, m)
#define MZ_FOPENREAD(f, m) fopen64(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello64
#define MZ_FSEEK64 fseeko64
#define MZ_FILE_STAT_STRUCT stat64
#define MZ_FILE_STAT stat64
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen64(p, m, s)
#define MZ_DELETE_FILE remove
#elif defined(__APPLE__)
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FOPENREAD(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(p, m, s) freopen(p, m, s)
#define MZ_DELETE_FILE remove

#else
#pragma message("Using fopen, ftello, fseeko, stat() etc. path for file I/O - this path may not support large files.")
#ifndef MINIZ_NO_TIME
#include <utime.h>
#endif
#define MZ_FOPENREAD(f, m) fopen(f, m)
#define MZ_FOPEN(f, m) fopen(f, m)
#define MZ_FCLOSE fclose
#define MZ_FREAD fread
#define MZ_FWRITE fwrite
#ifdef __STRICT_ANSI__
#define MZ_FTELL64 ftell
#define MZ_FSEEK64 fseek
#else
#define MZ_FTELL64 ftello
#define MZ_FSEEK64 fseeko
#endif
#define MZ_FILE_STAT_STRUCT stat
#define MZ_FILE_STAT stat
#define MZ_FFLUSH fflush
#define MZ_FREOPEN(f, m, s) freopen(f, m, s)
#define MZ_DELETE_FILE remove
#endif /* #ifdef _MSC_VER */
#endif /* #ifdef MINIZ_NO_STDIO */

#define MZ_TOLOWER(c) ((((c) >= 'A') && ((c) <= 'Z')) ? ((c) - 'A' + 'a') : (c))

/* Various ZIP archive enums. To completely avoid cross platform compiler alignment and platform endian issues, miniz.c doesn't use structs for any of this stuff. */
enum
{
    /* ZIP archive identifiers and record sizes */
    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06054b50,
    MZ_ZIP_CENTRAL_DIR_HEADER_SIG = 0x02014b50,
    MZ_ZIP_LOCAL_DIR_HEADER_SIG = 0x04034b50,
    MZ_ZIP_LOCAL_DIR_HEADER_SIZE = 30,
    MZ_ZIP_CENTRAL_DIR_HEADER_SIZE = 46,
    MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE = 22,

    /* ZIP64 archive identifier and record sizes */
    MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG = 0x06064b50,
    MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG = 0x07064b50,
    MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE = 56,
    MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE = 20,
    MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID = 0x0001,
    MZ_ZIP_DATA_DESCRIPTOR_ID = 0x08074b50,
    MZ_ZIP_DATA_DESCRIPTER_SIZE64 = 24,
    MZ_ZIP_DATA_DESCRIPTER_SIZE32 = 16,

    /* Central directory header record offsets */
    MZ_ZIP_CDH_SIG_OFS = 0,
    MZ_ZIP_CDH_VERSION_MADE_BY_OFS = 4,
    MZ_ZIP_CDH_VERSION_NEEDED_OFS = 6,
    MZ_ZIP_CDH_BIT_FLAG_OFS = 8,
    MZ_ZIP_CDH_METHOD_OFS = 10,
    MZ_ZIP_CDH_FILE_TIME_OFS = 12,
    MZ_ZIP_CDH_FILE_DATE_OFS = 14,
    MZ_ZIP_CDH_CRC32_OFS = 16,
    MZ_ZIP_CDH_COMPRESSED_SIZE_OFS = 20,
    MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS = 24,
    MZ_ZIP_CDH_FILENAME_LEN_OFS = 28,
    MZ_ZIP_CDH_EXTRA_LEN_OFS = 30,
    MZ_ZIP_CDH_COMMENT_LEN_OFS = 32,
    MZ_ZIP_CDH_DISK_START_OFS = 34,
    MZ_ZIP_CDH_INTERNAL_ATTR_OFS = 36,
    MZ_ZIP_CDH_EXTERNAL_ATTR_OFS = 38,
    MZ_ZIP_CDH_LOCAL_HEADER_OFS = 42,

    /* Local directory header offsets */
    MZ_ZIP_LDH_SIG_OFS = 0,
    MZ_ZIP_LDH_VERSION_NEEDED_OFS = 4,
    MZ_ZIP_LDH_BIT_FLAG_OFS = 6,
    MZ_ZIP_LDH_METHOD_OFS = 8,
    MZ_ZIP_LDH_FILE_TIME_OFS = 10,
    MZ_ZIP_LDH_FILE_DATE_OFS = 12,
    MZ_ZIP_LDH_CRC32_OFS = 14,
    MZ_ZIP_LDH_COMPRESSED_SIZE_OFS = 18,
    MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS = 22,
    MZ_ZIP_LDH_FILENAME_LEN_OFS = 26,
    MZ_ZIP_LDH_EXTRA_LEN_OFS = 28,
    MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR = 1 << 3,

    /* End of central directory offsets */
    MZ_ZIP_ECDH_SIG_OFS = 0,
    MZ_ZIP_ECDH_NUM_THIS_DISK_OFS = 4,
    MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS = 6,
    MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 8,
    MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS = 10,
    MZ_ZIP_ECDH_CDIR_SIZE_OFS = 12,
    MZ_ZIP_ECDH_CDIR_OFS_OFS = 16,
    MZ_ZIP_ECDH_COMMENT_SIZE_OFS = 20,

    /* ZIP64 End of central directory locator offsets */
    MZ_ZIP64_ECDL_SIG_OFS = 0,                    /* 4 bytes */
    MZ_ZIP64_ECDL_NUM_DISK_CDIR_OFS = 4,          /* 4 bytes */
    MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS = 8,  /* 8 bytes */
    MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS = 16, /* 4 bytes */

    /* ZIP64 End of central directory header offsets */
    MZ_ZIP64_ECDH_SIG_OFS = 0,                       /* 4 bytes */
    MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS = 4,            /* 8 bytes */
    MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS = 12,          /* 2 bytes */
    MZ_ZIP64_ECDH_VERSION_NEEDED_OFS = 14,           /* 2 bytes */
    MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS = 16,            /* 4 bytes */
    MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS = 20,            /* 4 bytes */
    MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS = 24, /* 8 bytes */
    MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS = 32,       /* 8 bytes */
    MZ_ZIP64_ECDH_CDIR_SIZE_OFS = 40,                /* 8 bytes */
    MZ_ZIP64_ECDH_CDIR_OFS_OFS = 48,                 /* 8 bytes */
    MZ_ZIP_VERSION_MADE_BY_DOS_FILESYSTEM_ID = 0,
    MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG = 0x10,
    MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED = 1,
    MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG = 32,
    MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION = 64,
    MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED = 8192,
    MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8 = 1 << 11
};
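
/* Illustrative sketch (not part of miniz): the *_OFS enums above are byte offsets
   into the raw little-endian ZIP records, which are read with MZ_READ_LE16/MZ_READ_LE32
   rather than through structs. Decoding the 22-byte end-of-central-directory record
   from a buffer would look like this (kept under "#if 0"); mz_zip_reader_read_central_dir()
   below does exactly this. */
#if 0
static void example_parse_eocd(const mz_uint8 *pEocd, /* MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE bytes */
                               mz_uint32 *pTotal_entries, mz_uint32 *pCdir_size, mz_uint32 *pCdir_ofs)
{
    /* Caller should already have verified MZ_READ_LE32(pEocd + MZ_ZIP_ECDH_SIG_OFS). */
    *pTotal_entries = MZ_READ_LE16(pEocd + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS);
    *pCdir_size = MZ_READ_LE32(pEocd + MZ_ZIP_ECDH_CDIR_SIZE_OFS);
    *pCdir_ofs = MZ_READ_LE32(pEocd + MZ_ZIP_ECDH_CDIR_OFS_OFS);
}
#endif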

typedef struct
{
    void *m_p;
    size_t m_size, m_capacity;
    mz_uint m_element_size;
} mz_zip_array;

struct mz_zip_internal_state_tag
{
    mz_zip_array m_central_dir;
    mz_zip_array m_central_dir_offsets;
    mz_zip_array m_sorted_central_dir_offsets;

    /* The flags passed in when the archive is initially opened. */
    uint32_t m_init_flags;

    /* MZ_TRUE if the archive has a zip64 end of central directory headers, etc. */
    mz_bool m_zip64;

    /* MZ_TRUE if we found zip64 extended info in the central directory (m_zip64 will also be set to true, even if we didn't find a zip64 end of central dir header, etc.) */
    mz_bool m_zip64_has_extended_info_fields;

    /* These fields are used by the file, FILE, memory, and memory/heap read/write helpers. */
    MZ_FILE *m_pFile;
    mz_uint64 m_file_archive_start_ofs;

    void *m_pMem;
    size_t m_mem_size;
    size_t m_mem_capacity;
};

#define MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(array_ptr, element_size) (array_ptr)->m_element_size = element_size

#if defined(DEBUG) || defined(_DEBUG) || defined(NDEBUG)
static MZ_FORCEINLINE mz_uint mz_zip_array_range_check(const mz_zip_array *pArray, mz_uint index)
{
    MZ_ASSERT(index < pArray->m_size);
    return index;
}
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[mz_zip_array_range_check(array_ptr, index)]
#else
#define MZ_ZIP_ARRAY_ELEMENT(array_ptr, element_type, index) ((element_type *)((array_ptr)->m_p))[index]
#endif

static MZ_FORCEINLINE void mz_zip_array_init(mz_zip_array *pArray, mz_uint32 element_size)
{
    memset(pArray, 0, sizeof(mz_zip_array));
    pArray->m_element_size = element_size;
}

static MZ_FORCEINLINE void mz_zip_array_clear(mz_zip_archive *pZip, mz_zip_array *pArray)
{
    pZip->m_pFree(pZip->m_pAlloc_opaque, pArray->m_p);
    memset(pArray, 0, sizeof(mz_zip_array));
}

static mz_bool mz_zip_array_ensure_capacity(mz_zip_archive *pZip, mz_zip_array *pArray, size_t min_new_capacity, mz_uint growing)
{
    void *pNew_p;
    size_t new_capacity = min_new_capacity;
    MZ_ASSERT(pArray->m_element_size);
    if (pArray->m_capacity >= min_new_capacity)
        return MZ_TRUE;
    if (growing)
    {
        new_capacity = MZ_MAX(1, pArray->m_capacity);
        while (new_capacity < min_new_capacity)
            new_capacity *= 2;
    }
    if (NULL == (pNew_p = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pArray->m_p, pArray->m_element_size, new_capacity)))
        return MZ_FALSE;
    pArray->m_p = pNew_p;
    pArray->m_capacity = new_capacity;
    return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_array_reserve(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_capacity, mz_uint growing)
{
    if (new_capacity > pArray->m_capacity)
    {
        if (!mz_zip_array_ensure_capacity(pZip, pArray, new_capacity, growing))
            return MZ_FALSE;
    }
    return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_array_resize(mz_zip_archive *pZip, mz_zip_array *pArray, size_t new_size, mz_uint growing)
{
    if (new_size > pArray->m_capacity)
    {
        if (!mz_zip_array_ensure_capacity(pZip, pArray, new_size, growing))
            return MZ_FALSE;
    }
    pArray->m_size = new_size;
    return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_array_ensure_room(mz_zip_archive *pZip, mz_zip_array *pArray, size_t n)
{
    return mz_zip_array_reserve(pZip, pArray, pArray->m_size + n, MZ_TRUE);
}

static MZ_FORCEINLINE mz_bool mz_zip_array_push_back(mz_zip_archive *pZip, mz_zip_array *pArray, const void *pElements, size_t n)
{
    size_t orig_size = pArray->m_size;
    if (!mz_zip_array_resize(pZip, pArray, orig_size + n, MZ_TRUE))
        return MZ_FALSE;
    memcpy((mz_uint8 *)pArray->m_p + orig_size * pArray->m_element_size, pElements, n * pArray->m_element_size);
    return MZ_TRUE;
}

#ifndef MINIZ_NO_TIME
static MZ_TIME_T mz_zip_dos_to_time_t(int dos_time, int dos_date)
{
    struct tm tm;
    memset(&tm, 0, sizeof(tm));
    tm.tm_isdst = -1;
    tm.tm_year = ((dos_date >> 9) & 127) + 1980 - 1900;
    tm.tm_mon = ((dos_date >> 5) & 15) - 1;
    tm.tm_mday = dos_date & 31;
    tm.tm_hour = (dos_time >> 11) & 31;
    tm.tm_min = (dos_time >> 5) & 63;
    tm.tm_sec = (dos_time << 1) & 62;
    return mktime(&tm);
}
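
/* Worked example (not part of miniz) of the DOS date/time packing decoded above,
   for the date 2019-12-03 14:35:20:
   dos_date = ((2019 - 1980) << 9) | (12 << 5) | 3  = 0x4F83
   dos_time = (14 << 11) | (35 << 5) | (20 >> 1)    = 0x746A
   i.e. the year is stored as an offset from 1980 and seconds at 2-second resolution. */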

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static void mz_zip_time_t_to_dos_time(MZ_TIME_T time, mz_uint16 *pDOS_time, mz_uint16 *pDOS_date)
{
#ifdef _MSC_VER
    struct tm tm_struct;
    struct tm *tm = &tm_struct;
    errno_t err = localtime_s(tm, &time);
    if (err)
    {
        *pDOS_date = 0;
        *pDOS_time = 0;
        return;
    }
#else
    struct tm *tm = localtime(&time);
#endif /* #ifdef _MSC_VER */

    *pDOS_time = (mz_uint16)(((tm->tm_hour) << 11) + ((tm->tm_min) << 5) + ((tm->tm_sec) >> 1));
    *pDOS_date = (mz_uint16)(((tm->tm_year + 1900 - 1980) << 9) + ((tm->tm_mon + 1) << 5) + tm->tm_mday);
}
#endif /* MINIZ_NO_ARCHIVE_WRITING_APIS */

#ifndef MINIZ_NO_STDIO
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
static mz_bool mz_zip_get_file_modified_time(const char *pFilename, MZ_TIME_T *pTime)
{
    struct MZ_FILE_STAT_STRUCT file_stat;

    /* On Linux with x86 glibc, this call will fail on large files (I think >= 0x80000000 bytes) unless you compiled with _LARGEFILE64_SOURCE. Argh. */
    if (MZ_FILE_STAT(pFilename, &file_stat) != 0)
        return MZ_FALSE;

    *pTime = file_stat.st_mtime;

    return MZ_TRUE;
}
#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS*/

static mz_bool mz_zip_set_file_times(const char *pFilename, MZ_TIME_T access_time, MZ_TIME_T modified_time)
{
    struct utimbuf t;

    memset(&t, 0, sizeof(t));
    t.actime = access_time;
    t.modtime = modified_time;

    return !utime(pFilename, &t);
}
#endif /* #ifndef MINIZ_NO_STDIO */
#endif /* #ifndef MINIZ_NO_TIME */

static MZ_FORCEINLINE mz_bool mz_zip_set_error(mz_zip_archive *pZip, mz_zip_error err_num)
{
    if (pZip)
        pZip->m_last_error = err_num;
    return MZ_FALSE;
}

static mz_bool mz_zip_reader_init_internal(mz_zip_archive *pZip, mz_uint flags)
{
    (void)flags;
    if ((!pZip) || (pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!pZip->m_pAlloc)
        pZip->m_pAlloc = miniz_def_alloc_func;
    if (!pZip->m_pFree)
        pZip->m_pFree = miniz_def_free_func;
    if (!pZip->m_pRealloc)
        pZip->m_pRealloc = miniz_def_realloc_func;

    pZip->m_archive_size = 0;
    pZip->m_central_directory_file_ofs = 0;
    pZip->m_total_files = 0;
    pZip->m_last_error = MZ_ZIP_NO_ERROR;

    if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));
    pZip->m_pState->m_init_flags = flags;
    pZip->m_pState->m_zip64 = MZ_FALSE;
    pZip->m_pState->m_zip64_has_extended_info_fields = MZ_FALSE;

    pZip->m_zip_mode = MZ_ZIP_MODE_READING;

    return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_reader_filename_less(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, mz_uint r_index)
{
    const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE;
    const mz_uint8 *pR = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, r_index));
    mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS), r_len = MZ_READ_LE16(pR + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    mz_uint8 l = 0, r = 0;
    pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pR += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pE = pL + MZ_MIN(l_len, r_len);
    while (pL < pE)
    {
        if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
            break;
        pL++;
        pR++;
    }
    return (pL == pE) ? (l_len < r_len) : (l < r);
}

#define MZ_SWAP_UINT32(a, b) \
    do                       \
    {                        \
        mz_uint32 t = a;     \
        a = b;               \
        b = t;               \
    }                        \
    MZ_MACRO_END

/* Heap sort of lowercased filenames, used to help accelerate plain central directory searches by mz_zip_reader_locate_file(). (Could also use qsort(), but it could allocate memory.) */
static void mz_zip_reader_sort_central_dir_offsets_by_filename(mz_zip_archive *pZip)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
    const mz_zip_array *pCentral_dir = &pState->m_central_dir;
    mz_uint32 *pIndices;
    mz_uint32 start, end;
    const mz_uint32 size = pZip->m_total_files;

    if (size <= 1U)
        return;

    pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0);

    start = (size - 2U) >> 1U;
    for (;;)
    {
        mz_uint64 child, root = start;
        for (;;)
        {
            if ((child = (root << 1U) + 1U) >= size)
                break;
            child += (((child + 1U) < size) && (mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U])));
            if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
                break;
            MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
            root = child;
        }
        if (!start)
            break;
        start--;
    }

    end = size - 1;
    while (end > 0)
    {
        mz_uint64 child, root = 0;
        MZ_SWAP_UINT32(pIndices[end], pIndices[0]);
        for (;;)
        {
            if ((child = (root << 1U) + 1U) >= end)
                break;
            child += (((child + 1U) < end) && mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[child], pIndices[child + 1U]));
            if (!mz_zip_reader_filename_less(pCentral_dir, pCentral_dir_offsets, pIndices[root], pIndices[child]))
                break;
            MZ_SWAP_UINT32(pIndices[root], pIndices[child]);
            root = child;
        }
        end--;
    }
}

static mz_bool mz_zip_reader_locate_header_sig(mz_zip_archive *pZip, mz_uint32 record_sig, mz_uint32 record_size, mz_int64 *pOfs)
{
    mz_int64 cur_file_ofs;
    mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
    mz_uint8 *pBuf = (mz_uint8 *)buf_u32;

    /* Basic sanity checks - reject files which are too small */
    if (pZip->m_archive_size < record_size)
        return MZ_FALSE;

    /* Find the record by scanning the file from the end towards the beginning. */
    cur_file_ofs = MZ_MAX((mz_int64)pZip->m_archive_size - (mz_int64)sizeof(buf_u32), 0);
    for (;;)
    {
        int i, n = (int)MZ_MIN(sizeof(buf_u32), pZip->m_archive_size - cur_file_ofs);

        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, n) != (mz_uint)n)
            return MZ_FALSE;

        for (i = n - 4; i >= 0; --i)
        {
            mz_uint s = MZ_READ_LE32(pBuf + i);
            if (s == record_sig)
            {
                if ((pZip->m_archive_size - (cur_file_ofs + i)) >= record_size)
                    break;
            }
        }

        if (i >= 0)
        {
            cur_file_ofs += i;
            break;
        }

        /* Give up if we've searched the entire file, or we've gone back "too far" (~64kb) */
        if ((!cur_file_ofs) || ((pZip->m_archive_size - cur_file_ofs) >= (MZ_UINT16_MAX + record_size)))
            return MZ_FALSE;

        cur_file_ofs = MZ_MAX(cur_file_ofs - (sizeof(buf_u32) - 3), 0);
    }

    *pOfs = cur_file_ofs;
    return MZ_TRUE;
}

static mz_bool mz_zip_reader_read_central_dir(mz_zip_archive *pZip, mz_uint flags)
{
    mz_uint cdir_size = 0, cdir_entries_on_this_disk = 0, num_this_disk = 0, cdir_disk_index = 0;
    mz_uint64 cdir_ofs = 0;
    mz_int64 cur_file_ofs = 0;
    const mz_uint8 *p;

    mz_uint32 buf_u32[4096 / sizeof(mz_uint32)];
    mz_uint8 *pBuf = (mz_uint8 *)buf_u32;
    mz_bool sort_central_dir = ((flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0);
    mz_uint32 zip64_end_of_central_dir_locator_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pZip64_locator = (mz_uint8 *)zip64_end_of_central_dir_locator_u32;

    mz_uint32 zip64_end_of_central_dir_header_u32[(MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pZip64_end_of_central_dir = (mz_uint8 *)zip64_end_of_central_dir_header_u32;

    mz_uint64 zip64_end_of_central_dir_ofs = 0;

    /* Basic sanity checks - reject files which are too small. */
    if (pZip->m_archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    if (!mz_zip_reader_locate_header_sig(pZip, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE, &cur_file_ofs))
        return mz_zip_set_error(pZip, MZ_ZIP_FAILED_FINDING_CENTRAL_DIR);

    /* Read and verify the end of central directory record. */
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_SIG_OFS) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    if (cur_file_ofs >= (MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE + MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE))
    {
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs - MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE, pZip64_locator, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE)
        {
            if (MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG)
            {
                zip64_end_of_central_dir_ofs = MZ_READ_LE64(pZip64_locator + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS);
                if (zip64_end_of_central_dir_ofs > (pZip->m_archive_size - MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE))
                    return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

                if (pZip->m_pRead(pZip->m_pIO_opaque, zip64_end_of_central_dir_ofs, pZip64_end_of_central_dir, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)
                {
                    if (MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIG_OFS) == MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG)
                    {
                        pZip->m_pState->m_zip64 = MZ_TRUE;
                    }
                }
            }
        }
    }

    pZip->m_total_files = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS);
    cdir_entries_on_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
    num_this_disk = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_THIS_DISK_OFS);
    cdir_disk_index = MZ_READ_LE16(pBuf + MZ_ZIP_ECDH_NUM_DISK_CDIR_OFS);
    cdir_size = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_SIZE_OFS);
    cdir_ofs = MZ_READ_LE32(pBuf + MZ_ZIP_ECDH_CDIR_OFS_OFS);

    if (pZip->m_pState->m_zip64)
    {
        mz_uint32 zip64_total_num_of_disks = MZ_READ_LE32(pZip64_locator + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS);
        mz_uint64 zip64_cdir_total_entries = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS);
        mz_uint64 zip64_cdir_total_entries_on_this_disk = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS);
        mz_uint64 zip64_size_of_end_of_central_dir_record = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS);
        mz_uint64 zip64_size_of_central_directory = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_SIZE_OFS);

        if (zip64_size_of_end_of_central_dir_record < (MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - 12))
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

        if (zip64_total_num_of_disks != 1U)
            return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

        /* Check for miniz's practical limits */
        if (zip64_cdir_total_entries > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        pZip->m_total_files = (mz_uint32)zip64_cdir_total_entries;

        if (zip64_cdir_total_entries_on_this_disk > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        cdir_entries_on_this_disk = (mz_uint32)zip64_cdir_total_entries_on_this_disk;

        /* Check for miniz's current practical limits (sorry, this should be enough for millions of files) */
        if (zip64_size_of_central_directory > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

        cdir_size = (mz_uint32)zip64_size_of_central_directory;

        num_this_disk = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_THIS_DISK_OFS);

        cdir_disk_index = MZ_READ_LE32(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_NUM_DISK_CDIR_OFS);

        cdir_ofs = MZ_READ_LE64(pZip64_end_of_central_dir + MZ_ZIP64_ECDH_CDIR_OFS_OFS);
    }

    if (pZip->m_total_files != cdir_entries_on_this_disk)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

    if (((num_this_disk | cdir_disk_index) != 0) && ((num_this_disk != 1) || (cdir_disk_index != 1)))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

    if (cdir_size < pZip->m_total_files * MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((cdir_ofs + (mz_uint64)cdir_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    pZip->m_central_directory_file_ofs = cdir_ofs;

    if (pZip->m_total_files)
    {
        mz_uint i, n;
        /* Read the entire central directory into a heap block, and allocate another heap block to hold the unsorted central dir file record offsets, and possibly another to hold the sorted indices. */
        if ((!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir, cdir_size, MZ_FALSE)) ||
            (!mz_zip_array_resize(pZip, &pZip->m_pState->m_central_dir_offsets, pZip->m_total_files, MZ_FALSE)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        if (sort_central_dir)
        {
            if (!mz_zip_array_resize(pZip, &pZip->m_pState->m_sorted_central_dir_offsets, pZip->m_total_files, MZ_FALSE))
                return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (pZip->m_pRead(pZip->m_pIO_opaque, cdir_ofs, pZip->m_pState->m_central_dir.m_p, cdir_size) != cdir_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

        /* Now create an index into the central directory file records, do some basic sanity checking on each record */
        p = (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p;
        for (n = cdir_size, i = 0; i < pZip->m_total_files; ++i)
        {
            mz_uint total_header_size, disk_index, bit_flags, filename_size, ext_data_size;
            mz_uint64 comp_size, decomp_size, local_header_ofs;

            if ((n < MZ_ZIP_CENTRAL_DIR_HEADER_SIZE) || (MZ_READ_LE32(p) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG))
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, i) = (mz_uint32)(p - (const mz_uint8 *)pZip->m_pState->m_central_dir.m_p);

            if (sort_central_dir)
                MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_sorted_central_dir_offsets, mz_uint32, i) = i;

            comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
            decomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
            local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);
            filename_size = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
            ext_data_size = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS);

            if ((!pZip->m_pState->m_zip64_has_extended_info_fields) &&
                (ext_data_size) &&
                (MZ_MAX(MZ_MAX(comp_size, decomp_size), local_header_ofs) == MZ_UINT32_MAX))
            {
                /* Attempt to find zip64 extended information field in the entry's extra data */
                mz_uint32 extra_size_remaining = ext_data_size;

                if (extra_size_remaining)
                {
                    const mz_uint8 *pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size;

                    do
                    {
                        mz_uint32 field_id;
                        mz_uint32 field_data_size;

                        if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        field_id = MZ_READ_LE16(pExtra_data);
                        field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));

                        if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining)
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
                        {
                            /* OK, the archive didn't have any zip64 headers but it uses a zip64 extended information field, so mark it as zip64 anyway (this can occur with infozip's zip util when it compresses files read from stdin). */
                            pZip->m_pState->m_zip64 = MZ_TRUE;
                            pZip->m_pState->m_zip64_has_extended_info_fields = MZ_TRUE;
                            break;
                        }

                        pExtra_data += sizeof(mz_uint16) * 2 + field_data_size;
                        extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size;
                    } while (extra_size_remaining);
                }
            }

            /* I've seen archives that aren't marked as zip64 but use zip64 ext data, argh */
            if ((comp_size != MZ_UINT32_MAX) && (decomp_size != MZ_UINT32_MAX))
            {
                if (((!MZ_READ_LE32(p + MZ_ZIP_CDH_METHOD_OFS)) && (decomp_size != comp_size)) || (decomp_size && !comp_size))
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            disk_index = MZ_READ_LE16(p + MZ_ZIP_CDH_DISK_START_OFS);
            if ((disk_index == MZ_UINT16_MAX) || ((disk_index != num_this_disk) && (disk_index != 1)))
                return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_MULTIDISK);

            if (comp_size != MZ_UINT32_MAX)
            {
                if (((mz_uint64)MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS) + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + comp_size) > pZip->m_archive_size)
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            bit_flags = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
            if (bit_flags & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_LOCAL_DIR_IS_MASKED)
                return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

            if ((total_header_size = MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS)) > n)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            n -= total_header_size;
            p += total_header_size;
        }
    }

    if (sort_central_dir)
        mz_zip_reader_sort_central_dir_offsets_by_filename(pZip);

    return MZ_TRUE;
}

void mz_zip_zero_struct(mz_zip_archive *pZip)
{
    if (pZip)
        MZ_CLEAR_OBJ(*pZip);
}

static mz_bool mz_zip_reader_end_internal(mz_zip_archive *pZip, mz_bool set_last_error)
{
    mz_bool status = MZ_TRUE;

    if (!pZip)
        return MZ_FALSE;

    if ((!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
    {
        if (set_last_error)
            pZip->m_last_error = MZ_ZIP_INVALID_PARAMETER;

        return MZ_FALSE;
    }

    if (pZip->m_pState)
    {
        mz_zip_internal_state *pState = pZip->m_pState;
        pZip->m_pState = NULL;

        mz_zip_array_clear(pZip, &pState->m_central_dir);
        mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
        mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
        if (pState->m_pFile)
        {
            if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
            {
                if (MZ_FCLOSE(pState->m_pFile) == EOF)
                {
                    if (set_last_error)
                        pZip->m_last_error = MZ_ZIP_FILE_CLOSE_FAILED;
                    status = MZ_FALSE;
                }
            }
            pState->m_pFile = NULL;
        }
#endif /* #ifndef MINIZ_NO_STDIO */

        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
    }
    pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;

    return status;
}

mz_bool mz_zip_reader_end(mz_zip_archive *pZip)
{
    return mz_zip_reader_end_internal(pZip, MZ_TRUE);
}
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint flags)
{
    if ((!pZip) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_USER;
    pZip->m_archive_size = size;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}
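
/* Illustrative sketch (not part of the original miniz code): initializing a
 * reader with a user-supplied read callback. mz_zip_reader_init() requires
 * m_pRead and m_pIO_opaque to be set before the call and everything else to be
 * zeroed. The my_blob type and my_* helper names below are hypothetical. */
#if 0
struct my_blob { const mz_uint8 *data; mz_uint64 size; };

static size_t my_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    /* Serve bytes from a caller-owned buffer, clamping at the end of the data. */
    struct my_blob *blob = (struct my_blob *)pOpaque;
    size_t avail = (file_ofs >= blob->size) ? 0 : (size_t)MZ_MIN(blob->size - file_ofs, n);
    memcpy(pBuf, blob->data + file_ofs, avail);
    return avail;
}

static mz_bool my_open_reader(mz_zip_archive *pZip, struct my_blob *blob)
{
    mz_zip_zero_struct(pZip);
    pZip->m_pRead = my_read_func;
    pZip->m_pIO_opaque = blob;
    return mz_zip_reader_init(pZip, blob->size, 0);
}
#endif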

static size_t mz_zip_mem_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    size_t s = (file_ofs >= pZip->m_archive_size) ? 0 : (size_t)MZ_MIN(pZip->m_archive_size - file_ofs, n);
    memcpy(pBuf, (const mz_uint8 *)pZip->m_pState->m_pMem + file_ofs, s);
    return s;
}

mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint flags)
{
    if (!pMem)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_MEMORY;
    pZip->m_archive_size = size;
    pZip->m_pRead = mz_zip_mem_read_func;
    pZip->m_pIO_opaque = pZip;
    pZip->m_pNeeds_keepalive = NULL;

#ifdef __cplusplus
    pZip->m_pState->m_pMem = const_cast<void *>(pMem);
#else
    pZip->m_pState->m_pMem = (void *)pMem;
#endif

    pZip->m_pState->m_mem_size = size;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}
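
/* Illustrative sketch (not part of the original miniz code): the typical
 * lifetime when the whole archive is already in memory - init, use, end.
 * The my_* name is hypothetical and error handling is intentionally minimal. */
#if 0
static mz_uint my_count_entries_in_mem(const void *pData, size_t size)
{
    mz_zip_archive zip;
    mz_uint n = 0;

    mz_zip_zero_struct(&zip);
    if (mz_zip_reader_init_mem(&zip, pData, size, 0))
    {
        n = zip.m_total_files;
        mz_zip_reader_end(&zip);
    }
    return n;
}
#endif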

#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_read_func(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);

    file_ofs += pZip->m_pState->m_file_archive_start_ofs;

    if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
        return 0;

    return MZ_FREAD(pBuf, 1, n, pZip->m_pState->m_pFile);
}

mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const WCHAR_TYPE *pFilename, mz_uint32 flags)
{
    return mz_zip_reader_init_file_v2(pZip, pFilename, flags, 0, 0);
}

mz_bool mz_zip_reader_init_file_v2(mz_zip_archive *pZip, const WCHAR_TYPE *pFilename, mz_uint flags, mz_uint64 file_start_ofs, mz_uint64 archive_size)
{
    mz_uint64 file_size;
    MZ_FILE *pFile;

    if ((!pZip) || (!pFilename) || ((archive_size) && (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
#ifdef _WIN32
    pFile = MZ_FOPENREAD(pFilename, L"rb");
#else
    pFile = MZ_FOPENREAD(pFilename, "rb");
#endif
    if (!pFile)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    file_size = archive_size;
    if (!file_size)
    {
        if (MZ_FSEEK64(pFile, 0, SEEK_END))
        {
            MZ_FCLOSE(pFile);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);
        }

        file_size = MZ_FTELL64(pFile);
    }

    /* TODO: Better sanity check archive_size and the # of actual remaining bytes */

    if (file_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
    {
        MZ_FCLOSE(pFile);
        return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
    }

    if (!mz_zip_reader_init_internal(pZip, flags))
    {
        MZ_FCLOSE(pFile);
        return MZ_FALSE;
    }

    pZip->m_zip_type = MZ_ZIP_TYPE_FILE;
    pZip->m_pRead = mz_zip_file_read_func;
    pZip->m_pIO_opaque = pZip;
    pZip->m_pState->m_pFile = pFile;
    pZip->m_archive_size = file_size;
    pZip->m_pState->m_file_archive_start_ofs = file_start_ofs;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}
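
/* Illustrative sketch (not part of the original miniz code): opening an
 * archive from a path. Note that in this source tree the filename parameter is
 * declared as const WCHAR_TYPE *, presumably a wide string on Windows builds;
 * the plain string below assumes a non-Windows build. The my_* name is
 * hypothetical. */
#if 0
static int my_count_entries_in_file(const char *path)
{
    mz_zip_archive zip;
    int n = -1;

    mz_zip_zero_struct(&zip);
    if (mz_zip_reader_init_file(&zip, path, 0))
    {
        n = (int)zip.m_total_files;
        mz_zip_reader_end(&zip);
    }
    return n;
}
#endif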

mz_bool mz_zip_reader_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint64 archive_size, mz_uint flags)
{
    mz_uint64 cur_file_ofs;

    if ((!pZip) || (!pFile))
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    cur_file_ofs = MZ_FTELL64(pFile);

    if (!archive_size)
    {
        if (MZ_FSEEK64(pFile, 0, SEEK_END))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);

        archive_size = MZ_FTELL64(pFile) - cur_file_ofs;

        if (archive_size < MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_NOT_AN_ARCHIVE);
    }

    if (!mz_zip_reader_init_internal(pZip, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_CFILE;
    pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;
    pZip->m_pState->m_pFile = pFile;
    pZip->m_archive_size = archive_size;
    pZip->m_pState->m_file_archive_start_ofs = cur_file_ofs;

    if (!mz_zip_reader_read_central_dir(pZip, flags))
    {
        mz_zip_reader_end_internal(pZip, MZ_FALSE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

#endif /* #ifndef MINIZ_NO_STDIO */

static MZ_FORCEINLINE const mz_uint8 *mz_zip_get_cdh(mz_zip_archive *pZip, mz_uint file_index)
{
    if ((!pZip) || (!pZip->m_pState) || (file_index >= pZip->m_total_files))
        return NULL;
    return &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
}

mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint m_bit_flag;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
    return (m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION)) != 0;
}

mz_bool mz_zip_reader_is_file_supported(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint bit_flag;
    mz_uint method;

    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
    bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);

    if ((method != 0) && (method != MZ_DEFLATED))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);
        return MZ_FALSE;
    }

    if (bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);
        return MZ_FALSE;
    }

    if (bit_flag & MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG)
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);
        return MZ_FALSE;
    }

    return MZ_TRUE;
}

mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index)
{
    mz_uint filename_len, attribute_mapping_id, external_attr;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    filename_len = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    if (filename_len)
    {
        if (*(p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_len - 1) == '/')
            return MZ_TRUE;
    }

    /* Bugfix: This code was also checking if the internal attribute was non-zero, which wasn't correct. */
    /* Most/all zip writers (hopefully) set DOS file/directory attributes in the low 16-bits, so check for the DOS directory flag and ignore the source OS ID in the created by field. */
    /* FIXME: Remove this check? Is it necessary - we already check the filename. */
    attribute_mapping_id = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS) >> 8;
    (void)attribute_mapping_id;

    external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
    if ((external_attr & MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG) != 0)
    {
        return MZ_TRUE;
    }

    return MZ_FALSE;
}
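
/* Illustrative sketch (not part of the original miniz code): walking the
 * central directory and printing only regular entries, skipping directory
 * placeholders. Assumes stdio is available; the my_* name is hypothetical. */
#if 0
static void my_print_regular_entries(mz_zip_archive *pZip)
{
    mz_uint i;
    for (i = 0; i < pZip->m_total_files; i++)
    {
        mz_zip_archive_file_stat st;
        if (mz_zip_reader_is_file_a_directory(pZip, i))
            continue;
        if (mz_zip_reader_file_stat(pZip, i, &st))
            printf("%s: %llu -> %llu bytes\n", st.m_filename,
                   (unsigned long long)st.m_comp_size,
                   (unsigned long long)st.m_uncomp_size);
    }
}
#endif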

static mz_bool mz_zip_file_stat_internal(mz_zip_archive *pZip, mz_uint file_index, const mz_uint8 *pCentral_dir_header, mz_zip_archive_file_stat *pStat, mz_bool *pFound_zip64_extra_data)
{
    mz_uint n;
    const mz_uint8 *p = pCentral_dir_header;

    if (pFound_zip64_extra_data)
        *pFound_zip64_extra_data = MZ_FALSE;

    if ((!p) || (!pStat))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* Extract fields from the central directory record. */
    pStat->m_file_index = file_index;
    pStat->m_central_dir_ofs = MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index);
    pStat->m_version_made_by = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_MADE_BY_OFS);
    pStat->m_version_needed = MZ_READ_LE16(p + MZ_ZIP_CDH_VERSION_NEEDED_OFS);
    pStat->m_bit_flag = MZ_READ_LE16(p + MZ_ZIP_CDH_BIT_FLAG_OFS);
    pStat->m_method = MZ_READ_LE16(p + MZ_ZIP_CDH_METHOD_OFS);
#ifndef MINIZ_NO_TIME
    pStat->m_time = mz_zip_dos_to_time_t(MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_TIME_OFS), MZ_READ_LE16(p + MZ_ZIP_CDH_FILE_DATE_OFS));
#endif
    pStat->m_crc32 = MZ_READ_LE32(p + MZ_ZIP_CDH_CRC32_OFS);
    pStat->m_comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
    pStat->m_uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);
    pStat->m_internal_attr = MZ_READ_LE16(p + MZ_ZIP_CDH_INTERNAL_ATTR_OFS);
    pStat->m_external_attr = MZ_READ_LE32(p + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS);
    pStat->m_local_header_ofs = MZ_READ_LE32(p + MZ_ZIP_CDH_LOCAL_HEADER_OFS);

    /* Copy as much of the filename and comment as possible. */
    n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE - 1);
    memcpy(pStat->m_filename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
    pStat->m_filename[n] = '\0';

    n = MZ_READ_LE16(p + MZ_ZIP_CDH_COMMENT_LEN_OFS);
    n = MZ_MIN(n, MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE - 1);
    pStat->m_comment_size = n;
    memcpy(pStat->m_comment, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS) + MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS), n);
    pStat->m_comment[n] = '\0';

    /* Set some flags for convenience */
    pStat->m_is_directory = mz_zip_reader_is_file_a_directory(pZip, file_index);
    pStat->m_is_encrypted = mz_zip_reader_is_file_encrypted(pZip, file_index);
    pStat->m_is_supported = mz_zip_reader_is_file_supported(pZip, file_index);

    /* See if we need to read any zip64 extended information fields. */
    /* Confusingly, these zip64 fields can be present even on non-zip64 archives (Debian zip on huge files piped from stdin to stdout creates them). */
    if (MZ_MAX(MZ_MAX(pStat->m_comp_size, pStat->m_uncomp_size), pStat->m_local_header_ofs) == MZ_UINT32_MAX)
    {
        /* Attempt to find zip64 extended information field in the entry's extra data */
        mz_uint32 extra_size_remaining = MZ_READ_LE16(p + MZ_ZIP_CDH_EXTRA_LEN_OFS);

        if (extra_size_remaining)
        {
            const mz_uint8 *pExtra_data = p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);

            do
            {
                mz_uint32 field_id;
                mz_uint32 field_data_size;

                if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                field_id = MZ_READ_LE16(pExtra_data);
                field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));

                if ((field_data_size + sizeof(mz_uint16) * 2) > extra_size_remaining)
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
                {
                    const mz_uint8 *pField_data = pExtra_data + sizeof(mz_uint16) * 2;
                    mz_uint32 field_data_remaining = field_data_size;

                    if (pFound_zip64_extra_data)
                        *pFound_zip64_extra_data = MZ_TRUE;

                    if (pStat->m_uncomp_size == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_uncomp_size = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    if (pStat->m_comp_size == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_comp_size = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    if (pStat->m_local_header_ofs == MZ_UINT32_MAX)
                    {
                        if (field_data_remaining < sizeof(mz_uint64))
                            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

                        pStat->m_local_header_ofs = MZ_READ_LE64(pField_data);
                        pField_data += sizeof(mz_uint64);
                        field_data_remaining -= sizeof(mz_uint64);
                    }

                    break;
                }

                pExtra_data += sizeof(mz_uint16) * 2 + field_data_size;
                extra_size_remaining = extra_size_remaining - sizeof(mz_uint16) * 2 - field_data_size;
            } while (extra_size_remaining);
        }
    }

    return MZ_TRUE;
}

static MZ_FORCEINLINE mz_bool mz_zip_string_equal(const char *pA, const char *pB, mz_uint len, mz_uint flags)
{
    mz_uint i;
    if (flags & MZ_ZIP_FLAG_CASE_SENSITIVE)
        return 0 == memcmp(pA, pB, len);
    for (i = 0; i < len; ++i)
        if (MZ_TOLOWER(pA[i]) != MZ_TOLOWER(pB[i]))
            return MZ_FALSE;
    return MZ_TRUE;
}

static MZ_FORCEINLINE int mz_zip_filename_compare(const mz_zip_array *pCentral_dir_array, const mz_zip_array *pCentral_dir_offsets, mz_uint l_index, const char *pR, mz_uint r_len)
{
    const mz_uint8 *pL = &MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_array, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(pCentral_dir_offsets, mz_uint32, l_index)), *pE;
    mz_uint l_len = MZ_READ_LE16(pL + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    mz_uint8 l = 0, r = 0;
    pL += MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
    pE = pL + MZ_MIN(l_len, r_len);
    while (pL < pE)
    {
        if ((l = MZ_TOLOWER(*pL)) != (r = MZ_TOLOWER(*pR)))
            break;
        pL++;
        pR++;
    }
    return (pL == pE) ? (int)(l_len - r_len) : (l - r);
}

static mz_bool mz_zip_locate_file_binary_search(mz_zip_archive *pZip, const char *pFilename, mz_uint32 *pIndex)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    const mz_zip_array *pCentral_dir_offsets = &pState->m_central_dir_offsets;
    const mz_zip_array *pCentral_dir = &pState->m_central_dir;
    mz_uint32 *pIndices = &MZ_ZIP_ARRAY_ELEMENT(&pState->m_sorted_central_dir_offsets, mz_uint32, 0);
    const uint32_t size = pZip->m_total_files;
    const mz_uint filename_len = (mz_uint)strlen(pFilename);

    if (pIndex)
        *pIndex = 0;

    if (size)
    {
        /* Yes, we could use uint32_t's, but then we would have to add special-case checks in the loop; honestly the major expense here on 32-bit CPUs will still be the filename compare. */
        mz_int64 l = 0, h = (mz_int64)size - 1;

        while (l <= h)
        {
            mz_int64 m = l + ((h - l) >> 1);
            uint32_t file_index = pIndices[(uint32_t)m];

            int comp = mz_zip_filename_compare(pCentral_dir, pCentral_dir_offsets, file_index, pFilename, filename_len);
            if (!comp)
            {
                if (pIndex)
                    *pIndex = file_index;
                return MZ_TRUE;
            }
            else if (comp < 0)
                l = m + 1;
            else
                h = m - 1;
        }
    }

    return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND);
}

int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags)
{
    mz_uint32 index;
    if (!mz_zip_reader_locate_file_v2(pZip, pName, pComment, flags, &index))
        return -1;
    else
        return (int)index;
}

mz_bool mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags, mz_uint32 *pIndex)
{
    mz_uint file_index;
    size_t name_len, comment_len;

    if (pIndex)
        *pIndex = 0;

    if ((!pZip) || (!pZip->m_pState) || (!pName))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* See if we can use a binary search */
    if (((pZip->m_pState->m_init_flags & MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY) == 0) &&
        (pZip->m_zip_mode == MZ_ZIP_MODE_READING) &&
        ((flags & (MZ_ZIP_FLAG_IGNORE_PATH | MZ_ZIP_FLAG_CASE_SENSITIVE)) == 0) && (!pComment) && (pZip->m_pState->m_sorted_central_dir_offsets.m_size))
    {
        return mz_zip_locate_file_binary_search(pZip, pName, pIndex);
    }

    /* Locate the entry by scanning the entire central directory */
    name_len = strlen(pName);
    if (name_len > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    comment_len = pComment ? strlen(pComment) : 0;
    if (comment_len > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    for (file_index = 0; file_index < pZip->m_total_files; file_index++)
    {
        const mz_uint8 *pHeader = &MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir, mz_uint8, MZ_ZIP_ARRAY_ELEMENT(&pZip->m_pState->m_central_dir_offsets, mz_uint32, file_index));
        mz_uint filename_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_FILENAME_LEN_OFS);
        const char *pFilename = (const char *)pHeader + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE;
        if (filename_len < name_len)
            continue;
        if (comment_len)
        {
            mz_uint file_extra_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_EXTRA_LEN_OFS), file_comment_len = MZ_READ_LE16(pHeader + MZ_ZIP_CDH_COMMENT_LEN_OFS);
            const char *pFile_comment = pFilename + filename_len + file_extra_len;
            if ((file_comment_len != comment_len) || (!mz_zip_string_equal(pComment, pFile_comment, file_comment_len, flags)))
                continue;
        }
        if ((flags & MZ_ZIP_FLAG_IGNORE_PATH) && (filename_len))
        {
            int ofs = filename_len - 1;
            do
            {
                if ((pFilename[ofs] == '/') || (pFilename[ofs] == '\\') || (pFilename[ofs] == ':'))
                    break;
            } while (--ofs >= 0);
            ofs++;
            pFilename += ofs;
            filename_len -= ofs;
        }
        if ((filename_len == name_len) && (mz_zip_string_equal(pName, pFilename, filename_len, flags)))
        {
            if (pIndex)
                *pIndex = file_index;
            return MZ_TRUE;
        }
    }

    return mz_zip_set_error(pZip, MZ_ZIP_FILE_NOT_FOUND);
}
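
/* Illustrative sketch (not part of the original miniz code): looking an entry
 * up by name and fetching its details in one step. With MZ_ZIP_FLAG_IGNORE_PATH
 * the match is on the basename only, and it is case-insensitive unless
 * MZ_ZIP_FLAG_CASE_SENSITIVE is also given. The my_* name is hypothetical. */
#if 0
static mz_bool my_find_entry(mz_zip_archive *pZip, const char *name, mz_zip_archive_file_stat *pStat)
{
    mz_uint32 index;
    /* NULL comment pointer: do not filter on the per-entry comment. */
    if (!mz_zip_reader_locate_file_v2(pZip, name, NULL, MZ_ZIP_FLAG_IGNORE_PATH, &index))
        return MZ_FALSE;
    return mz_zip_reader_file_stat(pZip, index, pStat);
}
#endif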

mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
    int status = TINFL_STATUS_DONE;
    mz_uint64 needed_size, cur_file_ofs, comp_remaining, out_buf_ofs = 0, read_buf_size, read_buf_ofs = 0, read_buf_avail;
    mz_zip_archive_file_stat file_stat;
    void *pRead_buf;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    tinfl_decompressor inflator;

    if ((!pZip) || (!pZip->m_pState) || ((buf_size) && (!pBuf)) || ((user_read_buf_size) && (!pUser_read_buf)) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_comp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    /* Ensure supplied output buffer is large enough. */
    needed_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? file_stat.m_comp_size : file_stat.m_uncomp_size;
    if (buf_size < needed_size)
        return mz_zip_set_error(pZip, MZ_ZIP_BUF_TOO_SMALL);

    /* Read and parse the local directory entry. */
    cur_file_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed data. */
        if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pBuf, (size_t)needed_size) != needed_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) == 0)
        {
            if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)
                return mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED);
        }
#endif

        return MZ_TRUE;
    }

    /* Decompress the file either directly from memory or from a file input buffer. */
    tinfl_init(&inflator);

    if (pZip->m_pState->m_pMem)
    {
        /* Read directly from the archive in memory. */
        pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
        read_buf_size = read_buf_avail = file_stat.m_comp_size;
        comp_remaining = 0;
    }
    else if (pUser_read_buf)
    {
        /* Use a user provided read buffer. */
        if (!user_read_buf_size)
            return MZ_FALSE;
        pRead_buf = (mz_uint8 *)pUser_read_buf;
        read_buf_size = user_read_buf_size;
        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }
    else
    {
        /* Temporarily allocate a read buffer. */
        read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
        if (((sizeof(size_t) == sizeof(mz_uint32))) && (read_buf_size > 0x7FFFFFFF))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }

    do
    {
        /* The size_t cast here should be OK because we've verified that the output buffer is >= file_stat.m_uncomp_size above */
        size_t in_buf_size, out_buf_size = (size_t)(file_stat.m_uncomp_size - out_buf_ofs);
        if ((!read_buf_avail) && (!pZip->m_pState->m_pMem))
        {
            read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
            if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
            {
                status = TINFL_STATUS_FAILED;
                mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                break;
            }
            cur_file_ofs += read_buf_avail;
            comp_remaining -= read_buf_avail;
            read_buf_ofs = 0;
        }
        in_buf_size = (size_t)read_buf_avail;
        status = tinfl_decompress(&inflator, (mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pBuf, (mz_uint8 *)pBuf + out_buf_ofs, &out_buf_size, TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF | (comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0));
        read_buf_avail -= in_buf_size;
        read_buf_ofs += in_buf_size;
        out_buf_ofs += out_buf_size;
    } while (status == TINFL_STATUS_NEEDS_MORE_INPUT);

    if (status == TINFL_STATUS_DONE)
    {
        /* Make sure the entire file was decompressed, and check its CRC. */
        if (out_buf_ofs != file_stat.m_uncomp_size)
        {
            mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE);
            status = TINFL_STATUS_FAILED;
        }
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        else if (mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, (size_t)file_stat.m_uncomp_size) != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_CRC_CHECK_FAILED);
            status = TINFL_STATUS_FAILED;
        }
#endif
    }

    if ((!pZip->m_pState->m_pMem) && (!pUser_read_buf))
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

    return status == TINFL_STATUS_DONE;
}

mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return MZ_FALSE;
    return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, pUser_read_buf, user_read_buf_size);
}

mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags)
{
    return mz_zip_reader_extract_to_mem_no_alloc(pZip, file_index, pBuf, buf_size, flags, NULL, 0);
}

mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags)
{
    return mz_zip_reader_extract_file_to_mem_no_alloc(pZip, pFilename, pBuf, buf_size, flags, NULL, 0);
}

void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags)
{
    mz_uint64 comp_size, uncomp_size, alloc_size;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    void *pBuf;

    if (pSize)
        *pSize = 0;

    if (!p)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return NULL;
    }

    comp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS);
    uncomp_size = MZ_READ_LE32(p + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS);

    alloc_size = (flags & MZ_ZIP_FLAG_COMPRESSED_DATA) ? comp_size : uncomp_size;
    if (((sizeof(size_t) == sizeof(mz_uint32))) && (alloc_size > 0x7FFFFFFF))
    {
        mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
        return NULL;
    }

    if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)alloc_size)))
    {
        mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        return NULL;
    }

    if (!mz_zip_reader_extract_to_mem(pZip, file_index, pBuf, (size_t)alloc_size, flags))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
        return NULL;
    }

    if (pSize)
        *pSize = (size_t)alloc_size;
    return pBuf;
}

void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
    {
        if (pSize)
            *pSize = 0;
        return NULL;
    }
    return mz_zip_reader_extract_to_heap(pZip, file_index, pSize, flags);
}
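
/* Illustrative sketch (not part of the original miniz code): extracting one
 * entry by name into a heap buffer. The buffer comes from pZip->m_pAlloc, so
 * it is released here with the matching pZip->m_pFree. The my_* name is
 * hypothetical. */
#if 0
static mz_bool my_extract_entry(mz_zip_archive *pZip, const char *name)
{
    size_t size = 0;
    void *pData = mz_zip_reader_extract_file_to_heap(pZip, name, &size, 0);
    if (!pData)
        return MZ_FALSE;

    /* ... use the 'size' decompressed bytes at pData ... */

    pZip->m_pFree(pZip->m_pAlloc_opaque, pData);
    return MZ_TRUE;
}
#endif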

mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags)
{
    int status = TINFL_STATUS_DONE;
    mz_uint file_crc32 = MZ_CRC32_INIT;
    mz_uint64 read_buf_size, read_buf_ofs = 0, read_buf_avail, comp_remaining, out_buf_ofs = 0, cur_file_ofs;
    mz_zip_archive_file_stat file_stat;
    void *pRead_buf = NULL;
    void *pWrite_buf = NULL;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

    if ((!pZip) || (!pZip->m_pState) || (!pCallback) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_comp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    /* Read and do some minimal validation of the local directory entry (this doesn't crack the zip64 stuff, which we already have from the central dir) */
    cur_file_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((cur_file_ofs + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    /* Decompress the file either directly from memory or from a file input buffer. */
    if (pZip->m_pState->m_pMem)
    {
        pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + cur_file_ofs;
        read_buf_size = read_buf_avail = file_stat.m_comp_size;
        comp_remaining = 0;
    }
    else
    {
        read_buf_size = MZ_MIN(file_stat.m_comp_size, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
        if (NULL == (pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)read_buf_size)))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        read_buf_avail = 0;
        comp_remaining = file_stat.m_comp_size;
    }

    if ((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed data. */
        if (pZip->m_pState->m_pMem)
        {
            if (((sizeof(size_t) == sizeof(mz_uint32))) && (file_stat.m_comp_size > MZ_UINT32_MAX))
                return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

            if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)file_stat.m_comp_size) != file_stat.m_comp_size)
            {
                mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                status = TINFL_STATUS_FAILED;
            }
            else if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
            {
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)file_stat.m_comp_size);
#endif
            }

            cur_file_ofs += file_stat.m_comp_size;
            out_buf_ofs += file_stat.m_comp_size;
            comp_remaining = 0;
        }
        else
        {
            while (comp_remaining)
            {
                read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
                if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                    status = TINFL_STATUS_FAILED;
                    break;
                }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                if (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
                {
                    file_crc32 = (mz_uint32)mz_crc32(file_crc32, (const mz_uint8 *)pRead_buf, (size_t)read_buf_avail);
                }
#endif

                if (pCallback(pOpaque, out_buf_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                    status = TINFL_STATUS_FAILED;
                    break;
                }

                cur_file_ofs += read_buf_avail;
                out_buf_ofs += read_buf_avail;
                comp_remaining -= read_buf_avail;
            }
        }
    }
    else
    {
        tinfl_decompressor inflator;
        tinfl_init(&inflator);

        if (NULL == (pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            status = TINFL_STATUS_FAILED;
        }
        else
        {
            do
            {
                mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pWrite_buf + (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
                size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));
                if ((!read_buf_avail) && (!pZip->m_pState->m_pMem))
                {
                    read_buf_avail = MZ_MIN(read_buf_size, comp_remaining);
                    if (pZip->m_pRead(pZip->m_pIO_opaque, cur_file_ofs, pRead_buf, (size_t)read_buf_avail) != read_buf_avail)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }
                    cur_file_ofs += read_buf_avail;
                    comp_remaining -= read_buf_avail;
                    read_buf_ofs = 0;
                }

                in_buf_size = (size_t)read_buf_avail;
                status = tinfl_decompress(&inflator, (const mz_uint8 *)pRead_buf + read_buf_ofs, &in_buf_size, (mz_uint8 *)pWrite_buf, pWrite_buf_cur, &out_buf_size, comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
                read_buf_avail -= in_buf_size;
                read_buf_ofs += in_buf_size;

                if (out_buf_size)
                {
                    if (pCallback(pOpaque, out_buf_ofs, pWrite_buf_cur, out_buf_size) != out_buf_size)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_WRITE_CALLBACK_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                    file_crc32 = (mz_uint32)mz_crc32(file_crc32, pWrite_buf_cur, out_buf_size);
#endif
                    if ((out_buf_ofs += out_buf_size) > file_stat.m_uncomp_size)
                    {
                        mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                        status = TINFL_STATUS_FAILED;
                        break;
                    }
                }
            } while ((status == TINFL_STATUS_NEEDS_MORE_INPUT) || (status == TINFL_STATUS_HAS_MORE_OUTPUT));
        }
    }

    if ((status == TINFL_STATUS_DONE) && (!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)))
    {
        /* Make sure the entire file was decompressed, and check its CRC. */
        if (out_buf_ofs != file_stat.m_uncomp_size)
        {
            mz_zip_set_error(pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE);
            status = TINFL_STATUS_FAILED;
        }
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        else if (file_crc32 != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_DECOMPRESSION_FAILED);
            status = TINFL_STATUS_FAILED;
        }
#endif
    }

    if (!pZip->m_pState->m_pMem)
        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);

    if (pWrite_buf)
        pZip->m_pFree(pZip->m_pAlloc_opaque, pWrite_buf);

    return status == TINFL_STATUS_DONE;
}

mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_callback(pZip, file_index, pCallback, pOpaque, flags);
}
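
/* Illustrative sketch (not part of the original miniz code): streaming an
 * entry straight to a stdio stream through the callback interface. The
 * callback matches the mz_file_write_func shape used above; data is delivered
 * at monotonically increasing offsets, so a plain fwrite suffices. The my_*
 * names are hypothetical. */
#if 0
static size_t my_write_to_stream(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    (void)file_ofs; /* data arrives sequentially */
    return fwrite(pBuf, 1, n, (FILE *)pOpaque);
}

static mz_bool my_extract_to_stream(mz_zip_archive *pZip, const char *name, FILE *out)
{
    return mz_zip_reader_extract_file_to_callback(pZip, name, my_write_to_stream, out, 0);
}
#endif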

mz_zip_reader_extract_iter_state* mz_zip_reader_extract_iter_new(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags)
{
    mz_zip_reader_extract_iter_state *pState;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;

    /* Argument sanity check */
    if ((!pZip) || (!pZip->m_pState))
        return NULL;

    /* Allocate an iterator status structure */
    pState = (mz_zip_reader_extract_iter_state*)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_reader_extract_iter_state));
    if (!pState)
    {
        mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        return NULL;
    }

    /* Fetch file details */
    if (!mz_zip_reader_file_stat(pZip, file_index, &pState->file_stat))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Encryption and patch files are not supported. */
    if (pState->file_stat.m_bit_flag & (MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_IS_ENCRYPTED | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_USES_STRONG_ENCRYPTION | MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_COMPRESSED_PATCH_FLAG))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* This function only supports decompressing stored and deflate. */
    if ((!(flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (pState->file_stat.m_method != 0) && (pState->file_stat.m_method != MZ_DEFLATED))
    {
        mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Init state - save args */
    pState->pZip = pZip;
    pState->flags = flags;

    /* Init state - reset variables to defaults */
    pState->status = TINFL_STATUS_DONE;
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
    pState->file_crc32 = MZ_CRC32_INIT;
#endif
    pState->read_buf_ofs = 0;
    pState->out_buf_ofs = 0;
    pState->pRead_buf = NULL;
    pState->pWrite_buf = NULL;
    pState->out_blk_remain = 0;

    /* Read and parse the local directory entry. */
    pState->cur_file_ofs = pState->file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, pState->cur_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    pState->cur_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS) + MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    if ((pState->cur_file_ofs + pState->file_stat.m_comp_size) > pZip->m_archive_size)
    {
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
        return NULL;
    }

    /* Decompress the file either directly from memory or from a file input buffer. */
    if (pZip->m_pState->m_pMem)
    {
        pState->pRead_buf = (mz_uint8 *)pZip->m_pState->m_pMem + pState->cur_file_ofs;
        pState->read_buf_size = pState->read_buf_avail = pState->file_stat.m_comp_size;
        pState->comp_remaining = pState->file_stat.m_comp_size;
    }
    else
    {
        if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method)))
        {
            /* Decompression required, therefore intermediate read buffer required */
            pState->read_buf_size = MZ_MIN(pState->file_stat.m_comp_size, MZ_ZIP_MAX_IO_BUF_SIZE);
            if (NULL == (pState->pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)pState->read_buf_size)))
            {
                mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
                pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
                return NULL;
            }
        }
        else
        {
            /* Decompression not required - we will be reading directly into user buffer, no temp buf required */
            pState->read_buf_size = 0;
        }
        pState->read_buf_avail = 0;
        pState->comp_remaining = pState->file_stat.m_comp_size;
    }

    if (!((flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method)))
    {
        /* Decompression required, init decompressor */
        tinfl_init( &pState->inflator );

        /* Allocate write buffer */
        if (NULL == (pState->pWrite_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, TINFL_LZ_DICT_SIZE)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            if (pState->pRead_buf)
                pZip->m_pFree(pZip->m_pAlloc_opaque, pState->pRead_buf);
            pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
            return NULL;
        }
    }

    return pState;
}

mz_zip_reader_extract_iter_state* mz_zip_reader_extract_file_iter_new(mz_zip_archive *pZip, const char *pFilename, mz_uint flags)
{
    mz_uint32 file_index;

    /* Locate file index by name */
    if (!mz_zip_reader_locate_file_v2(pZip, pFilename, NULL, flags, &file_index))
        return NULL;

    /* Construct iterator */
    return mz_zip_reader_extract_iter_new(pZip, file_index, flags);
}
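
/* Illustrative sketch (not part of the original miniz code): pull-style
 * extraction with the iterator API, reading the entry in fixed-size chunks.
 * The iterator state is assumed to be released with
 * mz_zip_reader_extract_iter_free(), defined further down in this file. The
 * my_* name is hypothetical. */
#if 0
static mz_uint64 my_drain_entry(mz_zip_archive *pZip, const char *name)
{
    char chunk[4096];
    mz_uint64 total = 0;
    size_t got;
    mz_zip_reader_extract_iter_state *pIter =
        mz_zip_reader_extract_file_iter_new(pZip, name, 0);

    if (!pIter)
        return 0;

    while ((got = mz_zip_reader_extract_iter_read(pIter, chunk, sizeof(chunk))) > 0)
        total += got; /* consume 'got' bytes from 'chunk' here */

    mz_zip_reader_extract_iter_free(pIter);
    return total;
}
#endif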

size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state* pState, void* pvBuf, size_t buf_size)
{
    size_t copied_to_caller = 0;

    /* Argument sanity check */
    if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState) || (!pvBuf))
        return 0;

    if ((pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA) || (!pState->file_stat.m_method))
    {
        /* The file is stored or the caller has requested the compressed data, calc amount to return. */
        copied_to_caller = MZ_MIN( buf_size, pState->comp_remaining );

        /* Is the zip in memory, or does it require reading from a file? */
        if (pState->pZip->m_pState->m_pMem)
        {
            /* Copy data to caller's buffer */
            memcpy( pvBuf, pState->pRead_buf, copied_to_caller );
            pState->pRead_buf = ((mz_uint8*)pState->pRead_buf) + copied_to_caller;
        }
        else
        {
            /* Read directly into caller's buffer */
            if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pvBuf, copied_to_caller) != copied_to_caller)
            {
                /* Failed to read all that was asked for, flag failure and alert user */
                mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED);
                pState->status = TINFL_STATUS_FAILED;
                copied_to_caller = 0;
            }
        }

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        /* Compute CRC if not returning compressed data only */
        if (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
            pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, (const mz_uint8 *)pvBuf, copied_to_caller);
#endif

        /* Advance offsets, dec counters */
        pState->cur_file_ofs += copied_to_caller;
        pState->out_buf_ofs += copied_to_caller;
        pState->comp_remaining -= copied_to_caller;
    }
    else
    {
        do
        {
            /* Calc ptr to write buffer - given current output pos and block size */
            mz_uint8 *pWrite_buf_cur = (mz_uint8 *)pState->pWrite_buf + (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));

            /* Calc max output size - given current output pos and block size */
            size_t in_buf_size, out_buf_size = TINFL_LZ_DICT_SIZE - (pState->out_buf_ofs & (TINFL_LZ_DICT_SIZE - 1));

            if (!pState->out_blk_remain)
            {
                /* Read more data from file if none available (and reading from file) */
                if ((!pState->read_buf_avail) && (!pState->pZip->m_pState->m_pMem))
                {
                    /* Calc read size */
                    pState->read_buf_avail = MZ_MIN(pState->read_buf_size, pState->comp_remaining);
                    if (pState->pZip->m_pRead(pState->pZip->m_pIO_opaque, pState->cur_file_ofs, pState->pRead_buf, (size_t)pState->read_buf_avail) != pState->read_buf_avail)
                    {
                        mz_zip_set_error(pState->pZip, MZ_ZIP_FILE_READ_FAILED);
                        pState->status = TINFL_STATUS_FAILED;
                        break;
                    }

                    /* Advance offsets, dec counters */
                    pState->cur_file_ofs += pState->read_buf_avail;
                    pState->comp_remaining -= pState->read_buf_avail;
                    pState->read_buf_ofs = 0;
                }

                /* Perform decompression */
                in_buf_size = (size_t)pState->read_buf_avail;
                pState->status = tinfl_decompress(&pState->inflator, (const mz_uint8 *)pState->pRead_buf + pState->read_buf_ofs, &in_buf_size, (mz_uint8 *)pState->pWrite_buf, pWrite_buf_cur, &out_buf_size, pState->comp_remaining ? TINFL_FLAG_HAS_MORE_INPUT : 0);
                pState->read_buf_avail -= in_buf_size;
                pState->read_buf_ofs += in_buf_size;

                /* Update current output block size remaining */
                pState->out_blk_remain = out_buf_size;
            }

            if (pState->out_blk_remain)
            {
                /* Calc amount to return. */
                size_t to_copy = MZ_MIN( (buf_size - copied_to_caller), pState->out_blk_remain );

                /* Copy data to caller's buffer */
                memcpy( (uint8_t*)pvBuf + copied_to_caller, pWrite_buf_cur, to_copy );

#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
                /* Perform CRC */
                pState->file_crc32 = (mz_uint32)mz_crc32(pState->file_crc32, pWrite_buf_cur, to_copy);
#endif

                /* Decrement data consumed from block */
                pState->out_blk_remain -= to_copy;

                /* Inc output offset, while performing sanity check */
                if ((pState->out_buf_ofs += to_copy) > pState->file_stat.m_uncomp_size)
                {
                    mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED);
                    pState->status = TINFL_STATUS_FAILED;
                    break;
                }

                /* Increment counter of data copied to caller */
                copied_to_caller += to_copy;
            }
        } while ( (copied_to_caller < buf_size) && ((pState->status == TINFL_STATUS_NEEDS_MORE_INPUT) || (pState->status == TINFL_STATUS_HAS_MORE_OUTPUT)) );
    }

    /* Return how many bytes were copied into user buffer */
    return copied_to_caller;
}

mz_bool mz_zip_reader_extract_iter_free(mz_zip_reader_extract_iter_state* pState)
{
    int status;

    /* Argument sanity check */
    if ((!pState) || (!pState->pZip) || (!pState->pZip->m_pState))
        return MZ_FALSE;

    /* Was decompression completed and requested? */
    if ((pState->status == TINFL_STATUS_DONE) && (!(pState->flags & MZ_ZIP_FLAG_COMPRESSED_DATA)))
    {
        /* Make sure the entire file was decompressed, and check its CRC. */
        if (pState->out_buf_ofs != pState->file_stat.m_uncomp_size)
        {
            mz_zip_set_error(pState->pZip, MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE);
            pState->status = TINFL_STATUS_FAILED;
        }
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
        else if (pState->file_crc32 != pState->file_stat.m_crc32)
        {
            mz_zip_set_error(pState->pZip, MZ_ZIP_DECOMPRESSION_FAILED);
            pState->status = TINFL_STATUS_FAILED;
        }
#endif
    }

    /* Free buffers */
    if (!pState->pZip->m_pState->m_pMem)
        pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pRead_buf);
    if (pState->pWrite_buf)
        pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState->pWrite_buf);

    /* Save status */
    status = pState->status;

    /* Free context */
    pState->pZip->m_pFree(pState->pZip->m_pAlloc_opaque, pState);

    return status == TINFL_STATUS_DONE;
}
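
/* Illustrative usage sketch (editor's addition, compiled out): pulling a single
 * entry through the streaming extraction iterator defined above into a
 * caller-supplied buffer. The helper name and the buffer-capacity handling are
 * assumptions made for the example, not part of the miniz API. */
#if 0
static size_t example_extract_iter_to_buffer(mz_zip_archive *pZip, mz_uint file_index, void *pOut, size_t out_capacity)
{
    mz_zip_reader_extract_iter_state *pIter = mz_zip_reader_extract_iter_new(pZip, file_index, 0);
    size_t total = 0, n;

    if (!pIter)
        return 0;

    /* Read decompressed data in chunks until the iterator is exhausted or the
     * destination buffer is full. */
    while ((total < out_capacity) &&
           ((n = mz_zip_reader_extract_iter_read(pIter, (mz_uint8 *)pOut + total, out_capacity - total)) > 0))
        total += n;

    /* _iter_free() also reports whether decompression completed and the CRC matched. */
    if (!mz_zip_reader_extract_iter_free(pIter))
        total = 0;

    return total;
}
#endif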

#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_callback(void *pOpaque, mz_uint64 ofs, const void *pBuf, size_t n)
{
    (void)ofs;

    return MZ_FWRITE(pBuf, 1, n, (MZ_FILE *)pOpaque);
}

mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags)
{
    mz_bool status;
    mz_zip_archive_file_stat file_stat;
    MZ_FILE *pFile;

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    if ((file_stat.m_is_directory) || (!file_stat.m_is_supported))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);

    pFile = MZ_FOPEN(pDst_filename, "wb");
    if (!pFile)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    status = mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags);

    if (MZ_FCLOSE(pFile) == EOF)
    {
        if (status)
            mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED);

        status = MZ_FALSE;
    }

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO)
    if (status)
        mz_zip_set_file_times(pDst_filename, file_stat.m_time, file_stat.m_time);
#endif

    return status;
}

mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_file(pZip, file_index, pDst_filename, flags);
}

mz_bool mz_zip_reader_extract_to_cfile(mz_zip_archive *pZip, mz_uint file_index, MZ_FILE *pFile, mz_uint flags)
{
    mz_zip_archive_file_stat file_stat;

    if (!mz_zip_reader_file_stat(pZip, file_index, &file_stat))
        return MZ_FALSE;

    if ((file_stat.m_is_directory) || (!file_stat.m_is_supported))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);

    return mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_file_write_callback, pFile, flags);
}

mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, const char *pArchive_filename, MZ_FILE *pFile, mz_uint flags)
{
    mz_uint32 file_index;
    if (!mz_zip_reader_locate_file_v2(pZip, pArchive_filename, NULL, flags, &file_index))
        return MZ_FALSE;

    return mz_zip_reader_extract_to_cfile(pZip, file_index, pFile, flags);
}
#endif /* #ifndef MINIZ_NO_STDIO */
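
/* Illustrative usage sketch (editor's addition, compiled out): the one-call
 * convenience path provided by the stdio helpers above. Both path strings are
 * placeholders; stdio support (MINIZ_NO_STDIO not defined) is assumed. */
#if 0
static mz_bool example_extract_one_entry(mz_zip_archive *pZip)
{
    /* Locates "docs/readme.txt" in the central directory, writes the
     * decompressed data to "readme.txt" and restores the stored mtime. */
    return mz_zip_reader_extract_file_to_file(pZip, "docs/readme.txt", "readme.txt", 0);
}
#endif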

static size_t mz_zip_compute_crc32_callback(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_uint32 *p = (mz_uint32 *)pOpaque;
    (void)file_ofs;
    *p = (mz_uint32)mz_crc32(*p, (const mz_uint8 *)pBuf, n);
    return n;
}

mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags)
{
    mz_zip_archive_file_stat file_stat;
    mz_zip_internal_state *pState;
    const mz_uint8 *pCentral_dir_header;
    mz_bool found_zip64_ext_data_in_cdir = MZ_FALSE;
    mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    mz_uint64 local_header_ofs = 0;
    mz_uint32 local_header_filename_len, local_header_extra_len, local_header_crc32;
    mz_uint64 local_header_comp_size, local_header_uncomp_size;
    mz_uint32 uncomp_crc32 = MZ_CRC32_INIT;
    mz_bool has_data_descriptor;
    mz_uint32 local_header_bit_flags;

    mz_zip_array file_data_array;
    mz_zip_array_init(&file_data_array, 1);

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (file_index > pZip->m_total_files)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    pCentral_dir_header = mz_zip_get_cdh(pZip, file_index);

    if (!mz_zip_file_stat_internal(pZip, file_index, pCentral_dir_header, &file_stat, &found_zip64_ext_data_in_cdir))
        return MZ_FALSE;

    /* A directory or zero length file */
    if ((file_stat.m_is_directory) || (!file_stat.m_uncomp_size))
        return MZ_TRUE;

    /* Encryption and patch files are not supported. */
    if (file_stat.m_is_encrypted)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_ENCRYPTION);

    /* This function only supports stored and deflate. */
    if ((file_stat.m_method != 0) && (file_stat.m_method != MZ_DEFLATED))
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_METHOD);

    if (!file_stat.m_is_supported)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_FEATURE);

    /* Read and parse the local directory entry. */
    local_header_ofs = file_stat.m_local_header_ofs;
    if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    local_header_filename_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS);
    local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    local_header_comp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS);
    local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS);
    local_header_crc32 = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_CRC32_OFS);
    local_header_bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
    has_data_descriptor = (local_header_bit_flags & 8) != 0;

    if (local_header_filename_len != strlen(file_stat.m_filename))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if ((local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size) > pZip->m_archive_size)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    if (!mz_zip_array_resize(pZip, &file_data_array, MZ_MAX(local_header_filename_len, local_header_extra_len), MZ_FALSE))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    if (local_header_filename_len)
    {
        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE, file_data_array.m_p, local_header_filename_len) != local_header_filename_len)
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        /* I've seen 1 archive that had the same pathname, but used backslashes in the local dir and forward slashes in the central dir. Do we care about this? For now, this case will fail validation. */
        if (memcmp(file_stat.m_filename, file_data_array.m_p, local_header_filename_len) != 0)
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }

    if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX)))
    {
        mz_uint32 extra_size_remaining = local_header_extra_len;
        const mz_uint8 *pExtra_data = (const mz_uint8 *)file_data_array.m_p;

        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len, file_data_array.m_p, local_header_extra_len) != local_header_extra_len)
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        do
        {
            mz_uint32 field_id, field_data_size, field_total_size;

            if (extra_size_remaining < (sizeof(mz_uint16) * 2))
            {
                /* Returning directly here would leak file_data_array. */
                mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
                goto handle_failure;
            }

            field_id = MZ_READ_LE16(pExtra_data);
            field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
            field_total_size = field_data_size + sizeof(mz_uint16) * 2;

            if (field_total_size > extra_size_remaining)
            {
                /* Returning directly here would leak file_data_array. */
                mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
                goto handle_failure;
            }

            if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
            {
                const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32);

                if (field_data_size < sizeof(mz_uint64) * 2)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
                    goto handle_failure;
                }

                local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data);
                local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64));

                found_zip64_ext_data_in_ldir = MZ_TRUE;
                break;
            }

            pExtra_data += field_total_size;
            extra_size_remaining -= field_total_size;
        } while (extra_size_remaining);
    }

    /* TODO: parse local header extra data when local_header_comp_size is 0xFFFFFFFF! (big_descriptor.zip) */
    /* I've seen zips in the wild with the data descriptor bit set, but proper local header values and bogus data descriptors */
    if ((has_data_descriptor) && (!local_header_comp_size) && (!local_header_crc32))
    {
        mz_uint8 descriptor_buf[32];
        mz_bool has_id;
        const mz_uint8 *pSrc;
        mz_uint32 file_crc32;
        mz_uint64 comp_size = 0, uncomp_size = 0;

        mz_uint32 num_descriptor_uint32s = ((pState->m_zip64) || (found_zip64_ext_data_in_ldir)) ? 6 : 4;

        if (pZip->m_pRead(pZip->m_pIO_opaque, local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_len + local_header_extra_len + file_stat.m_comp_size, descriptor_buf, sizeof(mz_uint32) * num_descriptor_uint32s) != (sizeof(mz_uint32) * num_descriptor_uint32s))
        {
            mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            goto handle_failure;
        }

        has_id = (MZ_READ_LE32(descriptor_buf) == MZ_ZIP_DATA_DESCRIPTOR_ID);
        pSrc = has_id ? (descriptor_buf + sizeof(mz_uint32)) : descriptor_buf;

        file_crc32 = MZ_READ_LE32(pSrc);

        if ((pState->m_zip64) || (found_zip64_ext_data_in_ldir))
        {
            comp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32));
            uncomp_size = MZ_READ_LE64(pSrc + sizeof(mz_uint32) + sizeof(mz_uint64));
        }
        else
        {
            comp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32));
            uncomp_size = MZ_READ_LE32(pSrc + sizeof(mz_uint32) + sizeof(mz_uint32));
        }

        if ((file_crc32 != file_stat.m_crc32) || (comp_size != file_stat.m_comp_size) || (uncomp_size != file_stat.m_uncomp_size))
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }
    else
    {
        if ((local_header_crc32 != file_stat.m_crc32) || (local_header_comp_size != file_stat.m_comp_size) || (local_header_uncomp_size != file_stat.m_uncomp_size))
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            goto handle_failure;
        }
    }

    mz_zip_array_clear(pZip, &file_data_array);

    if ((flags & MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY) == 0)
    {
        if (!mz_zip_reader_extract_to_callback(pZip, file_index, mz_zip_compute_crc32_callback, &uncomp_crc32, 0))
            return MZ_FALSE;

        /* 1 more check to be sure, although the extract checks too. */
        if (uncomp_crc32 != file_stat.m_crc32)
        {
            mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
            return MZ_FALSE;
        }
    }

    return MZ_TRUE;

handle_failure:
    mz_zip_array_clear(pZip, &file_data_array);
    return MZ_FALSE;
}

mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags)
{
    mz_zip_internal_state *pState;
    uint32_t i;

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    /* Basic sanity checks */
    if (!pState->m_zip64)
    {
        if (pZip->m_total_files > MZ_UINT16_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (pZip->m_archive_size > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }
    else
    {
        if (pZip->m_total_files >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (pState->m_central_dir.m_size >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }

    for (i = 0; i < pZip->m_total_files; i++)
    {
        if (MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG & flags)
        {
            mz_uint32 found_index;
            mz_zip_archive_file_stat stat;

            if (!mz_zip_reader_file_stat(pZip, i, &stat))
                return MZ_FALSE;

            if (!mz_zip_reader_locate_file_v2(pZip, stat.m_filename, NULL, 0, &found_index))
                return MZ_FALSE;

            /* This check can fail if there are duplicate filenames in the archive (which we don't check for when writing - that's up to the user) */
            if (found_index != i)
                return mz_zip_set_error(pZip, MZ_ZIP_VALIDATION_FAILED);
        }

        if (!mz_zip_validate_file(pZip, i, flags))
            return MZ_FALSE;
    }

    return MZ_TRUE;
}

mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr)
{
    mz_bool success = MZ_TRUE;
    mz_zip_archive zip;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    if ((!pMem) || (!size))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    mz_zip_zero_struct(&zip);

    if (!mz_zip_reader_init_mem(&zip, pMem, size, flags))
    {
        if (pErr)
            *pErr = zip.m_last_error;
        return MZ_FALSE;
    }

    if (!mz_zip_validate_archive(&zip, flags))
    {
        actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (!mz_zip_reader_end_internal(&zip, success))
    {
        if (!actual_err)
            actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (pErr)
        *pErr = actual_err;

    return success;
}
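
/* Illustrative usage sketch (editor's addition, compiled out): validating an
 * archive held in memory with the function above and reporting why it failed.
 * mz_zip_get_error_string() is miniz's helper for turning an mz_zip_error into
 * text; printf() is used purely for the example. Passing
 * MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG instead of 0 would additionally
 * exercise the by-name lookups. */
#if 0
static mz_bool example_validate_buffer(const void *pMem, size_t size)
{
    mz_zip_error err = MZ_ZIP_NO_ERROR;

    if (!mz_zip_validate_mem_archive(pMem, size, 0, &err))
    {
        printf("archive validation failed: %s\n", mz_zip_get_error_string(err));
        return MZ_FALSE;
    }
    return MZ_TRUE;
}
#endif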

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_validate_file_archive(const WCHAR_TYPE *pFilename, mz_uint flags, mz_zip_error *pErr)
{
    mz_bool success = MZ_TRUE;
    mz_zip_archive zip;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    if (!pFilename)
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    mz_zip_zero_struct(&zip);

    if (!mz_zip_reader_init_file_v2(&zip, pFilename, flags, 0, 0))
    {
        if (pErr)
            *pErr = zip.m_last_error;
        return MZ_FALSE;
    }

    if (!mz_zip_validate_archive(&zip, flags))
    {
        actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (!mz_zip_reader_end_internal(&zip, success))
    {
        if (!actual_err)
            actual_err = zip.m_last_error;
        success = MZ_FALSE;
    }

    if (pErr)
        *pErr = actual_err;

    return success;
}
#endif /* #ifndef MINIZ_NO_STDIO */

/* ------------------- .ZIP archive writing */

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

static MZ_FORCEINLINE void mz_write_le16(mz_uint8 *p, mz_uint16 v)
{
    p[0] = (mz_uint8)v;
    p[1] = (mz_uint8)(v >> 8);
}
static MZ_FORCEINLINE void mz_write_le32(mz_uint8 *p, mz_uint32 v)
{
    p[0] = (mz_uint8)v;
    p[1] = (mz_uint8)(v >> 8);
    p[2] = (mz_uint8)(v >> 16);
    p[3] = (mz_uint8)(v >> 24);
}
static MZ_FORCEINLINE void mz_write_le64(mz_uint8 *p, mz_uint64 v)
{
    mz_write_le32(p, (mz_uint32)v);
    mz_write_le32(p + sizeof(mz_uint32), (mz_uint32)(v >> 32));
}

#define MZ_WRITE_LE16(p, v) mz_write_le16((mz_uint8 *)(p), (mz_uint16)(v))
#define MZ_WRITE_LE32(p, v) mz_write_le32((mz_uint8 *)(p), (mz_uint32)(v))
#define MZ_WRITE_LE64(p, v) mz_write_le64((mz_uint8 *)(p), (mz_uint64)(v))

static size_t mz_zip_heap_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_zip_internal_state *pState = pZip->m_pState;
    mz_uint64 new_size = MZ_MAX(file_ofs + n, pState->m_mem_size);

    if (!n)
        return 0;

    /* An allocation this big is likely to just fail on 32-bit systems, so don't even go there. */
    if ((sizeof(size_t) == sizeof(mz_uint32)) && (new_size > 0x7FFFFFFF))
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
        return 0;
    }

    if (new_size > pState->m_mem_capacity)
    {
        void *pNew_block;
        size_t new_capacity = MZ_MAX(64, pState->m_mem_capacity);

        while (new_capacity < new_size)
            new_capacity *= 2;

        if (NULL == (pNew_block = pZip->m_pRealloc(pZip->m_pAlloc_opaque, pState->m_pMem, 1, new_capacity)))
        {
            mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            return 0;
        }

        pState->m_pMem = pNew_block;
        pState->m_mem_capacity = new_capacity;
    }
    memcpy((mz_uint8 *)pState->m_pMem + file_ofs, pBuf, n);
    pState->m_mem_size = (size_t)new_size;
    return n;
}

static mz_bool mz_zip_writer_end_internal(mz_zip_archive *pZip, mz_bool set_last_error)
{
    mz_zip_internal_state *pState;
    mz_bool status = MZ_TRUE;

    if ((!pZip) || (!pZip->m_pState) || (!pZip->m_pAlloc) || (!pZip->m_pFree) || ((pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) && (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED)))
    {
        if (set_last_error)
            mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return MZ_FALSE;
    }

    pState = pZip->m_pState;
    pZip->m_pState = NULL;
    mz_zip_array_clear(pZip, &pState->m_central_dir);
    mz_zip_array_clear(pZip, &pState->m_central_dir_offsets);
    mz_zip_array_clear(pZip, &pState->m_sorted_central_dir_offsets);

#ifndef MINIZ_NO_STDIO
    if (pState->m_pFile)
    {
        if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
        {
            if (MZ_FCLOSE(pState->m_pFile) == EOF)
            {
                if (set_last_error)
                    mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED);
                status = MZ_FALSE;
            }
        }

        pState->m_pFile = NULL;
    }
#endif /* #ifndef MINIZ_NO_STDIO */

    if ((pZip->m_pWrite == mz_zip_heap_write_func) && (pState->m_pMem))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pState->m_pMem);
        pState->m_pMem = NULL;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pState);
    pZip->m_zip_mode = MZ_ZIP_MODE_INVALID;
    return status;
}

mz_bool mz_zip_writer_init_v2(mz_zip_archive *pZip, mz_uint64 existing_size, mz_uint flags)
{
    mz_bool zip64 = (flags & MZ_ZIP_FLAG_WRITE_ZIP64) != 0;

    if ((!pZip) || (pZip->m_pState) || (!pZip->m_pWrite) || (pZip->m_zip_mode != MZ_ZIP_MODE_INVALID))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
    {
        if (!pZip->m_pRead)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    if (pZip->m_file_offset_alignment)
    {
        /* Ensure user specified file offset alignment is a power of 2. */
        if (pZip->m_file_offset_alignment & (pZip->m_file_offset_alignment - 1))
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    if (!pZip->m_pAlloc)
        pZip->m_pAlloc = miniz_def_alloc_func;
    if (!pZip->m_pFree)
        pZip->m_pFree = miniz_def_free_func;
    if (!pZip->m_pRealloc)
        pZip->m_pRealloc = miniz_def_realloc_func;

    pZip->m_archive_size = existing_size;
    pZip->m_central_directory_file_ofs = 0;
    pZip->m_total_files = 0;

    if (NULL == (pZip->m_pState = (mz_zip_internal_state *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(mz_zip_internal_state))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    memset(pZip->m_pState, 0, sizeof(mz_zip_internal_state));

    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir, sizeof(mz_uint8));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_central_dir_offsets, sizeof(mz_uint32));
    MZ_ZIP_ARRAY_SET_ELEMENT_SIZE(&pZip->m_pState->m_sorted_central_dir_offsets, sizeof(mz_uint32));

    pZip->m_pState->m_zip64 = zip64;
    pZip->m_pState->m_zip64_has_extended_info_fields = zip64;

    pZip->m_zip_type = MZ_ZIP_TYPE_USER;
    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;

    return MZ_TRUE;
}

mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size)
{
    return mz_zip_writer_init_v2(pZip, existing_size, 0);
}

mz_bool mz_zip_writer_init_heap_v2(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size, mz_uint flags)
{
    pZip->m_pWrite = mz_zip_heap_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_mem_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags))
        return MZ_FALSE;

    pZip->m_zip_type = MZ_ZIP_TYPE_HEAP;

    if (0 != (initial_allocation_size = MZ_MAX(initial_allocation_size, size_to_reserve_at_beginning)))
    {
        if (NULL == (pZip->m_pState->m_pMem = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, initial_allocation_size)))
        {
            mz_zip_writer_end_internal(pZip, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }
        pZip->m_pState->m_mem_capacity = initial_allocation_size;
    }

    return MZ_TRUE;
}

mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size)
{
    return mz_zip_writer_init_heap_v2(pZip, size_to_reserve_at_beginning, initial_allocation_size, 0);
}
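
/* Illustrative usage sketch (editor's addition, compiled out): building a small
 * archive entirely in memory with the heap writer initialized above.
 * mz_zip_writer_add_mem(), mz_zip_writer_finalize_heap_archive() and
 * mz_zip_writer_end() are the usual companions from miniz's public API; the
 * entry name and payload are placeholders. With the default allocators the
 * returned block belongs to the caller and is released with free(). */
#if 0
static void *example_build_zip_in_memory(size_t *pOut_size)
{
    mz_zip_archive zip;
    void *pBuf = NULL;
    const char *pText = "hello, zip";

    *pOut_size = 0;
    mz_zip_zero_struct(&zip);

    if (!mz_zip_writer_init_heap(&zip, 0, 0))
        return NULL;

    if ((!mz_zip_writer_add_mem(&zip, "hello.txt", pText, strlen(pText), MZ_DEFAULT_LEVEL)) ||
        (!mz_zip_writer_finalize_heap_archive(&zip, &pBuf, pOut_size)))
        pBuf = NULL;

    /* end() releases the writer state; finalize already handed the heap block
     * to the caller. */
    mz_zip_writer_end(&zip);
    return pBuf;
}
#endif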

#ifndef MINIZ_NO_STDIO
static size_t mz_zip_file_write_func(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n)
{
    mz_zip_archive *pZip = (mz_zip_archive *)pOpaque;
    mz_int64 cur_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);

    file_ofs += pZip->m_pState->m_file_archive_start_ofs;

    if (((mz_int64)file_ofs < 0) || (((cur_ofs != (mz_int64)file_ofs)) && (MZ_FSEEK64(pZip->m_pState->m_pFile, (mz_int64)file_ofs, SEEK_SET))))
    {
        mz_zip_set_error(pZip, MZ_ZIP_FILE_SEEK_FAILED);
        return 0;
    }

    return MZ_FWRITE(pBuf, 1, n, pZip->m_pState->m_pFile);
}

mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning)
{
    return mz_zip_writer_init_file_v2(pZip, pFilename, size_to_reserve_at_beginning, 0);
}

mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning, mz_uint flags)
{
    MZ_FILE *pFile;

    pZip->m_pWrite = mz_zip_file_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, size_to_reserve_at_beginning, flags))
        return MZ_FALSE;

    if (NULL == (pFile = MZ_FOPEN(pFilename, (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING) ? "w+b" : "wb")))
    {
        mz_zip_writer_end(pZip);
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);
    }

    pZip->m_pState->m_pFile = pFile;
    pZip->m_zip_type = MZ_ZIP_TYPE_FILE;

    if (size_to_reserve_at_beginning)
    {
        mz_uint64 cur_ofs = 0;
        char buf[4096];

        MZ_CLEAR_OBJ(buf);

        do
        {
            size_t n = (size_t)MZ_MIN(sizeof(buf), size_to_reserve_at_beginning);
            if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_ofs, buf, n) != n)
            {
                mz_zip_writer_end(pZip);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
            }
            cur_ofs += n;
            size_to_reserve_at_beginning -= n;
        } while (size_to_reserve_at_beginning);
    }

    return MZ_TRUE;
}
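
/* Illustrative usage sketch (editor's addition, compiled out): creating an
 * archive on disk with the stdio writer initialized above. The paths, entry
 * name and payload are placeholders; mz_zip_writer_finalize_archive() is part
 * of miniz's public writer API. */
#if 0
static mz_bool example_write_zip_file(const char *pZip_path)
{
    mz_zip_archive zip;
    const char *pPayload = "example payload";
    mz_bool ok;

    mz_zip_zero_struct(&zip);

    if (!mz_zip_writer_init_file_v2(&zip, pZip_path, 0, 0))
        return MZ_FALSE;

    ok = mz_zip_writer_add_mem(&zip, "data/payload.txt", pPayload, strlen(pPayload), MZ_BEST_COMPRESSION) &&
         mz_zip_writer_finalize_archive(&zip);

    /* end() closes the FILE handle and frees the writer state whether or not
     * the adds succeeded. */
    if (!mz_zip_writer_end(&zip))
        ok = MZ_FALSE;

    return ok;
}
#endif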

mz_bool mz_zip_writer_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint flags)
{
    pZip->m_pWrite = mz_zip_file_write_func;
    pZip->m_pNeeds_keepalive = NULL;

    if (flags & MZ_ZIP_FLAG_WRITE_ALLOW_READING)
        pZip->m_pRead = mz_zip_file_read_func;

    pZip->m_pIO_opaque = pZip;

    if (!mz_zip_writer_init_v2(pZip, 0, flags))
        return MZ_FALSE;

    pZip->m_pState->m_pFile = pFile;
    pZip->m_pState->m_file_archive_start_ofs = MZ_FTELL64(pZip->m_pState->m_pFile);
    pZip->m_zip_type = MZ_ZIP_TYPE_CFILE;

    return MZ_TRUE;
}
#endif /* #ifndef MINIZ_NO_STDIO */

mz_bool mz_zip_writer_init_from_reader_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags)
{
    mz_zip_internal_state *pState;

    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_READING))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (flags & MZ_ZIP_FLAG_WRITE_ZIP64)
    {
        /* We don't support converting a non-zip64 file to zip64 - this seems like more trouble than it's worth. (What about the existing 32-bit data descriptors that could follow the compressed data?) */
        if (!pZip->m_pState->m_zip64)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    /* No sense in trying to write to an archive that's already at the supported max size */
    if (pZip->m_pState->m_zip64)
    {
        if (pZip->m_total_files == MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if (pZip->m_total_files == MZ_UINT16_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);

        if ((pZip->m_archive_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + MZ_ZIP_LOCAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
    }

    pState = pZip->m_pState;

    if (pState->m_pFile)
    {
#ifdef MINIZ_NO_STDIO
        (void)pFilename;
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
#else
        if (pZip->m_pIO_opaque != pZip)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

        if (pZip->m_zip_type == MZ_ZIP_TYPE_FILE)
        {
            if (!pFilename)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

            /* Archive is being read from stdio and was originally opened only for reading. Try to reopen as writable. */
            if (NULL == (pState->m_pFile = MZ_FREOPEN(pFilename, "r+b", pState->m_pFile)))
            {
                /* The mz_zip_archive is now in a bogus state because pState->m_pFile is NULL, so just close it. */
                mz_zip_reader_end_internal(pZip, MZ_FALSE);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);
            }
        }

        pZip->m_pWrite = mz_zip_file_write_func;
        pZip->m_pNeeds_keepalive = NULL;
#endif /* #ifdef MINIZ_NO_STDIO */
    }
    else if (pState->m_pMem)
    {
        /* Archive lives in a memory block. Assume it's from the heap that we can resize using the realloc callback. */
        if (pZip->m_pIO_opaque != pZip)
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

        pState->m_mem_capacity = pState->m_mem_size;
        pZip->m_pWrite = mz_zip_heap_write_func;
        pZip->m_pNeeds_keepalive = NULL;
    }
    /* Archive is being read via a user provided read function - make sure the user has specified a write function too. */
    else if (!pZip->m_pWrite)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* Start writing new files at the archive's current central directory location. */
    /* TODO: We could add a flag that lets the user start writing immediately AFTER the existing central dir - this would be safer. */
    pZip->m_archive_size = pZip->m_central_directory_file_ofs;
    pZip->m_central_directory_file_ofs = 0;

    /* Clear the sorted central dir offsets, they aren't useful or maintained now. */
    /* Even though we're now in write mode, files can still be extracted and verified, but file locates will be slow. */
    /* TODO: We could easily maintain the sorted central directory offsets. */
    mz_zip_array_clear(pZip, &pZip->m_pState->m_sorted_central_dir_offsets);

    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING;

    return MZ_TRUE;
}

mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename)
{
    return mz_zip_writer_init_from_reader_v2(pZip, pFilename, 0);
}
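
/* Illustrative usage sketch (editor's addition, compiled out): appending an
 * entry to an existing on-disk archive by re-opening the reader as a writer,
 * as supported by the function above. mz_zip_reader_init_file(),
 * mz_zip_reader_end() and mz_zip_writer_finalize_archive() are part of miniz's
 * public API; the archive path, entry name and data are placeholders. */
#if 0
static mz_bool example_append_entry(const char *pZip_path, const void *pData, size_t data_size)
{
    mz_zip_archive zip;
    mz_bool ok;

    mz_zip_zero_struct(&zip);

    /* Open for reading first so the existing central directory is loaded... */
    if (!mz_zip_reader_init_file(&zip, pZip_path, 0))
        return MZ_FALSE;

    /* ...then switch to write mode. New data overwrites the old central
     * directory, which finalize rewrites at the end of the archive. */
    if (!mz_zip_writer_init_from_reader(&zip, pZip_path))
    {
        mz_zip_reader_end(&zip);
        return MZ_FALSE;
    }

    ok = mz_zip_writer_add_mem(&zip, "appended.bin", pData, data_size, MZ_DEFAULT_LEVEL) &&
         mz_zip_writer_finalize_archive(&zip);

    if (!mz_zip_writer_end(&zip))
        ok = MZ_FALSE;

    return ok;
}
#endif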

/* TODO: pArchive_name is a terrible name here! */
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags)
{
    return mz_zip_writer_add_mem_ex(pZip, pArchive_name, pBuf, buf_size, NULL, 0, level_and_flags, 0, 0);
}

typedef struct
{
    mz_zip_archive *m_pZip;
    mz_uint64 m_cur_archive_file_ofs;
    mz_uint64 m_comp_size;
} mz_zip_writer_add_state;

static mz_bool mz_zip_writer_add_put_buf_callback(const void *pBuf, int len, void *pUser)
{
    mz_zip_writer_add_state *pState = (mz_zip_writer_add_state *)pUser;
    if ((int)pState->m_pZip->m_pWrite(pState->m_pZip->m_pIO_opaque, pState->m_cur_archive_file_ofs, pBuf, len) != len)
        return MZ_FALSE;

    pState->m_cur_archive_file_ofs += len;
    pState->m_comp_size += len;
    return MZ_TRUE;
}

#define MZ_ZIP64_MAX_LOCAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 2)
#define MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE (sizeof(mz_uint16) * 2 + sizeof(mz_uint64) * 3)
static mz_uint32 mz_zip_writer_create_zip64_extra_data(mz_uint8 *pBuf, mz_uint64 *pUncomp_size, mz_uint64 *pComp_size, mz_uint64 *pLocal_header_ofs)
{
    mz_uint8 *pDst = pBuf;
    mz_uint32 field_size = 0;

    MZ_WRITE_LE16(pDst + 0, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID);
    MZ_WRITE_LE16(pDst + 2, 0);
    pDst += sizeof(mz_uint16) * 2;

    if (pUncomp_size)
    {
        MZ_WRITE_LE64(pDst, *pUncomp_size);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    if (pComp_size)
    {
        MZ_WRITE_LE64(pDst, *pComp_size);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    if (pLocal_header_ofs)
    {
        MZ_WRITE_LE64(pDst, *pLocal_header_ofs);
        pDst += sizeof(mz_uint64);
        field_size += sizeof(mz_uint64);
    }

    MZ_WRITE_LE16(pBuf + 2, field_size);

    return (mz_uint32)(pDst - pBuf);
}

static mz_bool mz_zip_writer_create_local_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst, mz_uint16 filename_size, mz_uint16 extra_size, mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32, mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date)
{
    (void)pZip;
    memset(pDst, 0, MZ_ZIP_LOCAL_DIR_HEADER_SIZE);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_SIG_OFS, MZ_ZIP_LOCAL_DIR_HEADER_SIG);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_VERSION_NEEDED_OFS, method ? 20 : 0);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_BIT_FLAG_OFS, bit_flags);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_METHOD_OFS, method);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_TIME_OFS, dos_time);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILE_DATE_OFS, dos_date);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_CRC32_OFS, uncomp_crc32);
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE32(pDst + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_FILENAME_LEN_OFS, filename_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_LDH_EXTRA_LEN_OFS, extra_size);
    return MZ_TRUE;
}

static mz_bool mz_zip_writer_create_central_dir_header(mz_zip_archive *pZip, mz_uint8 *pDst,
                                                       mz_uint16 filename_size, mz_uint16 extra_size, mz_uint16 comment_size,
                                                       mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32,
                                                       mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
                                                       mz_uint64 local_header_ofs, mz_uint32 ext_attributes)
{
    (void)pZip;
    memset(pDst, 0, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_SIG_OFS, MZ_ZIP_CENTRAL_DIR_HEADER_SIG);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_VERSION_NEEDED_OFS, method ? 20 : 0);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_BIT_FLAG_OFS, bit_flags);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_METHOD_OFS, method);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_TIME_OFS, dos_time);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILE_DATE_OFS, dos_date);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_CRC32_OFS, uncomp_crc32);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_MIN(comp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_MIN(uncomp_size, MZ_UINT32_MAX));
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_FILENAME_LEN_OFS, filename_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_EXTRA_LEN_OFS, extra_size);
    MZ_WRITE_LE16(pDst + MZ_ZIP_CDH_COMMENT_LEN_OFS, comment_size);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_EXTERNAL_ATTR_OFS, ext_attributes);
    MZ_WRITE_LE32(pDst + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_MIN(local_header_ofs, MZ_UINT32_MAX));
    return MZ_TRUE;
}

static mz_bool mz_zip_writer_add_to_central_dir(mz_zip_archive *pZip, const char *pFilename, mz_uint16 filename_size,
                                                const void *pExtra, mz_uint16 extra_size, const void *pComment, mz_uint16 comment_size,
                                                mz_uint64 uncomp_size, mz_uint64 comp_size, mz_uint32 uncomp_crc32,
                                                mz_uint16 method, mz_uint16 bit_flags, mz_uint16 dos_time, mz_uint16 dos_date,
                                                mz_uint64 local_header_ofs, mz_uint32 ext_attributes,
                                                const char *user_extra_data, mz_uint user_extra_data_len)
{
    mz_zip_internal_state *pState = pZip->m_pState;
    mz_uint32 central_dir_ofs = (mz_uint32)pState->m_central_dir.m_size;
    size_t orig_central_dir_size = pState->m_central_dir.m_size;
    mz_uint8 central_dir_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];

    if (!pZip->m_pState->m_zip64)
    {
        if (local_header_ofs > 0xFFFFFFFF)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_TOO_LARGE);
    }

    /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
    if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + filename_size + extra_size + user_extra_data_len + comment_size) >= MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

    if (!mz_zip_writer_create_central_dir_header(pZip, central_dir_header, filename_size, extra_size + user_extra_data_len, comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_header_ofs, ext_attributes))
        return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

    if ((!mz_zip_array_push_back(pZip, &pState->m_central_dir, central_dir_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE)) ||
        (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pFilename, filename_size)) ||
        (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pExtra, extra_size)) ||
        (!mz_zip_array_push_back(pZip, &pState->m_central_dir, user_extra_data, user_extra_data_len)) ||
        (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pComment, comment_size)) ||
        (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &central_dir_ofs, 1)))
    {
        /* Try to resize the central directory array back into its original state. */
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    return MZ_TRUE;
}

static mz_bool mz_zip_writer_validate_archive_name(const char *pArchive_name)
{
    /* Basic ZIP archive filename validity checks: Valid filenames cannot start with a forward slash, cannot contain a drive letter, and cannot use DOS-style backward slashes. */
    if (*pArchive_name == '/')
        return MZ_FALSE;

    while (*pArchive_name)
    {
        if ((*pArchive_name == '\\') || (*pArchive_name == ':'))
            return MZ_FALSE;

        pArchive_name++;
    }

    return MZ_TRUE;
}

static mz_uint mz_zip_writer_compute_padding_needed_for_file_alignment(mz_zip_archive *pZip)
{
    mz_uint32 n;
    if (!pZip->m_file_offset_alignment)
        return 0;
    n = (mz_uint32)(pZip->m_archive_size & (pZip->m_file_offset_alignment - 1));
    return (mz_uint)((pZip->m_file_offset_alignment - n) & (pZip->m_file_offset_alignment - 1));
}

static mz_bool mz_zip_writer_write_zeros(mz_zip_archive *pZip, mz_uint64 cur_file_ofs, mz_uint32 n)
{
    char buf[4096];
    memset(buf, 0, MZ_MIN(sizeof(buf), n));
    while (n)
    {
        mz_uint32 s = MZ_MIN(sizeof(buf), n);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_file_ofs, buf, s) != s)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_file_ofs += s;
        n -= s;
    }
    return MZ_TRUE;
}

mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags,
                                 mz_uint64 uncomp_size, mz_uint32 uncomp_crc32)
{
    return mz_zip_writer_add_mem_ex_v2(pZip, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, uncomp_size, uncomp_crc32, NULL, NULL, 0, NULL, 0);
}

mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size,
                                    mz_uint level_and_flags, mz_uint64 uncomp_size, mz_uint32 uncomp_crc32, MZ_TIME_T *last_modified,
                                    const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len)
{
    mz_uint16 method = 0, dos_time = 0, dos_date = 0;
    mz_uint level, ext_attributes = 0, num_alignment_padding_bytes;
    mz_uint64 local_dir_header_ofs = pZip->m_archive_size, cur_archive_file_ofs = pZip->m_archive_size, comp_size = 0;
    size_t archive_name_size;
    mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
    tdefl_compressor *pComp = NULL;
    mz_bool store_data_uncompressed;
    mz_zip_internal_state *pState;
    mz_uint8 *pExtra_data = NULL;
    mz_uint32 extra_size = 0;
    mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE];
    mz_uint16 bit_flags = 0;

    if ((int)level_and_flags < 0)
        level_and_flags = MZ_DEFAULT_LEVEL;

    if (uncomp_size || (buf_size && !(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)))
        bit_flags |= MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;

    if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
        bit_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

    level = level_and_flags & 0xF;
    store_data_uncompressed = ((!level) || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA));

    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || ((buf_size) && (!pBuf)) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    if (pState->m_zip64)
    {
        if (pZip->m_total_files == MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if (pZip->m_total_files == MZ_UINT16_MAX)
        {
            pState->m_zip64 = MZ_TRUE;
            /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */
        }
        if ((buf_size > 0xFFFFFFFF) || (uncomp_size > 0xFFFFFFFF))
        {
            pState->m_zip64 = MZ_TRUE;
            /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
        }
    }

    if ((!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)) && (uncomp_size))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_writer_validate_archive_name(pArchive_name))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

#ifndef MINIZ_NO_TIME
    if (last_modified != NULL)
    {
        mz_zip_time_t_to_dos_time(*last_modified, &dos_time, &dos_date);
    }
    else
    {
        MZ_TIME_T cur_time;
        time(&cur_time);
        mz_zip_time_t_to_dos_time(cur_time, &dos_time, &dos_date);
    }
#endif /* #ifndef MINIZ_NO_TIME */

    if (!(level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
    {
        uncomp_crc32 = (mz_uint32)mz_crc32(MZ_CRC32_INIT, (const mz_uint8 *)pBuf, buf_size);
        uncomp_size = buf_size;
        if (uncomp_size <= 3)
        {
            level = 0;
            store_data_uncompressed = MZ_TRUE;
        }
    }

    archive_name_size = strlen(pArchive_name);
    if (archive_name_size > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

    num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

    /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
    if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

    if (!pState->m_zip64)
    {
        /* Bail early if the archive would obviously become too large */
        if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size
             + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + user_extra_data_len
             + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + user_extra_data_central_len
             + MZ_ZIP_DATA_DESCRIPTER_SIZE32) > 0xFFFFFFFF)
        {
            pState->m_zip64 = MZ_TRUE;
            /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
        }
    }

    if ((archive_name_size) && (pArchive_name[archive_name_size - 1] == '/'))
    {
        /* Set DOS Subdirectory attribute bit. */
        ext_attributes |= MZ_ZIP_DOS_DIR_ATTRIBUTE_BITFLAG;

        /* Subdirectories cannot contain data. */
        if ((buf_size) || (uncomp_size))
            return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
    }

    /* Try to do any allocations before writing to the archive, so if an allocation fails the file remains unmodified. (A good idea if we're doing an in-place modification.) */
    if ((!mz_zip_array_ensure_room(pZip, &pState->m_central_dir, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + comment_size + (pState->m_zip64 ? MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE : 0))) || (!mz_zip_array_ensure_room(pZip, &pState->m_central_dir_offsets, 1)))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    if ((!store_data_uncompressed) && (buf_size))
    {
        if (NULL == (pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor))))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes))
    {
        pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
        return MZ_FALSE;
    }

    local_dir_header_ofs += num_alignment_padding_bytes;
    if (pZip->m_file_offset_alignment)
    {
        MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0);
    }
    cur_archive_file_ofs += num_alignment_padding_bytes;

    MZ_CLEAR_OBJ(local_dir_header);

    if (!store_data_uncompressed || (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA))
    {
        method = MZ_DEFLATED;
    }

    if (pState->m_zip64)
    {
        if (uncomp_size >= MZ_UINT32_MAX || local_dir_header_ofs >= MZ_UINT32_MAX)
        {
            pExtra_data = extra_data;
            extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
                                                               (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs : NULL);
        }

        if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, extra_size + user_extra_data_len, 0, 0, 0, method, bit_flags, dos_time, dos_date))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += sizeof(local_dir_header);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }
        cur_archive_file_ofs += archive_name_size;

        if (pExtra_data != NULL)
        {
            if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data, extra_size) != extra_size)
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

            cur_archive_file_ofs += extra_size;
        }
    }
    else
    {
        if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
        if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, user_extra_data_len, 0, 0, 0, method, bit_flags, dos_time, dos_date))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, local_dir_header_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += sizeof(local_dir_header);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }
        cur_archive_file_ofs += archive_name_size;
    }

    if (user_extra_data_len > 0)
    {
        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, user_extra_data, user_extra_data_len) != user_extra_data_len)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += user_extra_data_len;
    }

    if (store_data_uncompressed)
    {
        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pBuf, buf_size) != buf_size)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }

        cur_archive_file_ofs += buf_size;
        comp_size = buf_size;
    }
    else if (buf_size)
    {
        mz_zip_writer_add_state state;

        state.m_pZip = pZip;
        state.m_cur_archive_file_ofs = cur_archive_file_ofs;
        state.m_comp_size = 0;

        if ((tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY) ||
            (tdefl_compress_buffer(pComp, pBuf, buf_size, TDEFL_FINISH) != TDEFL_STATUS_DONE))
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
            return mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED);
        }

        comp_size = state.m_comp_size;
        cur_archive_file_ofs = state.m_cur_archive_file_ofs;
    }

    pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
    pComp = NULL;

    if (uncomp_size)
    {
        mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64];
        mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32;

        MZ_ASSERT(bit_flags & MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR);

        MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID);
        MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32);
        if (pExtra_data == NULL)
        {
            if (comp_size > MZ_UINT32_MAX)
                return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

            MZ_WRITE_LE32(local_dir_footer + 8, comp_size);
            MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size);
        }
        else
        {
            MZ_WRITE_LE64(local_dir_footer + 8, comp_size);
            MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size);
            local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64;
        }

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_footer, local_dir_footer_size) != local_dir_footer_size)
            return MZ_FALSE;

        cur_archive_file_ofs += local_dir_footer_size;
    }

    if (pExtra_data != NULL)
    {
        extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
                                                           (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs : NULL);
    }

    if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data, extra_size, pComment,
                                          comment_size, uncomp_size, comp_size, uncomp_crc32, method, bit_flags, dos_time, dos_date, local_dir_header_ofs, ext_attributes,
                                          user_extra_data_central, user_extra_data_central_len))
        return MZ_FALSE;

    pZip->m_total_files++;
    pZip->m_archive_size = cur_archive_file_ofs;

    return MZ_TRUE;
}
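
/* Illustrative usage sketch (editor's addition, compiled out): adding an
 * in-memory buffer with an explicit modification time through the extended
 * entry point above. The entry name is a placeholder; uncomp_size/uncomp_crc32
 * are passed as 0 because the data is not pre-compressed, and MINIZ_NO_TIME is
 * assumed to be undefined. */
#if 0
static mz_bool example_add_with_mtime(mz_zip_archive *pZip, const void *pData, size_t size)
{
    MZ_TIME_T mtime = time(NULL);

    /* No comment, no user extra fields, default compression level. */
    return mz_zip_writer_add_mem_ex_v2(pZip, "timed.bin", pData, size, NULL, 0,
                                       MZ_DEFAULT_LEVEL, 0, 0, &mtime, NULL, 0, NULL, 0);
}
#endif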

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_add_cfile(mz_zip_archive *pZip, const char *pArchive_name, MZ_FILE *pSrc_file, mz_uint64 size_to_add, const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags,
                                const char *user_extra_data, mz_uint user_extra_data_len, const char *user_extra_data_central, mz_uint user_extra_data_central_len)
{
    mz_uint16 gen_flags = MZ_ZIP_LDH_BIT_FLAG_HAS_LOCATOR;
    mz_uint uncomp_crc32 = MZ_CRC32_INIT, level, num_alignment_padding_bytes;
    mz_uint16 method = 0, dos_time = 0, dos_date = 0, ext_attributes = 0;
    mz_uint64 local_dir_header_ofs, cur_archive_file_ofs = pZip->m_archive_size, uncomp_size = size_to_add, comp_size = 0;
    size_t archive_name_size;
    mz_uint8 local_dir_header[MZ_ZIP_LOCAL_DIR_HEADER_SIZE];
    mz_uint8 *pExtra_data = NULL;
    mz_uint32 extra_size = 0;
    mz_uint8 extra_data[MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE];
    mz_zip_internal_state *pState;

    if (!(level_and_flags & MZ_ZIP_FLAG_ASCII_FILENAME))
        gen_flags |= MZ_ZIP_GENERAL_PURPOSE_BIT_FLAG_UTF8;

    if ((int)level_and_flags < 0)
        level_and_flags = MZ_DEFAULT_LEVEL;
    level = level_and_flags & 0xF;

    /* Sanity checks */
    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pArchive_name) || ((comment_size) && (!pComment)) || (level > MZ_UBER_COMPRESSION))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    if ((!pState->m_zip64) && (uncomp_size > MZ_UINT32_MAX))
    {
        /* Source file is too large for non-zip64 */
        /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
        pState->m_zip64 = MZ_TRUE;
    }

    /* We could support this, but why? */
    if (level_and_flags & MZ_ZIP_FLAG_COMPRESSED_DATA)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_writer_validate_archive_name(pArchive_name))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

    if (pState->m_zip64)
    {
        if (pZip->m_total_files == MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if (pZip->m_total_files == MZ_UINT16_MAX)
        {
            pState->m_zip64 = MZ_TRUE;
            /*return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES); */
        }
    }

    archive_name_size = strlen(pArchive_name);
    if (archive_name_size > MZ_UINT16_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_FILENAME);

    num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

    /* miniz doesn't support central dirs >= MZ_UINT32_MAX bytes yet */
    if (((mz_uint64)pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP64_MAX_CENTRAL_EXTRA_FIELD_SIZE + comment_size) >= MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

    if (!pState->m_zip64)
    {
        /* Bail early if the archive would obviously become too large */
        if ((pZip->m_archive_size + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + archive_name_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE 
			+ archive_name_size + comment_size + user_extra_data_len + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 1024
			+ MZ_ZIP_DATA_DESCRIPTER_SIZE32 + user_extra_data_central_len) > 0xFFFFFFFF)
        {
            pState->m_zip64 = MZ_TRUE;
            /*return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE); */
        }
    }

#ifndef MINIZ_NO_TIME
    if (pFile_time)
    {
        mz_zip_time_t_to_dos_time(*pFile_time, &dos_time, &dos_date);
    }
#endif

    if (uncomp_size <= 3)
        level = 0;

    if (!mz_zip_writer_write_zeros(pZip, cur_archive_file_ofs, num_alignment_padding_bytes))
    {
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
    }

    cur_archive_file_ofs += num_alignment_padding_bytes;
    local_dir_header_ofs = cur_archive_file_ofs;

    if (pZip->m_file_offset_alignment)
    {
        MZ_ASSERT((cur_archive_file_ofs & (pZip->m_file_offset_alignment - 1)) == 0);
    }

    if (uncomp_size && level)
    {
        method = MZ_DEFLATED;
    }

    MZ_CLEAR_OBJ(local_dir_header);
    if (pState->m_zip64)
    {
        if (uncomp_size >= MZ_UINT32_MAX || local_dir_header_ofs >= MZ_UINT32_MAX)
        {
            pExtra_data = extra_data;
            extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
                                                               (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs : NULL);
        }

        if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, extra_size + user_extra_data_len, 0, 0, 0, method, gen_flags, dos_time, dos_date))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += sizeof(local_dir_header);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size)
        {
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }

        cur_archive_file_ofs += archive_name_size;

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, extra_data, extra_size) != extra_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += extra_size;
    }
    else
    {
        if ((comp_size > MZ_UINT32_MAX) || (cur_archive_file_ofs > MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
        if (!mz_zip_writer_create_local_dir_header(pZip, local_dir_header, (mz_uint16)archive_name_size, user_extra_data_len, 0, 0, 0, method, gen_flags, dos_time, dos_date))
            return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_header, sizeof(local_dir_header)) != sizeof(local_dir_header))
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += sizeof(local_dir_header);

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pArchive_name, archive_name_size) != archive_name_size)
        {
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }

        cur_archive_file_ofs += archive_name_size;
    }

    if (user_extra_data_len > 0)
    {
        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, user_extra_data, user_extra_data_len) != user_extra_data_len)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        cur_archive_file_ofs += user_extra_data_len;
    }

    if (uncomp_size)
    {
        mz_uint64 uncomp_remaining = uncomp_size;
        void *pRead_buf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, MZ_ZIP_MAX_IO_BUF_SIZE);
        if (!pRead_buf)
        {
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!level)
        {
            while (uncomp_remaining)
            {
                mz_uint n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, uncomp_remaining);
                if ((MZ_FREAD(pRead_buf, 1, n, pSrc_file) != n) || (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, pRead_buf, n) != n))
                {
                    pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
                    return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                }
                uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, n);
                uncomp_remaining -= n;
                cur_archive_file_ofs += n;
            }
            comp_size = uncomp_size;
        }
        else
        {
            mz_bool result = MZ_FALSE;
            mz_zip_writer_add_state state;
            tdefl_compressor *pComp = (tdefl_compressor *)pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, sizeof(tdefl_compressor));
            if (!pComp)
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
                return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            }

            state.m_pZip = pZip;
            state.m_cur_archive_file_ofs = cur_archive_file_ofs;
            state.m_comp_size = 0;

            if (tdefl_init(pComp, mz_zip_writer_add_put_buf_callback, &state, tdefl_create_comp_flags_from_zip_params(level, -15, MZ_DEFAULT_STRATEGY)) != TDEFL_STATUS_OKAY)
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);
                pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
                return mz_zip_set_error(pZip, MZ_ZIP_INTERNAL_ERROR);
            }

            for (;;)
            {
                size_t in_buf_size = (mz_uint32)MZ_MIN(uncomp_remaining, (mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE);
                tdefl_status status;
                tdefl_flush flush = TDEFL_NO_FLUSH;

                if (MZ_FREAD(pRead_buf, 1, in_buf_size, pSrc_file) != in_buf_size)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
                    break;
                }

                uncomp_crc32 = (mz_uint32)mz_crc32(uncomp_crc32, (const mz_uint8 *)pRead_buf, in_buf_size);
                uncomp_remaining -= in_buf_size;

                if (pZip->m_pNeeds_keepalive != NULL && pZip->m_pNeeds_keepalive(pZip->m_pIO_opaque))
                    flush = TDEFL_FULL_FLUSH;

                status = tdefl_compress_buffer(pComp, pRead_buf, in_buf_size, uncomp_remaining ? flush : TDEFL_FINISH);
                if (status == TDEFL_STATUS_DONE)
                {
                    result = MZ_TRUE;
                    break;
                }
                else if (status != TDEFL_STATUS_OKAY)
                {
                    mz_zip_set_error(pZip, MZ_ZIP_COMPRESSION_FAILED);
                    break;
                }
            }

            pZip->m_pFree(pZip->m_pAlloc_opaque, pComp);

            if (!result)
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
                return MZ_FALSE;
            }

            comp_size = state.m_comp_size;
            cur_archive_file_ofs = state.m_cur_archive_file_ofs;
        }

        pZip->m_pFree(pZip->m_pAlloc_opaque, pRead_buf);
    }

    {
        mz_uint8 local_dir_footer[MZ_ZIP_DATA_DESCRIPTER_SIZE64];
        mz_uint32 local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE32;

        MZ_WRITE_LE32(local_dir_footer + 0, MZ_ZIP_DATA_DESCRIPTOR_ID);
        MZ_WRITE_LE32(local_dir_footer + 4, uncomp_crc32);
        if (pExtra_data == NULL)
        {
            if (comp_size > MZ_UINT32_MAX)
                return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

            MZ_WRITE_LE32(local_dir_footer + 8, comp_size);
            MZ_WRITE_LE32(local_dir_footer + 12, uncomp_size);
        }
        else
        {
            MZ_WRITE_LE64(local_dir_footer + 8, comp_size);
            MZ_WRITE_LE64(local_dir_footer + 16, uncomp_size);
            local_dir_footer_size = MZ_ZIP_DATA_DESCRIPTER_SIZE64;
        }

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_archive_file_ofs, local_dir_footer, local_dir_footer_size) != local_dir_footer_size)
            return MZ_FALSE;

        cur_archive_file_ofs += local_dir_footer_size;
    }

    if (pExtra_data != NULL)
    {
        extra_size = mz_zip_writer_create_zip64_extra_data(extra_data, (uncomp_size >= MZ_UINT32_MAX) ? &uncomp_size : NULL,
                                                           (uncomp_size >= MZ_UINT32_MAX) ? &comp_size : NULL, (local_dir_header_ofs >= MZ_UINT32_MAX) ? &local_dir_header_ofs : NULL);
    }

    if (!mz_zip_writer_add_to_central_dir(pZip, pArchive_name, (mz_uint16)archive_name_size, pExtra_data, extra_size, pComment, comment_size,
                                          uncomp_size, comp_size, uncomp_crc32, method, gen_flags, dos_time, dos_date, local_dir_header_ofs, ext_attributes,
                                          user_extra_data_central, user_extra_data_central_len))
        return MZ_FALSE;

    pZip->m_total_files++;
    pZip->m_archive_size = cur_archive_file_ofs;

    return MZ_TRUE;
}

mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags)
{
    MZ_FILE *pSrc_file = NULL;
    mz_uint64 uncomp_size = 0;
    MZ_TIME_T file_modified_time;
    MZ_TIME_T *pFile_time = NULL;
    mz_bool status;

    memset(&file_modified_time, 0, sizeof(file_modified_time));

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_STDIO)
    pFile_time = &file_modified_time;
    if (!mz_zip_get_file_modified_time(pSrc_filename, &file_modified_time))
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_STAT_FAILED);
#endif

    pSrc_file = MZ_FOPEN(pSrc_filename, "rb");
    if (!pSrc_file)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_OPEN_FAILED);

    MZ_FSEEK64(pSrc_file, 0, SEEK_END);
    uncomp_size = MZ_FTELL64(pSrc_file);
    MZ_FSEEK64(pSrc_file, 0, SEEK_SET);

    status = mz_zip_writer_add_cfile(pZip, pArchive_name, pSrc_file, uncomp_size, pFile_time, pComment, comment_size, level_and_flags, NULL, 0, NULL, 0);

    MZ_FCLOSE(pSrc_file);

    return status;
}
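
/* Illustrative sketch (not part of miniz): a minimal mz_zip_writer_add_file()
 * round trip using the writer calls defined in this file. The archive and
 * source file names are placeholders. Kept under #if 0 so it is never
 * compiled in. */
#if 0
static mz_bool example_write_one_file(void)
{
    mz_zip_archive zip;
    mz_zip_zero_struct(&zip);

    /* Create a fresh archive on disk (0 = no reserved space at the start). */
    if (!mz_zip_writer_init_file_v2(&zip, "example.zip", 0, MZ_DEFAULT_LEVEL))
        return MZ_FALSE;

    /* Store "data.bin" under the archive name "data.bin", with no comment. */
    if (!mz_zip_writer_add_file(&zip, "data.bin", "data.bin", NULL, 0, MZ_DEFAULT_LEVEL))
    {
        mz_zip_writer_end(&zip);
        return MZ_FALSE;
    }

    /* The central directory is only written by finalize. */
    if (!mz_zip_writer_finalize_archive(&zip))
    {
        mz_zip_writer_end(&zip);
        return MZ_FALSE;
    }
    return mz_zip_writer_end(&zip);
}
#endif
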
#endif /* #ifndef MINIZ_NO_STDIO */

static mz_bool mz_zip_writer_update_zip64_extension_block(mz_zip_array *pNew_ext, mz_zip_archive *pZip, const mz_uint8 *pExt, uint32_t ext_len, mz_uint64 *pComp_size, mz_uint64 *pUncomp_size, mz_uint64 *pLocal_header_ofs, mz_uint32 *pDisk_start)
{
    /* + 64 should be enough for any new zip64 data */
    if (!mz_zip_array_reserve(pZip, pNew_ext, ext_len + 64, MZ_FALSE))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    mz_zip_array_resize(pZip, pNew_ext, 0, MZ_FALSE);

    if ((pUncomp_size) || (pComp_size) || (pLocal_header_ofs) || (pDisk_start))
    {
        mz_uint8 new_ext_block[64];
        mz_uint8 *pDst = new_ext_block;
        mz_write_le16(pDst, MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID);
        mz_write_le16(pDst + sizeof(mz_uint16), 0);
        pDst += sizeof(mz_uint16) * 2;

        if (pUncomp_size)
        {
            mz_write_le64(pDst, *pUncomp_size);
            pDst += sizeof(mz_uint64);
        }

        if (pComp_size)
        {
            mz_write_le64(pDst, *pComp_size);
            pDst += sizeof(mz_uint64);
        }

        if (pLocal_header_ofs)
        {
            mz_write_le64(pDst, *pLocal_header_ofs);
            pDst += sizeof(mz_uint64);
        }

        if (pDisk_start)
        {
            mz_write_le32(pDst, *pDisk_start);
            pDst += sizeof(mz_uint32);
        }

        mz_write_le16(new_ext_block + sizeof(mz_uint16), (mz_uint16)((pDst - new_ext_block) - sizeof(mz_uint16) * 2));

        if (!mz_zip_array_push_back(pZip, pNew_ext, new_ext_block, pDst - new_ext_block))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    if ((pExt) && (ext_len))
    {
        mz_uint32 extra_size_remaining = ext_len;
        const mz_uint8 *pExtra_data = pExt;

        do
        {
            mz_uint32 field_id, field_data_size, field_total_size;

            if (extra_size_remaining < (sizeof(mz_uint16) * 2))
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            field_id = MZ_READ_LE16(pExtra_data);
            field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
            field_total_size = field_data_size + sizeof(mz_uint16) * 2;

            if (field_total_size > extra_size_remaining)
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

            if (field_id != MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
            {
                if (!mz_zip_array_push_back(pZip, pNew_ext, pExtra_data, field_total_size))
                    return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
            }

            pExtra_data += field_total_size;
            extra_size_remaining -= field_total_size;
        } while (extra_size_remaining);
    }

    return MZ_TRUE;
}

/* TODO: This func is now pretty freakin complex due to zip64, split it up? */
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint src_file_index)
{
    mz_uint n, bit_flags, num_alignment_padding_bytes, src_central_dir_following_data_size;
    mz_uint64 src_archive_bytes_remaining, local_dir_header_ofs;
    mz_uint64 cur_src_file_ofs, cur_dst_file_ofs;
    mz_uint32 local_header_u32[(MZ_ZIP_LOCAL_DIR_HEADER_SIZE + sizeof(mz_uint32) - 1) / sizeof(mz_uint32)];
    mz_uint8 *pLocal_header = (mz_uint8 *)local_header_u32;
    mz_uint8 new_central_header[MZ_ZIP_CENTRAL_DIR_HEADER_SIZE];
    size_t orig_central_dir_size;
    mz_zip_internal_state *pState;
    void *pBuf;
    const mz_uint8 *pSrc_central_header;
    mz_zip_archive_file_stat src_file_stat;
    mz_uint32 src_filename_len, src_comment_len, src_ext_len;
    mz_uint32 local_header_filename_size, local_header_extra_len;
    mz_uint64 local_header_comp_size, local_header_uncomp_size;
    mz_bool found_zip64_ext_data_in_ldir = MZ_FALSE;

    /* Sanity checks */
    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING) || (!pSource_zip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    /* Don't support copying files from zip64 archives to non-zip64, even though in some cases this is possible */
    if ((pSource_zip->m_pState->m_zip64) && (!pZip->m_pState->m_zip64))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    /* Get pointer to the source central dir header and crack it */
    if (NULL == (pSrc_central_header = mz_zip_get_cdh(pSource_zip, src_file_index)))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (MZ_READ_LE32(pSrc_central_header + MZ_ZIP_CDH_SIG_OFS) != MZ_ZIP_CENTRAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    src_filename_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    src_comment_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_COMMENT_LEN_OFS);
    src_ext_len = MZ_READ_LE16(pSrc_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS);
    src_central_dir_following_data_size = src_filename_len + src_ext_len + src_comment_len;

    /* TODO: We don't support central dirs >= MZ_UINT32_MAX bytes right now (+32 fudge factor in case we need to add more extra data) */
    if ((pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + 32) >= MZ_UINT32_MAX)
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);

    num_alignment_padding_bytes = mz_zip_writer_compute_padding_needed_for_file_alignment(pZip);

    if (!pState->m_zip64)
    {
        if (pZip->m_total_files == MZ_UINT16_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        /* TODO: Our zip64 support still has some 32-bit limits that may not be worth fixing. */
        if (pZip->m_total_files == MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }

    if (!mz_zip_file_stat_internal(pSource_zip, src_file_index, pSrc_central_header, &src_file_stat, NULL))
        return MZ_FALSE;

    cur_src_file_ofs = src_file_stat.m_local_header_ofs;
    cur_dst_file_ofs = pZip->m_archive_size;

    /* Read the source archive's local dir header */
    if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);

    if (MZ_READ_LE32(pLocal_header) != MZ_ZIP_LOCAL_DIR_HEADER_SIG)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);

    cur_src_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

    /* Compute the total size we need to copy (filename+extra data+compressed data) */
    local_header_filename_size = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_FILENAME_LEN_OFS);
    local_header_extra_len = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_EXTRA_LEN_OFS);
    local_header_comp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_COMPRESSED_SIZE_OFS);
    local_header_uncomp_size = MZ_READ_LE32(pLocal_header + MZ_ZIP_LDH_DECOMPRESSED_SIZE_OFS);
    src_archive_bytes_remaining = local_header_filename_size + local_header_extra_len + src_file_stat.m_comp_size;

    /* Try to find a zip64 extended information field */
    if ((local_header_extra_len) && ((local_header_comp_size == MZ_UINT32_MAX) || (local_header_uncomp_size == MZ_UINT32_MAX)))
    {
        mz_zip_array file_data_array;
        const mz_uint8 *pExtra_data;
        mz_uint32 extra_size_remaining = local_header_extra_len;

        mz_zip_array_init(&file_data_array, 1);
        if (!mz_zip_array_resize(pZip, &file_data_array, local_header_extra_len, MZ_FALSE))
        {
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, src_file_stat.m_local_header_ofs + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + local_header_filename_size, file_data_array.m_p, local_header_extra_len) != local_header_extra_len)
        {
            mz_zip_array_clear(pZip, &file_data_array);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        }

        pExtra_data = (const mz_uint8 *)file_data_array.m_p;

        do
        {
            mz_uint32 field_id, field_data_size, field_total_size;

            if (extra_size_remaining < (sizeof(mz_uint16) * 2))
            {
                mz_zip_array_clear(pZip, &file_data_array);
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            field_id = MZ_READ_LE16(pExtra_data);
            field_data_size = MZ_READ_LE16(pExtra_data + sizeof(mz_uint16));
            field_total_size = field_data_size + sizeof(mz_uint16) * 2;

            if (field_total_size > extra_size_remaining)
            {
                mz_zip_array_clear(pZip, &file_data_array);
                return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
            }

            if (field_id == MZ_ZIP64_EXTENDED_INFORMATION_FIELD_HEADER_ID)
            {
                const mz_uint8 *pSrc_field_data = pExtra_data + sizeof(mz_uint32);

                if (field_data_size < sizeof(mz_uint64) * 2)
                {
                    mz_zip_array_clear(pZip, &file_data_array);
                    return mz_zip_set_error(pZip, MZ_ZIP_INVALID_HEADER_OR_CORRUPTED);
                }

                local_header_uncomp_size = MZ_READ_LE64(pSrc_field_data);
                local_header_comp_size = MZ_READ_LE64(pSrc_field_data + sizeof(mz_uint64)); /* may be 0 if there's a descriptor */

                found_zip64_ext_data_in_ldir = MZ_TRUE;
                break;
            }

            pExtra_data += field_total_size;
            extra_size_remaining -= field_total_size;
        } while (extra_size_remaining);

        mz_zip_array_clear(pZip, &file_data_array);
    }

    if (!pState->m_zip64)
    {
        /* Try to detect if the new archive will most likely wind up too big and bail early (+(sizeof(mz_uint32) * 4) is for the optional descriptor which could be present, +64 is a fudge factor). */
        /* We also check when the archive is finalized so this doesn't need to be perfect. */
        mz_uint64 approx_new_archive_size = cur_dst_file_ofs + num_alignment_padding_bytes + MZ_ZIP_LOCAL_DIR_HEADER_SIZE + src_archive_bytes_remaining + (sizeof(mz_uint32) * 4) +
                                            pState->m_central_dir.m_size + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_central_dir_following_data_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE + 64;

        if (approx_new_archive_size >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);
    }

    /* Write dest archive padding */
    if (!mz_zip_writer_write_zeros(pZip, cur_dst_file_ofs, num_alignment_padding_bytes))
        return MZ_FALSE;

    cur_dst_file_ofs += num_alignment_padding_bytes;

    local_dir_header_ofs = cur_dst_file_ofs;
    if (pZip->m_file_offset_alignment)
    {
        MZ_ASSERT((local_dir_header_ofs & (pZip->m_file_offset_alignment - 1)) == 0);
    }

    /* The original zip's local header+ext block doesn't change, even with zip64, so we can just copy it over to the dest zip */
    if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pLocal_header, MZ_ZIP_LOCAL_DIR_HEADER_SIZE) != MZ_ZIP_LOCAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

    cur_dst_file_ofs += MZ_ZIP_LOCAL_DIR_HEADER_SIZE;

    /* Copy over the source archive bytes to the dest archive, also ensure we have enough buf space to handle optional data descriptor */
    if (NULL == (pBuf = pZip->m_pAlloc(pZip->m_pAlloc_opaque, 1, (size_t)MZ_MAX(32U, MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining)))))
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

    while (src_archive_bytes_remaining)
    {
        n = (mz_uint)MZ_MIN((mz_uint64)MZ_ZIP_MAX_IO_BUF_SIZE, src_archive_bytes_remaining);
        if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
        }
        cur_src_file_ofs += n;

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }
        cur_dst_file_ofs += n;

        src_archive_bytes_remaining -= n;
    }

    /* Now deal with the optional data descriptor */
    bit_flags = MZ_READ_LE16(pLocal_header + MZ_ZIP_LDH_BIT_FLAG_OFS);
    if (bit_flags & 8)
    {
        /* Copy data descriptor */
        if ((pSource_zip->m_pState->m_zip64) || (found_zip64_ext_data_in_ldir))
        {
            /* src is zip64, dest must be zip64 */

            /* name			uint32_t's */
            /* id				1 (optional in zip64?) */
            /* crc			1 */
            /* comp_size	2 */
            /* uncomp_size 2 */
            if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, (sizeof(mz_uint32) * 6)) != (sizeof(mz_uint32) * 6))
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            }

            n = sizeof(mz_uint32) * ((MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID) ? 6 : 5);
        }
        else
        {
            /* src is NOT zip64 */
            mz_bool has_id;

            if (pSource_zip->m_pRead(pSource_zip->m_pIO_opaque, cur_src_file_ofs, pBuf, sizeof(mz_uint32) * 4) != sizeof(mz_uint32) * 4)
            {
                pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
                return mz_zip_set_error(pZip, MZ_ZIP_FILE_READ_FAILED);
            }

            has_id = (MZ_READ_LE32(pBuf) == MZ_ZIP_DATA_DESCRIPTOR_ID);

            if (pZip->m_pState->m_zip64)
            {
                /* dest is zip64, so upgrade the data descriptor */
                const mz_uint32 *pSrc_descriptor = (const mz_uint32 *)((const mz_uint8 *)pBuf + (has_id ? sizeof(mz_uint32) : 0));
                const mz_uint32 src_crc32 = pSrc_descriptor[0];
                const mz_uint64 src_comp_size = pSrc_descriptor[1];
                const mz_uint64 src_uncomp_size = pSrc_descriptor[2];

                mz_write_le32((mz_uint8 *)pBuf, MZ_ZIP_DATA_DESCRIPTOR_ID);
                mz_write_le32((mz_uint8 *)pBuf + sizeof(mz_uint32) * 1, src_crc32);
                mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 2, src_comp_size);
                mz_write_le64((mz_uint8 *)pBuf + sizeof(mz_uint32) * 4, src_uncomp_size);

                n = sizeof(mz_uint32) * 6;
            }
            else
            {
                /* dest is NOT zip64, just copy it as-is */
                n = sizeof(mz_uint32) * (has_id ? 4 : 3);
            }
        }

        if (pZip->m_pWrite(pZip->m_pIO_opaque, cur_dst_file_ofs, pBuf, n) != n)
        {
            pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);
        }

        cur_src_file_ofs += n;
        cur_dst_file_ofs += n;
    }
    pZip->m_pFree(pZip->m_pAlloc_opaque, pBuf);

    /* Finally, add the new central dir header */
    orig_central_dir_size = pState->m_central_dir.m_size;

    memcpy(new_central_header, pSrc_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE);

    if (pState->m_zip64)
    {
        /* This is the painful part: We need to write a new central dir header + ext block with updated zip64 fields, and ensure the old fields (if any) are not included. */
        const mz_uint8 *pSrc_ext = pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len;
        mz_zip_array new_ext_block;

        mz_zip_array_init(&new_ext_block, sizeof(mz_uint8));

        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_COMPRESSED_SIZE_OFS, MZ_UINT32_MAX);
        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_DECOMPRESSED_SIZE_OFS, MZ_UINT32_MAX);
        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, MZ_UINT32_MAX);

        if (!mz_zip_writer_update_zip64_extension_block(&new_ext_block, pZip, pSrc_ext, src_ext_len, &src_file_stat.m_comp_size, &src_file_stat.m_uncomp_size, &local_dir_header_ofs, NULL))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            return MZ_FALSE;
        }

        MZ_WRITE_LE16(new_central_header + MZ_ZIP_CDH_EXTRA_LEN_OFS, new_ext_block.m_size);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_filename_len))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_ext_block.m_p, new_ext_block.m_size))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE + src_filename_len + src_ext_len, src_comment_len))
        {
            mz_zip_array_clear(pZip, &new_ext_block);
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }

        mz_zip_array_clear(pZip, &new_ext_block);
    }
    else
    {
        /* sanity checks */
        if (cur_dst_file_ofs > MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        if (local_dir_header_ofs >= MZ_UINT32_MAX)
            return mz_zip_set_error(pZip, MZ_ZIP_ARCHIVE_TOO_LARGE);

        MZ_WRITE_LE32(new_central_header + MZ_ZIP_CDH_LOCAL_HEADER_OFS, local_dir_header_ofs);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, new_central_header, MZ_ZIP_CENTRAL_DIR_HEADER_SIZE))
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);

        if (!mz_zip_array_push_back(pZip, &pState->m_central_dir, pSrc_central_header + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, src_central_dir_following_data_size))
        {
            mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
            return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
        }
    }

    /* This shouldn't trigger unless we screwed up during the initial sanity checks */
    if (pState->m_central_dir.m_size >= MZ_UINT32_MAX)
    {
        /* TODO: Support central dirs >= 32-bits in size */
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_UNSUPPORTED_CDIR_SIZE);
    }

    n = (mz_uint32)orig_central_dir_size;
    if (!mz_zip_array_push_back(pZip, &pState->m_central_dir_offsets, &n, 1))
    {
        mz_zip_array_resize(pZip, &pState->m_central_dir, orig_central_dir_size, MZ_FALSE);
        return mz_zip_set_error(pZip, MZ_ZIP_ALLOC_FAILED);
    }

    pZip->m_total_files++;
    pZip->m_archive_size = cur_dst_file_ofs;

    return MZ_TRUE;
}
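
/* Illustrative sketch (not part of miniz): copying every entry of one archive
 * into a new one with mz_zip_writer_add_from_zip_reader(); the compressed
 * data is transferred without being recompressed. File names are placeholders
 * and the file-based init calls require stdio support. Kept under #if 0 so it
 * is never compiled in. */
#if 0
static mz_bool example_copy_archive(const char *src_name, const char *dst_name)
{
    mz_uint i, n;
    mz_bool ok = MZ_TRUE;
    mz_zip_archive src, dst;
    mz_zip_zero_struct(&src);
    mz_zip_zero_struct(&dst);

    if (!mz_zip_reader_init_file_v2(&src, src_name, 0, 0, 0))
        return MZ_FALSE;

    if (!mz_zip_writer_init_file_v2(&dst, dst_name, 0, MZ_DEFAULT_LEVEL))
    {
        mz_zip_reader_end(&src);
        return MZ_FALSE;
    }

    n = mz_zip_reader_get_num_files(&src);
    for (i = 0; i < n; i++)
    {
        if (!mz_zip_writer_add_from_zip_reader(&dst, &src, i))
        {
            ok = MZ_FALSE;
            break;
        }
    }

    if (!mz_zip_writer_finalize_archive(&dst))
        ok = MZ_FALSE;

    mz_zip_writer_end(&dst);
    mz_zip_reader_end(&src);
    return ok;
}
#endif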

mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip)
{
    mz_zip_internal_state *pState;
    mz_uint64 central_dir_ofs, central_dir_size;
    mz_uint8 hdr[256];

    if ((!pZip) || (!pZip->m_pState) || (pZip->m_zip_mode != MZ_ZIP_MODE_WRITING))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    pState = pZip->m_pState;

    if (pState->m_zip64)
    {
        if ((pZip->m_total_files > MZ_UINT32_MAX) || (pState->m_central_dir.m_size >= MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }
    else
    {
        if ((pZip->m_total_files > MZ_UINT16_MAX) || ((pZip->m_archive_size + pState->m_central_dir.m_size + MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) > MZ_UINT32_MAX))
            return mz_zip_set_error(pZip, MZ_ZIP_TOO_MANY_FILES);
    }

    central_dir_ofs = 0;
    central_dir_size = 0;
    if (pZip->m_total_files)
    {
        /* Write central directory */
        central_dir_ofs = pZip->m_archive_size;
        central_dir_size = pState->m_central_dir.m_size;
        pZip->m_central_directory_file_ofs = central_dir_ofs;
        if (pZip->m_pWrite(pZip->m_pIO_opaque, central_dir_ofs, pState->m_central_dir.m_p, (size_t)central_dir_size) != central_dir_size)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        pZip->m_archive_size += central_dir_size;
    }

    if (pState->m_zip64)
    {
        /* Write zip64 end of central directory header */
        mz_uint64 rel_ofs_to_zip64_ecdr = pZip->m_archive_size;

        MZ_CLEAR_OBJ(hdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDH_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIG);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_SIZE_OF_RECORD_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE - sizeof(mz_uint32) - sizeof(mz_uint64));
        MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_MADE_BY_OFS, 0x031E); /* TODO: always Unix */
        MZ_WRITE_LE16(hdr + MZ_ZIP64_ECDH_VERSION_NEEDED_OFS, 0x002D);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, pZip->m_total_files);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_TOTAL_ENTRIES_OFS, pZip->m_total_files);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_SIZE_OFS, central_dir_size);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDH_CDIR_OFS_OFS, central_dir_ofs);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_HEADER_SIZE;

        /* Write zip64 end of central directory locator */
        MZ_CLEAR_OBJ(hdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_SIG_OFS, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIG);
        MZ_WRITE_LE64(hdr + MZ_ZIP64_ECDL_REL_OFS_TO_ZIP64_ECDR_OFS, rel_ofs_to_zip64_ecdr);
        MZ_WRITE_LE32(hdr + MZ_ZIP64_ECDL_TOTAL_NUMBER_OF_DISKS_OFS, 1);
        if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE) != MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE)
            return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

        pZip->m_archive_size += MZ_ZIP64_END_OF_CENTRAL_DIR_LOCATOR_SIZE;
    }

    /* Write end of central directory record */
    MZ_CLEAR_OBJ(hdr);
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_SIG_OFS, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIG);
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_NUM_ENTRIES_ON_DISK_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
    MZ_WRITE_LE16(hdr + MZ_ZIP_ECDH_CDIR_TOTAL_ENTRIES_OFS, MZ_MIN(MZ_UINT16_MAX, pZip->m_total_files));
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_SIZE_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_size));
    MZ_WRITE_LE32(hdr + MZ_ZIP_ECDH_CDIR_OFS_OFS, MZ_MIN(MZ_UINT32_MAX, central_dir_ofs));

    if (pZip->m_pWrite(pZip->m_pIO_opaque, pZip->m_archive_size, hdr, MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE) != MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE)
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_WRITE_FAILED);

#ifndef MINIZ_NO_STDIO
    if ((pState->m_pFile) && (MZ_FFLUSH(pState->m_pFile) == EOF))
        return mz_zip_set_error(pZip, MZ_ZIP_FILE_CLOSE_FAILED);
#endif /* #ifndef MINIZ_NO_STDIO */

    pZip->m_archive_size += MZ_ZIP_END_OF_CENTRAL_DIR_HEADER_SIZE;

    pZip->m_zip_mode = MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED;
    return MZ_TRUE;
}

mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize)
{
    if ((!ppBuf) || (!pSize))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    *ppBuf = NULL;
    *pSize = 0;

    if ((!pZip) || (!pZip->m_pState))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (pZip->m_pWrite != mz_zip_heap_write_func)
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    if (!mz_zip_writer_finalize_archive(pZip))
        return MZ_FALSE;

    *ppBuf = pZip->m_pState->m_pMem;
    *pSize = pZip->m_pState->m_mem_size;
    pZip->m_pState->m_pMem = NULL;
    pZip->m_pState->m_mem_size = pZip->m_pState->m_mem_capacity = 0;

    return MZ_TRUE;
}
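
/* Illustrative sketch (not part of miniz): building an archive entirely in
 * memory and taking ownership of the buffer via
 * mz_zip_writer_finalize_heap_archive(). The mz_zip_writer_init_heap() and
 * mz_free() calls, and their argument meanings, are assumptions not shown in
 * this excerpt. Kept under #if 0 so it is never compiled in. */
#if 0
static mz_bool example_heap_archive(void **ppBuf, size_t *pSize)
{
    mz_zip_archive zip;
    mz_zip_zero_struct(&zip);

    /* Assumed signature: (archive, size to reserve at start, initial allocation). */
    if (!mz_zip_writer_init_heap(&zip, 0, 0))
        return MZ_FALSE;

    if (!mz_zip_writer_add_mem_ex(&zip, "hello.txt", "hi", 2, NULL, 0, MZ_DEFAULT_LEVEL, 0, 0) ||
        !mz_zip_writer_finalize_heap_archive(&zip, ppBuf, pSize))
    {
        mz_zip_writer_end(&zip);
        return MZ_FALSE;
    }

    /* The caller now owns *ppBuf and is assumed to release it with mz_free(). */
    return mz_zip_writer_end(&zip);
}
#endif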

mz_bool mz_zip_writer_end(mz_zip_archive *pZip)
{
    return mz_zip_writer_end_internal(pZip, MZ_TRUE);
}

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags)
{
    return mz_zip_add_mem_to_archive_file_in_place_v2(pZip_filename, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, NULL);
}

mz_bool mz_zip_add_mem_to_archive_file_in_place_v2(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr)
{
    mz_bool status, created_new_archive = MZ_FALSE;
    mz_zip_archive zip_archive;
    struct MZ_FILE_STAT_STRUCT file_stat;
    mz_zip_error actual_err = MZ_ZIP_NO_ERROR;

    mz_zip_zero_struct(&zip_archive);
    if ((int)level_and_flags < 0)
        level_and_flags = MZ_DEFAULT_LEVEL;

    if ((!pZip_filename) || (!pArchive_name) || ((buf_size) && (!pBuf)) || ((comment_size) && (!pComment)) || ((level_and_flags & 0xF) > MZ_UBER_COMPRESSION))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;
        return MZ_FALSE;
    }

    if (!mz_zip_writer_validate_archive_name(pArchive_name))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_FILENAME;
        return MZ_FALSE;
    }

    /* Important: The regular non-64 bit version of stat() can fail here if the file is very large, which could cause the archive to be overwritten. */
    /* So be sure to compile with _LARGEFILE64_SOURCE 1 */
    if (MZ_FILE_STAT(pZip_filename, &file_stat) != 0)
    {
        /* Create a new archive. */
        if (!mz_zip_writer_init_file_v2(&zip_archive, pZip_filename, 0, level_and_flags))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;
            return MZ_FALSE;
        }

        created_new_archive = MZ_TRUE;
    }
    else
    {
        /* Append to an existing archive. */
        if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, level_and_flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;
            return MZ_FALSE;
        }

        if (!mz_zip_writer_init_from_reader_v2(&zip_archive, pZip_filename, level_and_flags))
        {
            if (pErr)
                *pErr = zip_archive.m_last_error;

            mz_zip_reader_end_internal(&zip_archive, MZ_FALSE);

            return MZ_FALSE;
        }
    }

    status = mz_zip_writer_add_mem_ex(&zip_archive, pArchive_name, pBuf, buf_size, pComment, comment_size, level_and_flags, 0, 0);
    actual_err = zip_archive.m_last_error;

    /* Always finalize, even if adding failed for some reason, so we have a valid central directory. (This may not always succeed, but we can try.) */
    if (!mz_zip_writer_finalize_archive(&zip_archive))
    {
        if (!actual_err)
            actual_err = zip_archive.m_last_error;

        status = MZ_FALSE;
    }

    if (!mz_zip_writer_end_internal(&zip_archive, status))
    {
        if (!actual_err)
            actual_err = zip_archive.m_last_error;

        status = MZ_FALSE;
    }

    if ((!status) && (created_new_archive))
    {
        /* It's a new archive and something went wrong, so just delete it. */
        int ignoredStatus = MZ_DELETE_FILE(pZip_filename);
        (void)ignoredStatus;
    }

    if (pErr)
        *pErr = actual_err;

    return status;
}
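
/* Illustrative sketch (not part of miniz): the one-call helper above either
 * creates the archive or appends to an existing one. The archive, entry and
 * comment strings are placeholders. Kept under #if 0 so it is never compiled
 * in. */
#if 0
static mz_bool example_in_place_add(void)
{
    const char *text = "hello from miniz";
    const char *comment = "added in place";
    mz_zip_error err = MZ_ZIP_NO_ERROR;

    if (!mz_zip_add_mem_to_archive_file_in_place_v2(
            "notes.zip", "hello.txt", text, strlen(text),
            comment, (mz_uint16)strlen(comment), MZ_DEFAULT_LEVEL, &err))
    {
        /* err holds the reason, see mz_zip_get_error_string(). */
        return MZ_FALSE;
    }
    return MZ_TRUE;
}
#endif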

void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr)
{
    mz_uint32 file_index;
    mz_zip_archive zip_archive;
    void *p = NULL;

    if (pSize)
        *pSize = 0;

    if ((!pZip_filename) || (!pArchive_name))
    {
        if (pErr)
            *pErr = MZ_ZIP_INVALID_PARAMETER;

        return NULL;
    }

    mz_zip_zero_struct(&zip_archive);
    if (!mz_zip_reader_init_file_v2(&zip_archive, pZip_filename, flags | MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY, 0, 0))
    {
        if (pErr)
            *pErr = zip_archive.m_last_error;

        return NULL;
    }

    if (mz_zip_reader_locate_file_v2(&zip_archive, pArchive_name, pComment, flags, &file_index))
    {
        p = mz_zip_reader_extract_to_heap(&zip_archive, file_index, pSize, flags);
    }

    mz_zip_reader_end_internal(&zip_archive, p != NULL);

    if (pErr)
        *pErr = zip_archive.m_last_error;

    return p;
}

void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags)
{
    return mz_zip_extract_archive_file_to_heap_v2(pZip_filename, pArchive_name, NULL, pSize, flags, NULL);
}
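
/* Illustrative sketch (not part of miniz): pulling one member out of an
 * archive with mz_zip_extract_archive_file_to_heap(). The names are
 * placeholders, and the returned buffer is assumed to be released with
 * miniz's mz_free(), which is not shown in this excerpt. Kept under #if 0 so
 * it is never compiled in. */
#if 0
static void example_extract_to_heap(void)
{
    size_t size = 0;
    void *p = mz_zip_extract_archive_file_to_heap("example.zip", "data.bin", &size, 0);
    if (p)
    {
        /* ... use the 'size' bytes at 'p' ... */
        mz_free(p);
    }
}
#endif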

#endif /* #ifndef MINIZ_NO_STDIO */

#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */

/* ------------------- Misc utils */

mz_zip_mode mz_zip_get_mode(mz_zip_archive *pZip)
{
    return pZip ? pZip->m_zip_mode : MZ_ZIP_MODE_INVALID;
}

mz_zip_type mz_zip_get_type(mz_zip_archive *pZip)
{
    return pZip ? pZip->m_zip_type : MZ_ZIP_TYPE_INVALID;
}

mz_zip_error mz_zip_set_last_error(mz_zip_archive *pZip, mz_zip_error err_num)
{
    mz_zip_error prev_err;

    if (!pZip)
        return MZ_ZIP_INVALID_PARAMETER;

    prev_err = pZip->m_last_error;

    pZip->m_last_error = err_num;
    return prev_err;
}

mz_zip_error mz_zip_peek_last_error(mz_zip_archive *pZip)
{
    if (!pZip)
        return MZ_ZIP_INVALID_PARAMETER;

    return pZip->m_last_error;
}

mz_zip_error mz_zip_clear_last_error(mz_zip_archive *pZip)
{
    return mz_zip_set_last_error(pZip, MZ_ZIP_NO_ERROR);
}

mz_zip_error mz_zip_get_last_error(mz_zip_archive *pZip)
{
    mz_zip_error prev_err;

    if (!pZip)
        return MZ_ZIP_INVALID_PARAMETER;

    prev_err = pZip->m_last_error;

    pZip->m_last_error = MZ_ZIP_NO_ERROR;
    return prev_err;
}

const char *mz_zip_get_error_string(mz_zip_error mz_err)
{
    switch (mz_err)
    {
        case MZ_ZIP_NO_ERROR:
            return "no error";
        case MZ_ZIP_UNDEFINED_ERROR:
            return "undefined error";
        case MZ_ZIP_TOO_MANY_FILES:
            return "too many files";
        case MZ_ZIP_FILE_TOO_LARGE:
            return "file too large";
        case MZ_ZIP_UNSUPPORTED_METHOD:
            return "unsupported method";
        case MZ_ZIP_UNSUPPORTED_ENCRYPTION:
            return "unsupported encryption";
        case MZ_ZIP_UNSUPPORTED_FEATURE:
            return "unsupported feature";
        case MZ_ZIP_FAILED_FINDING_CENTRAL_DIR:
            return "failed finding central directory";
        case MZ_ZIP_NOT_AN_ARCHIVE:
            return "not a ZIP archive";
        case MZ_ZIP_INVALID_HEADER_OR_CORRUPTED:
            return "invalid header or archive is corrupted";
        case MZ_ZIP_UNSUPPORTED_MULTIDISK:
            return "unsupported multidisk archive";
        case MZ_ZIP_DECOMPRESSION_FAILED:
            return "decompression failed or archive is corrupted";
        case MZ_ZIP_COMPRESSION_FAILED:
            return "compression failed";
        case MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE:
            return "unexpected decompressed size";
        case MZ_ZIP_CRC_CHECK_FAILED:
            return "CRC-32 check failed";
        case MZ_ZIP_UNSUPPORTED_CDIR_SIZE:
            return "unsupported central directory size";
        case MZ_ZIP_ALLOC_FAILED:
            return "allocation failed";
        case MZ_ZIP_FILE_OPEN_FAILED:
            return "file open failed";
        case MZ_ZIP_FILE_CREATE_FAILED:
            return "file create failed";
        case MZ_ZIP_FILE_WRITE_FAILED:
            return "file write failed";
        case MZ_ZIP_FILE_READ_FAILED:
            return "file read failed";
        case MZ_ZIP_FILE_CLOSE_FAILED:
            return "file close failed";
        case MZ_ZIP_FILE_SEEK_FAILED:
            return "file seek failed";
        case MZ_ZIP_FILE_STAT_FAILED:
            return "file stat failed";
        case MZ_ZIP_INVALID_PARAMETER:
            return "invalid parameter";
        case MZ_ZIP_INVALID_FILENAME:
            return "invalid filename";
        case MZ_ZIP_BUF_TOO_SMALL:
            return "buffer too small";
        case MZ_ZIP_INTERNAL_ERROR:
            return "internal error";
        case MZ_ZIP_FILE_NOT_FOUND:
            return "file not found";
        case MZ_ZIP_ARCHIVE_TOO_LARGE:
            return "archive is too large";
        case MZ_ZIP_VALIDATION_FAILED:
            return "validation failed";
        case MZ_ZIP_WRITE_CALLBACK_FAILED:
            return "write calledback failed";
        default:
            break;
    }

    return "unknown error";
}
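
/* Illustrative sketch (not part of miniz): the usual reporting pattern built
 * on the accessors above; peek is used so the stored error is not cleared.
 * The fprintf destination is just an example. */
#if 0
static void example_report_zip_error(const char *what, mz_zip_archive *pZip)
{
    mz_zip_error err = mz_zip_peek_last_error(pZip);
    if (err != MZ_ZIP_NO_ERROR)
        fprintf(stderr, "%s: %s\n", what, mz_zip_get_error_string(err));
}
#endif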

/* Note: Just because the archive is not zip64 doesn't necessarily mean it doesn't have a Zip64 extended information extra field, argh. */
mz_bool mz_zip_is_zip64(mz_zip_archive *pZip)
{
    if ((!pZip) || (!pZip->m_pState))
        return MZ_FALSE;

    return pZip->m_pState->m_zip64;
}

size_t mz_zip_get_central_dir_size(mz_zip_archive *pZip)
{
    if ((!pZip) || (!pZip->m_pState))
        return 0;

    return pZip->m_pState->m_central_dir.m_size;
}

mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip)
{
    return pZip ? pZip->m_total_files : 0;
}

mz_uint64 mz_zip_get_archive_size(mz_zip_archive *pZip)
{
    if (!pZip)
        return 0;
    return pZip->m_archive_size;
}

mz_uint64 mz_zip_get_archive_file_start_offset(mz_zip_archive *pZip)
{
    if ((!pZip) || (!pZip->m_pState))
        return 0;
    return pZip->m_pState->m_file_archive_start_ofs;
}

MZ_FILE *mz_zip_get_cfile(mz_zip_archive *pZip)
{
    if ((!pZip) || (!pZip->m_pState))
        return 0;
    return pZip->m_pState->m_pFile;
}

size_t mz_zip_read_archive_data(mz_zip_archive *pZip, mz_uint64 file_ofs, void *pBuf, size_t n)
{
    if ((!pZip) || (!pZip->m_pState) || (!pBuf) || (!pZip->m_pRead))
        return mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);

    return pZip->m_pRead(pZip->m_pIO_opaque, file_ofs, pBuf, n);
}

mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size)
{
    mz_uint n;
    const mz_uint8 *p = mz_zip_get_cdh(pZip, file_index);
    if (!p)
    {
        if (filename_buf_size)
            pFilename[0] = '\0';
        mz_zip_set_error(pZip, MZ_ZIP_INVALID_PARAMETER);
        return 0;
    }
    n = MZ_READ_LE16(p + MZ_ZIP_CDH_FILENAME_LEN_OFS);
    if (filename_buf_size)
    {
        n = MZ_MIN(n, filename_buf_size - 1);
        memcpy(pFilename, p + MZ_ZIP_CENTRAL_DIR_HEADER_SIZE, n);
        pFilename[n] = '\0';
    }
    return n + 1;
}

mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat)
{
    return mz_zip_file_stat_internal(pZip, file_index, mz_zip_get_cdh(pZip, file_index), pStat, NULL);
}

mz_bool mz_zip_end(mz_zip_archive *pZip)
{
    if (!pZip)
        return MZ_FALSE;

    if (pZip->m_zip_mode == MZ_ZIP_MODE_READING)
        return mz_zip_reader_end(pZip);
#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS
    else if ((pZip->m_zip_mode == MZ_ZIP_MODE_WRITING) || (pZip->m_zip_mode == MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED))
        return mz_zip_writer_end(pZip);
#endif

    return MZ_FALSE;
}

#ifdef __cplusplus
}
#endif

#endif /*#ifndef MINIZ_NO_ARCHIVE_APIS*/
recoll-1.26.3/utils/chrono.cpp
/* Copyright (C) 2014 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_CHRONO
#include "autoconfig.h"

#include <time.h>
#include <stdio.h>

#include "chrono.h"

using namespace std;

#ifndef CLOCK_REALTIME
typedef int clockid_t;
#define CLOCK_REALTIME 1
#endif


#define SECONDS(TS1, TS2)                             \
    (float((TS2).tv_sec - (TS1).tv_sec) +             \
     float((TS2).tv_nsec - (TS1).tv_nsec) * 1e-9)

#define MILLIS(TS1, TS2)                                        \
    ((long long)((TS2).tv_sec - (TS1).tv_sec) * 1000LL +        \
     ((TS2).tv_nsec - (TS1).tv_nsec) / 1000000)

#define MICROS(TS1, TS2)                                          \
    ((long long)((TS2).tv_sec - (TS1).tv_sec) * 1000000LL +       \
     ((TS2).tv_nsec - (TS1).tv_nsec) / 1000)

#define NANOS(TS1, TS2)                                           \
    ((long long)((TS2).tv_sec - (TS1).tv_sec) * 1000000000LL +    \
     ((TS2).tv_nsec - (TS1).tv_nsec))



// Using clock_gettime() is nice because it gives us ns resolution and it helps
// with computing per-thread work times, but it is also a pain because it forces
// linking with -lrt. So keep it non-default, for special development use only.
// #define USE_CLOCK_GETTIME

// And we won't bother with clock_gettime() on these.
#if defined(__APPLE__) || defined(_WIN32)
#undef USE_CLOCK_GETTIME
#endif

#ifdef _MSC_VER
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#include <stdint.h> // portable: uint64_t   MSVC: __int64

// MSVC defines this in winsock2.h!?
typedef struct timeval {
    long tv_sec;
    long tv_usec;
} timeval;

int gettimeofday(struct timeval * tp, struct timezone * tzp)
{
    // Note: some broken versions only have 8 trailing zeros; the
    // correct epoch has 9 trailing zeros.
    static const uint64_t EPOCH = ((uint64_t) 116444736000000000ULL);

    SYSTEMTIME  system_time;
    FILETIME    file_time;
    uint64_t    time;

    GetSystemTime( &system_time );
    SystemTimeToFileTime( &system_time, &file_time );
    time =  ((uint64_t)file_time.dwLowDateTime )      ;
    time += ((uint64_t)file_time.dwHighDateTime) << 32;

    tp->tv_sec  = (long) ((time - EPOCH) / 10000000L);
    tp->tv_usec = (long) (system_time.wMilliseconds * 1000);
    return 0;
}
#else // -> Not _WIN32
#ifndef USE_CLOCK_GETTIME
// Using gettimeofday then, needs struct timeval
#include <sys/time.h>
#endif
#endif



// We use gettimeofday instead of clock_gettime for now and get only
// uS resolution, because clock_gettime is more configuration trouble
// than it's worth
static void gettime(int
#ifdef USE_CLOCK_GETTIME
                    clk_id
#endif
                    , Chrono::TimeSpec *ts)
{
#ifdef USE_CLOCK_GETTIME
    struct timespec mts;
    clock_gettime(clk_id, &mts);
    ts->tv_sec = mts.tv_sec;
    ts->tv_nsec = mts.tv_nsec;
#else
    struct timeval tv;
    gettimeofday(&tv, 0);
    ts->tv_sec = tv.tv_sec;
    ts->tv_nsec = tv.tv_usec * 1000;
#endif
}
///// End system interface

// Note: this is not protected against multithreaded access and is not
// reentrant, but it is mostly debug code: it won't crash, it will just
// show bad results. Also, the frozen mode is not used that much.
Chrono::TimeSpec Chrono::o_now;

void Chrono::refnow()
{
    gettime(CLOCK_REALTIME, &o_now);
}

Chrono::Chrono()
{
    restart();
}

// Reset, and return the value before the reset, in milliseconds
long Chrono::restart()
{
    TimeSpec now;
    gettime(CLOCK_REALTIME, &now);
    long ret = MILLIS(m_orig, now);
    m_orig = now;
    return ret;
}

long Chrono::urestart()
{
    TimeSpec now;
    gettime(CLOCK_REALTIME, &now);
    long ret = MICROS(m_orig, now);
    m_orig = now;
    return ret;
}

// Get current timer value, milliseconds
long Chrono::millis(bool frozen)
{
    if (frozen) {
        return MILLIS(m_orig, o_now);
    } else {
        TimeSpec now;
        gettime(CLOCK_REALTIME, &now);
        return MILLIS(m_orig, now);
    }
}

//
long Chrono::micros(bool frozen)
{
    if (frozen) {
        return MICROS(m_orig, o_now);
    } else {
        TimeSpec now;
        gettime(CLOCK_REALTIME, &now);
        return MICROS(m_orig, now);
    }
}

long long Chrono::amicros() const
{
    TimeSpec ts;
    ts.tv_sec = 0;
    ts.tv_nsec = 0;
    return MICROS(ts, m_orig);
}

//
long long Chrono::nanos(bool frozen)
{
    if (frozen) {
        return NANOS(m_orig, o_now);
    } else {
        TimeSpec now;
        gettime(CLOCK_REALTIME, &now);
        return NANOS(m_orig, now);
    }
}

float Chrono::secs(bool frozen)
{
    if (frozen) {
        return SECONDS(m_orig, o_now);
    } else {
        TimeSpec now;
        gettime(CLOCK_REALTIME, &now);
        return SECONDS(m_orig, now);
    }
}
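
// Illustrative sketch (not part of the library): typical Chrono usage,
// including the "frozen" mode, which reads several timers against the same
// reference instant captured by refnow(). Variable names are just examples.
//
//     Chrono clk;                  // starts timing at construction
//     do_some_work();
//     long ms = clk.millis();      // elapsed milliseconds so far
//     clk.restart();               // start a new interval
//
//     clk.refnow();                // freeze the shared reference instant
//     long a = clk.millis(true);   // both reads use the same frozen "now",
//     long long b = clk.micros(true); // so they are mutually consistent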

#else

///////////////////// test driver


#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <unistd.h>

#include <iostream>

#include "chrono.h"

using namespace std;

static char *thisprog;
static void
Usage(void)
{
    fprintf(stderr, "Usage : %s \n", thisprog);
    exit(1);
}

Chrono achrono;
Chrono rchrono;

void
showsecs(long msecs)
{
    fprintf(stderr, "%3.5f S", ((float)msecs) / 1000.0);
}

void
sigint(int sig)
{
    signal(SIGINT, sigint);
    signal(SIGQUIT, sigint);

    fprintf(stderr, "Absolute interval: ");
    showsecs(achrono.millis());
    fprintf(stderr, ". Relative interval: ");
    showsecs(rchrono.restart());
    cerr <<  " Abs micros: " << achrono.amicros() <<
        " Relabs micros: " << rchrono.amicros() - 1430477861905884LL
         << endl;
    fprintf(stderr, ".\n");
    if (sig == SIGQUIT) {
        exit(0);
    }
}

int main(int argc, char **argv)
{

    thisprog = argv[0];
    argc--;
    argv++;

    if (argc != 0) {
        Usage();
    }

    for (int i = 0; i < 50000000; i++);

    cerr << "Start secs: " << achrono.secs() << endl;

    fprintf(stderr, "Type ^C for intermediate result, ^\\ to stop\n");
    signal(SIGINT, sigint);
    signal(SIGQUIT, sigint);
    achrono.restart();
    rchrono.restart();
    while (1) {
        pause();
    }
}

#endif /*TEST_CHRONO*/
recoll-1.26.3/utils/strmatcher.h
/* Copyright (C) 2012 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _STRMATCHER_H_INCLUDED_
#define _STRMATCHER_H_INCLUDED_

#include <string>
#include "smallut.h"

// Encapsulating simple wildcard/regexp string matching.

// Matcher class. Interface to either wildcard or regexp yes/no matcher
class StrMatcher {
public:
    StrMatcher(const std::string& exp) 
        : m_sexp(exp) {}
    virtual ~StrMatcher() {};
    virtual bool match(const std::string &val) const = 0;
    virtual std::string::size_type baseprefixlen() const = 0;
    virtual bool setExp(const std::string& newexp) {
	m_sexp = newexp;
	return true;
    }
    virtual bool ok() const {
	return true;
    }
    virtual const std::string& exp() const {
	return m_sexp;
    }
    virtual StrMatcher *clone() const = 0;
    const std::string& getreason() const {
	return m_reason;
    }
protected:
    std::string m_sexp;
    std::string m_reason;
};

class StrWildMatcher : public StrMatcher {
public:
    StrWildMatcher(const std::string& exp)
        : StrMatcher(exp) {}
    virtual ~StrWildMatcher() {}
    virtual bool match(const std::string& val) const override;
    virtual std::string::size_type baseprefixlen() const override;
    virtual StrWildMatcher *clone() const override {
	return new StrWildMatcher(m_sexp);
    }
};

class StrRegexpMatcher : public StrMatcher {
public:
    StrRegexpMatcher(const std::string& exp);
    virtual bool setExp(const std::string& newexp) override;
    virtual ~StrRegexpMatcher() {};
    virtual bool match(const std::string& val) const override;
    virtual std::string::size_type baseprefixlen() const override;
    virtual bool ok() const override;
    virtual StrRegexpMatcher *clone() const override {
	return new StrRegexpMatcher(m_sexp);
    }
private:
    SimpleRegexp m_re;
};
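
// Usage sketch (illustrative): yes/no matching through the common
// StrMatcher interface. The pattern and file name are made-up examples;
// wildcard/regexp semantics are as described in the comment at the top.
//
//     StrMatcher *m = new StrWildMatcher("*.cpp");
//     if (m->ok() && m->match("fstreewalk.cpp")) {
//         // matched
//     }
//     delete m;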

#endif /* _STRMATCHER_H_INCLUDED_ */
recoll-1.26.3/utils/fstreewalk.cpp0000644000175000017500000004160213540204634014023 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "autoconfig.h"

#include <stdio.h>
#include <errno.h>
#include <fnmatch.h>
#include <cstring>
#include "safesysstat.h"
#include <dirent.h>
#include <algorithm>

#include <sstream>
#include <vector>
#include <deque>
#include <set>

#include "cstr.h"
#include "log.h"
#include "pathut.h"
#include "fstreewalk.h"
#include "transcode.h"

using namespace std;

bool FsTreeWalker::o_useFnmPathname = true;
string FsTreeWalker::o_nowalkfn;

const int FsTreeWalker::FtwTravMask = FtwTravNatural|
    FtwTravBreadth|FtwTravFilesThenDirs|FtwTravBreadthThenDepth;

#ifndef _WIN32
// dev/ino means nothing on Windows. It seems that FileId could replace it
// but we only use this for cycle detection which we just disable.
class DirId {
public:
    dev_t dev;
    ino_t ino;
    DirId(dev_t d, ino_t i) : dev(d), ino(i) {}
    bool operator<(const DirId& r) const {
        return dev < r.dev || (dev == r.dev && ino < r.ino);
    }
};
#endif

class FsTreeWalker::Internal {
public:
    Internal(int opts)
        : options(opts), depthswitch(4), maxdepth(-1), errors(0) {
    }
    int options;
    int depthswitch;
    int maxdepth;
    int basedepth;
    stringstream reason;
    vector<string> skippedNames;
    vector<string> onlyNames;
    vector<string> skippedPaths;
    // When doing Breadth or FilesThenDirs traversal, we keep a list
    // of directory paths to be processed, and we do not recurse.
    deque<string> dirs;
    int errors;
#ifndef _WIN32
    set<DirId> donedirs;
#endif
    void logsyserr(const char *call, const string &param) {
        errors++;
        reason << call << "(" << param << ") : " << errno << " : " << 
            strerror(errno) << endl;
    }
};

FsTreeWalker::FsTreeWalker(int opts)
{
    data = new Internal(opts);
}

FsTreeWalker::~FsTreeWalker()
{
    delete data;
}

void FsTreeWalker::setOpts(int opts)
{
    if (data) {
        data->options = opts;
    }
}
int FsTreeWalker::getOpts()
{
    if (data) {
        return data->options;
    } else {
        return 0;
    }
}
void FsTreeWalker::setDepthSwitch(int ds)
{
    if (data) {
        data->depthswitch = ds;
    }
}
void FsTreeWalker::setMaxDepth(int md)
{
    if (data) {
        data->maxdepth = md;
    }
}

string FsTreeWalker::getReason()
{
    string reason = data->reason.str();
    data->reason.str(string());
    data->errors = 0;
    return reason;
}

int FsTreeWalker::getErrCnt()
{
    return data->errors;
}

bool FsTreeWalker::addSkippedName(const string& pattern)
{
    if (find(data->skippedNames.begin(), 
             data->skippedNames.end(), pattern) == data->skippedNames.end())
        data->skippedNames.push_back(pattern);
    return true;
}
bool FsTreeWalker::setSkippedNames(const vector<string> &patterns)
{
    data->skippedNames = patterns;
    return true;
}
bool FsTreeWalker::inSkippedNames(const string& name)
{
    for (const auto& pattern : data->skippedNames) {
        if (fnmatch(pattern.c_str(), name.c_str(), 0) == 0) {
            return true;
        }
    }
    return false;
}
bool FsTreeWalker::setOnlyNames(const vector<string> &patterns)
{
    data->onlyNames = patterns;
    return true;
}
bool FsTreeWalker::inOnlyNames(const string& name)
{
    if (data->onlyNames.empty()) {
        // Not set: all match
        return true;
    }
    for (const auto& pattern : data->onlyNames) {
        if (fnmatch(pattern.c_str(), name.c_str(), 0) == 0) {
            return true;
        }
    }
    return false;
}

bool FsTreeWalker::addSkippedPath(const string& ipath)
{
    string path = (data->options & FtwNoCanon) ? ipath : path_canon(ipath);
    if (find(data->skippedPaths.begin(), 
             data->skippedPaths.end(), path) == data->skippedPaths.end())
        data->skippedPaths.push_back(path);
    return true;
}
bool FsTreeWalker::setSkippedPaths(const vector<string> &paths)
{
    data->skippedPaths = paths;
    for (vector<string>::iterator it = data->skippedPaths.begin();
         it != data->skippedPaths.end(); it++)
        if (!(data->options & FtwNoCanon))
            *it = path_canon(*it);
    return true;
}
bool FsTreeWalker::inSkippedPaths(const string& path, bool ckparents)
{
    int fnmflags = o_useFnmPathname ? FNM_PATHNAME : 0;
#ifdef FNM_LEADING_DIR
    if (ckparents)
        fnmflags |= FNM_LEADING_DIR;
#endif

    for (vector<string>::const_iterator it = data->skippedPaths.begin(); 
         it != data->skippedPaths.end(); it++) {
#ifndef FNM_LEADING_DIR
        if (ckparents) {
            string mpath = path;
            while (mpath.length() > 2) {
                if (fnmatch(it->c_str(), mpath.c_str(), fnmflags) == 0) 
                    return true;
                mpath = path_getfather(mpath);
            }
        } else 
#endif /* FNM_LEADING_DIR */
            if (fnmatch(it->c_str(), path.c_str(), fnmflags) == 0) {
                return true;
            }
    }
    return false;
}

static inline int slashcount(const string& p)
{
    int n = 0;
    for (unsigned int i = 0; i < p.size(); i++)
        if (p[i] == '/')
            n++;
    return n;
}

FsTreeWalker::Status FsTreeWalker::walk(const string& _top, 
                                        FsTreeWalkerCB& cb)
{
    string top = (data->options & FtwNoCanon) ? _top : path_canon(_top);

    if ((data->options & FtwTravMask) == 0) {
        data->options |= FtwTravNatural;
    }

    data->basedepth = slashcount(top); // Only used for breadthxx
    struct stat st;
    // We always follow symlinks at this point. Makes more sense.
    if (path_fileprops(top, &st) == -1) {
        // Note that we do not return an error if the stat call
        // fails. A temp file may have gone away.
        data->logsyserr("stat", top);
        return errno == ENOENT ? FtwOk : FtwError;
    }

    // Recursive version, using the call stack to store state. iwalk
    // will process files and recursively descend into subdirs in
    // physical order of the current directory.
    if ((data->options & FtwTravMask) == FtwTravNatural) {
        return iwalk(top, &st, cb);
    }

    // Breadth first or filesThenDirs semi-depth first order.
    // Managing queues of directories to be visited later, in breadth or
    // depth order. Null markers are inserted in the queue to indicate
    // father directory changes (avoids computing parents all the time).
    data->dirs.push_back(top);
    Status status;
    while (!data->dirs.empty()) {
        string dir, nfather;
        if (data->options & (FtwTravBreadth|FtwTravBreadthThenDepth)) {
            // Breadth first, pop and process an older dir at the
            // front of the queue. This will add any child dirs at the
            // back
            dir = data->dirs.front();
            data->dirs.pop_front();
            if (dir.empty()) {
                // Father change marker. 
                if (data->dirs.empty())
                    break;
                dir = data->dirs.front();
                data->dirs.pop_front();
                nfather = path_getfather(dir);
                if (data->options & FtwTravBreadthThenDepth) {
                    // Check if new depth warrants switch to depth first
                    // traversal (will happen on next loop iteration).
                    int curdepth = slashcount(dir) - data->basedepth;
                    if (curdepth >= data->depthswitch) {
                        //fprintf(stderr, "SWITCHING TO DEPTH FIRST\n");
                        data->options &= ~FtwTravMask;
                        data->options |= FtwTravFilesThenDirs;
                    }
                }
            }
        } else {
            // Depth first, pop and process latest dir
            dir = data->dirs.back();
            data->dirs.pop_back();
            if (dir.empty()) {
                // Father change marker. 
                if (data->dirs.empty())
                    break;
                dir = data->dirs.back();
                data->dirs.pop_back();
                nfather = path_getfather(dir);
            }
        }

        // If changing parent directory, advise our user.
        if (!nfather.empty()) {
            if (path_fileprops(nfather, &st) == -1) {
                data->logsyserr("stat", nfather);
                return errno == ENOENT ? FtwOk : FtwError;
            }
            if ((status = cb.processone(nfather, &st, FtwDirReturn)) & 
                (FtwStop|FtwError)) {
                return status;
            }
        }

        if (path_fileprops(dir, &st) == -1) {
            data->logsyserr("stat", dir);
            return errno == ENOENT ? FtwOk : FtwError;
        }
        // iwalk will not recurse in this case, just process file entries
        // and append subdir entries to the queue.
        status = iwalk(dir, &st, cb);
        if (status != FtwOk)
            return status;
    }
    return FtwOk;
}

#ifdef _WIN32
#define DIRENT _wdirent
#define DIRHDL _WDIR
#define OPENDIR _wopendir
#define CLOSEDIR _wclosedir
#define READDIR _wreaddir
#else
#define DIRENT dirent
#define DIRHDL DIR
#define OPENDIR opendir
#define CLOSEDIR closedir
#define READDIR readdir
#endif

// Note that the 'norecurse' flag is handled as part of the directory read. 
// This means that we always go into the top 'walk()' parameter if it is a 
// directory, even if norecurse is set. Bug or Feature ?
FsTreeWalker::Status FsTreeWalker::iwalk(const string &top, 
                                         struct stat *stp,
                                         FsTreeWalkerCB& cb)
{
    Status status = FtwOk;
    bool nullpush = false;

    // Tell user to process the top entry itself
    if (S_ISDIR(stp->st_mode)) {
        if ((status = cb.processone(top, stp, FtwDirEnter)) & 
            (FtwStop|FtwError)) {
            return status;
        }
    } else if (S_ISREG(stp->st_mode)) {
        return cb.processone(top, stp, FtwRegular);
    } else {
        return status;
    }

    int curdepth = slashcount(top) - data->basedepth;
    if (data->maxdepth >= 0 && curdepth >= data->maxdepth) {
        LOGDEB1("FsTreeWalker::iwalk: Maxdepth reached: ["  << (top) << "]\n" );
        return status;
    }

    // This is a directory, read it and process entries:

#ifndef _WIN32
    // Detect if directory already seen. This could just be several
    // symlinks pointing to the same place (if FtwFollow is set), it
    // could also be some other kind of cycle. In any case, there is
    // no point in entering again.
    // For now, we'll ignore the "other kind of cycle" part and only monitor
    // this if FtwFollow is set.
    if (data->options & FtwFollow) {
        DirId dirid(stp->st_dev, stp->st_ino);
        if (data->donedirs.find(dirid) != data->donedirs.end()) {
            LOGINFO("Not processing [" << top <<
                    "] (already seen as other path)\n");
            return status;
        }
        data->donedirs.insert(dirid);
    }
#endif

    SYSPATH(top, systop);
    DIRHDL *d = OPENDIR(systop);
    if (nullptr == d) {
        data->logsyserr("opendir", top);
#ifdef _WIN32
        int rc = GetLastError();
        LOGERR("opendir failed: LastError " << rc << endl);
        if (rc == ERROR_NETNAME_DELETED) {
            // 64: share disconnected.
            // Not too sure of the errno in this case.
            // Make sure it's not one of the permissible ones
            errno = ENODEV;
        }
#endif
        switch (errno) {
        case EPERM:
        case EACCES:
        case ENOENT:
#ifdef _WIN32
            // We get this quite a lot, don't know why. To be checked.
        case EINVAL:
#endif
            // No error set: indexing will continue in other directories
            goto out;
        default:
            status = FtwError;
            goto out;
        }
    }

    struct DIRENT *ent;
    while (errno = 0, ((ent = READDIR(d)) != 0)) {
        string fn;
        struct stat st;
#ifdef _WIN32
        string sdname;
        if (!wchartoutf8(ent->d_name, sdname)) {
            LOGERR("wchartoutf8 failed in " << top << endl);
            continue;
        }
        const char *dname = sdname.c_str();
#else
        const char *dname = ent->d_name;
#endif
        // Maybe skip dotfiles
        if ((data->options & FtwSkipDotFiles) && dname[0] == '.')
            continue;
        // Skip . and ..
        if (!strcmp(dname, ".") || !strcmp(dname, "..")) 
            continue;

        // Skipped file names match ?
        if (!data->skippedNames.empty()) {
            if (inSkippedNames(dname))
                continue;
        }
        fn = path_cat(top, dname);
        int statret =  path_fileprops(fn.c_str(), &st, data->options&FtwFollow);
        if (statret == -1) {
            data->logsyserr("stat", fn);
#ifdef _WIN32
            int rc = GetLastError();
            LOGERR("stat failed: LastError " << rc << endl);
            if (rc == ERROR_NETNAME_DELETED) {
                status = FtwError;
                goto out;
            }
#endif
            continue;
        }

        if (!data->skippedPaths.empty()) {
            // We do not check the ancestors. This means that you can have
            // a topdirs member under a skippedPath, to index a portion of
            // an ignored area. This is the way it had always worked, but
            // this was broken by 1.13.00 and the systematic use of 
            // FNM_LEADING_DIR
            if (inSkippedPaths(fn, false))
                continue;
        }

        if (S_ISDIR(st.st_mode)) {
            if (!o_nowalkfn.empty() && path_exists(path_cat(fn, o_nowalkfn))) {
                continue;
            }
            if (data->options & FtwNoRecurse) {
                status = cb.processone(fn, &st, FtwDirEnter);
            } else {
                if (data->options & FtwTravNatural) {
                    status = iwalk(fn, &st, cb);
                } else {
                    // If first subdir, push marker to separate
                    // from entries for other dir. This is to help
                    // with generating DirReturn callbacks
                    if (!nullpush) {
                        if (!data->dirs.empty() && 
                            !data->dirs.back().empty())
                            data->dirs.push_back(cstr_null);
                        nullpush = true;
                    }
                    data->dirs.push_back(fn);
                    continue;
                }
            }
            // Note: only recursive case gets here.
            if (status & (FtwStop|FtwError))
                goto out;
            if (!(data->options & FtwNoRecurse)) 
                if ((status = cb.processone(top, &st, FtwDirReturn)) 
                    & (FtwStop|FtwError))
                    goto out;
        } else if (S_ISREG(st.st_mode) || S_ISLNK(st.st_mode)) {
            // Filtering patterns match ?
            if (!data->onlyNames.empty()) {
                if (!inOnlyNames(dname))
                    continue;
            }
            if ((status = cb.processone(fn, &st, FtwRegular)) & 
                (FtwStop|FtwError)) {
                goto out;
            }
        }
        // We ignore other file types (devices etc...)
    } // readdir loop
    if (errno) {
        // Actual readdir error, not eof.
        data->logsyserr("readdir", top);
#ifdef _WIN32
        int rc = GetLastError();
        LOGERR("Readdir failed: LastError " << rc << endl);
        if (rc == ERROR_NETNAME_DELETED) {
            status = FtwError;
            goto out;
        }
#endif
    }

out:
    if (d)
        CLOSEDIR(d);
    return status;
}


int64_t fsTreeBytes(const string& topdir)
{
    class bytesCB : public FsTreeWalkerCB {
    public:
        FsTreeWalker::Status processone(const string &path, 
                                        const struct stat *st,
                                        FsTreeWalker::CbFlag flg) {
            if (flg == FsTreeWalker::FtwDirEnter ||
                flg == FsTreeWalker::FtwRegular) {
#ifdef _WIN32
                totalbytes += st->st_size;
#else
                totalbytes += st->st_blocks * 512;
#endif
            }
            return FsTreeWalker::FtwOk;
        }
        int64_t totalbytes{0};
    };
    FsTreeWalker walker;
    bytesCB cb;
    FsTreeWalker::Status status = walker.walk(topdir, cb);
    if (status != FsTreeWalker::FtwOk) {
        LOGERR("fsTreeBytes: walker failed: " << walker.getReason() << endl);
        return -1;
    }
    return cb.totalbytes;
}
recoll-1.26.3/utils/pathut.h0000644000175000017500000001531613533651561012640 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _PATHUT_H_INCLUDED_
#define _PATHUT_H_INCLUDED_

#include <string>
#include <vector>
#include <set>

// Must be called in main thread before starting other threads
extern void pathut_init_mt();

/// Add a / at the end if none there yet.
extern void path_catslash(std::string& s);
/// Concatenate 2 paths
extern std::string path_cat(const std::string& s1, const std::string& s2);
/// Get the simple file name (get rid of any directory path prefix)
extern std::string path_getsimple(const std::string& s);
/// Simple file name + optional suffix stripping
extern std::string path_basename(const std::string& s,
                                 const std::string& suff = std::string());
/// Component after last '.'
extern std::string path_suffix(const std::string& s);
/// Get the father directory
extern std::string path_getfather(const std::string& s);
/// Get the current user's home directory
extern std::string path_home();
/// Expand ~ at the beginning of std::string
extern std::string path_tildexpand(const std::string& s);
/// Use getcwd() to make absolute path if needed. Beware: ***this can fail***
/// we return an empty path in this case.
extern std::string path_absolute(const std::string& s);
/// Clean up path by removing duplicated / and resolving ../ + make it absolute
extern std::string path_canon(const std::string& s, const std::string *cwd = 0);
/// Use glob(3) to return the file names matching pattern inside dir
extern std::vector<std::string> path_dirglob(const std::string& dir,
        const std::string pattern);
/// Encode according to rfc 1738
extern std::string url_encode(const std::string& url,
                              std::string::size_type offs = 0);
extern std::string url_decode(const std::string& encoded);
//// Convert to file path if url is like file://. This modifies the
//// input (and returns a copy for convenience)
extern std::string fileurltolocalpath(std::string url);
/// Test for file:/// url
extern bool urlisfileurl(const std::string& url);
///
extern std::string url_parentfolder(const std::string& url);

/// Return the host+path part of an url. This is not a general
/// routine, it does the right thing only in the recoll context
extern std::string url_gpath(const std::string& url);

/// Stat parameter and check if it's a directory
extern bool path_isdir(const std::string& path);

/// Retrieve file size
extern long long path_filesize(const std::string& path);

/// Retrieve essential file attributes. This is used rather than a
/// bare stat() to ensure consistent use of the time fields (on
/// windows, we set ctime=mtime as ctime is actually the creation
/// time, for which we have no use).
/// Only st_mtime, st_ctime, st_size, st_mode (file type bits) are set on
/// all systems. st_dev and st_ino are set for special posix usage.
/// The rest is zeroed.
/// @ret 0 for success
struct stat;
extern int path_fileprops(const std::string path, struct stat *stp,
                          bool follow = true);

/// Check that path is traversable and last element exists
/// Returns true if last elt could be checked to exist. False may mean that
/// the file/dir does not exist or that an error occurred.
extern bool path_exists(const std::string& path);
/// Same but must be readable
extern bool path_readable(const std::string& path);

/// Return separator for PATH environment variable
extern std::string path_PATHsep();

#ifdef _WIN32
#define SYSPATH(PATH, SPATH) wchar_t PATH ## _buf[2048];      \
    utf8towchar(PATH, PATH ## _buf, 2048);                    \
    wchar_t *SPATH = PATH ## _buf;
#else
#define SYSPATH(PATH, SPATH) const char *SPATH = PATH.c_str()
#endif

/// Dump directory
extern bool readdir(const std::string& dir, std::string& reason,
                    std::set<std::string>& entries);

/** A small wrapper around statfs et al, to return percentage of disk
    occupation
    @param[output] pc percent occupied
    @param[output] avmbs Mbs available to non-superuser. Mb=1024*1024
*/
bool fsocc(const std::string& path, int *pc, long long *avmbs = 0);
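
// Usage sketch (illustrative), with a made-up path:
//     int pc = 0; long long avmbs = 0;
//     if (fsocc("/home/me/.recoll/xapiandb", &pc, &avmbs)) {
//         // pc: percentage occupied, avmbs: megabytes available
//     }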

/// mkdir -p
extern bool path_makepath(const std::string& path, int mode);

/// Where we create the user data subdirs
extern std::string path_homedata();
/// Test if path is absolute
extern bool path_isabsolute(const std::string& s);

/// Test if path is root (x:/). root is defined by root/.. == root
extern bool path_isroot(const std::string& p);

/// Test if sub is a subdirectory of top. This is a textual test,
/// links not allowed
extern bool path_isdesc(const std::string& top, const std::string& sub);

/// Turn absolute path into file:// url
extern std::string path_pathtofileurl(const std::string& path);

/// URI parser, loosely from rfc2396.txt
class ParsedUri {
public:
    ParsedUri(std::string uri);
    bool parsed{false};
    std::string scheme;
    std::string user;
    std::string pass;
    std::string host;
    std::string port;
    std::string path;
    std::string query;
    std::vector<std::pair<std::string, std::string>> parsedquery;
    std::string fragment;
};
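
// Usage sketch (illustrative), with a made-up URI:
//     ParsedUri uri("http://host.example.com:8080/docs/index.html?q=recoll#frag");
//     if (uri.parsed) {
//         // uri.scheme, uri.host, uri.port, uri.path, uri.query and
//         // uri.fragment hold the corresponding components.
//     }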

#ifdef _WIN32
/// Convert \ separators to /
void path_slashize(std::string& s);
void path_backslashize(std::string& s);
#include "safeunistd.h"
#else
#include <unistd.h>
#endif

/// Lock/pid file class. This is quite close to the pidfile_xxx
/// utilities in FreeBSD with a bit more encapsulation. I'd have used
/// the freebsd code if it was available elsewhere
class Pidfile {
public:
    Pidfile(const std::string& path)    : m_path(path), m_fd(-1) {}
    ~Pidfile();
    /// Open/create the pid file.
    /// @return 0 if ok, > 0 for pid of existing process, -1 for other error.
    pid_t open();
    /// Write pid into the pid file
    /// @return 0 ok, -1 error
    int write_pid();
    /// Close the pid file (unlocks)
    int close();
    /// Delete the pid file
    int remove();
    const std::string& getreason() {
        return m_reason;
    }
private:
    std::string m_path;
    int    m_fd;
    std::string m_reason;
    pid_t read_pid();
    int flopen();
};
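
// Usage sketch (illustrative), following the return conventions documented
// above. The lock file path is a made-up example.
//
//     Pidfile pidfile("/tmp/recollindex.pid");
//     pid_t other = pidfile.open();
//     if (other == 0) {
//         pidfile.write_pid();        // we hold the lock
//     } else if (other > 0) {
//         // another instance is already running with pid 'other'
//     } else {
//         // error, see pidfile.getreason()
//     }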

#endif /* _PATHUT_H_INCLUDED_ */
recoll-1.26.3/utils/cancelcheck.cpp0000644000175000017500000000162113533651561014103 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "cancelcheck.h"

CancelCheck& CancelCheck::instance()
{
    static CancelCheck ck;
    return ck;
}
recoll-1.26.3/utils/copyfile.cpp0000644000175000017500000001567213566424763013515 00000000000000/* Copyright (C) 2005 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_COPYFILE
#include "autoconfig.h"

#include <stdio.h>
#include <errno.h>
#include "safefcntl.h"
#include <string.h>
#include "safesysstat.h"
#include "safeunistd.h"
#ifndef _WIN32
#include <sys/time.h>
#define O_BINARY 0
#endif

#include <string>

#include "copyfile.h"
#include "log.h"

using namespace std;

#define CPBSIZ 8192

bool copyfile(const char *src, const char *dst, string &reason, int flags)
{
    int sfd = -1;
    int dfd = -1;
    bool ret = false;
    char buf[CPBSIZ];
    int oflags = O_WRONLY|O_CREAT|O_TRUNC|O_BINARY;

    LOGDEB("copyfile: "  << (src) << " to "  << (dst) << "\n" );

    if ((sfd = ::open(src, O_RDONLY, 0)) < 0) {
        reason += string("open ") + src + ": " + strerror(errno);
        goto out;
    }

    if (flags & COPYFILE_EXCL) {
        oflags |= O_EXCL;
    }

    if ((dfd = ::open(dst, oflags, 0644)) < 0) {
        reason += string("open/creat ") + dst + ": " + strerror(errno);
        // If we fail because of an open/truncate error, we do not
        // want to unlink the file, we might succeed...
        flags |= COPYFILE_NOERRUNLINK;
        goto out;
    }

    for (;;) {
        int didread;
        didread = ::read(sfd, buf, CPBSIZ);
        if (didread < 0) {
            reason += string("read src ") + src + ": " + strerror(errno);
            goto out;
        }
        if (didread == 0)
            break;
        if (::write(dfd, buf, didread) != didread) {
            reason += string("write dst ") + src + ": " + strerror(errno);
            goto out;
        }
    }

    ret = true;
out:
    if (ret == false && !(flags & COPYFILE_NOERRUNLINK))
        ::unlink(dst);
    if (sfd >= 0)
        ::close(sfd);
    if (dfd >= 0)
        ::close(dfd);
    return ret;
}

bool stringtofile(const string& dt, const char *dst, string& reason,
                  int flags)
{
    LOGDEB("stringtofile:\n" );
    int dfd = -1;
    bool ret = false;
    int oflags = O_WRONLY|O_CREAT|O_TRUNC|O_BINARY;

    LOGDEB("stringtofile: "  << ((unsigned int)dt.size()) << " bytes to "  << (dst) << "\n" );

    if (flags & COPYFILE_EXCL) {
        oflags |= O_EXCL;
    }

    if ((dfd = ::open(dst, oflags, 0644)) < 0) {
        reason += string("open/creat ") + dst + ": " + strerror(errno);
        // If we fail because of an open/truncate error, we do not
        // want to unlink the file, we might succeed...
        flags |= COPYFILE_NOERRUNLINK;
        goto out;
    }

    if (::write(dfd, dt.c_str(), size_t(dt.size())) != ssize_t(dt.size())) {
        reason += string("write dst ") + ": " + strerror(errno);
        goto out;
    }

    ret = true;
out:
    if (ret == false && !(flags & COPYFILE_NOERRUNLINK))
        ::unlink(dst);
    if (dfd >= 0)
        ::close(dfd);
    return ret;
}

bool renameormove(const char *src, const char *dst, string &reason)
{
#ifdef _WIN32
    // Windows refuses to rename to an existing file. It appears that
    // there are workarounds (See MoveFile, MoveFileTransacted), but
    // anyway we are not expecting atomicity here.
    unlink(dst);
#endif
    
    // First try rename(2). If this succeeds we're done. If this fails
    // with EXDEV, try to copy. Unix really should have a library function
    // for this.
    if (rename(src, dst) == 0) {
        return true;
    }
    if (errno != EXDEV) {
        reason += string("rename(2) failed: ") + strerror(errno);
        return false;
    } 

    struct stat st;
    if (stat(src, &st) < 0) {
        reason += string("Can't stat ") + src + " : " + strerror(errno);
        return false;
    }
    if (!copyfile(src, dst, reason))
        return false;

    struct stat st1;
    if (stat(dst, &st1) < 0) {
        reason += string("Can't stat ") + dst + " : " + strerror(errno);
        return false;
    }

#ifndef _WIN32
    // Try to preserve modes, owner, times. This may fail for a number
    // of reasons
    if ((st1.st_mode & 0777) != (st.st_mode & 0777)) {
        if (chmod(dst, st.st_mode&0777) != 0) {
            reason += string("Chmod ") + dst + "Error : " + strerror(errno);
        }
    }
    if (st.st_uid != st1.st_uid || st.st_gid != st1.st_gid) {
        if (chown(dst, st.st_uid, st.st_gid) != 0) {
            reason += string("Chown ") + dst + "Error : " + strerror(errno);
        }
    }
    struct timeval times[2];
    times[0].tv_sec = st.st_atime;
    times[0].tv_usec = 0;
    times[1].tv_sec = st.st_mtime;
    times[1].tv_usec = 0;
    utimes(dst, times);
#endif
    // All ok, get rid of origin
    if (unlink(src) < 0) {
        reason += string("Can't unlink ") + src + "Error : " + strerror(errno);
    }

    return true;
}


#else 

#include 
#include 
#include 

#include 
#include 

#include "copyfile.h"

using namespace std;

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_m     0x2
#define OPT_e     0x4

static const char *thisprog;
static char usage [] =
    "trcopyfile [-m] src dst\n"
    " -m : move instead of copying\n"
    " -e : fail if dest exists (only for copy)\n"
    "\n"
    ;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

int main(int argc, const char **argv)
{
    thisprog = argv[0];
    argc--; argv++;

    while (argc > 0 && **argv == '-') {
        (*argv)++;
        if (!(**argv))
            /* Cas du "adb - core" */
            Usage();
        while (**argv)
            switch (*(*argv)++) {
            case 'm':   op_flags |= OPT_m; break;
            case 'e':   op_flags |= OPT_e; break;
            default: Usage();   break;
            }
        argc--; argv++;
    }

    if (argc != 2)
        Usage();
    string src = *argv++;argc--;
    string dst = *argv++;argc--;
    bool ret;
    string reason;
    if (op_flags & OPT_m) {
        ret = renameormove(src.c_str(), dst.c_str(), reason);
    } else {
        int flags = 0;
        if (op_flags & OPT_e) {
            flags |= COPYFILE_EXCL;
        }
        ret = copyfile(src.c_str(), dst.c_str(), reason, flags);
    }
    if (!ret) {
        cerr << reason << endl;
        exit(1);
    }  else {
        cout << "Succeeded" << endl;
        if (!reason.empty()) {
            cout << "Warnings: " << reason << endl;
        }
        exit(0);
    }
}

#endif

recoll-1.26.3/utils/wipedir.h0000644000175000017500000000211213533651561012764 00000000000000/*
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _FILEUT_H_INCLUDED_
#define _FILEUT_H_INCLUDED_
/* Copyright (C) 2004 J.F.Dockes */

#include 

/**
 *  Remove all files inside directory.
 * @return  0 if ok, count of remaining entries (ie: subdirs), or -1 for error
 */
int wipedir(const std::string& dirname, bool topalso = false, bool recurse = false);
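
/* Usage sketch (illustrative): recursively empty a temporary directory
 * while keeping the directory itself. The path is a made-up example.
 *
 *     if (wipedir("/tmp/recoll_tmpdir", false, true) != 0) {
 *         // some entries could not be removed, or an error occurred
 *     }
 */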

#endif /* _FILEUT_H_INCLUDED_ */
recoll-1.26.3/utils/idfile.cpp0000644000175000017500000001237013533651561013117 00000000000000/* Copyright (C) 2005 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_IDFILE
#include "autoconfig.h"

#include <ctype.h>
#include <string.h>
#include <stdlib.h>

#include <fstream>
#include <sstream>

#include "idfile.h"
#include "log.h"

using namespace std;

// Bogus code to avoid bogus valgrind mt warnings about the
// initialization of treat_mbox_...  which I can't even remember the
// use of (it's not documented or ever set)
static int treat_mbox_as_rfc822;
class InitTMAR {
public:
    InitTMAR() {
        treat_mbox_as_rfc822 = getenv("RECOLL_TREAT_MBOX_AS_RFC822") ? 1 : -1;
    }
};
static InitTMAR initTM;

/** 
 * This code is currently ONLY used to identify mbox and mail message files
 * which are badly handled by standard mime type identifiers
 * There is a very old (circa 1990) mbox format using blocks of ^A (0x01) chars
 * to separate messages, that we don't recognize currently
 */

// Mail headers we compare to:
static const char *mailhs[] = {"From: ", "Received: ", "Message-Id: ", "To: ", 
			       "Date: ", "Subject: ", "Status: ", 
			       "In-Reply-To: "};
static const int mailhsl[] = {6, 10, 12, 4, 6, 9, 8, 13};
static const int nmh = sizeof(mailhs) / sizeof(char *);

const int wantnhead = 3;
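
// Worked example of the heuristic below: with wantnhead == 3, a file whose
// first lines are (made-up values)
//     From: someone@example.com
//     To: someone.else@example.com
//     Subject: hello
// matches three known headers and is identified as "message/rfc822".
// If an mbox "From " separator line came first, the result would be
// "text/x-mail" instead (unless RECOLL_TREAT_MBOX_AS_RFC822 is set).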

// fn is for message printing
static string idFileInternal(istream& input, const char *fn)
{
    bool line1HasFrom = false;
    bool gotnonempty = false;
    int lookslikemail = 0;

    // emacs VM sometimes inserts very long lines with continuations or
    // not (for folder information). This forces us to look at many
    // lines and long ones
    int lnum = 1;
    for (int loop = 1; loop < 200; loop++, lnum++) {

#define LL 2*1024
	char cline[LL+1];
	cline[LL] = 0;
	input.getline(cline, LL-1);
	if (input.fail()) {
	    if (input.bad()) {
		LOGERR("idfile: error while reading ["  << (fn) << "]\n" );
		return string();
	    }
	    // Must be eof ?
	    break;
	}

	// gcount includes the \n
	std::streamsize ll = input.gcount() - 1; 
	if (ll > 0)
	    gotnonempty = true;

	LOGDEB2("idfile: lnum "  << (lnum) << " ll "  << ((unsigned int)ll) << ": ["  << (cline) << "]\n" );

	// Check for a few things that can't be found in a mail file,
	// (optimization to get a quick negative)

	// Empty lines
	if (ll <= 0) {
	    // Accept a few empty lines at the beginning of the file,
	    // otherwise this is the end of headers
	    if (gotnonempty || lnum > 10) {
		LOGDEB2("Got empty line\n" );
		break;
	    } else {
		// Don't increment the line counter for initial empty lines.
		lnum--;
		continue;
	    }
	}

	// emacs vm can insert VERY long header lines.
	if (ll > LL - 20) {
	    LOGDEB2("idFile: Line too long\n" );
	    return string();
	}

	// Check for mbox 'From ' line
	if (lnum == 1 && !strncmp("From ", cline, 5)) {
	    if (treat_mbox_as_rfc822 == -1) {
		line1HasFrom = true;
		LOGDEB2("idfile: line 1 has From_\n" );
	    }
	    continue;
	} 

	// Except for a possible first line with 'From ', lines must
	// begin with whitespace or have a colon 
	// (hope no one comes up with a longer header name!).
	// Take care to convert to unsigned char because ms ctype does
	// not like negative values.
	if (!isspace((unsigned char)cline[0])) {
	    char *cp = strchr(cline, ':');
	    if (cp == 0 || (cp - cline) > 70) {
		LOGDEB2("idfile: can't be mail header line: ["  << (cline) << "]\n" );
		break;
	    }
	}

	// Compare to known headers
	for (int i = 0; i < nmh; i++) {
	    if (!strncasecmp(mailhs[i], cline, mailhsl[i])) {
		//fprintf(stderr, "Got [%s]\n", mailhs[i]);
		lookslikemail++;
		break;
	    }
	}
	if (lookslikemail >= wantnhead)
	    break;
    }
    if (line1HasFrom)
	lookslikemail++;

    if (lookslikemail >= wantnhead)
	return line1HasFrom ? string("text/x-mail") : string("message/rfc822");

    return string();
}

string idFile(const char *fn)
{
    ifstream input;
    input.open(fn, ios::in);
    if (!input.is_open()) {
	LOGERR("idFile: could not open ["  << (fn) << "]\n" );
	return string();
    }
    return idFileInternal(input, fn);
}

string idFileMem(const string& data)
{
    stringstream s(data, stringstream::in);
    return idFileInternal(s, "");
}

#else

#include 
#include 

#include 
#include 

#include 

using namespace std;

#include "log.h"

#include "idfile.h"

int main(int argc, char **argv)
{
    if (argc < 2) {
	cerr << "Usage: idfile filename" << endl;
	exit(1);
    }
    DebugLog::getdbl()->setloglevel(DEBDEB1);
    DebugLog::setfilename("stderr");
    for (int i = 1; i < argc; i++) {
	string mime = idFile(argv[i]);
	cout << argv[i] << " : " << mime << endl;
    }
    exit(0);
}

#endif

recoll-1.26.3/utils/base64.h0000644000175000017500000000237313533651561012416 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _BASE64_H_INCLUDED_
#define _BASE64_H_INCLUDED_
#include <string>

void base64_encode(const std::string& in, std::string& out);
bool base64_decode(const std::string& in, std::string& out);
inline std::string base64_encode(const std::string& in)
{
    std::string o;
    base64_encode(in, o);
    return o;
}
inline std::string base64_decode(const std::string& in)
{
    std::string o;
    if (base64_decode(in, o))
	return o;
    return std::string();
}
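
// Usage sketch (illustrative): round-tripping a small buffer.
//     std::string b64 = base64_encode("hello, recoll");
//     std::string back = base64_decode(b64); // "hello, recoll" again on success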

#endif /* _BASE64_H_INCLUDED_ */
recoll-1.26.3/utils/ecrontab.h0000644000175000017500000000550313533651561013125 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _ECRONTAB_H_INCLUDED_
#define _ECRONTAB_H_INCLUDED_

/** Utility function to manage lines inside a user crontab 
 * 
 * Lines managed by this routine are marked with a hopefully unique marker
 * and discriminated by a selector, both environment variable settings. 
 * Example:
 *  30 8 * * * RCLCRONTAB_RCLINDEX= RECOLL_CONFDIR=/path/to/dir recollindex ...
 * RCLCRONTAB_RCLINDEX is the line marker, and the RECOLL_CONFDIR value
 * allows selecting the affected line. 
 *
 * This approach allows leaving alone lines which do have a
 * RECOLL_CONFDIR value but are not managed by us. The marker and selector
 * values are chosen by the caller, which should apply some thought to
 * choosing sane values.
 */

#include <string>
#include <vector>
using std::string;
using std::vector;

/** Add, replace or delete a command inside a crontab file
 *
 * @param marker selects lines managed by this module and should take the form
 *  of a (possibly empty) environment variable assignment.
 * @param id selects the appropriate line to affect and will usually be an 
 *   actual variable assignment (see above)
 * @param sched is a standard cron schedule spec (ie: 30 8 * * *)
 * @param cmd is the command to execute (the last part of the line). 
 *    Set it to an empty string to delete the line from the crontab
 * @param reason error message
 *
 * "marker" and "id" should look like reasonable env variable assignements. 
 * Only ascii capital letters, numbers and _ before the '='
 */
bool editCrontab(const string& marker, const string& id, 
		 const string& sched, const string& cmd,
		 string& reason
    );
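
/* Usage sketch (illustrative), using the marker/selector scheme described
 * above. The schedule, configuration directory and command are made-up
 * example values.
 *
 *     string reason;
 *     bool ok = editCrontab("RCLCRONTAB_RCLINDEX=",
 *                           "RECOLL_CONFDIR=/home/me/.recoll",
 *                           "30 8 * * *", "recollindex", reason);
 *     // Passing an empty cmd would delete the managed line instead.
 */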

/**
 * check crontab for unmanaged lines
 * @param marker same as above, typically RCLCRONTAB_RCLINDEX=
 * @param data string to look for on lines NOT marked, typically "recollindex"
 * @return true if unmanaged lines exist, false else.
 */
bool checkCrontabUnmanaged(const string& marker, const string& data);

/** Retrieve the scheduling for a crontab entry */
bool getCrontabSched(const string& marker, const string& id, 
		     vector<string>& sched);

#endif /* _ECRONTAB_H_INCLUDED_ */
recoll-1.26.3/utils/zlibut.h0000644000175000017500000000065613347664027012651 00000000000000#ifndef _ZLIBUT_H_INCLUDED_
#define _ZLIBUT_H_INCLUDED_

#include <stddef.h>

class ZLibUtBuf {
public:
    ZLibUtBuf();
    ~ZLibUtBuf();
    char *getBuf() const;
    char *takeBuf();
    size_t getCnt();

    class Internal;
    Internal *m;
};

bool inflateToBuf(const void* inp, unsigned int inlen, ZLibUtBuf& buf);
bool deflateToBuf(const void* inp, unsigned int inlen, ZLibUtBuf& buf);
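
// Usage sketch (illustrative): compress a buffer, then decompress it back.
// Assumes getBuf()/getCnt() give access to the result data and size, as
// their names suggest.
//
//     std::string data("some bytes worth compressing ...");
//     ZLibUtBuf comp, back;
//     if (deflateToBuf(data.data(), data.size(), comp) &&
//         inflateToBuf(comp.getBuf(), comp.getCnt(), back)) {
//         std::string orig(back.getBuf(), back.getCnt());  // == data
//     }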

#endif /* _ZLIBUT_H_INCLUDED_ */
recoll-1.26.3/utils/pxattr.cpp0000644000175000017500000007111113303776060013200 00000000000000/*
Copyright (c) 2009 Jean-Francois Dockes

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*/

/** \file pxattr.cpp 
    \brief Portable External Attributes API
 */

// PXALINUX: platforms like kfreebsd which aren't linux but use the
// same xattr interface
#if defined(__gnu_linux__) || \
    (defined(__FreeBSD_kernel__)&&defined(__GLIBC__)&&!defined(__FreeBSD__)) ||\
    defined(__CYGWIN__)
#define PXALINUX
#endif

// If the platform is not known yet, let this file be empty instead of
// breaking the compile, this will let the build work if the rest of
// the software is not actually calling us. If it does call us, this
// will bring attention to the necessity of a port.
//
// If the platform is known not to support extattrs (e.g.__OpenBSD__),
// just let the methods return errors (like they would on a non-xattr
// fs on e.g. linux)

#if defined(__DragonFly__) || defined(__OpenBSD__)
#define HAS_NO_XATTR
#endif

#if defined(__FreeBSD__) || defined(PXALINUX) || defined(__APPLE__) \
    || defined(HAS_NO_XATTR)


#ifndef TEST_PXATTR
#include <errno.h>
#include <stdlib.h>
#include <string>
#include <vector>

#if defined(__FreeBSD__)
#include <sys/types.h>
#include <sys/extattr.h>
#elif defined(PXALINUX)
#include <sys/xattr.h>
#elif defined(__APPLE__)
#include <sys/xattr.h>
#elif defined(HAS_NO_XATTR)
#else
#error "Unknown system can't compile"
#endif

#include "pxattr.h"

namespace pxattr {

class AutoBuf {
public:
    char *buf;
    AutoBuf() : buf(0) {}
    ~AutoBuf() {if (buf) free(buf); buf = 0;}
    bool alloc(int n) 
    {
	if (buf) {
	    free(buf);
	    buf = 0;
	}
	buf = (char *)malloc(n); 
	return buf != 0;
    }
};

static bool 
get(int fd, const string& path, const string& _name, string *value,
    flags flags, nspace dom)
{
    string name;
    if (!sysname(dom, _name, &name)) 
	return false;

    ssize_t ret = -1;
    AutoBuf buf;

#if defined(__FreeBSD__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_get_link(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), 0, 0);
	} else {
	    ret = extattr_get_file(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), 0, 0);
	}
    } else {
	ret = extattr_get_fd(fd, EXTATTR_NAMESPACE_USER, name.c_str(), 0, 0);
    }
    if (ret < 0)
	return false;
    if (!buf.alloc(ret+1)) // Don't want to deal with possible ret=0
	return false;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_get_link(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), buf.buf, ret);
	} else {
	    ret = extattr_get_file(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), buf.buf, ret);
	}
    } else {
	ret = extattr_get_fd(fd, EXTATTR_NAMESPACE_USER, 
			     name.c_str(), buf.buf, ret);
    }
#elif defined(PXALINUX)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = lgetxattr(path.c_str(), name.c_str(), 0, 0);
	} else {
	    ret = getxattr(path.c_str(), name.c_str(), 0, 0);
	}
    } else {
	ret = fgetxattr(fd, name.c_str(), 0, 0);
    }
    if (ret < 0)
	return false;
    if (!buf.alloc(ret+1)) // Don't want to deal with possible ret=0
	return false;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = lgetxattr(path.c_str(), name.c_str(), buf.buf, ret);
	} else {
	    ret = getxattr(path.c_str(), name.c_str(), buf.buf, ret);
	}
    } else {
	ret = fgetxattr(fd, name.c_str(), buf.buf, ret);
    }
#elif defined(__APPLE__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = getxattr(path.c_str(), name.c_str(), 0, 0, 0, XATTR_NOFOLLOW);
	} else {
	    ret = getxattr(path.c_str(), name.c_str(), 0, 0, 0, 0);
	}
    } else {
	ret = fgetxattr(fd, name.c_str(), 0, 0, 0, 0);
    }
    if (ret < 0)
	return false;
    if (!buf.alloc(ret+1)) // Don't want to deal with possible ret=0
	return false;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = getxattr(path.c_str(), name.c_str(), buf.buf, ret, 0, 
			   XATTR_NOFOLLOW);
	} else {
	    ret = getxattr(path.c_str(), name.c_str(), buf.buf, ret, 0, 0);
	}
    } else {
	ret = fgetxattr(fd, name.c_str(), buf.buf, ret, 0, 0);
    }
#else
    errno = ENOTSUP;
#endif

    if (ret >= 0)
	value->assign(buf.buf, ret);
    return ret >= 0;
}

static bool 
set(int fd, const string& path, const string& _name, 
    const string& value, flags flags, nspace dom)
{
    string name;
    if (!sysname(dom, _name, &name)) 
	return false;

    ssize_t ret = -1;

#if defined(__FreeBSD__)
    
    if (flags & (PXATTR_CREATE|PXATTR_REPLACE)) {
	// Need to test existence
	bool exists = false;
	ssize_t eret;
	if (fd < 0) {
	    if (flags & PXATTR_NOFOLLOW) {
		eret = extattr_get_link(path.c_str(), EXTATTR_NAMESPACE_USER, 
				       name.c_str(), 0, 0);
	    } else {
		eret = extattr_get_file(path.c_str(), EXTATTR_NAMESPACE_USER, 
				       name.c_str(), 0, 0);
	    }
	} else {
	    eret = extattr_get_fd(fd, EXTATTR_NAMESPACE_USER, 
				  name.c_str(), 0, 0);
	}
	if (eret >= 0)
	    exists = true;
	if (eret < 0 && errno != ENOATTR)
	    return false;
	if ((flags & PXATTR_CREATE) && exists) {
	    errno = EEXIST;
	    return false;
	}
	if ((flags & PXATTR_REPLACE) && !exists) {
	    errno = ENOATTR;
	    return false;
	}
    }
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_set_link(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), value.c_str(), value.length());
	} else {
	    ret = extattr_set_file(path.c_str(), EXTATTR_NAMESPACE_USER, 
				   name.c_str(), value.c_str(), value.length());
	}
    } else {
	ret = extattr_set_fd(fd, EXTATTR_NAMESPACE_USER, 
			     name.c_str(), value.c_str(), value.length());
    }
#elif defined(PXALINUX)
    int opts = 0;
    if (flags & PXATTR_CREATE)
	opts = XATTR_CREATE;
    else if (flags & PXATTR_REPLACE)
	opts = XATTR_REPLACE;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = lsetxattr(path.c_str(), name.c_str(), value.c_str(), 
			    value.length(), opts);
	} else {
	    ret = setxattr(path.c_str(), name.c_str(), value.c_str(), 
			   value.length(), opts);
	}
    } else {
	ret = fsetxattr(fd, name.c_str(), value.c_str(), value.length(), opts);
    }
#elif defined(__APPLE__)
    int opts = 0;
    if (flags & PXATTR_CREATE)
	opts = XATTR_CREATE;
    else if (flags & PXATTR_REPLACE)
	opts = XATTR_REPLACE;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = setxattr(path.c_str(), name.c_str(), value.c_str(), 
			   value.length(),  0, XATTR_NOFOLLOW|opts);
	} else {
	    ret = setxattr(path.c_str(), name.c_str(), value.c_str(), 
			   value.length(),  0, opts);
	}
    } else {
	ret = fsetxattr(fd, name.c_str(), value.c_str(), 
			value.length(), 0, opts);
    }
#else
    errno = ENOTSUP;
#endif
    return ret >= 0;
}

static bool 
del(int fd, const string& path, const string& _name, flags flags, nspace dom) 
{
    string name;
    if (!sysname(dom, _name, &name)) 
	return false;

    int ret = -1;

#if defined(__FreeBSD__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_delete_link(path.c_str(), EXTATTR_NAMESPACE_USER,
				      name.c_str());
	} else {
	    ret = extattr_delete_file(path.c_str(), EXTATTR_NAMESPACE_USER,
				      name.c_str());
	}
    } else {
	ret = extattr_delete_fd(fd, EXTATTR_NAMESPACE_USER, name.c_str());
    }
#elif defined(PXALINUX)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = lremovexattr(path.c_str(), name.c_str());
	} else {
	    ret = removexattr(path.c_str(), name.c_str());
	}
    } else {
	ret = fremovexattr(fd, name.c_str());
    }
#elif defined(__APPLE__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = removexattr(path.c_str(), name.c_str(), XATTR_NOFOLLOW);
	} else {
	    ret = removexattr(path.c_str(), name.c_str(), 0);
	}
    } else {
	ret = fremovexattr(fd, name.c_str(), 0);
    }
#else
    errno = ENOTSUP;
#endif
    return ret >= 0;
}

static bool 
list(int fd, const string& path, vector<string>* names, flags flags, nspace dom)
{
    ssize_t ret = -1;
    AutoBuf buf;

#if defined(__FreeBSD__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_list_link(path.c_str(), EXTATTR_NAMESPACE_USER, 0, 0);
	} else {
	    ret = extattr_list_file(path.c_str(), EXTATTR_NAMESPACE_USER, 0, 0);
	}
    } else {
	ret = extattr_list_fd(fd, EXTATTR_NAMESPACE_USER, 0, 0);
    }
    if (ret < 0) 
	return false;
    if (!buf.alloc(ret+1)) // NEEDED on FreeBSD (no ending null)
	return false;
    buf.buf[ret] = 0;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = extattr_list_link(path.c_str(), EXTATTR_NAMESPACE_USER, 
				    buf.buf, ret);
	} else {
	    ret = extattr_list_file(path.c_str(), EXTATTR_NAMESPACE_USER, 
				    buf.buf, ret);
	}
    } else {
	ret = extattr_list_fd(fd, EXTATTR_NAMESPACE_USER, buf.buf, ret);
    }
#elif defined(PXALINUX)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = llistxattr(path.c_str(), 0, 0);
	} else {
	    ret = listxattr(path.c_str(), 0, 0);
	}
    } else {
	ret = flistxattr(fd, 0, 0);
    }
    if (ret < 0) 
	return false;
    if (!buf.alloc(ret+1)) // Don't want to deal with possible ret=0
	return false;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = llistxattr(path.c_str(), buf.buf, ret);
	} else {
	    ret = listxattr(path.c_str(), buf.buf, ret);
	}
    } else {
	ret = flistxattr(fd, buf.buf, ret);
    }
#elif defined(__APPLE__)
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = listxattr(path.c_str(), 0, 0, XATTR_NOFOLLOW);
	} else {
	    ret = listxattr(path.c_str(), 0, 0, 0);
	}
    } else {
	ret = flistxattr(fd, 0, 0, 0);
    }
    if (ret < 0) 
	return false;
    if (!buf.alloc(ret+1)) // Don't want to deal with possible ret=0
	return false;
    if (fd < 0) {
	if (flags & PXATTR_NOFOLLOW) {
	    ret = listxattr(path.c_str(), buf.buf, ret, XATTR_NOFOLLOW);
	} else {
	    ret = listxattr(path.c_str(), buf.buf, ret, 0);
	}
    } else {
	ret = flistxattr(fd, buf.buf, ret, 0);
    }
#else
    errno = ENOTSUP;
#endif

    if (ret < 0)
        return false;

    char *bufstart = buf.buf;

    // All systems return a 0-separated string list except FreeBSD
    // which has length, value pairs, length is a byte. 
#if defined(__FreeBSD__)
    char *cp = buf.buf;
    unsigned int len;
    while (cp < buf.buf + ret + 1) {
	len = *cp;
	*cp = 0;
	cp += len + 1;
    }
    bufstart = buf.buf + 1;
    *cp = 0; // don't forget, we allocated one more
#endif


    if (ret > 0) {
	int pos = 0;
	while (pos < ret) {
	    string n = string(bufstart + pos);
	    string n1;
	    if (pxname(PXATTR_USER, n, &n1)) {
		names->push_back(n1);
	    }
	    pos += n.length() + 1;
	}
    }
    return true;
}

static const string nullstring("");

bool get(const string& path, const string& _name, string *value,
	 flags flags, nspace dom)
{
    return get(-1, path, _name, value, flags, dom);
}
bool get(int fd, const string& _name, string *value, flags flags, nspace dom)
{
    return get(fd, nullstring, _name, value, flags, dom);
}
bool set(const string& path, const string& _name, const string& value,
	 flags flags, nspace dom)
{
    return set(-1, path, _name, value, flags, dom);
}
bool set(int fd, const string& _name, const string& value, 
	 flags flags, nspace dom)
{
    return set(fd, nullstring, _name, value, flags, dom);
}
bool del(const string& path, const string& _name, flags flags, nspace dom) 
{
    return del(-1, path, _name, flags, dom);
}
bool del(int fd, const string& _name, flags flags, nspace dom) 
{
    return del(fd, nullstring, _name, flags, dom);
}
bool list(const string& path, vector<string>* names, flags flags, nspace dom)
{
    return list(-1, path, names, flags, dom);
}
bool list(int fd, vector<string>* names, flags flags, nspace dom)
{
    return list(fd, nullstring, names, flags, dom);
}

#if defined(PXALINUX) || defined(COMPAT1)
static const string userstring("user.");
#else
static const string userstring("");
#endif
bool sysname(nspace dom, const string& pname, string* sname)
{
    if (dom != PXATTR_USER) {
	errno = EINVAL;
	return false;
     }
    *sname = userstring + pname;
    return true;
}

bool pxname(nspace dom, const string& sname, string* pname) 
{
    if (!userstring.empty() && sname.find(userstring) != 0) {
	errno = EINVAL;
	return false;
    }
    *pname = sname.substr(userstring.length());
    return true;
}

} // namespace pxattr

#else // TEST_PXATTR Testing / driver ->

#include "pxattr.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/stat.h>
#include <ftw.h>
#include <regex.h>

#include <string>
#include <vector>
#include <map>
#include <fstream>
#include <iostream>
#include <algorithm>

using namespace std;


static int antiverbose;

static void printsyserr(const string& msg)
{
    if (antiverbose >= 2)
        return;
    cerr << msg << " " << strerror(errno) << endl;
}

#define message(X)                              \
    {                                           \
        if (antiverbose == 0) {                 \
            cout << X;                          \
        }                                       \
    }
    
static void dotests();

// \-quote character c in input: \ -> \\, nl -> \n, cr -> \r, c -> \c
static void quote(const string& in, string& out, int c)
{
    out.clear();
    for (string::const_iterator it = in.begin(); it != in.end(); it++) {
	if (*it == '\\') {
	    out += "\\\\";
	} else if (*it == "\n"[0]) {
	    out += "\\n";
	} else if (*it == "\r"[0]) {
	    out += "\\r";
	} else if (*it == c) {
	    out += "\\";
	    out += c;
	} else {
	    out += *it;
	}
    }
}

// \-unquote input \n -> nl, \r -> cr, \c -> c
static void unquote(const string& in, string& out)
{
    out.clear();
    for (unsigned int i = 0; i < in.size(); i++) {
	if (in[i] == '\\') {
	    if (i == in.size() -1) {
		out += in[i];
	    } else {
		int c = in[++i];
		switch (c) {
		case 'n': out += "\n";break;
		case 'r': out += "\r";break;
		default: out += c;
		}
	    }
	} else {
	    out += in[i];
	}
    }
}

// Find first unquoted c in input ("quoted" meaning preceded by an odd
// number of backslashes)
string::size_type find_first_unquoted(const string& in, int c)
{
    int q = 0;
    for (unsigned int i = 0;i < in.size(); i++) {
	if (in[i] == '\\') {
	    q++;
	} else if (in[i] == c) {
	    if (q&1) {
		// quoted
		q = 0;
	    } else {
		return i;
	    }
	} else {
	    q = 0;
	}
    }
    return string::npos;
}

static const string PATH_START("Path: ");
static bool listattrs(const string& path)
{
    vector<string> names;
    if (!pxattr::list(path, &names)) {
	if (errno == ENOENT) {
	    return false;
	}
	printsyserr("pxattr::list");
	exit(1);
    }
    if (names.empty()) {
        return true;
    }

    // Sorting the names is not strictly necessary, but it makes comparing
    // backups easier
    sort(names.begin(), names.end());

    string quoted;
    quote(path, quoted, 0);
    message(PATH_START << quoted << endl);
    for (vector<string>::const_iterator it = names.begin(); 
	 it != names.end(); it++) {
	string value;
	if (!pxattr::get(path, *it, &value)) {
	    if (errno == ENOENT) {
		return false;
	    }
	    printsyserr("pxattr::get");
	    exit(1);
	}
	quote(*it, quoted, '=');
	message(" " << quoted << "=");
	quote(value, quoted, 0);
	message(quoted << endl);
    }
    return true;
}

bool setxattr(const string& path, const string& name, const string& value)
{
    if (!pxattr::set(path, name, value)) {
	printsyserr("pxattr::set");
	return false;
    }
    return true;
}

bool printxattr(const string &path, const string& name)
{
    string value;
    if (!pxattr::get(path, name, &value)) {
	if (errno == ENOENT) {
	    return false;
	}
	printsyserr("pxattr::get");
        return false;
    }
    message(PATH_START << path << endl);
    message(" " << name << " => " << value << endl);
    return true;
}

bool delxattr(const string &path, const string& name) 
{
    if (!pxattr::del(path, name)) {
	printsyserr("pxattr::del");
        return false;
    }
    return true;
}

// Restore xattrs stored in file created by pxattr -lR output
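// Illustrative sketch of the input format, matching what listattrs() writes
// above (example values are made up): a "Path: " line followed by one
// indented, \-quoted "name=value" line per attribute, e.g.
//   Path: /some/dir/afile.txt
//    org.pxattr.name1=VALUE1
//    other.name=a value with an escaped \n newline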
static void restore(const char *backupnm)
{
    istream *input;
    ifstream fin;
    if (!strcmp(backupnm, "stdin")) {
	input = &cin;
    } else {
	fin.open(backupnm, ios::in);
	input = &fin;
    }

    bool done = false;
    int linenum = 0;
    string path;
    map<string, string> attrs;
    while (!done) {
	string line;
	getline(*input, line);
	if (!input->good()) {
	    if (input->bad()) {
                cerr << "Input I/O error" << endl;
		exit(1);
	    }
	    done = true;
	} else {
	    linenum++;
	}

	// message("Got line " << linenum << " : [" << line << "] done " << 
	// done << endl);

	if (line.find(PATH_START) == 0 || done) {
	    if (!path.empty() && !attrs.empty()) {
                for (map<string, string>::const_iterator it = attrs.begin();
		     it != attrs.end(); it++) {
		    setxattr(path, it->first, it->second);
		}
	    }
	    if (!done) {
		line = line.substr(PATH_START.size(), string::npos);
		unquote(line, path);
		attrs.clear();
	    }
	} else if (line.empty()) {
	    continue;
	} else {
	    // Should be attribute line
	    if (line[0] != ' ') {
		cerr << "Found bad line (no space) at " << linenum << endl;
		exit(1);
	    }
	    string::size_type pos = find_first_unquoted(line, '=');
	    if (pos == string::npos || pos < 2 || pos >= line.size()) {
		cerr << "Found bad line at " << linenum << endl;
		exit(1);
	    }
	    string qname = line.substr(1, pos-1);
            pair<string, string> entry;
	    unquote(qname, entry.first);
	    unquote(line.substr(pos+1), entry.second);
	    attrs.insert(entry);
	}
    }
}

static char *thisprog;
static char usage [] =
"pxattr [-hs] -n name pathname [...] : show value for name\n"
"pxattr [-hs] -n name -r regexp pathname [...] : test value against regexp\n"
"pxattr [-hs] -n name -v value pathname [...] : add/replace attribute\n"
"pxattr [-hs] -x name pathname [...] : delete attribute\n"
"pxattr [-hs] [-l] [-R] pathname [...] : list attribute names and values\n"
"  For all the options above, if no pathname arguments are given, pxattr\n"
"  will read file names on stdin, one per line.\n"
" [-h] : don't follow symbolic links (act on link itself)\n"
" [-R] : recursive listing. Args should be directory(ies)\n"
" [-s] : be silent. With one option stdout is suppressed, with 2 stderr too\n"
"pxattr -S  Restore xattrs from file created by pxattr -lR output\n"
"               if backupfile is 'stdin', reads from stdin\n"
"pxattr -T: run tests on temp file in current directory" 
"\n"
;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_h     0x2
#define OPT_l     0x4
#define OPT_n	  0x8
#define OPT_r     0x10
#define OPT_R     0x20
#define OPT_S     0x40
#define OPT_T     0x80
#define OPT_s     0x100
#define OPT_v	  0x200
#define OPT_x     0x400

// Static values for ftw
static string name, value;

bool regex_test(const char *path, regex_t *preg)
{
    string value;
    if (!pxattr::get(path, name, &value)) {
	if (errno == ENOENT) {
	    return false;
	}
	printsyserr("pxattr::get");
        return false;
    }

    int ret = regexec(preg, value.c_str(), 0, 0, 0);
    if (ret == 0) {
        message(path << endl);
        return true;
    } else if (ret == REG_NOMATCH) {
        return false;
    } else {
        char errmsg[200];
        regerror(ret, preg, errmsg, 200);
        errno = 0;
        printsyserr("regexec");
        return false;
    }
}

bool processfile(const char* fn, const struct stat *, int)
{
    //message("processfile " << fn << " opflags " << op_flags << endl);

    if (op_flags & OPT_l) {
	return listattrs(fn);
    } else if (op_flags & OPT_n) {
	if (op_flags & OPT_v) {
	    return setxattr(fn, name, value);
	} else {
	    return printxattr(fn, name);
	} 
    } else if (op_flags & OPT_x) {
	return delxattr(fn, name);
    }
    Usage();
}

int ftwprocessfile(const char* fn, const struct stat *sb, int typeflag)
{
    processfile(fn, sb, typeflag);
    return 0;
}

int main(int argc, char **argv)
{
    const char *regexp_string;
    thisprog = argv[0];
    argc--; argv++;
    
    while (argc > 0 && **argv == '-') {
	(*argv)++;
	if (!(**argv))
            /* Case of "adb - core" */
	    Usage();
	while (**argv)
	    switch (*(*argv)++) {
	    case 'l':	op_flags |= OPT_l; break;
	    case 'n':	op_flags |= OPT_n; if (argc < 2)  Usage();
		name = *(++argv); argc--; 
		goto b1;
	    case 'R':	op_flags |= OPT_R; break;
	    case 'r':	op_flags |= OPT_r; if (argc < 2)  Usage();
		regexp_string = *(++argv); argc--; 
		goto b1;
	    case 's':	antiverbose++; break;
	    case 'S':	op_flags |= OPT_S; break;
	    case 'T':	op_flags |= OPT_T; break;
	    case 'v':	op_flags |= OPT_v; if (argc < 2)  Usage();
		value = *(++argv); argc--; 
		goto b1;
	    case 'x':	op_flags |= OPT_x; if (argc < 2)  Usage();
		name = *(++argv); argc--; 
		goto b1;
	    default: Usage();	break;
	    }
    b1: argc--; argv++;
    }

    if (op_flags & OPT_T)  {
	if (argc > 0)
	    Usage();
	dotests();
	exit(0);
    }
    if ((op_flags & OPT_r) && !(op_flags & OPT_n)) {
        Usage();
    }
    
    if (op_flags & OPT_S)  {
	if (argc != 1)
	    Usage();
	restore(argv[0]);
	exit(0);
    }
    regex_t regexp;
    if (op_flags & OPT_r) {
        int err = regcomp(&regexp, regexp_string, REG_NOSUB|REG_EXTENDED);
        if (err) {
            char errmsg[200];
            regerror(err, &regexp, errmsg, 200);
            cerr << "regcomp(" << regexp_string << ") error: " << errmsg <<
                endl;
            exit(1);
        }
    }
    
    // Default option is 'list'
    if ((op_flags&(OPT_l|OPT_n|OPT_x)) == 0)
	op_flags |= OPT_l;

    bool readstdin = false;
    if (argc == 0)
	readstdin = true;

    int exitvalue = 0;
    for (;;) {
	const char *fn = 0;
	if (argc > 0) {
	    fn = *argv++; 
	    argc--;
	} else if (readstdin) {
	    static char filename[1025];
	    if (!fgets(filename, 1024, stdin))
		break;
	    filename[strlen(filename)-1] = 0;
	    fn = filename;
	} else
	    break;

	if (op_flags & OPT_R) {
	    if (ftw(fn, ftwprocessfile, 20))
		exit(1);
	} else if (op_flags & OPT_r) {
            if (!regex_test(fn, &regexp)) {
                exitvalue = 1;
            }
        } else {
	    if (!processfile(fn, 0, 0)) {
                exitvalue = 1;
            }
	}
    } 

    exit(exitvalue);
}

static void fatal(const string& s)
{
    printsyserr(s.c_str());
    exit(1);
}

static bool testbackups()
{
    static const char *top = "ttop";
    static const char *d1 = "d1";
    static const char *d2 = "d2";
    static const char *tfn1 = "tpxattr1.txt";
    static const char *tfn2 = "tpxattr2.txt";
    static const char *dump = "attrdump.txt";
    static const char *NAMES[] = {"ORG.PXATTR.NAME1", 
				  "ORG=PXATTR\"=\\=\n", 
				  "=", "Name4"};
    static const char *VALUES[] = 
	{"VALUE1", "VALUE2", "VALUE3=VALUE3equal",
	 "VALUE4\n is more like"
	 " normal text\n with new lines and \"\\\" \\\" backslashes"};

    static const int nattrs = sizeof(NAMES) / sizeof(char *);

    if (mkdir(top, 0777))
        fatal("Can't mkdir ttop");
    if (chdir(top))
        fatal("Can't chdir ttop");
    if (mkdir(d1, 0777) || mkdir(d2, 0777))
        fatal("Can't mkdir ttop/dx\n");
    if (chdir(d1))
        fatal("chdir d1");

    int fd;
    if ((fd = open(tfn1, O_RDWR|O_CREAT, 0755)) < 0)
	fatal("create d1/tpxattr1.txt");
    /* Set attrs */
    for (int i = 0; i < nattrs; i++) {
	if (!pxattr::set(fd, NAMES[i], VALUES[i]))
	    fatal("pxattr::set");
    }
    close(fd);
    if ((fd = open(tfn2, O_RDWR|O_CREAT, 0755)) < 0)
	fatal("create d1/tpxattr2.txt");
    /* Set attrs */
    for (int i = 0; i < nattrs; i++) {
	if (!pxattr::set(fd, NAMES[i], VALUES[i]))
	    fatal("pxattr::set");
    }
    close(fd);

    /* Create dump */
    string cmd;
    cmd = string("pxattr -lR . > " ) + dump;
    if (system(cmd.c_str()))
	fatal(cmd + " in d1");
    if (chdir("../d2"))
	fatal("chdir ../d2");
    if (close(open(tfn1, O_RDWR|O_CREAT, 0755)))
        fatal("create d2/tpxattr1.txt");
    if (close(open(tfn2, O_RDWR|O_CREAT, 0755)))
        fatal("create d2/tpxattr2.txt");
    cmd = string("pxattr -S ../d1/" ) + dump;
    if (system(cmd.c_str()))
	fatal(cmd);
    cmd = string("pxattr -lR . > " ) + dump;
    if (system(cmd.c_str()))
	fatal(cmd + " in d2");
    cmd = string("diff ../d1/") + dump + " " + dump;
    if (system(cmd.c_str()))
	fatal(cmd);
    cmd = string("cat ") + dump;
    system(cmd.c_str());

    if (1) {
	unlink(dump);
	unlink(tfn1);
	unlink(tfn2);
	if (chdir("../d1"))
	    fatal("chdir ../d1");
	unlink(dump);
	unlink(tfn1);
	unlink(tfn2);
	if (chdir("../"))
	    fatal("chdir .. 1");
	if (rmdir(d1))
	    fatal("rmdir d1");
	if (rmdir(d2))
	    fatal("rmdir d2");
	if (chdir("../"))
	    fatal("chdir .. 2");
	if (rmdir(top))
	    fatal("rmdir ttop");
    }
    return true;
}

static void dotests()
{
    static const char *tfn = "pxattr_testtmp.xyz";
    static const char *NAMES[] = {"ORG.PXATTR.NAME1", "ORG.PXATTR.N2", 
				  "ORG.PXATTR.LONGGGGGGGGisSSSHHHHHHHHHNAME3"};
    static const char *VALUES[] = {"VALUE1", "VALUE2", "VALUE3"};

    /* Create test file if it doesn't exist, remove all attributes */
    int fd = open(tfn, O_RDWR|O_CREAT, 0755);
    if (fd < 0) {
	printsyserr("open/create");
	exit(1);
    }

    if (!antiverbose)
	message("Cleanup old attrs\n");
    vector<string> names;
    if (!pxattr::list(tfn, &names)) {
	printsyserr("pxattr::list");
	exit(1);
    }
    for (vector<string>::const_iterator it = names.begin(); 
	 it != names.end(); it++) {
	string value;
	if (!pxattr::del(fd, *it)) {
	    printsyserr("pxattr::del");
	    exit(1);
	}
    }
    /* Check that there are no attributes left */
    names.clear();
    if (!pxattr::list(tfn, &names)) {
	printsyserr("pxattr::list");
	exit(1);
    }
    if (names.size() != 0) {
	errno=0;printsyserr("Attributes remain after initial cleanup !\n");
        for (vector<string>::const_iterator it = names.begin();
	     it != names.end(); it++) {
            if (antiverbose < 2)
                cerr << *it << endl;
	}
	exit(1);
    }

    /* Create attributes, check existence and value */
    message("Creating extended attributes\n");
    for (int i = 0; i < 3; i++) {
	if (!pxattr::set(fd, NAMES[i], VALUES[i])) {
	    printsyserr("pxattr::set");
	    exit(1);
	}
    }
    message("Checking creation\n");
    for (int i = 0; i < 3; i++) {
	string value;
	if (!pxattr::get(tfn, NAMES[i], &value)) {
	    printsyserr("pxattr::get");
	    exit(1);
	}
	if (value.compare(VALUES[i])) {
            errno = 0;
	    printsyserr("Wrong value after create !");
	    exit(1);
	}
    }

    /* Delete one, check list */
    message("Delete one\n");
    if (!pxattr::del(tfn, NAMES[1])) {
	printsyserr("pxattr::del one name");
	exit(1);
    }
    message("Check list\n");
    for (int i = 0; i < 3; i++) {
	string value;
	if (!pxattr::get(fd, NAMES[i], &value)) {
	    if (i == 1)
		continue;
	    printsyserr("pxattr::get");
	    exit(1);
	} else if (i == 1) {
	    errno=0;
            printsyserr("Name at index 1 still exists after deletion\n");
	    exit(1);
	}
	if (value.compare(VALUES[i])) {
            errno = 0;
	    printsyserr("Wrong value after delete 1 !");
	    exit(1);
	}
    }

    /* Test the CREATE/REPLACE flags */
    // Set existing with flag CREATE should fail
    message("Testing CREATE/REPLACE flags use\n");
    if (pxattr::set(tfn, NAMES[0], VALUES[0], pxattr::PXATTR_CREATE)) {
	errno=0;printsyserr("Create existing with flag CREATE succeeded !\n");
	exit(1);
    }
    // Set new with flag REPLACE should fail
    if (pxattr::set(tfn, NAMES[1], VALUES[1], pxattr::PXATTR_REPLACE)) {
	errno=0;printsyserr("Create new with flag REPLACE succeeded !\n");
	exit(1);
    }
    // Set new with flag CREATE should succeed
    if (!pxattr::set(fd, NAMES[1], VALUES[1], pxattr::PXATTR_CREATE)) {
	errno=0;printsyserr("Create new with flag CREATE failed !\n");
	exit(1);
    }
    // Set existing with flag REPLACE should succeed
    if (!pxattr::set(fd, NAMES[0], VALUES[0], pxattr::PXATTR_REPLACE)) {
	errno=0;printsyserr("Create existing with flag REPLACE failed !\n");
	exit(1);
    }
    close(fd);
    unlink(tfn);

    if (testbackups())
	exit(0);
    exit(1);
}
#endif // Testing pxattr

#endif // Supported systems.
recoll-1.26.3/utils/fileudi.h0000644000175000017500000000252313533651561012750 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _FILEUDI_H_INCLUDED_
#define _FILEUDI_H_INCLUDED_
#include <string>
using std::string;

// Unique Document Ids for the file-based indexer (main Recoll
// indexer).  Document Ids are built from a concatenation of the file
// path and the internal path (ie: email number inside
// folder/attachment number/etc.)  As the size of Xapian terms is
// limited, the Id path is truncated to a maximum length, and completed
// by a hash of the remainder (including the ipath)

extern void make_udi(const string& fn, const string& ipath, string &udi);
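// Illustrative call (hypothetical paths, added as an example):
//   std::string udi;
//   make_udi("/home/me/mail/inbox", "25", udi);
// The resulting udi combines both inputs; overlong values are truncated and
// completed with a hash as described above, so the term always fits within
// the Xapian size limit.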

#endif /* _FILEUDI_H_INCLUDED_ */
recoll-1.26.3/utils/ecrontab.cpp0000644000175000017500000001512313533651561013457 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_ECRONTAB
#include "autoconfig.h"

#include <stdio.h>

#include "ecrontab.h"
#include "execmd.h"
#include "smallut.h"
#include "log.h"

// Read crontab file and split it into lines.
static bool eCrontabGetLines(vector<string>& lines)
{
    string crontab;
    ExecCmd croncmd;
    vector<string> args; 
    int status;

    // Retrieve current crontab contents. An error here means that no
    // crontab exists, and is not fatal, but we return a different
    // status than for an empty one
    args.push_back("-l");
    if ((status = croncmd.doexec("crontab", args, 0, &crontab))) {
	lines.clear();
	return false;
    }

    // Split crontab into lines
    stringToTokens(crontab, lines, "\n");
    return true;
}

// Concatenate lines and write crontab
static bool eCrontabWriteFile(const vector<string>& lines, string& reason)
{
    string crontab;
    ExecCmd croncmd;
    vector<string> args; 
    int status;

    for (vector<string>::const_iterator it = lines.begin();
	 it != lines.end(); it++) {
	crontab += *it + "\n";
    }

    args.push_back("-");
    if ((status = croncmd.doexec("crontab", args, &crontab, 0))) {
	char nbuf[30]; 
	sprintf(nbuf, "0x%x", status);
        reason = string("Exec crontab failed: status: ") + nbuf;
	return false;
    }
    return true;
}

// Add / change / delete entry identified by marker and id
bool editCrontab(const string& marker, const string& id, 
		      const string& sched, const string& cmd, string& reason)
{
    vector<string> lines;

    if (!eCrontabGetLines(lines)) {
	// Special case: cmd is empty, no crontab, don't create one
	if (cmd.empty())
	    return true;
    }

    // Remove old copy if any
    for (vector<string>::iterator it = lines.begin();
	 it != lines.end(); it++) {
	// Skip comment
	if (it->find_first_of("#") == it->find_first_not_of(" \t"))
	    continue;

	if (it->find(marker) != string::npos && 
	    it->find(id) != string::npos) {
	    lines.erase(it);
	    break;
	}
    }

    if (!cmd.empty()) {
	string nline = sched + " " + marker + " " + id + " " + cmd;
	lines.push_back(nline);
    }
    
    if (!eCrontabWriteFile(lines, reason))
	return false;

    return true;
}

bool checkCrontabUnmanaged(const string& marker, const string& data)
{
    vector<string> lines;
    if (!eCrontabGetLines(lines)) {
	// No crontab, answer is no
	return false;
    }
    // Scan crontab
    for (vector<string>::iterator it = lines.begin();
	 it != lines.end(); it++) {
	if (it->find(marker) == string::npos && 
	    it->find(data) != string::npos) {
	    return true;
	}
    }
    return false;
}

/** Retrieve the scheduling for a crontab entry */
bool getCrontabSched(const string& marker, const string& id, 
                     vector<string>& sched) 
{
    LOGDEB0("getCrontabSched: marker["  << (marker) << "], id["  << (id) << "]\n" );
    vector<string> lines;
    if (!eCrontabGetLines(lines)) {
	// No crontab, answer is no
	sched.clear();
	return false;
    }
    string line;

    for (vector<string>::iterator it = lines.begin();
	 it != lines.end(); it++) {
	// Skip comment
	if (it->find_first_of("#") == it->find_first_not_of(" \t"))
	    continue;

	if (it->find(marker) != string::npos && 
	    it->find(id) != string::npos) {
	    line = *it;
	    break;
	}
    }

    stringToTokens(line, sched);
    sched.resize(5);
    return true;
}

#else // TEST ->

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>

#include <string>
#include <vector>
#include <iostream>

using namespace std;

#include "ecrontab.h"


static char *thisprog;

static char usage [] =
" -a add or replace crontab line \n"
" -d delete crontab line \n"
" -s get scheduling \n"    
" -c  check for unmanaged lines for string\n"
;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_a	  0x2 
#define OPT_d	  0x4 
#define OPT_w     0x8
#define OPT_c     0x10
#define OPT_s     0x20

const string& marker("RCLCRON_RCLINDEX=");
// Note of course the -w does not make sense for a cron entry
const string& cmd0("recollindex -w ");
const string& id("RECOLL_CONFDIR=\"/home/dockes/.recoll/\"");
const string& sched("30 8 * 1 *");

int main(int argc, char **argv)
{
  thisprog = argv[0];
  argc--; argv++;

  string wt = "10";
  string cmd;

  while (argc > 0 && **argv == '-') {
    (*argv)++;
    if (!(**argv))
      /* Case of "adb - core" */
      Usage();
    while (**argv)
      switch (*(*argv)++) {
      case 'a':	op_flags |= OPT_a; break;
      case 'c':	op_flags |= OPT_c; if (argc < 2)  Usage();
	  cmd = *(++argv); argc--; 
	  goto b1;
      case 'd':	op_flags |= OPT_d; break;
      case 's':	op_flags |= OPT_s; break;
      case 'w':	op_flags |= OPT_w; if (argc < 2)  Usage();
	  wt = *(++argv); argc--; 
	  goto b1;
	  
      default: Usage();	break;
      }
  b1: argc--; argv++;
  }

  if (argc != 0)
    Usage();

  string reason;
  bool status = false;
  
  if (op_flags & OPT_a) {
      cmd = cmd0 + wt;
      status = editCrontab(marker, id, sched, cmd, reason);
  } else if (op_flags & OPT_d) {
      status = editCrontab(marker, id, sched, "", reason);
  } else if (op_flags & OPT_s) {
      vector<string> sched;
      if (!(status = getCrontabSched(marker, id, sched))) {
	  cerr << "getCrontabSched failed: " << reason << endl;
	  exit(1);
      }
      cout << "sched vec size " << sched.size() << endl;
      cout << "mins " << sched[0] << " hours " << sched[1] <<
	  " days of month " << sched[2] << " months " << sched[3] << 
	  " days of week " << sched[4] << endl;
      exit(0);
      
  } else if (op_flags & OPT_c) {
      if ((status = checkCrontabUnmanaged(marker, cmd))) {
	  cerr << "crontab has unmanaged lines for " << cmd << endl;
	  exit(1);
      }
      exit(0);
  } else {
      Usage();
  }
  if (!status) {
      cerr << "editCrontab failed: " << reason << endl;
      exit(1);
  }
  exit(0);
}
#endif // TEST

recoll-1.26.3/utils/base64.cpp0000644000175000017500000002450113533651561012746 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <stdio.h>

#include <ctype.h>
#include <string.h>
#include <string>

using std::string;

#undef DEBUG_BASE64 
#ifdef DEBUG_BASE64
#define DPRINT(X) fprintf X
#else
#define DPRINT(X)
#endif

// This is adapted from FreeBSD's code, quite modified for performance.
// Tests on a Mac pro 2.1G with a 166MB base64 file
//
// The original version used strchr to lookup the base64 value from
// the input code:
//   real    0m13.053s user  0m12.574s sys   0m0.471s
// Using a direct access, 256 entries table:
//   real    0m3.073s user   0m2.600s sys    0m0.439s
// Using a variable to hold the array length (instead of in.length()):
//   real    0m2.972s user   0m2.527s sys    0m0.433s
// Using values from the table instead of isspace() (final)
//   real    0m2.513s user   0m2.059s sys    0m0.439s
//
// The table has one entry per char value (0-256). Invalid base64
// chars take value 256, whitespace 255, Pad ('=') 254. 
// Valid char points contain their base64 value (0-63) 
static const int b64values[] = {
/* 0 */ 256,/* 1 */ 256,/* 2 */ 256,/* 3 */ 256,/* 4 */ 256,
/* 5 */ 256,/* 6 */ 256,/* 7 */ 256,/* 8 */ 256,
/*9 ht */ 255,/* 10 nl */ 255,/* 11 vt */ 255,/* 12 np/ff*/ 255,/* 13 cr */ 255,
/* 14 */ 256,/* 15 */ 256,/* 16 */ 256,/* 17 */ 256,/* 18 */ 256,/* 19 */ 256,
/* 20 */ 256,/* 21 */ 256,/* 22 */ 256,/* 23 */ 256,/* 24 */ 256,/* 25 */ 256,
/* 26 */ 256,/* 27 */ 256,/* 28 */ 256,/* 29 */ 256,/* 30 */ 256,/* 31 */ 256,
/* 32 sp  */ 255,
/* ! */ 256,/* " */ 256,/* # */ 256,/* $ */ 256,/* % */ 256,
/* & */ 256,/* ' */ 256,/* ( */ 256,/* ) */ 256,/* * */ 256,
/* + */ 62,
/* , */ 256,/* - */ 256,/* . */ 256,
/* / */ 63,
/* 0 */ 52,/* 1 */ 53,/* 2 */ 54,/* 3 */ 55,/* 4 */ 56,/* 5 */ 57,/* 6 */ 58,
/* 7 */ 59,/* 8 */ 60,/* 9 */ 61,
/* : */ 256,/* ; */ 256,/* < */ 256,
/* = */ 254,
/* > */ 256,/* ? */ 256,/* @ */ 256,
/* A */ 0,/* B */ 1,/* C */ 2,/* D */ 3,/* E */ 4,/* F */ 5,/* G */ 6,/* H */ 7,
/* I */ 8,/* J */ 9,/* K */ 10,/* L */ 11,/* M */ 12,/* N */ 13,/* O */ 14,
/* P */ 15,/* Q */ 16,/* R */ 17,/* S */ 18,/* T */ 19,/* U */ 20,/* V */ 21,
/* W */ 22,/* X */ 23,/* Y */ 24,/* Z */ 25,
/* [ */ 256,/* \ */ 256,/* ] */ 256,/* ^ */ 256,/* _ */ 256,/* ` */ 256,
/* a */ 26,/* b */ 27,/* c */ 28,/* d */ 29,/* e */ 30,/* f */ 31,/* g */ 32,
/* h */ 33,/* i */ 34,/* j */ 35,/* k */ 36,/* l */ 37,/* m */ 38,/* n */ 39,
/* o */ 40,/* p */ 41,/* q */ 42,/* r */ 43,/* s */ 44,/* t */ 45,/* u */ 46,
/* v */ 47,/* w */ 48,/* x */ 49,/* y */ 50,/* z */ 51,
/* { */ 256,/* | */ 256,/* } */ 256,/* ~ */ 256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,256,
256,256,256,256,256,256,256,256,
};
static const char Base64[] =
    "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
static const char Pad64 = '=';

bool base64_decode(const string& in, string& out)
{
    int io = 0, state = 0, ch = 0;
    unsigned int ii = 0;
    out.clear();
    size_t ilen = in.length();
    out.reserve(ilen);

    for (ii = 0; ii < ilen; ii++) {
	ch = (unsigned char)in[ii];
	int value = b64values[ch];

	if (value == 255)        /* Skip whitespace anywhere. */
	    continue;
	if (ch == Pad64)
	    break;
	if (value == 256) {
	    /* A non-base64 character. */
	    DPRINT((stderr, "base64_dec: non-base64 char at pos %d\n", ii));
	    return false;
	}

	switch (state) {
	case 0:
	    out += value << 2;
	    state = 1;
	    break;
	case 1:
	    out[io]   |=  value >> 4;
	    out += (value & 0x0f) << 4 ;
	    io++;
	    state = 2;
	    break;
	case 2:
	    out[io]   |=  value >> 2;
	    out += (value & 0x03) << 6;
	    io++;
	    state = 3;
	    break;
	case 3:
	    out[io] |= value;
	    io++;
	    state = 0;
	    break;
	default:
	    fprintf(stderr, "base64_dec: internal!bad state!\n");
	    return false;
	}
    }

    /*
     * We are done decoding Base-64 chars.  Let's see if we ended
     * on a byte boundary, and/or with erroneous trailing characters.
     */

    if (ch == Pad64) {		/* We got a pad char. */
	ch = in[ii++];		/* Skip it, get next. */
	switch (state) {
	case 0:		/* Invalid = in first position */
	case 1:		/* Invalid = in second position */
	    DPRINT((stderr, "base64_dec: pad char in state 0/1\n"));
	    return false;

	case 2:		/* Valid, means one byte of info */
			/* Skip any number of spaces. */
	    for (; ii < in.length(); ch = in[ii++])
		if (!isspace((unsigned char)ch))
		    break;
	    /* Make sure there is another trailing = sign. */
	    if (ch != Pad64) {
		DPRINT((stderr, "base64_dec: missing pad char!\n"));
		// Well, there are bad encoders out there. Let it pass
		// return false;
	    }
	    ch = in[ii++];		/* Skip the = */
	    /* Fall through to "single trailing =" case. */
	    /* FALLTHROUGH */

	case 3:	    /* Valid, means two bytes of info */
	    /*
	     * We know this char is an =.  Is there anything but
	     * whitespace after it?
	     */
	    for (; ii < in.length(); ch = in[ii++])
		if (!isspace((unsigned char)ch)) {
		    DPRINT((stderr, "base64_dec: non-white at eod: 0x%x\n", 
			    (unsigned int)((unsigned char)ch)));
		    // Well, there are bad encoders out there. Let it pass
		    //return false;
		}

	    /*
	     * Now make sure for cases 2 and 3 that the "extra"
	     * bits that slopped past the last full byte were
	     * zeros.  If we don't check them, they become a
	     * subliminal channel.
	     */
	    if (out[io] != 0) {
		DPRINT((stderr, "base64_dec: bad extra bits!\n"));
		// Well, there are bad encoders out there. Let it pass
		out[io] = 0;
		// return false;
	    }
	    // We've appended an extra 0.
	    out.resize(io);
	}
    } else {
	/*
	 * We ended by seeing the end of the string.  Make sure we
	 * have no partial bytes lying around.
	 */
	if (state != 0) {
	    DPRINT((stderr, "base64_dec: bad final state\n"));
	    return false;
	}
    }

    DPRINT((stderr, "base64_dec: ret ok, io %d sz %d len %d value [%s]\n", 
	    io, (int)out.size(), (int)out.length(), out.c_str()));
    return true;
}

#undef Assert
#define Assert(X)

void base64_encode(const string &in, string &out)
{
    unsigned char input[3];
    unsigned char output[4];

    out.clear();

    string::size_type srclength = in.length();
    int sidx = 0;
    while (2 < srclength) {
	input[0] = in[sidx++];
	input[1] = in[sidx++];
	input[2] = in[sidx++];
	srclength -= 3;

	output[0] = input[0] >> 2;
	output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4);
	output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6);
	output[3] = input[2] & 0x3f;
	Assert(output[0] < 64);
	Assert(output[1] < 64);
	Assert(output[2] < 64);
	Assert(output[3] < 64);

	out += Base64[output[0]];
	out += Base64[output[1]];
	out += Base64[output[2]];
	out += Base64[output[3]];
    }
    
    /* Now we worry about padding. */
    if (0 != srclength) {
	/* Get what's left. */
	input[0] = input[1] = input[2] = '\0';
	for (string::size_type i = 0; i < srclength; i++)
	    input[i] = in[sidx++];
	
	output[0] = input[0] >> 2;
	output[1] = ((input[0] & 0x03) << 4) + (input[1] >> 4);
	output[2] = ((input[1] & 0x0f) << 2) + (input[2] >> 6);
	Assert(output[0] < 64);
	Assert(output[1] < 64);
	Assert(output[2] < 64);

	out += Base64[output[0]];
	out += Base64[output[1]];
	if (srclength == 1)
	    out += Pad64;
	else
	    out += Base64[output[2]];
	out += Pad64;
    }
    return;
}
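// Round-trip example using a standard RFC 4648 test vector (comment added
// for illustration): base64_encode("foob", out) yields "Zm9vYg==", and
// base64_decode("Zm9vYg==", back) restores "foob".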

#ifdef TEST_BASE64
#include <stdio.h>
#include <stdlib.h>

#include "readfile.h"

const char *thisprog;
static char usage [] = "testfile\n\n"
;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_i	  0x2 
#define OPT_P	  0x4 

int main(int argc, char **argv)
{
    thisprog = argv[0];
    argc--; argv++;

    while (argc > 0 && **argv == '-') {
	(*argv)++;
	if (!(**argv))
            /* Case of "adb - core" */
	    Usage();
	while (**argv)
	    switch (*(*argv)++) {
	    case 'i':	op_flags |= OPT_i; break;
	    default: Usage();	break;
	    }
	argc--; argv++;
    }
    
    if (op_flags & OPT_i)  {
	const char *values[] = {"", "1", "12", "123", "1234", 
				"12345", "123456"};
	int nvalues = sizeof(values) / sizeof(char *);
	string in, out, back;
	int err = 0;
	for (int i = 0; i < nvalues; i++) {
	    in = values[i];
	    base64_encode(in, out);
	    base64_decode(out, back);
	    if (in != back) {
		fprintf(stderr, "In [%s] %d != back [%s] %d (out [%s] %d\n", 
			in.c_str(), int(in.length()), 
			back.c_str(), int(back.length()),
			out.c_str(), int(out.length())
			);
		err++;
	    }
	}
	in.erase();
	in += char(0);
	in += char(0);
	in += char(0);
	in += char(0);
	base64_encode(in, out);
	base64_decode(out, back);
	if (in != back) {
	    fprintf(stderr, "In [%s] %d != back [%s] %d (out [%s] %d\n", 
		    in.c_str(), int(in.length()), 
		    back.c_str(), int(back.length()),
		    out.c_str(), int(out.length())
		    );
	    err++;
	}
	exit(!(err == 0));
    } else {
	if (argc > 1)
	    Usage();
	string infile;
	if (argc == 1)
	    infile = *argv++;argc--;
	string idata, reason;
	if (!file_to_string(infile, idata, &reason)) {
	    fprintf(stderr, "Can't read file: %s\n", reason.c_str());
	    exit(1);
	}
	string odata;
	if (!base64_decode(idata, odata)) {
	    fprintf(stderr, "Decoding failed\n");
	    exit(1);
	}
	fwrite(odata.c_str(), 1,
	       odata.size() * sizeof(string::value_type), stdout);
	exit(0);
    }
}
#endif
recoll-1.26.3/utils/strmatcher.cpp0000644000175000017500000000410413533651561014033 00000000000000/* Copyright (C) 2012 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "autoconfig.h"
#include "strmatcher.h"

#include <stdio.h>
#include <fnmatch.h>
#include <regex.h>

#include <string>

#include "cstr.h"
#include "log.h"
#include "pathut.h"

using namespace std;

bool StrWildMatcher::match(const string& val) const
{
    LOGDEB2("StrWildMatcher::match ["<< m_sexp<< "] against [" << val << "]\n");
    int ret = fnmatch(m_sexp.c_str(), val.c_str(), FNM_NOESCAPE);
    switch (ret) {
    case 0: return true;
    case FNM_NOMATCH: return false;
    default:
	LOGINFO("StrWildMatcher::match:err: e [" << m_sexp << "] s [" << val
                << "] (" << url_encode(val) << ") ret " << ret << "\n");
	return false;
    }
}

string::size_type StrWildMatcher::baseprefixlen() const
{
    return m_sexp.find_first_of(cstr_wildSpecStChars);
}

StrRegexpMatcher::StrRegexpMatcher(const string& exp)
    : StrMatcher(exp),
      m_re(exp, SimpleRegexp::SRE_NOSUB)
{
}

bool StrRegexpMatcher::setExp(const string& exp)
{
    m_re = SimpleRegexp(exp, SimpleRegexp::SRE_NOSUB);
    return m_re.ok();
}

bool StrRegexpMatcher::match(const string& val) const
{
    if (!m_re.ok()) 
	return false;
    return m_re(val);
}

string::size_type StrRegexpMatcher::baseprefixlen() const
{
    return m_sexp.find_first_of(cstr_regSpecStChars);
}

bool StrRegexpMatcher::ok() const
{
    return m_re.ok();
}

recoll-1.26.3/utils/md5ut.cpp0000644000175000017500000000333513533651561012722 00000000000000/* Copyright (C) 2015 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include <stdio.h>
#include <string>

#include "md5ut.h"
#include "readfile.h"

using namespace std;

// Quite incredibly if this class is named FileScanMd5 like the
// different one in readfile.cpp, the vtables get mixed up and mh_xslt
// crashes while calling a virtual function (gcc 6.3 and 7.3)
class FileScanMd5loc : public FileScanDo {
public:
    FileScanMd5loc(string& d) : digest(d) {}
    virtual bool init(int64_t, string *)
    {
	MD5Init(&ctx);
	return true;
    }
    virtual bool data(const char *buf, int cnt, string*)
    {
	MD5Update(&ctx, (const unsigned char*)buf, cnt);
	return true;
    }
    string &digest;
    MD5_CTX ctx;
};

bool MD5File(const string& filename, string &digest, string *reason)
{
    FileScanMd5loc md5er(digest);
    if (!file_scan(filename, &md5er, reason))
	return false;
    // We happen to know that digest and md5er.digest are the same object
    MD5Final(md5er.digest, &md5er.ctx);
    return true;
}
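// Minimal usage sketch (hypothetical file name, added as an example):
//   string digest, reason;
//   if (MD5File("/tmp/somefile", digest, &reason)) {
//       // digest now holds the computed MD5 of the file contents
//   } else {
//       // reason describes why the file could not be read
//   }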
recoll-1.26.3/utils/smallut.cpp0000644000175000017500000011017713546576741013362 00000000000000/* Copyright (C) 2006-2016 J.F.Dockes
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301 USA
 */
#include <ctype.h>
#include <errno.h>
#include <math.h>

#ifdef _WIN32
// needed for localtime_r under mingw?
#define _POSIX_THREAD_SAFE_FUNCTIONS
#endif
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <stdint.h>

// Older compilers don't support stdc++ regex, but Windows does not
// have the Linux one. Have a simple class to solve the simple cases.
#if defined(_WIN32)
#define USE_STD_REGEX
#include <regex>
#else
#define USE_LINUX_REGEX
#include <regex.h>
#endif

#include <string>
#include <list>
#include <map>
#include <set>
#include <unordered_set>
#include <vector>

#include "smallut.h"

using namespace std;

int stringicmp(const string& s1, const string& s2)
{
    string::const_iterator it1 = s1.begin();
    string::const_iterator it2 = s2.begin();
    string::size_type size1 = s1.length(), size2 = s2.length();
    char c1, c2;

    if (size1 < size2) {
        while (it1 != s1.end()) {
            c1 = ::toupper(*it1);
            c2 = ::toupper(*it2);
            if (c1 != c2) {
                return c1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : -1;
    } else {
        while (it2 != s2.end()) {
            c1 = ::toupper(*it1);
            c2 = ::toupper(*it2);
            if (c1 != c2) {
                return c1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : 1;
    }
}
void stringtolower(string& io)
{
    string::iterator it = io.begin();
    string::iterator ite = io.end();
    while (it != ite) {
        *it = ::tolower(*it);
        it++;
    }
}
string stringtolower(const string& i)
{
    string o = i;
    stringtolower(o);
    return o;
}

void stringtoupper(string& io)
{
    string::iterator it = io.begin();
    string::iterator ite = io.end();
    while (it != ite) {
        *it = ::toupper(*it);
        it++;
    }
}
string stringtoupper(const string& i)
{
    string o = i;
    stringtoupper(o);
    return o;
}

extern int stringisuffcmp(const string& s1, const string& s2)
{
    string::const_reverse_iterator r1 = s1.rbegin(), re1 = s1.rend(),
                                   r2 = s2.rbegin(), re2 = s2.rend();
    while (r1 != re1 && r2 != re2) {
        char c1 = ::toupper(*r1);
        char c2 = ::toupper(*r2);
        if (c1 != c2) {
            return c1 > c2 ? 1 : -1;
        }
        ++r1;
        ++r2;
    }
    return 0;
}

//  s1 is already lowercase
int stringlowercmp(const string& s1, const string& s2)
{
    string::const_iterator it1 = s1.begin();
    string::const_iterator it2 = s2.begin();
    string::size_type size1 = s1.length(), size2 = s2.length();
    char c2;

    if (size1 < size2) {
        while (it1 != s1.end()) {
            c2 = ::tolower(*it2);
            if (*it1 != c2) {
                return *it1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : -1;
    } else {
        while (it2 != s2.end()) {
            c2 = ::tolower(*it2);
            if (*it1 != c2) {
                return *it1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : 1;
    }
}

//  s1 is already uppercase
int stringuppercmp(const string& s1, const string& s2)
{
    string::const_iterator it1 = s1.begin();
    string::const_iterator it2 = s2.begin();
    string::size_type size1 = s1.length(), size2 = s2.length();
    char c2;

    if (size1 < size2) {
        while (it1 != s1.end()) {
            c2 = ::toupper(*it2);
            if (*it1 != c2) {
                return *it1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : -1;
    } else {
        while (it2 != s2.end()) {
            c2 = ::toupper(*it2);
            if (*it1 != c2) {
                return *it1 > c2 ? 1 : -1;
            }
            ++it1;
            ++it2;
        }
        return size1 == size2 ? 0 : 1;
    }
}

bool beginswith(const std::string& big, const std::string& small)
{
    if (big.compare(0, small.size(), small)) {
        return false;
    }
    return true;
}

// Compare charset names, removing the more common spelling variations
bool samecharset(const string& cs1, const string& cs2)
{
    string mcs1, mcs2;
    // Remove all - and _, turn to lowercase
    for (unsigned int i = 0; i < cs1.length(); i++) {
        if (cs1[i] != '_' && cs1[i] != '-') {
            mcs1 += ::tolower(cs1[i]);
        }
    }
    for (unsigned int i = 0; i < cs2.length(); i++) {
        if (cs2[i] != '_' && cs2[i] != '-') {
            mcs2 += ::tolower(cs2[i]);
        }
    }
    return mcs1 == mcs2;
}
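// A few examples of the resulting equivalence (added for illustration):
// samecharset("UTF-8", "utf_8") and samecharset("ISO-8859-1", "iso8859-1")
// both return true, while samecharset("UTF-8", "UTF-16") returns false.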

template <class T> bool stringToStrings(const string& s, T& tokens,
                                        const string& addseps)
{
    string current;
    tokens.clear();
    enum states {SPACE, TOKEN, INQUOTE, ESCAPE};
    states state = SPACE;
    for (unsigned int i = 0; i < s.length(); i++) {
        switch (s[i]) {
        case '"':
            switch (state) {
            case SPACE:
                state = INQUOTE;
                continue;
            case TOKEN:
                current += '"';
                continue;
            case INQUOTE:
                tokens.insert(tokens.end(), current);
                current.clear();
                state = SPACE;
                continue;
            case ESCAPE:
                current += '"';
                state = INQUOTE;
                continue;
            }
            break;
        case '\\':
            switch (state) {
            case SPACE:
            case TOKEN:
                current += '\\';
                state = TOKEN;
                continue;
            case INQUOTE:
                state = ESCAPE;
                continue;
            case ESCAPE:
                current += '\\';
                state = INQUOTE;
                continue;
            }
            break;

        case ' ':
        case '\t':
        case '\n':
        case '\r':
            switch (state) {
            case SPACE:
                continue;
            case TOKEN:
                tokens.insert(tokens.end(), current);
                current.clear();
                state = SPACE;
                continue;
            case INQUOTE:
            case ESCAPE:
                current += s[i];
                continue;
            }
            break;

        default:
            if (!addseps.empty() && addseps.find(s[i]) != string::npos) {
                switch (state) {
                case ESCAPE:
                    state = INQUOTE;
                    break;
                case INQUOTE:
                    break;
                case SPACE:
                    tokens.insert(tokens.end(), string(1, s[i]));
                    continue;
                case TOKEN:
                    tokens.insert(tokens.end(), current);
                    current.erase();
                    tokens.insert(tokens.end(), string(1, s[i]));
                    state = SPACE;
                    continue;
                }
            } else switch (state) {
                case ESCAPE:
                    state = INQUOTE;
                    break;
                case SPACE:
                    state = TOKEN;
                    break;
                case TOKEN:
                case INQUOTE:
                    break;
                }
            current += s[i];
        }
    }
    switch (state) {
    case SPACE:
        break;
    case TOKEN:
        tokens.insert(tokens.end(), current);
        break;
    case INQUOTE:
    case ESCAPE:
        return false;
    }
    return true;
}

template bool stringToStrings<list<string> >(const string&,
        list<string>&, const string&);
template bool stringToStrings<vector<string> >(const string&,
        vector<string>&, const string&);
template bool stringToStrings<set<string> >(const string&,
        set<string>&, const string&);
template bool stringToStrings<std::unordered_set<string> >
(const string&, std::unordered_set<string>&, const string&);
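// Example of the splitting rules implemented above (comment added for
// illustration): with no addseps,
//   input:  one "two words" "with \" quote"
//   tokens: [one] [two words] [with " quote]
// With addseps == ",", an unquoted comma additionally becomes a token of
// its own.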

template <class T> void stringsToString(const T& tokens, string& s)
{
    for (typename T::const_iterator it = tokens.begin();
            it != tokens.end(); it++) {
        bool hasblanks = false;
        if (it->find_first_of(" \t\n") != string::npos) {
            hasblanks = true;
        }
        if (it != tokens.begin()) {
            s.append(1, ' ');
        }
        if (hasblanks) {
            s.append(1, '"');
        }
        for (unsigned int i = 0; i < it->length(); i++) {
            char car = it->at(i);
            if (car == '"') {
                s.append(1, '\\');
                s.append(1, car);
            } else {
                s.append(1, car);
            }
        }
        if (hasblanks) {
            s.append(1, '"');
        }
    }
}
template void stringsToString<list<string> >(const list<string>&, string&);
template void stringsToString<vector<string> >(const vector<string>&, string&);
template void stringsToString<set<string> >(const set<string>&, string&);
template void stringsToString<unordered_set<string> >(const unordered_set<string>&, string&);
template <class T> string stringsToString(const T& tokens)
{
    string out;
    stringsToString(tokens, out);
    return out;
}
template string stringsToString<list<string> >(const list<string>&);
template string stringsToString<vector<string> >(const vector<string>&);
template string stringsToString<set<string> >(const set<string>&);
template string stringsToString<unordered_set<string> >(const unordered_set<string>&);

template <class T> void stringsToCSV(const T& tokens, string& s,
                                     char sep)
{
    s.erase();
    for (typename T::const_iterator it = tokens.begin();
            it != tokens.end(); it++) {
        bool needquotes = false;
        if (it->empty() ||
                it->find_first_of(string(1, sep) + "\"\n") != string::npos) {
            needquotes = true;
        }
        if (it != tokens.begin()) {
            s.append(1, sep);
        }
        if (needquotes) {
            s.append(1, '"');
        }
        for (unsigned int i = 0; i < it->length(); i++) {
            char car = it->at(i);
            if (car == '"') {
                s.append(2, '"');
            } else {
                s.append(1, car);
            }
        }
        if (needquotes) {
            s.append(1, '"');
        }
    }
}
template void stringsToCSV<list<string> >(const list<string>&, string&, char);
template void stringsToCSV<vector<string> >(const vector<string>&, string&,
        char);

void stringToTokens(const string& str, vector<string>& tokens,
                    const string& delims, bool skipinit)
{
    string::size_type startPos = 0, pos;

    // Skip initial delims, return empty if this eats all.
    if (skipinit &&
            (startPos = str.find_first_not_of(delims, 0)) == string::npos) {
        return;
    }
    while (startPos < str.size()) {
        // Find next delimiter or end of string (end of token)
        pos = str.find_first_of(delims, startPos);

        // Add token to the vector and adjust start
        if (pos == string::npos) {
            tokens.push_back(str.substr(startPos));
            break;
        } else if (pos == startPos) {
            // Don't push empty tokens after first
            if (tokens.empty()) {
                tokens.push_back(string());
            }
            startPos = ++pos;
        } else {
            tokens.push_back(str.substr(startPos, pos - startPos));
            startPos = ++pos;
        }
    }
}

void stringSplitString(const string& str, vector<string>& tokens,
                       const string& sep)
{
    if (str.empty() || sep.empty())
        return;

    string::size_type startPos = 0, pos;

    while (startPos < str.size()) {
        // Find next delimiter or end of string (end of token)
        pos = str.find(sep, startPos);
        // Add token to the vector and adjust start
        if (pos == string::npos) {
            tokens.push_back(str.substr(startPos));
            break;
        } else if (pos == startPos) {
            // Initial or consecutive separators
            tokens.push_back(string());
        } else {
            tokens.push_back(str.substr(startPos, pos - startPos));
        }
        startPos = pos + sep.size();
    }
}
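// Comparison of the two splitters above (illustrative; stringToTokens called
// with skipinit true): for the input ",a,,b," and the separator ",":
//   stringToTokens()    -> [a] [b]        (delimiter runs are collapsed)
//   stringSplitString() -> [] [a] [] [b]  (empty fields kept, except a
//                                          trailing one)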

bool stringToBool(const string& s)
{
    if (s.empty()) {
        return false;
    }
    if (isdigit(s[0])) {
        int val = atoi(s.c_str());
        return val ? true : false;
    }
    if (s.find_first_of("yYtT") == 0) {
        return true;
    }
    return false;
}

void trimstring(string& s, const char *ws)
{
    rtrimstring(s, ws);
    ltrimstring(s, ws);
}

void rtrimstring(string& s, const char *ws)
{
    string::size_type pos = s.find_last_not_of(ws);
    if (pos == string::npos) {
        s.clear();
    } else if (pos != s.length() - 1) {
        s.replace(pos + 1, string::npos, string());
    }
}

void ltrimstring(string& s, const char *ws)
{
    string::size_type pos = s.find_first_not_of(ws);
    if (pos == string::npos) {
        s.clear();
        return;
    }
    s.replace(0, pos, string());
}

// Remove some chars and replace them with spaces
string neutchars(const string& str, const string& chars, char rep)
{
    string out;
    neutchars(str, out, chars, rep);
    return out;
}
void neutchars(const string& str, string& out, const string& chars, char rep)
{
    string::size_type startPos, pos;

    for (pos = 0;;) {
        // Skip initial chars, break if this eats all.
        if ((startPos = str.find_first_not_of(chars, pos)) == string::npos) {
            break;
        }
        // Find next delimiter or end of string (end of token)
        pos = str.find_first_of(chars, startPos);
        // Add token to the output. Note: token cant be empty here
        if (pos == string::npos) {
            out += str.substr(startPos);
        } else {
            out += str.substr(startPos, pos - startPos) + rep;
        }
    }
}
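// For example (added for illustration): neutchars("a\tb\nc", "\t\n", ' ')
// yields "a b c", and runs of replaced characters are collapsed, so
// neutchars("a\t\tb", "\t\n", ' ') also yields "a b".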


/* Truncate a string to a given maxlength, avoiding cutting off midword
 * if reasonably possible. Note: we could also use textsplit, stopping when
 * we have enough, this would be cleanly utf8-aware but would remove
 * punctuation */
static const string cstr_SEPAR = " \t\n\r-:.;,/[]{}";
string truncate_to_word(const string& input, string::size_type maxlen)
{
    string output;
    if (input.length() <= maxlen) {
        output = input;
    } else {
        output = input.substr(0, maxlen);
        string::size_type space = output.find_last_of(cstr_SEPAR);
        // Original version only truncated at space if space was found after
        // maxlen/2. But we HAVE to truncate at space, else we'd need to do
        // utf8 stuff to avoid truncating at multibyte char. In any case,
        // not finding space means that the text probably has no value.
        // Except probably for Asian languages, so we may want to fix this
        // one day
        if (space == string::npos) {
            output.erase();
        } else {
            output.erase(space);
        }
    }
    return output;
}
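// Example (added for illustration): truncate_to_word("the quick brown fox", 12)
// returns "the quick" rather than "the quick br": the cut is moved back to
// the last separator inside the 12-character prefix.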

// Escape things that would look like markup
string escapeHtml(const string& in)
{
    string out;
    for (string::size_type pos = 0; pos < in.length(); pos++) {
	switch(in.at(pos)) {
        case '<': out += "&lt;"; break;
        case '>': out += "&gt;"; break;
        case '&': out += "&amp;"; break;
        case '"': out += "&quot;"; break;
	default: out += in.at(pos); break;
	}
    }
    return out;
}
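// For instance (added for illustration), escapeHtml("<a href=\"x\">")
// returns "&lt;a href=&quot;x&quot;&gt;".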

string escapeShell(const string& in)
{
    string out;
    out += "\"";
    for (string::size_type pos = 0; pos < in.length(); pos++) {
        switch (in.at(pos)) {
        case '$':
            out += "\\$";
            break;
        case '`':
            out += "\\`";
            break;
        case '"':
            out += "\\\"";
            break;
        case '\n':
            out += "\\\n";
            break;
        case '\\':
            out += "\\\\";
            break;
        default:
            out += in.at(pos);
        }
    }
    out += "\"";
    return out;
}

// Escape value to be suitable as a C++ source double-quoted string (for
// generating a C++ program)
string makeCString(const string& in)
{
    string out;
    out += "\"";
    for (string::size_type pos = 0; pos < in.length(); pos++) {
        switch (in.at(pos)) {
        case '"':
            out += "\\\"";
            break;
        case '\n':
            out += "\\n";
            break;
        case '\r':
            out += "\\r";
            break;
        case '\\':
            out += "\\\\";
            break;
        default:
            out += in.at(pos);
        }
    }
    out += "\"";
    return out;
}


// Substitute printf-like percent cmds inside a string
bool pcSubst(const string& in, string& out, const map<char, string>& subs)
{
    string::const_iterator it;
    for (it = in.begin(); it != in.end(); it++) {
        if (*it == '%') {
            if (++it == in.end()) {
                out += '%';
                break;
            }
            if (*it == '%') {
                out += '%';
                continue;
            }
            map<char, string>::const_iterator tr;
            if ((tr = subs.find(*it)) != subs.end()) {
                out += tr->second;
            } else {
                // We used to do "out += *it;" here but this does not make
                // sense
            }
        } else {
            out += *it;
        }
    }
    return true;
}

bool pcSubst(const string& in, string& out, const map<string, string>& subs)
{
    out.erase();
    string::size_type i;
    for (i = 0; i < in.size(); i++) {
        if (in[i] == '%') {
            if (++i == in.size()) {
                out += '%';
                break;
            }
            if (in[i] == '%') {
                out += '%';
                continue;
            }
            string key = "";
            if (in[i] == '(') {
                if (++i == in.size()) {
                    out += string("%(");
                    break;
                }
                string::size_type j = in.find_first_of(")", i);
                if (j == string::npos) {
                    // ??concatenate remaining part and stop
                    out += in.substr(i - 2);
                    break;
                }
                key = in.substr(i, j - i);
                i = j;
            } else {
                key = in[i];
            }
            map<string, string>::const_iterator tr;
            if ((tr = subs.find(key)) != subs.end()) {
                out += tr->second;
            } else {
                // Substitute to nothing, that's the reasonable thing to do
                // instead of keeping the %(key)
                // out += key.size()==1? key : string("(") + key + string(")");
            }
        } else {
            out += in[i];
        }
    }
    return true;
}
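
// Illustrative sketch, not part of the original source: how the second
// pcSubst() overload expands both one-character and %(name) commands.
#if 0
static void pcSubst_example()
{
    map<string, string> subs {{"f", "/tmp/doc.txt"}, {"mime", "text/plain"}};
    string out;
    pcSubst("view %f as %(mime), literal %%", out, subs);
    // out == "view /tmp/doc.txt as text/plain, literal %"
    // Keys absent from the map are replaced by nothing, e.g. "%z" -> "".
}
#endif
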
inline static int ulltorbuf(uint64_t val, char *rbuf)
{
    int idx;
    for (idx = 0; val; idx++) {
        rbuf[idx] = '0' + val % 10;
        val /= 10;
    }
    rbuf[idx] = 0;
    return idx;
}

inline static void ullcopyreverse(const char *rbuf, string& buf, int idx)
{
    buf.reserve(idx + 1);
    for (int i = idx - 1; i >= 0; i--) {
        buf.push_back(rbuf[i]);
    }
}

void ulltodecstr(uint64_t val, string& buf)
{
    buf.clear();
    if (val == 0) {
        buf = "0";
        return;
    }

    char rbuf[30];
    int idx = ulltorbuf(val, rbuf);

    ullcopyreverse(rbuf, buf, idx);
    return;
}

void lltodecstr(int64_t val, string& buf)
{
    buf.clear();
    if (val == 0) {
        buf = "0";
        return;
    }

    bool neg = val < 0;
    if (neg) {
        val = -val;
    }

    char rbuf[30];
    int idx = ulltorbuf(val, rbuf);

    if (neg) {
        rbuf[idx++] = '-';
    }
    rbuf[idx] = 0;

    ullcopyreverse(rbuf, buf, idx);
    return;
}

string lltodecstr(int64_t val)
{
    string buf;
    lltodecstr(val, buf);
    return buf;
}

string ulltodecstr(uint64_t val)
{
    string buf;
    ulltodecstr(val, buf);
    return buf;
}

// Convert byte count into unit (KB/MB...) appropriate for display
string displayableBytes(int64_t size)
{
    const char *unit;

    double roundable = 0;
    if (size < 1000) {
        unit = " B ";
        roundable = double(size);
    } else if (size < 1E6) {
        unit = " KB ";
        roundable = double(size) / 1E3;
    } else if (size < 1E9) {
        unit = " MB ";
        roundable = double(size) / 1E6;
    } else {
        unit = " GB ";
        roundable = double(size) / 1E9;
    }
    size = int64_t(round(roundable));
    return lltodecstr(size).append(unit);
}
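
// Illustrative examples (not part of the original source):
//   displayableBytes(532)         -> "532 B "
//   displayableBytes(2560)        -> "3 KB "  (2.56 rounded up)
//   displayableBytes(1200000000)  -> "1 GB "  (1.2 rounded down)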

string breakIntoLines(const string& in, unsigned int ll,
                      unsigned int maxlines)
{
    string query = in;
    string oq;
    unsigned int nlines = 0;
    while (query.length() > 0) {
        string ss = query.substr(0, ll);
        if (ss.length() == ll) {
            string::size_type pos = ss.find_last_of(" ");
            if (pos == string::npos) {
                pos = query.find_first_of(" ");
                if (pos != string::npos) {
                    ss = query.substr(0, pos + 1);
                } else {
                    ss = query;
                }
            } else {
                ss = ss.substr(0, pos + 1);
            }
        }
        // This can't happen, but anyway. Be very sure to avoid an infinite loop
        if (ss.length() == 0) {
            oq = query;
            break;
        }
        oq += ss + "\n";
        if (nlines++ >= maxlines) {
            oq += " ... \n";
            break;
        }
        query = query.substr(ss.length());
    }
    return oq;
}
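
// Illustrative example (not part of the original source): with a line length
// of 10 and a generous line limit,
//   breakIntoLines("one two three four", 10, 5)
// yields "one two \nthree \nfour\n": each break is placed at the last space
// found inside the current 10-character window.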

// Date is Y[-M[-D]]
static bool parsedate(vector<string>::const_iterator& it,
                      vector<string>::const_iterator end, DateInterval *dip)
{
    dip->y1 = dip->m1 = dip->d1 = dip->y2 = dip->m2 = dip->d2 = 0;
    if (it->length() > 4 || !it->length() ||
            it->find_first_not_of("0123456789") != string::npos) {
        return false;
    }
    if (it == end || sscanf(it++->c_str(), "%d", &dip->y1) != 1) {
        return false;
    }
    if (it == end || *it == "/") {
        return true;
    }
    if (*it++ != "-") {
        return false;
    }

    if (it->length() > 2 || !it->length() ||
            it->find_first_not_of("0123456789") != string::npos) {
        return false;
    }
    if (it == end || sscanf(it++->c_str(), "%d", &dip->m1) != 1) {
        return false;
    }
    if (it == end || *it == "/") {
        return true;
    }
    if (*it++ != "-") {
        return false;
    }

    if (it->length() > 2 || !it->length() ||
            it->find_first_not_of("0123456789") != string::npos) {
        return false;
    }
    if (it == end || sscanf(it++->c_str(), "%d", &dip->d1) != 1) {
        return false;
    }

    return true;
}

// Called with the 'P' already processed. Period ends at end of string
// or at '/'. We don't put a lot of effort into validation and will happily
// accept 10Y1Y4Y (the last wins).
static bool parseperiod(vector<string>::const_iterator& it,
                        vector<string>::const_iterator end, DateInterval *dip)
{
    dip->y1 = dip->m1 = dip->d1 = dip->y2 = dip->m2 = dip->d2 = 0;
    while (it != end) {
        int value;
        if (it->find_first_not_of("0123456789") != string::npos) {
            return false;
        }
        if (sscanf(it++->c_str(), "%d", &value) != 1) {
            return false;
        }
        if (it == end || it->empty()) {
            return false;
        }
        switch (it->at(0)) {
        case 'Y':
        case 'y':
            dip->y1 = value;
            break;
        case 'M':
        case 'm':
            dip->m1 = value;
            break;
        case 'D':
        case 'd':
            dip->d1 = value;
            break;
        default:
            return false;
        }
        it++;
        if (it == end) {
            return true;
        }
        if (*it == "/") {
            return true;
        }
    }
    return true;
}

#ifdef _WIN32
int setenv(const char *name, const char *value, int overwrite)
{
    if (!overwrite) {
        const char *cp = getenv(name);
        if (cp) {
            return -1;
        }
    }
    return _putenv_s(name, value);
}
void unsetenv(const char *name)
{
    _putenv_s(name, "");
}
#endif

time_t portable_timegm(struct tm *tm)
{
    time_t ret;
    char *tz;

    tz = getenv("TZ");
    setenv("TZ", "", 1);
    tzset();
    ret = mktime(tm);
    if (tz) {
        setenv("TZ", tz, 1);
    } else {
        unsetenv("TZ");
    }
    tzset();
    return ret;
}
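
// Illustrative example (not part of the original source): portable_timegm()
// interprets the broken-down time as UTC whatever the local timezone, so
//   struct tm tm = {}; tm.tm_year = 70; tm.tm_mon = 0; tm.tm_mday = 2;
//   portable_timegm(&tm);  // -> 86400 (1970-01-02T00:00:00Z)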

#if 0
static void cerrdip(const string& s, DateInterval *dip)
{
    cerr << s << dip->y1 << "-" << dip->m1 << "-" << dip->d1 << "/"
         << dip->y2 << "-" << dip->m2 << "-" << dip->d2
         << endl;
}
#endif

// Compute date + period. Won't work outside the Unix era or for
// pre-1970 dates. Just build a possibly non-normalized struct tm, let the
// time functions normalize it, and convert back.
static bool addperiod(DateInterval *dp, DateInterval *pp)
{
    struct tm tm;
    // Create a struct tm with possibly non-normalized fields and let
    // mktime()/localtime_r() sort it out
    memset(&tm, 0, sizeof(tm));
    tm.tm_year = dp->y1 - 1900 + pp->y1;
    tm.tm_mon = dp->m1 + pp->m1 - 1;
    tm.tm_mday = dp->d1 + pp->d1;
    time_t tres = mktime(&tm);
    localtime_r(&tres, &tm);
    dp->y1 = tm.tm_year + 1900;
    dp->m1 = tm.tm_mon + 1;
    dp->d1 = tm.tm_mday;
    //cerrdip("Addperiod return", dp);
    return true;
}
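
// Illustrative example (not part of the original source): adding P1M to
// 2010-01-31 first yields the non-normalized 2010-02-31, which mktime()
// then normalizes to 2010-03-03 (2010 is not a leap year).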
int monthdays(int mon, int year)
{
    switch (mon) {
    // We return a few too many 29-day Februaries (no century rule), which
    // is harmless here
    case 2:
        return (year % 4) == 0 ? 29 : 28;
    case 1:
    case 3:
    case 5:
    case 7:
    case 8:
    case 10:
    case 12:
        return 31;
    default:
        return 30;
    }
}
bool parsedateinterval(const string& s, DateInterval *dip)
{
    vector<string> vs;
    dip->y1 = dip->m1 = dip->d1 = dip->y2 = dip->m2 = dip->d2 = 0;
    DateInterval p1, p2, d1, d2;
    p1 = p2 = d1 = d2 = *dip;
    bool hasp1 = false, hasp2 = false, hasd1 = false, hasd2 = false,
         hasslash = false;

    if (!stringToStrings(s, vs, "PYMDpymd-/")) {
        return false;
    }
    if (vs.empty()) {
        return false;
    }

    vector<string>::const_iterator it = vs.begin();
    if (*it == "P" || *it == "p") {
        it++;
        if (!parseperiod(it, vs.end(), &p1)) {
            return false;
        }
        hasp1 = true;
        //cerrdip("p1", &p1);
        p1.y1 = -p1.y1;
        p1.m1 = -p1.m1;
        p1.d1 = -p1.d1;
    } else if (*it == "/") {
        hasslash = true;
        goto secondelt;
    } else {
        if (!parsedate(it, vs.end(), &d1)) {
            return false;
        }
        hasd1 = true;
    }

    // Got one element and/or /
secondelt:
    if (it != vs.end()) {
        if (*it != "/") {
            return false;
        }
        hasslash = true;
        it++;
        if (it == vs.end()) {
            // ok
        } else if (*it == "P" || *it == "p") {
            it++;
            if (!parseperiod(it, vs.end(), &p2)) {
                return false;
            }
            hasp2 = true;
        } else {
            if (!parsedate(it, vs.end(), &d2)) {
                return false;
            }
            hasd2 = true;
        }
    }

    // Two periods don't make sense
    if (hasp1 && hasp2) {
        return false;
    }
    // Neither does nothing at all
    if (!hasp1 && !hasd1 && !hasp2 && !hasd2) {
        return false;
    }

    // Empty part means today IF other part is period, else means
    // forever (stays at 0)
    time_t now = time(0);
    struct tm *tmnow = gmtime(&now);
    if ((!hasp1 && !hasd1) && hasp2) {
        d1.y1 = 1900 + tmnow->tm_year;
        d1.m1 = tmnow->tm_mon + 1;
        d1.d1 = tmnow->tm_mday;
        hasd1 = true;
    } else if ((!hasp2 && !hasd2) && hasp1) {
        d2.y1 = 1900 + tmnow->tm_year;
        d2.m1 = tmnow->tm_mon + 1;
        d2.d1 = tmnow->tm_mday;
        hasd2 = true;
    }

    // Incomplete dates have different meanings depending on whether there
    // is a period or not (actual, or infinite as indicated by '/' plus an
    // empty part)
    //
    // If there is no explicit period, an incomplete date indicates a
    // period of the size of the uncompleted elements. Ex: 1999
    // actually means 1999/P12M
    //
    // If there is a period, the incomplete date should be extended
    // to the beginning or end of the unspecified portion. Ex: 1999/
    // means 1999-01-01/ and /1999 means /1999-12-31
    if (hasd1) {
        if (!(hasslash || hasp2)) {
            if (d1.m1 == 0) {
                p2.m1 = 12;
                d1.m1 = 1;
                d1.d1 = 1;
            } else if (d1.d1 == 0) {
                d1.d1 = 1;
                p2.d1 = monthdays(d1.m1, d1.y1);
            }
            hasp2 = true;
        } else {
            if (d1.m1 == 0) {
                d1.m1 = 1;
                d1.d1 = 1;
            } else if (d1.d1 == 0) {
                d1.d1 = 1;
            }
        }
    }
    // if hasd2 is true we had a /
    if (hasd2) {
        if (d2.m1 == 0) {
            d2.m1 = 12;
            d2.d1 = 31;
        } else if (d2.d1 == 0) {
            d2.d1 = monthdays(d2.m1, d2.y1);
        }
    }
    if (hasp1) {
        // Compute d1
        d1 = d2;
        if (!addperiod(&d1, &p1)) {
            return false;
        }
    } else if (hasp2) {
        // Compute d2
        d2 = d1;
        if (!addperiod(&d2, &p2)) {
            return false;
        }
    }

    dip->y1 = d1.y1;
    dip->m1 = d1.m1;
    dip->d1 = d1.d1;
    dip->y2 = d2.y1;
    dip->m2 = d2.m1;
    dip->d2 = d2.d1;
    return true;
}
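
// Illustrative examples (not part of the original source) of the accepted
// syntax, as parsed by parsedateinterval():
//   "1999"        -> 1999-01-01 .. 2000-01-01 (bare year implies P12M)
//   "2014-03-02/" -> 2014-03-02 .. forever (end part stays at 0)
//   "P2D/"        -> two days ago .. today (empty part plus period = today)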


void catstrerror(string *reason, const char *what, int _errno)
{
    if (!reason) {
        return;
    }
    if (what) {
        reason->append(what);
    }

    reason->append(": errno: ");

    char nbuf[20];
    sprintf(nbuf, "%d", _errno);
    reason->append(nbuf);

    reason->append(" : ");

#if defined(sun) || defined(_WIN32)
    // Note: sun strerror is noted mt-safe ??
    reason->append(strerror(_errno));
#else
#define ERRBUFSZ 200
    char errbuf[ERRBUFSZ];
    // There are 2 versions of strerror_r:
    // - The GNU one returns a pointer to the message (possibly in
    //   static storage or in the supplied buffer).
    // - The POSIX one always stores the message in the supplied buffer
    //   and returns 0 on success. As the possible errors and error codes
    //   are not specified, we can't reliably use a test on the return
    //   value to know whether we were handed a pointer instead.
    // We also could not find an easy way to force the POSIX version
    // without changing the cxxflags globally, so we don't try. Recent
    // GNU libc versions normally default to the POSIX version here.
    // At worst we get no message at all.
    errbuf[0] = 0;
    // We don't use ret, it's there to silence a cc warning
    auto ret = strerror_r(_errno, errbuf, ERRBUFSZ);
    (void)ret;
    reason->append(errbuf);
#endif
}


static std::unordered_map<string, string> lang_to_code {
    {"be", "cp1251"},
    {"bg", "cp1251"},
    {"cs", "iso-8859-2"},
    {"el", "iso-8859-7"},
    {"he", "iso-8859-8"},
    {"hr", "iso-8859-2"},
    {"hu", "iso-8859-2"},
    {"ja", "eucjp"},
    {"kk", "pt154"},
    {"ko", "euckr"},
    {"lt", "iso-8859-13"},
    {"lv", "iso-8859-13"},
    {"pl", "iso-8859-2"},
    {"rs", "iso-8859-2"},
    {"ro", "iso-8859-2"},
    {"ru", "koi8-r"},
    {"sk", "iso-8859-2"},
    {"sl", "iso-8859-2"},
    {"sr", "iso-8859-2"},
    {"th", "iso-8859-11"},
    {"tr", "iso-8859-9"},
    {"uk", "koi8-u"},
};
static const string cstr_cp1252("CP1252");

string langtocode(const string& lang)
{
    const auto it = lang_to_code.find(lang);

    // Use cp1252 by default...
    if (it == lang_to_code.end()) {
        return cstr_cp1252;
    }

    return it->second;
}

string localelang()
{
    const char *lang = getenv("LANG");

    if (lang == 0 || *lang == 0 || !strcmp(lang, "C") ||
            !strcmp(lang, "POSIX")) {
        return "en";
    }
    string locale(lang);
    string::size_type under = locale.find_first_of("_");
    if (under == string::npos) {
        return locale;
    }
    return locale.substr(0, under);
}
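
// Illustrative examples (not part of the original source):
//   LANG="fr_FR.UTF-8"            -> localelang() == "fr"
//   LANG unset, "C" or "POSIX"    -> localelang() == "en"
//   langtocode("ru") == "koi8-r", langtocode("xx") == "CP1252" (default)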

#ifdef USE_STD_REGEX

class SimpleRegexp::Internal {
public:
    Internal(const string& exp, int flags, int nm)
        : expr(exp,
               basic_regex<char>::flag_type(
                   regex_constants::extended |
                   ((flags&SRE_ICASE) ? int(regex_constants::icase) : 0) |
                   ((flags&SRE_NOSUB) ? int(regex_constants::nosubs) : 0)
                   )), ok(true), nmatch(nm) {
    }
    std::regex expr;
    std::smatch res;
    bool ok;
    int nmatch;
};

bool SimpleRegexp::simpleMatch(const string& val) const
{
    if (!ok())
        return false;
    return regex_search(val, m->res, m->expr);
}

string SimpleRegexp::getMatch(const string& val, int i) const
{
    return m->res.str(i);
}

#else // -> !USE_STD_REGEX: use the POSIX regex API

class SimpleRegexp::Internal {
public:
    Internal(const string& exp, int flags, int nm) : nmatch(nm) {
        if (regcomp(&expr, exp.c_str(), REG_EXTENDED |
                    ((flags&SRE_ICASE) ? REG_ICASE : 0) |
                    ((flags&SRE_NOSUB) ? REG_NOSUB : 0)) == 0) {
            ok = true;
        } else {
            ok = false;
        }
        matches.resize(nmatch+1);
    }
    ~Internal() {
        regfree(&expr);
    }
    bool ok;
    regex_t expr;
    int nmatch;
    vector<regmatch_t> matches;
};

bool SimpleRegexp::simpleMatch(const string& val) const
{
    if (!ok())
        return false;
    if (regexec(&m->expr, val.c_str(), m->nmatch+1, &m->matches[0], 0) == 0) {
        return true;
    } else {
        return false;
    }
}

string SimpleRegexp::getMatch(const string& val, int i) const
{
    if (i > m->nmatch) {
        return string();
    }
    return val.substr(m->matches[i].rm_so,
                      m->matches[i].rm_eo - m->matches[i].rm_so);
}

#endif // USE_STD_REGEX / POSIX regex

SimpleRegexp::SimpleRegexp(const string& exp, int flags, int nmatch)
    : m(new Internal(exp, flags, nmatch))
{
}

SimpleRegexp::~SimpleRegexp()
{
    delete m;
}

bool SimpleRegexp::ok() const
{
    return m->ok;
}

bool SimpleRegexp::operator() (const string& val) const
{
    return simpleMatch(val);
}
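
// Illustrative sketch, not part of the original source: typical SimpleRegexp
// use with one submatch, assuming the SRE_ICASE flag and the class interface
// declared in smallut.h.
#if 0
static void simpleRegexp_example()
{
    // Extended POSIX syntax, case-insensitive, 1 parenthesized submatch.
    SimpleRegexp re("size: *([0-9]+)", SimpleRegexp::SRE_ICASE, 1);
    const string line("Size: 124 kb");
    if (re.ok() && re(line)) {
        string full = re.getMatch(line, 0); // whole match: "Size: 124"
        string num = re.getMatch(line, 1);  // first submatch: "124"
    }
}
#endif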

string flagsToString(const vector<CharFlags>& flags, unsigned int val)
{
    const char *s;
    string out;
    for (auto& flag : flags) {
        if ((val & flag.value) == flag.value) {
            s = flag.yesname;
        } else {
            s = flag.noname;
        }
        if (s && *s) {
            /* We have something to write */
            if (out.length()) {
                // If not first, add '|' separator
                out.append("|");
            }
            out.append(s);
        }
    }
    return out;
}

string valToString(const vector<CharFlags>& flags, unsigned int val)
{
    string out;
    for (auto& flag : flags) {
        if (flag.value == val) {
            out = flag.yesname;
            return out;
        }
    }
    {
        char mybuf[100];
        sprintf(mybuf, "Unknown Value 0x%x", val);
        out = mybuf;
    }
    return out;
}

unsigned int stringToFlags(const vector<CharFlags>& flags,
                           const string& input, const char *sep)
{
    unsigned int out = 0;

    vector<string> toks;
    stringToTokens(input, toks, sep);
    for (auto& tok: toks) {
        trimstring(tok);
        for (auto& flag : flags) {
            if (!tok.compare(flag.yesname)) {
                /* Note: we don't break: the same name could conceivably
                   set several flags. */
                out |= flag.value;
            }
        }
    }
    return out;
}
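
// Illustrative sketch, not part of the original source; it assumes the
// CharFlags entries are {value, yesname, noname} records as declared in
// smallut.h.
#if 0
static const vector<CharFlags> openflags{
    {0x1, "READ", ""},
    {0x2, "WRITE", ""},
};
// flagsToString(openflags, 0x3)          -> "READ|WRITE"
// valToString(openflags, 0x2)            -> "WRITE"
// stringToFlags(openflags, "READ|WRITE", "|") -> 0x3
#endif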


// Initialization of static data, to be called from the main thread before
// going multithreaded
void smallut_init_mt()
{
    // Init langtocode() static table
    langtocode("");
}
recoll-1.26.3/utils/utf8iter.cpp0000644000175000017500000000225713533651561013440 00000000000000/* Copyright (C) 2017-2019 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "utf8iter.h"
#include <string>

using std::string;

void utf8truncate(std::string& s, int maxlen)
{
    if (s.size() <= string::size_type(maxlen)) {
        return;
    }
    Utf8Iter iter(s);
    string::size_type pos = 0;
    while (iter++ != string::npos)
        if (iter.getBpos() < string::size_type(maxlen)) {
            pos = iter.getBpos();
        }

    s.erase(pos);
}
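
// Illustrative example (not part of the original source): utf8truncate()
// should shorten the 5-byte UTF-8 string "été" requested at maxlen 4 down
// to the 3-byte "ét" rather than cutting the trailing two-byte character
// in half.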
recoll-1.26.3/utils/dlib.cpp0000644000175000017500000000370213566424763012604 00000000000000/* Copyright (C) 2017-2019 J.F.Dockes
 *
 * License: GPL 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include "dlib.h"

#include "pathut.h"
#include "smallut.h"

#ifdef _WIN32
#include "safewindows.h"
#elif defined(HAVE_DLOPEN)
#include 
#else
#error dlib.cpp not ported on this system
#endif

void *dlib_open(const std::string& libname, int flags)
{
#ifdef _WIN32
    return LoadLibraryA(libname.c_str());
#elif defined(HAVE_DLOPEN)
    return dlopen(libname.c_str(), RTLD_LAZY);
#else
    return nullptr;
#endif
}

void *dlib_sym(void *handle, const char *name)
{
#ifdef _WIN32
    return (void *)::GetProcAddress((HMODULE)handle, name);
#elif defined(HAVE_DLOPEN)
    return dlsym(handle, name);
#else
    return nullptr;
#endif
}

void dlib_close(void *handle)
{
#ifdef _WIN32
    ::FreeLibrary((HMODULE)handle);
#elif defined(HAVE_DLOPEN)
    dlclose(handle);
#endif
}

const char *dlib_error()
{
#ifdef _WIN32
    int error = GetLastError();
    static std::string errorstring;
    errorstring = std::string("dlopen/dlsym error: ") + lltodecstr(error);
    return errorstring.c_str();
#elif defined(HAVE_DLOPEN)
    return dlerror();
#else
    return "??? dlib not ported";
#endif
}    
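
// Illustrative sketch, not part of the original source: loading a shared
// library and resolving a symbol with the wrappers above. The library and
// symbol names are hypothetical placeholders.
#if 0
static void dlib_example()
{
    void *handle = dlib_open("libexample.so", 0);
    if (nullptr == handle) {
        const char *err = dlib_error(); // printable message on all platforms
        (void)err;
        return;
    }
    typedef int (*entry_fn)(void);
    entry_fn fn = (entry_fn)dlib_sym(handle, "example_entry");
    if (fn) {
        fn();
    }
    dlib_close(handle);
}
#endif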
recoll-1.26.3/utils/cpuconf.h0000644000175000017500000000224613533651561012766 00000000000000/* Copyright (C) 2013 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _CPUCONF_H_INCLUDED_
#define _CPUCONF_H_INCLUDED_

/** Guess how many CPUs there are on this machine, to help with configuring
    threads */
struct CpuConf {
    CpuConf()
	: ncpus(1)
    {}
    // Virtual CPUs, including hyperthreading; this is all we care about
    // for now
    int ncpus; 
//    int ncores;
//    int nsockets;
};

extern bool getCpuConf(CpuConf& conf);
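
// Illustrative sketch (not part of the original source):
//   CpuConf cpus;
//   if (getCpuConf(cpus) && cpus.ncpus > 1) {
//       // e.g. configure that many indexing threads
//   }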

#endif /* _CPUCONF_H_INCLUDED_ */
recoll-1.26.3/utils/rclutil.h0000644000175000017500000000723113566424763013016 00000000000000/* Copyright (C) 2016 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _RCLUTIL_H_INCLUDED_
#define _RCLUTIL_H_INCLUDED_
#include "autoconfig.h"

// Misc stuff not generic enough to get into smallut or pathut

#include <string>
#include <memory>
#include <map>


extern void rclutil_init_mt();

/// Sub-directory for default recoll config (e.g: .recoll)
extern std::string path_defaultrecollconfsubdir();
// Check if path is either non-existing or an empty directory.
extern bool path_empty(const std::string& path);

/// e.g. /usr/share/recoll. Depends on OS and config
extern const std::string& path_pkgdatadir();

#ifdef _WIN32
extern std::string path_thisexecpath();
#endif

/// Transcode to utf-8 if possible or url encoding, for display.
extern bool printableUrl(const std::string& fcharset,
                         const std::string& in, std::string& out);
/// Same but, in the case of a Windows local path, also turn "c:/" into
/// "/c/". This should be used only for splitting the path in rcldb.
extern std::string url_gpathS(const std::string& url);

/// Retrieve the temp dir location: $RECOLL_TMPDIR else $TMPDIR else /tmp
extern const std::string& tmplocation();

/// Create temporary directory (inside the temp location)
extern bool maketmpdir(std::string& tdir, std::string& reason);

/// Temporary file class
class TempFile {
public:
    TempFile(const std::string& suffix);
    TempFile();
    const char *filename() const;
    const std::string& getreason() const;
    void setnoremove(bool onoff);
    bool ok() const;
    // Attempt to delete all files which could not be deleted on the
    // first try (typically on Windows: because they are open by some
    // process). Called after clearing the mimeHandler cache. Does
    // nothing if not _WIN32
    static void tryRemoveAgain();
    // Also for Windows: for adding the temp files path to the default
    // skippedPaths
    static const std::string& rcltmpdir();

    class Internal;
private:
    std::shared_ptr<Internal> m;
};
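
// Illustrative sketch (not part of the original source): typical TempFile use.
//   TempFile temp(".html");
//   if (temp.ok()) {
//       // Write extracted data to temp.filename() and pass the path to a
//       // viewer. The file is normally removed when the last shared copy
//       // of the object goes away, unless setnoremove(true) was called.
//   } else {
//       // temp.getreason() explains the failure.
//   }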

/// Temporary directory class. Recursively deleted by destructor.
class TempDir {
public:
    TempDir();
    ~TempDir();
    const char *dirname() {
        return m_dirname.c_str();
    }
    const std::string& getreason() {
        return m_reason;
    }
    bool ok() {
        return !m_dirname.empty();
    }
    /// Recursively delete contents but not self.
    bool wipe();
private:
    std::string m_dirname;
    std::string m_reason;
    TempDir(const TempDir&) {}
    TempDir& operator=(const TempDir&) {
        return *this;
    };
};

// Freedesktop thumbnail standard path routine
// On return, path will have the appropriate value in all cases,
// returns true if the file already exists
extern bool thumbPathForUrl(const std::string& url, int size,
                            std::string& path);

// Duplicate (unordered)map while ensuring no shared
// string data (to pass to other thread):
template <class T> void map_ss_cp_noshr(T s, T *d);


#endif /* _RCLUTIL_H_INCLUDED_ */
recoll-1.26.3/utils/circache.cpp0000644000175000017500000014034613566506614013435 00000000000000/* Copyright (C) 2009 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef TEST_CIRCACHE
#include "autoconfig.h"

#include "circache.h"

#include <stdio.h>
#include <errno.h>
#include <assert.h>
#include "safefcntl.h"
#include <sys/types.h>
#include "safesysstat.h"
#include "safeunistd.h"
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

#include <utility>

#include "chrono.h"
#include "zlibut.h"

#ifndef _WIN32
#include <sys/uio.h>
#define O_BINARY 0
#else
struct iovec {
    void *iov_base;
    size_t iov_len;
};
static ssize_t writev(int fd, const struct iovec *iov, int iovcnt)
{
    ssize_t tot = 0;
    for (int i = 0; i < iovcnt; i++) {
        ssize_t ret = ::write(fd, iov[i].iov_base, iov[i].iov_len);
        if (ret > 0) {
            tot += ret;
        }
        if (ret != (ssize_t)iov[i].iov_len) {
            return ret == -1 ? -1 : tot;
        }
    }
    return tot;
}
#endif


#include <iostream>
#include <sstream>
#include <map>

#include "cstr.h"
#include "circache.h"
#include "conftree.h"
#include "log.h"
#include "smallut.h"
#include "md5.h"

using namespace std;

/** Temp buffer with automatic deallocation */
struct TempBuf {
    TempBuf()
        : m_buf(0) {
    }
    TempBuf(int n) {
        m_buf = (char *)malloc(n);
    }
    ~TempBuf() {
        if (m_buf) {
            free(m_buf);
        }
    }
    char *setsize(int n) {
        return (m_buf = (char *)realloc(m_buf, n));
    }
    char *buf() {
        return m_buf;
    }
    char *m_buf;
};

/*
 * File structure:
 * - Starts with a 1-KB header block, with a param dictionary.
 * - Stored items follow. Each item has a header and 2 segments for
 *   the metadata and the data.
 *   The segment sizes are stored in the ascii header/marker:
 *     circacheSizes = xxx yyy zzz
 *     xxx bytes of metadata
 *     yyy bytes of data
 *     zzz bytes of padding up to next object (only one entry has non zero)
 *
 * There is a write position, which can be at eof while
 * the file is growing, or inside the file if we are recycling. This is stored
 * in the header (oheadoffs), together with the maximum size
 *
 * If we are recycling, we have to take care to compute the size of the
 * possible remaining area from the last object invalidated by the write,
 * pad it with neutral data and store the size in the new header. To help with
 * this, the address for the last object written is also kept in the header
 * (nheadoffs, npadsize)
 *
 */

// First block size
#define CIRCACHE_FIRSTBLOCK_SIZE 1024

// Entry header.
// 2x32 1x64 bits ints as hex integers + 1 x 16 bits flag + at least 1 zero
//                          15 +             2x9 + 17 + 3 + 1 = 54
static const char *headerformat = "circacheSizes = %x %x %llx %hx";
#define CIRCACHE_HEADER_SIZE 64
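
// Illustrative example (not part of the original source): an entry header as
// stored on disk is a zero-padded 64-byte ASCII record such as
//   "circacheSizes = a4 1f3c 0 1"
// meaning 0xa4 bytes of metadata dictionary, 0x1f3c bytes of document data
// (here compressed: flags == EFDataCompressed), and no padding.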

class EntryHeaderData {
public:
    EntryHeaderData() : dicsize(0), datasize(0), padsize(0), flags(0) {}
    unsigned int dicsize;
    unsigned int datasize;
    uint64_t padsize;
    unsigned short flags;
};
enum EntryFlags {EFNone = 0, EFDataCompressed = 1};

// A callback class for the header-hopping function.
class CCScanHook {
public:
    virtual ~CCScanHook() {}
    enum status {Stop, Continue, Error, Eof};
    virtual status takeone(int64_t offs, const string& udi,
                           const EntryHeaderData& d) = 0;
};

// We have an auxiliary in-memory multimap of hashed-udi -> offset to
// speed things up. This is created the first time the file is scanned
// (on the first get), and not saved to disk.

// The map key: hashed udi. As a very short hash seems sufficient,
// maybe we could find something faster/simpler than md5?
#define UDIHLEN 4
class UdiH {
public:
    unsigned char h[UDIHLEN];

    UdiH(const string& udi) {
        MD5_CTX ctx;
        MD5Init(&ctx);
        MD5Update(&ctx, (const unsigned char*)udi.c_str(), udi.length());
        unsigned char md[16];
        MD5Final(md, &ctx);
        memcpy(h, md, UDIHLEN);
    }

    string asHexString() const {
        static const char hex[] = "0123456789abcdef";
        string out;
        for (int i = 0; i < UDIHLEN; i++) {
            out.append(1, hex[h[i] >> 4]);
            out.append(1, hex[h[i] & 0x0f]);
        }
        return out;
    }
    bool operator==(const UdiH& r) const {
        for (int i = 0; i < UDIHLEN; i++)
            if (h[i] != r.h[i]) {
                return false;
            }
        return true;
    }
    bool operator<(const UdiH& r) const {
        for (int i = 0; i < UDIHLEN; i++) {
            if (h[i] < r.h[i]) {
                return true;
            }
            if (h[i] > r.h[i]) {
                return false;
            }
        }
        return false;
    }
};
typedef multimap<UdiH, int64_t> kh_type;
typedef multimap<UdiH, int64_t>::value_type kh_value_type;

class CirCacheInternal {
public:
    int m_fd;
    ////// These are cache persistent state and written to the first block:
    // Maximum file size, after which we begin reusing old space
    int64_t m_maxsize;
    // Offset of the oldest header, or max file offset (file size)
    // while the file is growing. This is the next write position.
    int64_t m_oheadoffs;
    // Offset of last write (newest header)
    int64_t m_nheadoffs;
    // Pad size for newest entry.
    int64_t m_npadsize;
    // Keep history or only last entry
    bool  m_uniquentries;
    ///////////////////// End header entries

    // A place to hold data when reading
    char  *m_buffer;
    size_t m_bufsiz;

    // Error messages
    ostringstream m_reason;

    // State for rewind/next/getcurrent operation. This could/should
    // be moved to a separate iterator.
    int64_t  m_itoffs;
    EntryHeaderData m_ithd;

    // Offset cache
    kh_type m_ofskh;
    bool    m_ofskhcplt; // Has cache been fully read since open?

    // Add udi->offset translation to map
    bool khEnter(const string& udi, int64_t ofs) {
        UdiH h(udi);

        LOGDEB2("Circache::khEnter: h " << h.asHexString() << " offs " << ofs << " udi [" << udi << "]\n");

        pair<kh_type::iterator, kh_type::iterator> p = m_ofskh.equal_range(h);

        if (p.first != m_ofskh.end() && p.first->first == h) {
            for (kh_type::iterator it = p.first; it != p.second; it++) {
                LOGDEB2("Circache::khEnter: col h " << it->first.asHexString() << ", ofs " << it->second << "\n");
                if (it->second == ofs) {
                    // (h,offs) already there. Happens
                    LOGDEB2("Circache::khEnter: already there\n");
                    return true;
                }
            }
        }
        m_ofskh.insert(kh_value_type(h, ofs));
        LOGDEB2("Circache::khEnter: inserted\n");
        return true;
    }
    void khDump() {
        for (kh_type::const_iterator it = m_ofskh.begin();
                it != m_ofskh.end(); it++) {
            LOGDEB("Circache::KHDUMP: " << it->first.asHexString() << " " << it->second << "\n");
        }
    }

    // Return vector of candidate offsets for udi (possibly several
    // because there may be hash collisions, and also multiple
    // instances).
    bool khFind(const string& udi, vector<int64_t>& ofss) {
        ofss.clear();

        UdiH h(udi);

        LOGDEB2("Circache::khFind: h " << h.asHexString() << " udi [" << udi << "]\n");

        pair<kh_type::iterator, kh_type::iterator> p = m_ofskh.equal_range(h);

#if 0
        if (p.first == m_ofskh.end()) {
            LOGDEB("KHFIND: FIRST END()\n");
        }
        if (p.second == m_ofskh.end()) {
            LOGDEB("KHFIND: SECOND END()\n");
        }
        if (!(p.first->first == h))
            LOGDEB("KHFIND: NOKEY: " << p.first->first.asHexString() << " " << p.second->first.asHexString() << "\n");
#endif

        if (p.first == m_ofskh.end() || !(p.first->first == h)) {
            return false;
        }

        for (kh_type::iterator it = p.first; it != p.second; it++) {
            ofss.push_back(it->second);
        }
        return true;
    }
    // Clear entry for udi/offs
    bool khClear(const pair<string, int64_t>& ref) {
        UdiH h(ref.first);
        pair<kh_type::iterator, kh_type::iterator> p = m_ofskh.equal_range(h);
        if (p.first != m_ofskh.end() && (p.first->first == h)) {
            for (kh_type::iterator it = p.first; it != p.second;) {
                kh_type::iterator tmp = it++;
                if (tmp->second == ref.second) {
                    m_ofskh.erase(tmp);
                }
            }
        }
        return true;
    }
    // Clear entries for vector of udi/offs
    bool khClear(const vector<pair<string, int64_t> >& udis) {
        for (vector<pair<string, int64_t> >::const_iterator it = udis.begin();
                it != udis.end(); it++) {
            khClear(*it);
        }
        return true;
    }
    // Clear all entries for udi
    bool khClear(const string& udi) {
        UdiH h(udi);
        pair<kh_type::iterator, kh_type::iterator> p = m_ofskh.equal_range(h);
        if (p.first != m_ofskh.end() && (p.first->first == h)) {
            for (kh_type::iterator it = p.first; it != p.second;) {
                kh_type::iterator tmp = it++;
                m_ofskh.erase(tmp);
            }
        }
        return true;
    }
    CirCacheInternal()
        : m_fd(-1), m_maxsize(-1), m_oheadoffs(-1),
          m_nheadoffs(0), m_npadsize(0), m_uniquentries(false),
          m_buffer(0), m_bufsiz(0), m_ofskhcplt(false) {
    }

    ~CirCacheInternal() {
        if (m_fd >= 0) {
            close(m_fd);
        }
        if (m_buffer) {
            free(m_buffer);
        }
    }

    char *buf(size_t sz) {
        if (m_bufsiz >= sz) {
            return m_buffer;
        }
        if ((m_buffer = (char *)realloc(m_buffer, sz))) {
            m_bufsiz = sz;
        } else {
            m_reason << "CirCache:: realloc(" << sz << ") failed";
            m_bufsiz = 0;
        }
        return m_buffer;
    }

    // Name for the cache file
    string datafn(const string& d) {
        return  path_cat(d, "circache.crch");
    }

    bool writefirstblock() {
        if (m_fd < 0) {
            m_reason << "writefirstblock: not open ";
            return false;
        }

        ostringstream s;
        s <<
          "maxsize = " << m_maxsize << "\n" <<
          "oheadoffs = " << m_oheadoffs << "\n" <<
          "nheadoffs = " << m_nheadoffs << "\n" <<
          "npadsize = " << m_npadsize   << "\n" <<
          "unient = " << m_uniquentries << "\n" <<
          "                                                              " <<
          "                                                              " <<
          "                                                              " <<
          "\0";

        int sz = int(s.str().size());
        assert(sz < CIRCACHE_FIRSTBLOCK_SIZE);
        lseek(m_fd, 0, 0);
        if (write(m_fd, s.str().c_str(), sz) != sz) {
            m_reason << "writefirstblock: write() failed: errno " << errno;
            return false;
        }
        return true;
    }

    bool readfirstblock() {
        if (m_fd < 0) {
            m_reason << "readfirstblock: not open ";
            return false;
        }

        char bf[CIRCACHE_FIRSTBLOCK_SIZE];

        lseek(m_fd, 0, 0);
        if (read(m_fd, bf, CIRCACHE_FIRSTBLOCK_SIZE) !=
                CIRCACHE_FIRSTBLOCK_SIZE) {
            m_reason << "readfirstblock: read() failed: errno " << errno;
            return false;
        }
        string s(bf, CIRCACHE_FIRSTBLOCK_SIZE);
        ConfSimple conf(s, 1);
        string value;
        if (!conf.get("maxsize", value, cstr_null)) {
            m_reason << "readfirstblock: conf get maxsize failed";
            return false;
        }
        m_maxsize = atoll(value.c_str());
        if (!conf.get("oheadoffs", value, cstr_null)) {
            m_reason << "readfirstblock: conf get oheadoffs failed";
            return false;
        }
        m_oheadoffs = atoll(value.c_str());
        if (!conf.get("nheadoffs", value, cstr_null)) {
            m_reason << "readfirstblock: conf get nheadoffs failed";
            return false;
        }
        m_nheadoffs = atoll(value.c_str());
        if (!conf.get("npadsize", value, cstr_null)) {
            m_reason << "readfirstblock: conf get npadsize failed";
            return false;
        }
        m_npadsize = atoll(value.c_str());
        if (!conf.get("unient", value, cstr_null)) {
            m_uniquentries = false;
        } else {
            m_uniquentries = stringToBool(value);
        }
        return true;
    }

    bool writeEntryHeader(int64_t offset, const EntryHeaderData& d,
                          bool eraseData = false) {
        if (m_fd < 0) {
            m_reason << "writeEntryHeader: not open ";
            return false;
        }
        char bf[CIRCACHE_HEADER_SIZE];
        memset(bf, 0, CIRCACHE_HEADER_SIZE);
        snprintf(bf, CIRCACHE_HEADER_SIZE,
                 headerformat, d.dicsize, d.datasize, d.padsize, d.flags);
        if (lseek(m_fd, offset, 0) != offset) {
            m_reason << "CirCache::weh: lseek(" << offset <<
                     ") failed: errno " << errno;
            return false;
        }
        if (write(m_fd, bf, CIRCACHE_HEADER_SIZE) !=  CIRCACHE_HEADER_SIZE) {
            m_reason << "CirCache::weh: write failed. errno " << errno;
            return false;
        }
        if (eraseData == true) {
            if (d.dicsize || d.datasize) {
                m_reason << "CirCache::weh: erase requested but not empty";
                return false;
            }
            string buf(d.padsize, ' ');
            if (write(m_fd, buf.c_str(), d.padsize) != (ssize_t)d.padsize) {
                m_reason << "CirCache::weh: write failed. errno " << errno;
                return false;
            }
        }
        return true;
    }

    CCScanHook::status readEntryHeader(int64_t offset, EntryHeaderData& d) {
        if (m_fd < 0) {
            m_reason << "readEntryHeader: not open ";
            return CCScanHook::Error;
        }

        if (lseek(m_fd, offset, 0) != offset) {
            m_reason << "readEntryHeader: lseek(" << offset <<
                     ") failed: errno " << errno;
            return CCScanHook::Error;
        }
        char bf[CIRCACHE_HEADER_SIZE];

        int ret = read(m_fd, bf, CIRCACHE_HEADER_SIZE);
        if (ret == 0) {
            // Eof
            m_reason << " Eof ";
            return CCScanHook::Eof;
        }
        if (ret != CIRCACHE_HEADER_SIZE) {
            m_reason << " readheader: read failed errno " << errno;
            return CCScanHook::Error;
        }
        if (sscanf(bf, headerformat, &d.dicsize, &d.datasize,
                   &d.padsize, &d.flags) != 4) {
            m_reason << " readEntryHeader: bad header at " <<
                     offset << " [" << bf << "]";
            return CCScanHook::Error;
        }
        LOGDEB2("Circache:readEntryHeader: dcsz " << d.dicsize << " dtsz " << d.datasize << " pdsz " << d.padsize <<
                " flgs " << d.flags << "\n");
        return CCScanHook::Continue;
    }

    CCScanHook::status scan(int64_t startoffset, CCScanHook *user,
                            bool fold = false) {
        if (m_fd < 0) {
            m_reason << "scan: not open ";
            return CCScanHook::Error;
        }

        int64_t so0 = startoffset;
        bool already_folded = false;

        while (true) {
            if (already_folded && startoffset == so0) {
                m_ofskhcplt = true;
                return CCScanHook::Eof;
            }

            EntryHeaderData d;
            CCScanHook::status st;
            switch ((st = readEntryHeader(startoffset, d))) {
            case CCScanHook::Continue:
                break;
            case CCScanHook::Eof:
                if (fold && !already_folded) {
                    already_folded = true;
                    startoffset = CIRCACHE_FIRSTBLOCK_SIZE;
                    continue;
                }
            /* FALLTHROUGH */
            default:
                return st;
            }

            string udi;
            if (d.dicsize) {
                // d.dicsize is 0 for erased entries
                char *bf;
                if ((bf = buf(d.dicsize + 1)) == 0) {
                    return CCScanHook::Error;
                }
                bf[d.dicsize] = 0;
                if (read(m_fd, bf, d.dicsize) != int(d.dicsize)) {
                    m_reason << "scan: read failed errno " << errno;
                    return CCScanHook::Error;
                }
                string b(bf, d.dicsize);
                ConfSimple conf(b, 1);

                if (!conf.get("udi", udi, cstr_null)) {
                    m_reason << "scan: no udi in dic";
                    return CCScanHook::Error;
                }
                khEnter(udi, startoffset);
            }

            // Call callback
            CCScanHook::status a =
                user->takeone(startoffset, udi, d);
            switch (a) {
            case CCScanHook::Continue:
                break;
            default:
                return a;
            }

            startoffset += CIRCACHE_HEADER_SIZE + d.dicsize +
                           d.datasize + d.padsize;
        }
    }

    bool readHUdi(int64_t hoffs, EntryHeaderData& d, string& udi) {
        if (readEntryHeader(hoffs, d) != CCScanHook::Continue) {
            return false;
        }
        string dic;
        if (!readDicData(hoffs, d, dic, 0)) {
            return false;
        }
        if (d.dicsize == 0) {
            // This is an erased entry
            udi.erase();
            return true;
        }
        ConfSimple conf(dic);
        if (!conf.get("udi", udi)) {
            m_reason << "Bad file: no udi in dic";
            return false;
        }
        return true;
    }

    bool readDicData(int64_t hoffs, EntryHeaderData& hd, string& dic,
                     string* data) {
        int64_t offs = hoffs + CIRCACHE_HEADER_SIZE;
        // This syscall could be avoided in some cases if we saved the offset
        // at each seek. In most cases, we just read the header and we are
        // at the right position
        if (lseek(m_fd, offs, 0) != offs) {
            m_reason << "CirCache::get: lseek(" << offs << ") failed: " <<
                     errno;
            return false;
        }
        char *bf = 0;
        if (hd.dicsize) {
            bf = buf(hd.dicsize);
            if (bf == 0) {
                return false;
            }
            if (read(m_fd, bf, hd.dicsize) != int(hd.dicsize)) {
                m_reason << "CirCache::get: read() failed: errno " << errno;
                return false;
            }
            dic.assign(bf, hd.dicsize);
        } else {
            dic.erase();
        }
        if (data == 0) {
            return true;
        }

        if (hd.datasize) {
            bf = buf(hd.datasize);
            if (bf == 0) {
                return false;
            }
            if (read(m_fd, bf, hd.datasize) != int(hd.datasize)) {
                m_reason << "CirCache::get: read() failed: errno " << errno;
                return false;
            }

            if (hd.flags & EFDataCompressed) {
                LOGDEB1("Circache:readdicdata: data compressed\n");
                ZLibUtBuf buf;
                if (!inflateToBuf(bf, hd.datasize, buf)) {
                    m_reason << "CirCache: decompression failed ";
                    return false;
                }
                data->assign(buf.getBuf(), buf.getCnt());
            } else {
                LOGDEB1("Circache:readdicdata: data NOT compressed\n");
                data->assign(bf, hd.datasize);
            }
        } else {
            data->erase();
        }
        return true;
    }

};

CirCache::CirCache(const string& dir)
    : m_dir(dir)
{
    m_d = new CirCacheInternal;
    LOGDEB0("CirCache: [" << m_dir << "]\n");
}

CirCache::~CirCache()
{
    delete m_d;
    m_d = 0;
}

string CirCache::getReason()
{
    return m_d ? m_d->m_reason.str() : "Not initialized";
}

// A scan callback which just records the last header offset and
// padsize seen. This is used with a scan(nofold) to find the last
// physical record in the file
class CCScanHookRecord : public  CCScanHook {
public:
    int64_t headoffs;
    int64_t padsize;
    CCScanHookRecord()
        : headoffs(0), padsize(0) {
    }
    virtual status takeone(int64_t offs, const string& udi,
                           const EntryHeaderData& d) {
        headoffs = offs;
        padsize = d.padsize;
        LOGDEB2("CCScanHookRecord::takeone: offs " << headoffs << " padsize " << padsize << "\n");
        return Continue;
    }
};

string CirCache::getpath()
{
    return m_d->datafn(m_dir);
}

bool CirCache::create(int64_t maxsize, int flags)
{
    LOGDEB("CirCache::create: [" << m_dir << "] maxsz " << maxsize << " flags 0x" << std::hex << flags <m_reason << "CirCache::create: mkdir(" << m_dir <<
                          ") failed" << " errno " << errno;
            return false;
        }
    } else {
        // If the file exists too, and truncate is not set, switch
        // to open-mode. Still may need to update header params.
        if (access(m_d->datafn(m_dir).c_str(), 0) >= 0 &&
                !(flags & CC_CRTRUNCATE)) {
            if (!open(CC_OPWRITE)) {
                return false;
            }
            if (maxsize == m_d->m_maxsize &&
                    ((flags & CC_CRUNIQUE) != 0) == m_d->m_uniquentries) {
                LOGDEB("Header unchanged, no rewrite\n");
                return true;
            }
            // If the new maxsize is bigger than current size, we need
            // to stop recycling if this is what we are doing.
            if (maxsize > m_d->m_maxsize && maxsize > st.st_size) {
                // Scan the file to find the last physical record. The
                // ohead is set at physical eof, and nhead is the last
                // scanned record
                CCScanHookRecord rec;
                m_d->scan(CIRCACHE_FIRSTBLOCK_SIZE, &rec, false);
                m_d->m_oheadoffs = lseek(m_d->m_fd, 0, SEEK_END);
                m_d->m_nheadoffs = rec.headoffs;
                m_d->m_npadsize = rec.padsize;
            }
            m_d->m_maxsize = maxsize;
            m_d->m_uniquentries = ((flags & CC_CRUNIQUE) != 0);
            LOGDEB2("CirCache::create: rewriting header with maxsize " << m_d->m_maxsize << " oheadoffs " <<
                    m_d->m_oheadoffs << " nheadoffs " << m_d->m_nheadoffs << " npadsize " << m_d->m_npadsize <<
                    " unient " << m_d->m_uniquentries << "\n");
            return m_d->writefirstblock();
        }
        // Else fallthrough to create file
    }

    if ((m_d->m_fd = ::open(m_d->datafn(m_dir).c_str(),
                            O_CREAT | O_RDWR | O_TRUNC | O_BINARY, 0666)) < 0) {
        m_d->m_reason << "CirCache::create: open/creat(" <<
                      m_d->datafn(m_dir) << ") failed " << "errno " << errno;
        return false;
    }

    m_d->m_maxsize = maxsize;
    m_d->m_oheadoffs = CIRCACHE_FIRSTBLOCK_SIZE;
    m_d->m_uniquentries = ((flags & CC_CRUNIQUE) != 0);

    char buf[CIRCACHE_FIRSTBLOCK_SIZE];
    memset(buf, 0, CIRCACHE_FIRSTBLOCK_SIZE);
    if (::write(m_d->m_fd, buf, CIRCACHE_FIRSTBLOCK_SIZE) !=
            CIRCACHE_FIRSTBLOCK_SIZE) {
        m_d->m_reason << "CirCache::create: write header failed, errno "
                     << errno;
        return false;
    }
    return m_d->writefirstblock();
}

bool CirCache::open(OpMode mode)
{
    if (m_d == 0) {
        LOGERR("CirCache::open: null data\n");
        return false;
    }

    if (m_d->m_fd >= 0) {
        ::close(m_d->m_fd);
    }

    if ((m_d->m_fd = ::open(m_d->datafn(m_dir).c_str(),
                            mode == CC_OPREAD ?
                            O_RDONLY | O_BINARY : O_RDWR | O_BINARY)) < 0) {
        m_d->m_reason << "CirCache::open: open(" << m_d->datafn(m_dir) <<
                      ") failed " << "errno " << errno;
        return false;
    }
    return m_d->readfirstblock();
}

class CCScanHookDump : public  CCScanHook {
public:
    virtual status takeone(int64_t offs, const string& udi,
                           const EntryHeaderData& d) {
        cout << "Scan: offs " << offs << " dicsize " << d.dicsize
            << " datasize " << d.datasize << " padsize " << d.padsize <<
             " flags " << d.flags <<
             " udi [" << udi << "]" << endl;
        return Continue;
    }
};

bool CirCache::dump()
{
    CCScanHookDump dumper;

    // Start at the oldest header. This is eof while the file is still
    // growing; in that case the scan will wrap to the beginning at once.
    int64_t start = m_d->m_oheadoffs;

    switch (m_d->scan(start, &dumper, true)) {
    case CCScanHook::Stop:
        cout << "Scan returns Stop??" << endl;
        return false;
    case CCScanHook::Continue:
        cout << "Scan returns Continue ?? " << CCScanHook::Continue << " " <<
             getReason() << endl;
        return false;
    case CCScanHook::Error:
        cout << "Scan returns Error: " << getReason() << endl;
        return false;
    case CCScanHook::Eof:
        cout << "Scan returns Eof (ok)" << endl;
        return true;
    default:
        cout << "Scan returns Unknown ??" << endl;
        return false;
    }
}

class CCScanHookGetter : public  CCScanHook {
public:
    string  m_udi;
    int     m_targinstance;
    int     m_instance;
    int64_t   m_offs;
    EntryHeaderData m_hd;

    CCScanHookGetter(const string& udi, int ti)
        : m_udi(udi), m_targinstance(ti), m_instance(0), m_offs(0) {}

    virtual status takeone(int64_t offs, const string& udi,
                           const EntryHeaderData& d) {
        LOGDEB2("Circache:Scan: off " << offs << " udi [" << udi << "] dcsz " << d.dicsize << " dtsz " << d.datasize <<
                " pdsz " << d.padsize << " flgs " << d.flags << "\n");
        if (!m_udi.compare(udi)) {
            m_instance++;
            m_offs = offs;
            m_hd = d;
            if (m_instance == m_targinstance) {
                return Stop;
            }
        }
        return Continue;
    }
};

// instance == -1 means get latest. Otherwise specify from 1+
bool CirCache::get(const string& udi, string& dic, string *data, int instance)
{
    Chrono chron;
    if (m_d->m_fd < 0) {
        m_d->m_reason << "CirCache::get: no data or not open";
        return false;
    }

    LOGDEB0("CirCache::get: udi [" << udi << "], instance " << instance << "\n");

    // If the in-memory offset map is up to date, use it:
    if (m_d->m_ofskhcplt) {
        LOGDEB1("CirCache::get: using ofskh\n");
        //m_d->khDump();
        vector ofss;
        if (m_d->khFind(udi, ofss)) {
            LOGDEB1("Circache::get: h found, colls " << ofss.size() << "\n");
            int finst = 1;
            EntryHeaderData d_good;
            int64_t           o_good = 0;
            for (vector::iterator it = ofss.begin();
                    it != ofss.end(); it++) {
                LOGDEB1("Circache::get: trying offs " << *it << "\n");
                EntryHeaderData d;
                string fudi;
                if (!m_d->readHUdi(*it, d, fudi)) {
                    return false;
                }
                if (!fudi.compare(udi)) {
                    // Found one, memorize offset. Done if instance
                    // matches, else go on. If instance is -1 need to
                    // go to the end anyway
                    d_good = d;
                    o_good = *it;
                    if (finst == instance) {
                        break;
                    } else {
                        finst++;
                    }
                }
            }
            // Did we read an appropriate entry ?
            if (o_good != 0 && (instance == -1 || instance == finst)) {
                bool ret = m_d->readDicData(o_good, d_good, dic, data);
                LOGDEB0("Circache::get: hfound, " << chron.millis() << " mS\n");
                return ret;
            }
            // Else try to scan anyway.
        }
    }

    CCScanHookGetter getter(udi, instance);
    int64_t start = m_d->m_oheadoffs;

    CCScanHook::status ret = m_d->scan(start, &getter, true);
    if (ret == CCScanHook::Eof) {
        if (getter.m_instance == 0) {
            return false;
        }
    } else if (ret != CCScanHook::Stop) {
        return false;
    }
    bool bret = m_d->readDicData(getter.m_offs, getter.m_hd, dic, data);
    LOGDEB0("Circache::get: scanfound, " << chron.millis() << " mS\n");
    return bret;
}

bool CirCache::erase(const string& udi, bool reallyclear)
{
    if (m_d == 0) {
        LOGERR("CirCache::erase: null data\n");
        return false;
    }
    if (m_d->m_fd < 0) {
        m_d->m_reason << "CirCache::erase: no data or not open";
        return false;
    }

    LOGDEB0("CirCache::erase: udi [" << udi << "]\n");

    // If the mem cache is not up to date, update it, we're too lazy
    // to do a scan
    if (!m_d->m_ofskhcplt) {
        string dic;
        get("nosuchudi probably exists", dic);
        if (!m_d->m_ofskhcplt) {
            LOGERR("CirCache::erase : cache not updated after get\n");
            return false;
        }
    }

    vector<int64_t> ofss;
    if (!m_d->khFind(udi, ofss)) {
        // Udi not in there,  erase ok
        LOGDEB("CirCache::erase: khFind returns none\n");
        return true;
    }

    for (vector<int64_t>::iterator it = ofss.begin(); it != ofss.end(); it++) {
        LOGDEB2("CirCache::erase: reading at " << *it << "\n");
        EntryHeaderData d;
        string fudi;
        if (!m_d->readHUdi(*it, d, fudi)) {
            return false;
        }
        LOGDEB2("CirCache::erase: found fudi [" << fudi << "]\n");
        if (!fudi.compare(udi)) {
            EntryHeaderData nd;
            nd.padsize = d.dicsize + d.datasize + d.padsize;
            LOGDEB2("CirCache::erase: rewrite at " << *it << "\n");
            if (*it == m_d->m_nheadoffs) {
                m_d->m_npadsize = nd.padsize;
            }
            if (!m_d->writeEntryHeader(*it, nd, reallyclear)) {
                LOGERR("CirCache::erase: write header failed\n");
                return false;
            }
        }
    }
    m_d->khClear(udi);
    return true;
}
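
// Worked example (illustrative sizes): erasing an entry recorded with
// dicsize=100, datasize=900 and padsize=24 rewrites its header so that the
// whole former body becomes 100+900+24 = 1024 bytes of padding, reusable by
// a later put().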

// Used to scan the file ahead until we accumulated enough space for the new
// entry.
class CCScanHookSpacer : public  CCScanHook {
public:
    int64_t sizewanted;
    int64_t sizeseen;
    vector<pair<string, int64_t> > squashed_udis;
    CCScanHookSpacer(int64_t sz)
        : sizewanted(sz), sizeseen(0) {
        assert(sz > 0);
    }

    virtual status takeone(int64_t offs, const string& udi,
                           const EntryHeaderData& d) {
        LOGDEB2("Circache:ScanSpacer:off " << offs << " dcsz " << d.dicsize << " dtsz " << d.datasize <<
                " pdsz " << d.padsize << " udi[" << udi << "]\n");
        sizeseen += CIRCACHE_HEADER_SIZE + d.dicsize + d.datasize + d.padsize;
        squashed_udis.push_back(make_pair(udi, offs));
        if (sizeseen >= sizewanted) {
            return Stop;
        }
        return Continue;
    }
};

bool CirCache::put(const string& udi, const ConfSimple *iconf,
                   const string& data, unsigned int iflags)
{
    if (m_d == 0) {
        LOGERR("CirCache::put: null data\n");
        return false;
    }
    if (m_d->m_fd < 0) {
        m_d->m_reason << "CirCache::put: no data or not open";
        return false;
    }

    // We need the udi in input metadata
    string dic;
    if (!iconf || !iconf->get("udi", dic) || dic.empty() || dic.compare(udi)) {
        m_d->m_reason << "No/bad 'udi' entry in input dic";
        LOGERR("Circache::put: no/bad udi: DIC:[" << dic << "] UDI [" << udi << "]\n");
        return false;
    }

    // Possibly erase older entries. Need to do this first because we may be
    // able to reuse the space if the same udi was last written
    if (m_d->m_uniquentries && !erase(udi)) {
        LOGERR("CirCache::put: can't erase older entries\n");
        return false;
    }

    ostringstream s;
    iconf->write(s);
    dic = s.str();

    // Data compression ?
    const char *datap = data.c_str();
    size_t datalen = data.size();
    unsigned short flags = 0;
    ZLibUtBuf buf;
    if (!(iflags & NoCompHint)) {
        if (deflateToBuf(data.c_str(), data.size(), buf)) {
            // If compression succeeds, and the ratio makes sense,
            // store compressed
            if (float(buf.getCnt()) < 0.9 * float(data.size())) {
                datap = buf.getBuf();
                datalen = buf.getCnt();
                flags |= EFDataCompressed;
            }
        }
    }
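    // Illustrative numbers: a 1000-byte document deflating to 850 bytes
    // (ratio 0.85, below the 0.9 threshold above) is stored compressed; one
    // deflating to only 950 bytes is kept uncompressed, the gain being
    // considered too small.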

    struct stat st;
    if (fstat(m_d->m_fd, &st) < 0) {
        m_d->m_reason << "CirCache::put: fstat failed. errno " << errno;
        return false;
    }

    // Characteristics for the new entry.
    int64_t nsize = CIRCACHE_HEADER_SIZE + dic.size() + datalen;
    int64_t nwriteoffs = m_d->m_oheadoffs;
    int64_t npadsize = 0;
    bool extending = false;

    LOGDEB("CirCache::put: nsz " << nsize << " oheadoffs " << m_d->m_oheadoffs << "\n");

    // Check if we can recover some pad space from the (physically) previous
    // entry.
    int64_t recovpadsize = m_d->m_oheadoffs == CIRCACHE_FIRSTBLOCK_SIZE ?
                         0 : m_d->m_npadsize;
    if (recovpadsize != 0) {
        // Need to read the latest entry's header, to rewrite it with a
        // zero pad size
        EntryHeaderData pd;
        if (m_d->readEntryHeader(m_d->m_nheadoffs, pd) != CCScanHook::Continue) {
            return false;
        }
        if (int(pd.padsize) != m_d->m_npadsize) {
            m_d->m_reason << "CirCache::put: logic error: bad padsize ";
            return false;
        }
        if (pd.dicsize == 0) {
            // erased entry. Also recover the header space, no need to rewrite
            // the header, we're going to write on it.
            recovpadsize += CIRCACHE_HEADER_SIZE;
        } else {
            LOGDEB("CirCache::put: recov. prev. padsize " << pd.padsize << "\n");
            pd.padsize = 0;
            if (!m_d->writeEntryHeader(m_d->m_nheadoffs, pd)) {
                return false;
            }
            // If we fail between here and the end, the file is broken.
        }
        nwriteoffs = m_d->m_oheadoffs - recovpadsize;
    }

    if (nsize <= recovpadsize) {
        // If the new entry fits entirely in the pad area from the
        // latest one, no need to recycle stuff
        LOGDEB("CirCache::put: new fits in old padsize " << recovpadsize << "\n");
        npadsize = recovpadsize - nsize;
    } else if (st.st_size < m_d->m_maxsize) {
        // Still growing the file.
        npadsize = 0;
        extending = true;
    } else {
        // Scan the file until we have enough space for the new entry,
        // and determine the pad size up to the 1st preserved entry
        int64_t scansize = nsize - recovpadsize;
        LOGDEB("CirCache::put: scanning for size " << scansize << " from offs " << m_d->m_oheadoffs << "\n");
        CCScanHookSpacer spacer(scansize);
        switch (m_d->scan(m_d->m_oheadoffs, &spacer)) {
        case CCScanHook::Stop:
            LOGDEB("CirCache::put: Scan ok, sizeseen " << spacer.sizeseen << "\n");
            npadsize = spacer.sizeseen - scansize;
            break;
        case CCScanHook::Eof:
            npadsize = 0;
            extending = true;
            break;
        case CCScanHook::Continue:
        case CCScanHook::Error:
            return false;
        }
        // Take the recycled entries off the multimap
        m_d->khClear(spacer.squashed_udis);
    }

    LOGDEB("CirCache::put: writing " << nsize << " at " << nwriteoffs << " padsize " << npadsize << "\n");

    if (lseek(m_d->m_fd, nwriteoffs, 0) != nwriteoffs) {
        m_d->m_reason << "CirCache::put: lseek failed: " << errno;
        return false;
    }

    char head[CIRCACHE_HEADER_SIZE];
    memset(head, 0, CIRCACHE_HEADER_SIZE);
    snprintf(head, CIRCACHE_HEADER_SIZE,
             headerformat, dic.size(), datalen, npadsize, flags);
    struct iovec vecs[3];
    vecs[0].iov_base = head;
    vecs[0].iov_len = CIRCACHE_HEADER_SIZE;
    vecs[1].iov_base = (void *)dic.c_str();
    vecs[1].iov_len = dic.size();
    vecs[2].iov_base = (void *)datap;
    vecs[2].iov_len = datalen;
    if (writev(m_d->m_fd, vecs, 3) !=  nsize) {
        m_d->m_reason << "put: write failed. errno " << errno;
        if (extending)
            if (ftruncate(m_d->m_fd, m_d->m_oheadoffs) == -1) {
                m_d->m_reason << "put: ftruncate failed. errno " << errno;
            }
        return false;
    }

    m_d->khEnter(udi, nwriteoffs);

    // Update first block information
    m_d->m_nheadoffs = nwriteoffs;
    m_d->m_npadsize  = npadsize;
    // New oldest header is the one just after the one we just wrote.
    m_d->m_oheadoffs = nwriteoffs + nsize + npadsize;
    if (nwriteoffs + nsize >= m_d->m_maxsize) {
        // Max size or top of file reached, next write at BOT.
        m_d->m_oheadoffs = CIRCACHE_FIRSTBLOCK_SIZE;
    }
    return m_d->writefirstblock();
}

bool CirCache::rewind(bool& eof)
{
    if (m_d == 0) {
        LOGERR("CirCache::rewind: null data\n");
        return false;
    }

    eof = false;

    int64_t fsize = lseek(m_d->m_fd, 0, SEEK_END);
    if (fsize == (int64_t) - 1) {
        LOGERR("CirCache::rewind: seek to EOF failed\n");
        return false;
    }
    // Read oldest header. This is either at the position pointed to
    // by oheadoffs, or after the first block if the file is still
    // growing.
    if (m_d->m_oheadoffs == fsize) {
        m_d->m_itoffs = CIRCACHE_FIRSTBLOCK_SIZE;
    } else {
        m_d->m_itoffs = m_d->m_oheadoffs;
    }
    CCScanHook::status st = m_d->readEntryHeader(m_d->m_itoffs, m_d->m_ithd);

    switch (st) {
    case CCScanHook::Eof:
        eof = true;
        return false;
    case CCScanHook::Continue:
        return true;
    default:
        return false;
    }
}

bool CirCache::next(bool& eof)
{
    if (m_d == 0) {
        LOGERR("CirCache::next: null data\n");
        return false;
    }

    eof = false;

    // Skip to next header, using values stored from previous one
    m_d->m_itoffs += CIRCACHE_HEADER_SIZE + m_d->m_ithd.dicsize +
                     m_d->m_ithd.datasize + m_d->m_ithd.padsize;

    // Looped back ?
    if (m_d->m_itoffs == m_d->m_oheadoffs) {
        eof = true;
        return false;
    }

    // Read. If we hit physical eof, fold.
    CCScanHook::status st = m_d->readEntryHeader(m_d->m_itoffs, m_d->m_ithd);
    if (st == CCScanHook::Eof) {
        m_d->m_itoffs = CIRCACHE_FIRSTBLOCK_SIZE;
        if (m_d->m_itoffs == m_d->m_oheadoffs) {
            // Then the file is not folded yet (still growing)
            eof = true;
            return false;
        }
        st = m_d->readEntryHeader(m_d->m_itoffs, m_d->m_ithd);
    }

    if (st == CCScanHook::Continue) {
        return true;
    }
    return false;
}

bool CirCache::getCurrentUdi(string& udi)
{
    if (m_d == 0) {
        LOGERR("CirCache::getCurrentUdi: null data\n");
        return false;
    }

    if (!m_d->readHUdi(m_d->m_itoffs, m_d->m_ithd, udi)) {
        return false;
    }
    return true;
}

bool CirCache::getCurrent(string& udi, string& dic, string *data)
{
    if (m_d == 0) {
        LOGERR("CirCache::getCurrent: null data\n");
        return false;
    }
    if (!m_d->readDicData(m_d->m_itoffs, m_d->m_ithd, dic, data)) {
        return false;
    }

    ConfSimple conf(dic, 1);
    conf.get("udi", udi, cstr_null);
    return true;
}

// Copy all entries from occ to ncc. Both are already open.
static bool copyall(std::shared_ptr<CirCache> occ,
                    std::shared_ptr<CirCache> ncc, int& nentries,
                    ostringstream& msg)
{
    bool eof = false;
    if (!occ->rewind(eof)) {
        if (!eof) {
            msg << "Initial rewind failed" << endl;
            return false;
        }
    }
    nentries = 0;
    while (!eof) {
        string udi, sdic, data;
        if (!occ->getCurrent(udi, sdic, &data)) {
            msg << "getCurrent failed: " << occ->getReason() << endl;
            return false;
        }
        // Shouldn't getcurrent deal with this ?
        if (sdic.size() == 0) {
            //cerr << "Skip empty entry" << endl;
            occ->next(eof);
            continue;
        }
        ConfSimple dic(sdic);
        if (!dic.ok()) {
            msg << "Could not parse entry attributes dic" << endl;
            return false;
        }
        //cerr << "UDI: " << udi << endl;
        if (!ncc->put(udi, &dic, data)) {
            msg << "put failed: " << ncc->getReason() << " sdic [" << sdic <<
                 "]" << endl;
            return false;
        }
        nentries++;
        occ->next(eof);
    }
    return true;
}

// Append all entries from sdir to ddir
int CirCache::append(const string ddir, const string& sdir, string *reason)
{
    ostringstream msg;
    // Open source file
    std::shared_ptr<CirCache> occ(new CirCache(sdir));
    if (!occ->open(CirCache::CC_OPREAD)) {
        if (reason) {
            msg << "Open failed in " << sdir << " : " <<
                occ->getReason() << endl;
            *reason = msg.str();
        }
        return -1;
    }
    // Open dest file
    std::shared_ptr<CirCache> ncc(new CirCache(ddir));
    if (!ncc->open(CirCache::CC_OPWRITE)) {
        if (reason) {
            msg << "Open failed in " << ddir << " : " <<
                ncc->getReason() << endl;
            *reason = msg.str();
        }
        return -1;
    }

    int nentries;
    if (!copyall(occ, ncc, nentries, msg)) {
        if (reason) {
            *reason = msg.str();
        }
        return -1;
    }

    return nentries;
}
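
// Illustrative usage sketch, not part of the original source (the paths are
// made up): merging the content of an old cache into a new, larger one, as
// the test driver below also allows from the command line.
#if 0
static int example_merge_caches()
{
    std::string reason;
    int cnt = CirCache::append("/path/to/newcache", "/path/to/oldcache", &reason);
    if (cnt < 0) {
        std::cerr << "append failed: " << reason << "\n";
    }
    return cnt;
}
#endif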


#else // TEST ->
#include "autoconfig.h"

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>

#include <string>
#include <iostream>
#include <vector>

#include "circache.h"
#include "fileudi.h"
#include "conftree.h"
#include "readfile.h"
#include "log.h"

#include "smallut.h"

using namespace std;

static char *thisprog;

static char usage [] =
    " -c [-u]  : create\n"
    " -p   [apath ...] : put files\n"
    " -d  : dump\n"
    " -g [-i instance] [-D]  : get\n"
    "   -D: also dump data\n"
    " -e   : erase\n"
    " -a   [ ...]: append old content to target\n"
    "  The target should be first resized to hold all the data, else only\n"
    "  as many entries as capacity permit will be retained\n"
    ;

static void
Usage(FILE *fp = stderr)
{
    fprintf(fp, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_c     0x2
#define OPT_p     0x8
#define OPT_g     0x10
#define OPT_d     0x20
#define OPT_i     0x40
#define OPT_D     0x80
#define OPT_u     0x100
#define OPT_e     0x200
#define OPT_a     0x800

int main(int argc, char **argv)
{
    int instance = -1;

    thisprog = argv[0];
    argc--;
    argv++;

    while (argc > 0 && **argv == '-') {
        (*argv)++;
        if (!(**argv))
            /* Case of a lone "-" argument (as in "adb - core") */
        {
            Usage();
        }
        while (**argv)
            switch (*(*argv)++) {
            case 'a':
                op_flags |= OPT_a;
                break;
            case 'c':
                op_flags |= OPT_c;
                break;
            case 'D':
                op_flags |= OPT_D;
                break;
            case 'd':
                op_flags |= OPT_d;
                break;
            case 'e':
                op_flags |= OPT_e;
                break;
            case 'g':
                op_flags |= OPT_g;
                break;
            case 'i':
                op_flags |= OPT_i;
                if (argc < 2) {
                    Usage();
                }
                if ((sscanf(*(++argv), "%d", &instance)) != 1) {
                    Usage();
                }
                argc--;
                goto b1;
            case 'p':
                op_flags |= OPT_p;
                break;
            case 'u':
                op_flags |= OPT_u;
                break;
            default:
                Usage();
                break;
            }
b1:
        argc--;
        argv++;
    }

    Logger::getTheLog("")->setLogLevel(Logger::LLDEB1);

    if (argc < 1) {
        Usage();
    }
    string dir = *argv++;
    argc--;

    CirCache cc(dir);

    if (op_flags & OPT_c) {
        if (argc != 1) {
            Usage();
        }
        int64_t sizekb = atoi(*argv++);
        argc--;
        int flags = 0;
        if (op_flags & OPT_u) {
            flags |= CirCache::CC_CRUNIQUE;
        }
        if (!cc.create(sizekb * 1024, flags)) {
            cerr << "Create failed:" << cc.getReason() << endl;
            exit(1);
        }
    } else if (op_flags & OPT_a) {
        if (argc < 1) {
            Usage();
        }
        while (argc) {
            string reason;
            if (CirCache::append(dir, *argv++, &reason) < 0) {
                cerr << reason << endl;
                return 1;
            }
            argc--;
        }
    } else if (op_flags & OPT_p) {
        if (argc < 1) {
            Usage();
        }
        if (!cc.open(CirCache::CC_OPWRITE)) {
            cerr << "Open failed: " << cc.getReason() << endl;
            exit(1);
        }
        while (argc) {
            string fn = *argv++;
            argc--;
            char dic[1000];
            string data, reason;
            if (!file_to_string(fn, data, &reason)) {
                cerr << "File_to_string: " << reason << endl;
                exit(1);
            }
            string udi;
            make_udi(fn, "", udi);
            string cmd("xdg-mime query filetype ");
            // Should do more quoting here...
            cmd += "'" + fn + "'";
            FILE *fp = popen(cmd.c_str(), "r");
            char* buf=0;
            size_t sz = 0;
            ::getline(&buf, &sz, fp);
            pclose(fp);
            string mimetype(buf);
            free(buf);
            trimstring(mimetype, "\n\r");
            cout << "Got [" << mimetype << "]\n";

            string s;
            ConfSimple conf(s);
            conf.set("udi", udi);
            conf.set("mimetype", mimetype);
            //ostringstream str; conf.write(str); cout << str.str() << endl;

            if (!cc.put(udi, &conf, data, 0)) {
                cerr << "Put failed: " << cc.getReason() << endl;
                cerr << "conf: [";
                conf.write(cerr);
                cerr << "]" << endl;
                exit(1);
            }
        }
        cc.open(CirCache::CC_OPREAD);
    } else if (op_flags & OPT_g) {
        if (!cc.open(CirCache::CC_OPREAD)) {
            cerr << "Open failed: " << cc.getReason() << endl;
            exit(1);
        }
        while (argc) {
            string udi = *argv++;
            argc--;
            string dic, data;
            if (!cc.get(udi, dic, &data, instance)) {
                cerr << "Get failed: " << cc.getReason() << endl;
                exit(1);
            }
            cout << "Dict: [" << dic << "]" << endl;
            if (op_flags & OPT_D) {
                cout << "Data: [" << data << "]" << endl;
            }
        }
    } else if (op_flags & OPT_e) {
        if (!cc.open(CirCache::CC_OPWRITE)) {
            cerr << "Open failed: " << cc.getReason() << endl;
            exit(1);
        }
        while (argc) {
            string udi = *argv++;
            argc--;
            string dic, data;
            if (!cc.erase(udi)) {
                cerr << "Erase failed: " << cc.getReason() << endl;
                exit(1);
            }
        }
    } else if (op_flags & OPT_d) {
        if (!cc.open(CirCache::CC_OPREAD)) {
            cerr << "Open failed: " << cc.getReason() << endl;
            exit(1);
        }
        cc.dump();
    } else {
        Usage();
    }

    exit(0);
}

#endif

recoll-1.26.3/utils/rclionice.h0000644000175000017500000000173113533651561013276 00000000000000/* Copyright (C) 2011 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _RCLIONICE_H_INCLUDED_
#define _RCLIONICE_H_INCLUDED_

#include <string>
using std::string;

extern bool rclionice(const string& clss, const string& classdata);

#endif /* _RCLIONICE_H_INCLUDED_ */
recoll-1.26.3/utils/listmem.h0000644000175000017500000000054213303776060012775 00000000000000#ifndef _LISTMEM_H_INCLUDED_
#define _LISTMEM_H_INCLUDED_
#include <iostream>

enum ListmemOpts {LISTMEM_SWAP16 = 1, LISTMEM_SWAP32 = 2};

/// @param startadr starting value for offset listings on the right
extern void listmem(std::ostream&, const void *ptr, int sz,
                    int startadr = 0, int opts = 0);

#endif /* _LISTMEM_H_INCLUDED_ */
recoll-1.26.3/utils/appformime.cpp0000644000175000017500000001400213533651561014014 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_APPFORMIME
#include <string>
#include <vector>

#include <iostream>
using namespace std;

#include "pathut.h"
#include "smallut.h"
#include "appformime.h"

static const string topappsdir("/usr/share/applications");
static const string desktopext("desktop");

static DesktopDb *theDb;

class FstCb : public FsTreeWalkerCB {
public:
    FstCb(DesktopDb::AppMap *appdefs)
        : m_appdefs(appdefs)
        {
        }
    virtual FsTreeWalker::Status 
    processone(const string &, const struct stat *, FsTreeWalker::CbFlag);
    DesktopDb::AppMap *m_appdefs;
};

FsTreeWalker::Status FstCb::processone(const string& fn, const struct stat *, 
                                       FsTreeWalker::CbFlag flg) 
{
    if (flg != FsTreeWalker::FtwRegular)
        return FsTreeWalker::FtwOk;

    if (path_suffix(fn).compare(desktopext)) {
        //cerr << fn << " does not end with .desktop" << endl;
        return FsTreeWalker::FtwOk;
    }

    ConfSimple dt(fn.c_str(), true);
    if (!dt.ok()) {
        cerr << fn << " cant parse" << endl;
        return FsTreeWalker::FtwOk;
    }
    string tp, nm, cmd, mt;
    if (!dt.get("Type", tp, "Desktop Entry")) {
        //cerr << fn << " no Type" << endl;
        return FsTreeWalker::FtwOk;
    }
    if (tp.compare("Application")) {
        //cerr << fn << " wrong Type " << tp << endl;
        return FsTreeWalker::FtwOk;
    }
    if (!dt.get("Exec", cmd, "Desktop Entry")) {
        //cerr << fn << " no Exec" << endl;
        return FsTreeWalker::FtwOk;
    }
    if (!dt.get("Name", nm, "Desktop Entry")) {
        //cerr << fn << " no Name" << endl;
        nm = path_basename(fn, desktopext);
    }
    if (!dt.get("MimeType", mt, "Desktop Entry")) {
        //cerr << fn << " no MimeType" << endl;
        return FsTreeWalker::FtwOk;
    }
    DesktopDb::AppDef appdef(nm, cmd);
    // Breakup mime type list, and push app to mime entries
    vector<string> mimes;
    stringToTokens(mt, mimes, ";");
    for (vector<string>::const_iterator it = mimes.begin();
         it != mimes.end(); it++) {
        (*m_appdefs)[*it].push_back(appdef);
    }
    return FsTreeWalker::FtwOk;
}

DesktopDb* DesktopDb::getDb()
{
    if (theDb == 0) {
        theDb = new DesktopDb();
    }
    if (theDb && theDb->m_ok)
        return theDb;
    return 0;
}

void DesktopDb::build(const string& dir)
{
    FstCb procapp(&m_appMap);
    FsTreeWalker walker;
    if (walker.walk(dir, procapp) != FsTreeWalker::FtwOk) {
        m_ok = false;
        m_reason = walker.getReason();
        return;
    }
    m_ok = true;
}

DesktopDb::DesktopDb()
{
    build(topappsdir);
}

DesktopDb::DesktopDb(const string& dir)
{
    build(dir);
}

bool DesktopDb::appForMime(const string& mime, vector<AppDef> *apps,
                           string *reason)
{
    AppMap::const_iterator it = m_appMap.find(mime);
    if (it == m_appMap.end()) {
        if (reason)
            *reason = string("No application found for ") + mime;
        return false;
    }
    *apps = it->second;
    return true;
}

bool DesktopDb::allApps(vector<AppDef> *apps)
{
    map<string, AppDef> allaps;
    for (AppMap::const_iterator it = m_appMap.begin();
         it != m_appMap.end(); it++) {
        for (vector<AppDef>::const_iterator it1 = it->second.begin();
             it1 != it->second.end(); it1++) {
            allaps.insert(pair<string, AppDef>
                          (it1->name, AppDef(it1->name, it1->command)));
        }
    }
    for (map<string, AppDef>::const_iterator it = allaps.begin();
         it != allaps.end(); it++) {
        apps->push_back(it->second);
    }
    return true;
}

bool DesktopDb::appByName(const string& nm, AppDef& app)
{
    for (AppMap::const_iterator it = m_appMap.begin();
         it != m_appMap.end(); it++) {
        for (vector<AppDef>::const_iterator it1 = it->second.begin();
             it1 != it->second.end(); it1++) {
            if (!nm.compare(it1->name)) {
                app.name = it1->name;
                app.command = it1->command;
                return true;
            }
        }
    }
    return false;
}

const string& DesktopDb::getReason()
{
    return m_reason;
}

#else // TEST_APPFORMIME

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>

#include <string>
#include <vector>
#include <iostream>
using namespace std;

#include "appformime.h"

static char *thisprog;

static char usage [] =
"  appformime \n\n"
;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

int main(int argc, char **argv)
{
  thisprog = argv[0];
  argc--; argv++;

  if (argc != 1)
    Usage();
  string mime = *argv++;argc--;

  string reason;
  vector<DesktopDb::AppDef> appdefs;
  DesktopDb *ddb = DesktopDb::getDb();
  if (ddb == 0) {
      cerr << "Could not create desktop db\n";
      exit(1);
  }
  if (!ddb->appForMime(mime, &appdefs, &reason)) {
      cerr << "appForMime failed: " << reason << endl;
      exit(1);
  }
  if (appdefs.empty()) {
      cerr << "No application found for [" << mime << "]" << endl;
      exit(1);
  }
  cout << mime << " -> ";
  for (vector<DesktopDb::AppDef>::const_iterator it = appdefs.begin();
       it != appdefs.end(); it++) {
      cout << "[" << it->name << ", " << it->command << "], ";
  }
  cout << endl;

  exit(0);
}

#endif //TEST_APPFORMIME
recoll-1.26.3/utils/rclionice.cpp0000644000175000017500000000307613533651561013635 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <stdio.h>
#include <unistd.h>

#include <vector>

#include "rclionice.h"
#include "execmd.h"
#include "log.h"

using namespace std;

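// For example, with clss == "3" and an empty classdata, the command run below
// amounts to: ionice -c 3 -p <pid-of-the-current-process>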
bool rclionice(const string& clss, const string& cdata)
{
    string ionicexe;
    if (!ExecCmd::which("ionice", ionicexe)) {
	// ionice not found, bail out
	LOGDEB0("rclionice: ionice not found\n" );
	return false;
    }
    vector<string> args;
    args.push_back("-c");
    args.push_back(clss);

    if (!cdata.empty()) {
	args.push_back("-n");
	args.push_back(cdata);
    }
    
    char cpid[100];
    sprintf(cpid, "%d", getpid());
    args.push_back("-p");
    args.push_back(cpid);

    ExecCmd cmd;
    int status = cmd.doexec(ionicexe, args);

    if (status) {
	LOGERR("rclionice: failed, status 0x"  << (status) << "\n" );
	return false;
    }
    return true;
}

recoll-1.26.3/utils/hldata.cpp0000644000175000017500000002675313566424763013142 00000000000000/* Copyright (C) 2017-2019 J.F.Dockes
 *
 * License: GPL 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include "autoconfig.h"

#include "hldata.h"

#include <algorithm>
#include <climits>

#include "log.h"
#include "smallut.h"

using std::string;
using std::unordered_map;
using std::vector;
using std::pair;

#undef DEBUGGROUPS
#ifdef DEBUGGROUPS
#define LOGRP LOGINF
#else
#define LOGRP LOGDEB1
#endif

// Combined position list for or'd terms
struct OrPList {
    void addplist(const string& term, const vector<int>* pl) {
        terms.push_back(term);
        plists.push_back(pl);
        indexes.push_back(0);
        totalsize += pl->size();
    }

    // Returns -1 for eof, else the next smallest value in the
    // combined lists, according to the current indexes.
    int value() {
        int minval = INT_MAX;
        int minidx = -1;
        for (unsigned ii = 0; ii < indexes.size(); ii++) {
            const vector<int>& pl(*plists[ii]);
            if (indexes[ii] >= pl.size())
                continue; // this list done
            if (pl[indexes[ii]] < minval) {
                minval = pl[indexes[ii]];
                minidx = ii;
            }
        }
        if (minidx != -1) {
            LOGRP("OrPList::value() -> " << minval << " for " <<
                  terms[minidx] << "\n");
            currentidx = minidx;
            return minval;
        } else {
            LOGRP("OrPList::value(): EOL for " << stringsToString(terms)<<"\n");
            return -1;
        }
    }

    int next() {
        if (currentidx != -1) {
            indexes[currentidx]++;
        }
        return value();
    }
    
    int size() const {
        return totalsize;
    }
    void rewind() {
        for (auto& idx : indexes) {
            idx = 0;
        }
        currentidx = -1;
    }

    vector<const vector<int>*> plists;
    vector<unsigned int> indexes;
    vector<string> terms;
    int currentidx{-1};
    int totalsize{0};
};
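
// Worked example (illustrative positions): with two expansion terms whose
// position lists are {2, 9} and {5}, successive next() calls return 2, 5, 9,
// then -1: value() yields the smallest not-yet-consumed position across the
// lists and records which list supplied it, so next() knows which index to
// advance.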

static inline void setWinMinMax(int pos, int& sta, int& sto)
{
    if (pos < sta) {
        sta = pos;
    }
    if (pos > sto) {
        sto = pos;
    }
}

/*
 * @param window the total width for the "near" area, in positions.

 * @param plists the position vectors for the terms. The array is
 *    sorted shortest first for optimization. The function does a
 *    recursive call on the next array if the match is still possible
 *    after dealing with the current one

 * @param plist_idx the index for the position list we will work with.
 * @param min, max the current minimum and maximum term positions.
 * @param[output] sp, ep, the start and end positions of the found match.
 * @param minpos  Highest end of a found match. While looking for
 *   further matches, we don't want the search to extend before
 *   this, because it does not make sense for highlight regions to
 *   overlap.
 * @param isphrase if true, the position lists are in term order, and
 *     we only look for the next match beyond the current window top.
 */
static bool do_proximity_test(
    const int window, vector& plists,
    unsigned int plist_idx, int min, int max, int *sp, int *ep, int minpos,
    bool isphrase)
{
    // Overlap interdiction: possibly adjust window start by input minpos
    int actualminpos = isphrase ? max + 1 : max + 1 - window;
    if (actualminpos < minpos)
        actualminpos = minpos;
    LOGRP("do_prox_test: win " << window << " plist_idx " << plist_idx <<
          " min " <<  min << " max " << max << " minpos " << minpos <<
          " isphrase " << isphrase << " actualminpos " << actualminpos << "\n");

    // Find 1st position bigger than window start. A previous call may
    // have advanced the index, so we begin by retrieving the current
    // value.
    int nextpos = plists[plist_idx].value();
    while (nextpos != -1 && nextpos < actualminpos)
        nextpos = plists[plist_idx].next();

    // Look for position inside window. If not found, no match. If
    // found: if this is the last list we're done, else recurse on
    // next list after adjusting the window
    while (nextpos != -1) {
        if (nextpos > min + window - 1) {
            return false;
        }
        if (plist_idx + 1 == plists.size()) {
            // We already checked pos > min, now we also have pos <
            // max, and we are the last list: done: set return values.
            setWinMinMax(nextpos, *sp, *ep);
            return true;
        }
        setWinMinMax(nextpos, min, max);
        if (do_proximity_test(window, plists, plist_idx + 1,
                              min, max, sp, ep, minpos, isphrase)) {
            return true;
        }
        nextpos = plists[plist_idx].next();
    }
    return false;
}
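
// Worked example for the test above (illustrative positions): two terms
// searched with slack 1 give window = 3. If the shortest list holds {5, 40}
// and the other one {7, 20}, matchGroup() starts at pos 5, the second term is
// looked for in [5, 7] (min + window - 1) and found at 7, so a match is
// reported for termpos [5, 7]. At pos 40 the window cannot start before 38,
// both remaining positions of the second list are lower, and no further
// match is found.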


// Find matches for one group of terms
bool matchGroup(const HighlightData& hldata,
                unsigned int grpidx,
                const unordered_map<string, vector<int>>& inplists,
                const unordered_map<int, pair<int, int>>& gpostobytes,
                vector<GroupMatchEntry>& tboffs)
{

    const auto& tg(hldata.index_term_groups[grpidx]);
    bool isphrase =  tg.kind == HighlightData::TermGroup::TGK_PHRASE;
    string allplterms;
    for (const auto& entry:inplists) {
        allplterms += entry.first + " ";
    }
    LOGRP("matchGroup: isphrase " << isphrase <<
          ". Have plists for [" << allplterms << "]\n");
    LOGRP("matchGroup: hldata: " << hldata.toString() << std::endl);
    
    int window = int(tg.orgroups.size() + tg.slack);
    // The position lists we are going to work with. We extract them from the 
    // (string->plist) map
    vector<OrPList> orplists;

    // Find the position list for each term in the group and build the
    // combined lists for the term or groups (each group is the result
    // of the expansion of one user term). It is possible that this
    // particular group was not actually matched by the search, so
    // that some terms are not found, in which case we bail out.
    for (const auto& group : tg.orgroups) {
        orplists.push_back(OrPList());
        for (const auto& term : group) {
            const auto pl = inplists.find(term);
            if (pl == inplists.end()) {
                LOGRP("TextSplitPTR::matchGroup: term [" << term <<
                      "] not found in plists\n");
                continue;
            }
            orplists.back().addplist(pl->first, &(pl->second));
        }
        if (orplists.back().plists.empty()) {
            LOGRP("No positions list found for group " <<
                   stringsToString(group) << std::endl);
            orplists.pop_back();
        }
    }

    // I think this can't actually happen, was useful when we used to
    // prune the groups, but doesn't hurt.
    if (orplists.size() < 2) {
        LOGRP("TextSplitPTR::matchGroup: no actual groups found\n");
        return false;
    }

    if (!isphrase) {
        // Sort the positions lists so that the shorter is first
        std::sort(orplists.begin(), orplists.end(),
                  [](const OrPList& a, const OrPList& b) -> bool {
                      return a.size() < b.size();
                  }
            );
    }

    // Minpos is the highest end of a found match. While looking for
    // further matches, we don't want the search to extend before
    // this, because it does not make sense for highlight regions to
    // overlap
    int minpos = 0;
    // Walk the shortest plist and look for matches
    int pos;
    while ((pos = orplists[0].next()) != -1) {
        int sta = INT_MAX, sto = 0;
        LOGDEB2("MatchGroup: Testing at pos " << pos << "\n");
        if (do_proximity_test(
                window, orplists, 1, pos, pos, &sta, &sto, minpos, isphrase)) {
            setWinMinMax(pos, sta, sto);
            LOGRP("TextSplitPTR::matchGroup: MATCH termpos [" << sta <<
                    "," << sto << "]\n"); 
            minpos = sto + 1;
            // Translate the position window into a byte offset window
            auto i1 =  gpostobytes.find(sta);
            auto i2 =  gpostobytes.find(sto);
            if (i1 != gpostobytes.end() && i2 != gpostobytes.end()) {
                LOGDEB2("TextSplitPTR::matchGroup: pushing bpos " <<
                        i1->second.first << " " << i2->second.second << "\n");
                tboffs.push_back(GroupMatchEntry(i1->second.first, 
                                                 i2->second.second, grpidx));
            } else {
                LOGDEB0("matchGroup: no bpos found for " << sta << " or "
                        << sto << "\n");
            }
        } else {
            LOGRP("matchGroup: no group match found at this position\n");
        }
    }

    return !tboffs.empty();
}

string HighlightData::toString() const
{
    string out;
    out.append("\nUser terms (orthograph): ");
    for (const auto& term : uterms) {
        out.append(" [").append(term).append("]");
    }
    out.append("\nUser terms to Query terms:");
    for (const auto& entry: terms) {
        out.append("[").append(entry.first).append("]->[");
        out.append(entry.second).append("] ");
    }
    out.append("\nGroups: ");
    char cbuf[200];
    sprintf(cbuf, "index_term_groups size %d ugroups size %d",
            int(index_term_groups.size()), int(ugroups.size()));
    out.append(cbuf);

    size_t ugidx = (size_t) - 1;
    for (HighlightData::TermGroup tg : index_term_groups) {
        if (ugidx != tg.grpsugidx) {
            ugidx = tg.grpsugidx;
            out.append("\n(");
            for (unsigned int j = 0; j < ugroups[ugidx].size(); j++) {
                out.append("[").append(ugroups[ugidx][j]).append("] ");
            }
            out.append(") ->");
        }
        if (tg.kind == HighlightData::TermGroup::TGK_TERM) {
            out.append(" <").append(tg.term).append(">");
        } else {
            out.append(" {");
            for (unsigned int j = 0; j < tg.orgroups.size(); j++) {
                out.append(" {");
                for (unsigned int k = 0; k < tg.orgroups[j].size(); k++) {
                    out.append("[").append(tg.orgroups[j][k]).append("]");
                }
                out.append("}");
            }
            sprintf(cbuf, "%d", tg.slack);
            out.append("}").append(cbuf);
        }
    }
    out.append("\n");
    return out;
}

void HighlightData::append(const HighlightData& hl)
{
    uterms.insert(hl.uterms.begin(), hl.uterms.end());
    terms.insert(hl.terms.begin(), hl.terms.end());
    size_t ugsz0 = ugroups.size();
    ugroups.insert(ugroups.end(), hl.ugroups.begin(), hl.ugroups.end());

    size_t itgsize = index_term_groups.size();
    index_term_groups.insert(index_term_groups.end(),
                             hl.index_term_groups.begin(),
                             hl.index_term_groups.end());
    // Adjust the grpsugidx values for the newly inserted entries
    for (unsigned int idx = itgsize; idx < index_term_groups.size(); idx++) {
        index_term_groups[idx].grpsugidx += ugsz0;
    }
}
recoll-1.26.3/utils/closefrom.cpp0000644000175000017500000001740513533651561013660 00000000000000/* Copyright (C) 2009 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/*
 * Close all file descriptors above a given value.
 *
 * A Unix execXX() call used to execute another program does not close open 
 * file descriptors by default.
 *
 * The only descriptors closed are those on which the FD_CLOEXEC flag was
 * set. FD_CLOEXEC is not easily usable on files opened by external
 * libraries.
 *
 * There are many reasons for closing file descriptors before
 * an exec (security, pipe control, the possibility that a bug will
 *  trigger an unwanted write, etc.)
 *
 * A process has currently no POSIX way to determine the set of open file 
 * descriptors or at least the highest value. Closing all files (except a few)
 * thus implies performing a close() system call on each entry up to the 
 * maximum, which can be both relatively difficult to determine, and quite
 * high (ie: several thousands), incurring a non-negligible cost.
 *
 * A number of systems have non-portable support for mitigating or solving
 * this problem. 
 * 
 * This module supplies a portable interface to this functionality.
 *
 * The initial data on system interfaces was obtained from:
 * http://stackoverflow.com/questions/899038/\
 *   getting-the-highest-allocated-file-descriptor
 *
 * System interfaces:
 *  FreeBSD/DragonFly:
 *   - Have a closefrom() system call as of release 7.x around Sep 2009
 *   - Have a /dev/fd, directory which shows the current process' open
 *     descriptors. Only descriptors 0, 1, 2 are shown except if
 *     fdescfs is mounted which it is not by default
 *
 *  Solaris:
 *   - Solaris 10+ has closefrom, and can specify closefrom to posix_spawn()
 *
 *  Linux:
 *   - Has nothing. The method we initially used (listing /dev/fd) could
 *     deadlock in multithread fork/exec context. We now use a close()
 *     loop but there is no completely reliable way to determine the high limit.
 *     glibc maintainers think that closefrom() is a bad idea
 *     *especially* because it is implemented on *BSD and Solaris. Go
 *     figure...: https://sourceware.org/bugzilla/show_bug.cgi?id=10353
 *
 * Interface:
 *
 * int libclf_closefrom(fd)
 *  @param fd All open file descriptors with equal or higher numeric 
 *       values will be closed. fd needs not be a valid descriptor.
 *  @return 0 for success, -1 for error.
 */
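
/*
 * Illustrative usage sketch, not part of this module (the viewer command is
 * made up): closing everything above the standard descriptors in a child
 * process between fork() and exec():
 *
 *     pid_t pid = fork();
 *     if (pid == 0) {
 *         libclf_closefrom(3);    // keep only stdin, stdout, stderr
 *         execlp("someviewer", "someviewer", "somefile.pdf", (char *)0);
 *         _exit(127);
 *     }
 */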
#ifndef TEST_CLOSEFROM

#include "closefrom.h"

#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/resource.h>


/* #define DEBUG_CLOSEFROM */
#ifdef DEBUG_CLOSEFROM
#define DPRINT(X) fprintf X
#else
#define DPRINT(X)
#endif

/* Note: sudo has a closefrom() implementation which needs a lot of autoconf
 * support; we could use it instead, but it is quite close to this one anyway */

/*************************************************************************/

/* closefrom() exists on Solaris, netbsd and openbsd, but someone will
 * have to provide me the appropriate macro to test */
#if ((defined(__FreeBSD__) && __FreeBSD_version >= 702104)) || \
    defined(__DragonFly__)
/* Use closefrom() system call */
int libclf_closefrom(int fd0)
{
    DPRINT((stderr, "libclf_closefrom: using closefrom(2)\n"));
    closefrom(fd0);
    return 0;
}

/*************************************************************************/
#elif defined(F_CLOSEM)

/* Use fcntl(fd, F_CLOSEM) */

int libclf_closefrom(int fd0)
{
    DPRINT((stderr, "libclf_closefrom: using fcntl(F_CLOSEM)\n"));
    // We need a valid descriptor for this to work. Try to dup stdin, else
    // go wild
    if (fcntl(0, F_GETFL) != -1) {
        if (fd0 != 0)
            dup2(0, fd0);
    } else {
        int fd = open("/etc/group", 0); // yes i am a unix man
        if (fd >= 0 && fd != fd0) {
            dup2(fd, fd0);
            close(fd);
        }
    }
    return fcntl(fd0, F_CLOSEM, 0);
}

/*************************************************************************/
#elif 0 && (defined(linux) || defined(__linux))

/* We don't do this on linux anymore because opendir() may call
   malloc which is unsafe in the [fork-exec] interval for a
   multithreaded program. Linux does not have a good solution for
   implementing closefrom as far as I know */

/* Use /proc/self/fd directory */
#include 
#include 

int libclf_closefrom(int fd0)
{
    DIR *dirp;
    struct dirent *ent;

    DPRINT((stderr, "libclf_closefrom: using /proc\n"));
    dirp = opendir("/proc/self/fd");
    if (dirp == 0)
        return -1;

    while ((ent = readdir(dirp)) != 0) {
        int fd;
        if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) 
            continue;

        if (sscanf(ent->d_name, "%d", &fd) == 1 && fd >= fd0 && 
            fd != dirfd(dirp)) {
            close(fd);
        }
    }
    closedir(dirp);
    return 0;
}

/*************************************************************************/
#else 

/* System has no native support for this functionality.
 *
 * Close all descriptors up to compiled/configured maximum.
 * The caller will usually have an idea of a reasonable maximum, else
 * we retrieve a value from the system.
 *
 * Note that there is actually no real guarantee that no open
 * descriptor higher than the reported limit can exist, as noted by
 * the Solaris man page for closefrom()
 */

static int closefrom_maxfd = -1;

void libclf_setmaxfd(int max)
{
    closefrom_maxfd = max;
}

#include <limits.h>

#ifndef OPEN_MAX
#define OPEN_MAX 1024
#endif

int libclf_closefrom(int fd0)
{
    int i, maxfd = closefrom_maxfd;

    if (maxfd < 0) {
        maxfd = libclf_maxfd();
    }
    if (maxfd < 0)
	maxfd = OPEN_MAX;

    DPRINT((stderr, "libclf_closefrom: using loop to %d\n", maxfd));

    for (i = fd0; i < maxfd; i++) {
        (void)close(i);
    }
    return 0;
}
#endif

// Note that this will not work if the limit was lowered after a
// higher fd was opened. But we don't call setrlimit() inside recoll
// code, so we should be ok. It seems that sysconf(_SC_OPEN_MAX)
// usually reports the soft limit, so it's redundant, but it could be
// useful in case getrlimit() is not implemented (unlikely as they're
// both POSIX.1-2001?)
int libclf_maxfd(int)
{
    struct rlimit lim;
    getrlimit(RLIMIT_NOFILE, &lim);
    return int(lim.rlim_cur);
}

#else /* TEST_CLOSEFROM */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

#include "closefrom.h"

int main(int argc, char **argv)
{
    int i;

    int fd0 = open("/etc/group", 0);
    if (fd0 < 0) {
        perror("open /etc/group");
        exit(1);
    }
    
    if (dup2(fd0, 11) < 0) {
        perror("dup2->11");
        exit(1);
    }
    if (dup2(fd0, 19) < 0) {
        perror("dup2->19");
        exit(1);
    }
    if (dup2(fd0, 99)< 0) {
        perror("dup2->99 (ok)");
    }
    if (dup2(fd0, 999) < 0) {
        perror("dup3->999 (ok)");
    }

    libclf_closefrom(11);
    for (i = 0; i < 10000; i++) {
        if (fcntl(i, F_GETFL) != -1) {
            fprintf(stderr, "Descriptor %d is still open", i);
            if (i < 11)
                fprintf(stderr, " (OK)\n");
            else
                fprintf(stderr, " (BAD)\n");
        }
    }
    exit(0);
}

#endif
recoll-1.26.3/utils/idfile.h0000644000175000017500000000226013533651561012561 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _IDFILE_H_INCLUDED_
#define _IDFILE_H_INCLUDED_

#include <string>

// Look at data inside file or string, and return mime type or empty string. 
//
// The system's file utility does a bad job on mail folders. idFile
// only looks for mail file types for now, but this may change

extern std::string idFile(const char *fn);
extern std::string idFileMem(const std::string& data);
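
// Illustrative usage sketch (the path is made up):
//
//     std::string mime = idFile("/path/to/suspected/mailfolder");
//     if (mime.empty()) {
//         // Not identified here; fall back to another identification method.
//     }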

#endif /* _IDFILE_H_INCLUDED_ */
recoll-1.26.3/utils/log.h0000644000175000017500000001674313533651561012121 00000000000000/* Copyright (C) 2014 J.F.Dockes
 *	 This program is free software; you can redistribute it and/or modify
 *	 it under the terms of the GNU Lesser General Public License as published by
 *	 the Free Software Foundation; either version 2.1 of the License, or
 *	 (at your option) any later version.
 *
 *	 This program is distributed in the hope that it will be useful,
 *	 but WITHOUT ANY WARRANTY; without even the implied warranty of
 *	 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *	 GNU Lesser General Public License for more details.
 *
 *	 You should have received a copy of the GNU Lesser General Public License
 *	 along with this program; if not, write to the
 *	 Free Software Foundation, Inc.,
 *	 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
/* Copyright (C) 2006-2016 J.F.Dockes
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301 USA
 */
#ifndef _LOG_H_X_INCLUDED_
#define _LOG_H_X_INCLUDED_

#include <errno.h>

#include <string.h>
#include <string>
#include <iostream>
#include <fstream>

#ifndef LOGGER_THREADSAFE
#define LOGGER_THREADSAFE 1
#endif

#if LOGGER_THREADSAFE
#include <mutex>
#endif

// Can't use the symbolic Logger::LLXX names in preproc. 6 is LLDEB1
#ifndef LOGGER_STATICVERBOSITY
#define LOGGER_STATICVERBOSITY 5
#endif

class Logger {
public:
    /** Initialize logging to file name. Use "stderr" for stderr
       output. Creates the singleton logger object */
    static Logger *getTheLog(const std::string& fn);

    bool reopen(const std::string& fn);
    
    std::ostream& getstream() {
        return m_tocerr ? std::cerr : m_stream;
    }
    enum LogLevel {LLNON=0, LLFAT=1, LLERR=2, LLINF=3, LLDEB=4,
                   LLDEB0=5, LLDEB1=6, LLDEB2=7};
    void setLogLevel(LogLevel level) {
        m_loglevel = level;
    }
    int getloglevel() {
        return m_loglevel;
    }

#if LOGGER_THREADSAFE
    std::recursive_mutex& getmutex() {
        return m_mutex;
    }
#endif
    
private:
    bool m_tocerr{false};
    int m_loglevel{LLERR};
    std::string m_fn;
    std::ofstream m_stream;
#if LOGGER_THREADSAFE
    std::recursive_mutex m_mutex;
#endif

    Logger(const std::string& fn);
    Logger(const Logger &);
    Logger& operator=(const Logger &);
};
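
// Illustrative usage sketch (the log file name is made up): initialize the
// singleton once, raise the verbosity, then log through the macros defined
// below.
//
//     Logger::getTheLog("/tmp/myapp.log")->setLogLevel(Logger::LLDEB);
//     LOGDEB("frobnicate: value " << 42 << "\n");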

#define LOGGER_PRT (Logger::getTheLog("")->getstream())

#if LOGGER_THREADSAFE
#define LOGGER_LOCK \
    std::unique_lock<std::recursive_mutex> lock(Logger::getTheLog("")->getmutex())
#else
#define LOGGER_LOCK
#endif

#ifndef LOGGER_LOCAL_LOGINC
#define LOGGER_LOCAL_LOGINC 0
#endif

#define LOGGER_LEVEL (Logger::getTheLog("")->getloglevel() + \
                      LOGGER_LOCAL_LOGINC)

#define LOGGER_DOLOG(L,X) LOGGER_PRT << ":" << L << ":" <<            \
                                  __FILE__ << ":" << __LINE__ << "::" << X \
    << std::flush

#if LOGGER_STATICVERBOSITY >= 7
#define LOGDEB2(X) {                                                    \
        if (LOGGER_LEVEL >= Logger::LLDEB2) {                           \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLDEB2, X);                            \
        }                                                               \
    }
#else
#define LOGDEB2(X)
#endif

#if LOGGER_STATICVERBOSITY >= 6
#define LOGDEB1(X) {                                                    \
        if (LOGGER_LEVEL >= Logger::LLDEB1) {                           \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLDEB1, X);                            \
        }                                                               \
    }
#else
#define LOGDEB1(X)
#endif

#if LOGGER_STATICVERBOSITY >= 5
#define LOGDEB0(X) {                                                    \
        if (LOGGER_LEVEL >= Logger::LLDEB0) {                           \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLDEB0, X);                            \
        }                                                               \
    }
#else
#define LOGDEB0(X)
#endif

#if LOGGER_STATICVERBOSITY >= 4
#define LOGDEB(X) {                                                     \
        if (LOGGER_LEVEL >= Logger::LLDEB) {                            \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLDEB, X);                             \
        }                                                               \
    }
#else
#define LOGDEB(X)
#endif

#if LOGGER_STATICVERBOSITY >= 3
#define LOGINF(X) {                                                     \
        if (LOGGER_LEVEL >= Logger::LLINF) {                            \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLINF, X);                             \
        }                                                               \
    }
#else
#define LOGINF(X)
#endif
#define LOGINFO LOGINF

#if LOGGER_STATICVERBOSITY >= 2
#define LOGERR(X) {                                                     \
        if (LOGGER_LEVEL >= Logger::LLERR) {                            \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLERR, X);                             \
        }                                                               \
    }
#else
#define LOGERR(X)
#endif

#if LOGGER_STATICVERBOSITY >= 1
#define LOGFAT(X) {                                                     \
        if (LOGGER_LEVEL >= Logger::LLFAT) {                            \
            LOGGER_LOCK;                                                \
            LOGGER_DOLOG(Logger::LLFAT, X);                             \
        }                                                               \
    }
#else
#define LOGFAT(X)
#endif
#define LOGFATAL LOGFAT

#if defined(sun) || defined(_WIN32)
#define LOGSYSERR(who, what, arg) {                                     \
        LOGERR(who << ": " << what << "("  << arg << "): errno " << errno << \
               ": " << strerror(errno) << std::endl);                   \
    }
#else // !WINDOWS->
#if (_POSIX_C_SOURCE >= 200112L) && !  _GNU_SOURCE
#define LOGSYSERR(who, what, arg) {                                     \
        char buf[200]; buf[0] = 0; strerror_r(errno, buf, 200);         \
        LOGERR(who << ": " << what << "("  << arg << "): errno " << errno << \
               ": " << buf << std::endl);                               \
    }
#else
#define LOGSYSERR(who, what, arg) {                                     \
        char buf[200]; buf[0] = 0;                                      \
        LOGERR(who << ": " << what << "("  << arg << "): errno " << errno << \
               ": " << strerror_r(errno, buf, 200) << std::endl);       \
    }
#endif
#endif // not windows

#endif /* _LOG_H_X_INCLUDED_ */
recoll-1.26.3/utils/transcode.cpp0000644000175000017500000001340613533651561013646 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "autoconfig.h"

#include <errno.h>
#include <iconv.h>
#include <string.h>
#include <wchar.h>

#include <mutex>
#include <string>

#include "transcode.h"
#include "log.h"

using namespace std;

// We gain approximately 25% exec time for word at a time conversions by
// caching the iconv_open thing. 
//
// We may also lose some concurrency on multiproc because of the
// necessary locking, but we only have one processing-intensive
// possible thread for now (the indexing one), so this is probably not
// an issue (and could be worked around with a slightly more
// sophisticated approach).
#define ICONV_CACHE_OPEN

bool transcode(const string &in, string &out, const string &icode,
               const string &ocode, int *ecnt)
{
    LOGDEB2("Transcode: " << icode << " -> " << ocode << "\n");
#ifdef ICONV_CACHE_OPEN
    static iconv_t ic = (iconv_t)-1;
    static string cachedicode;
    static string cachedocode;
    static std::mutex o_cachediconv_mutex;
    std::unique_lock<std::mutex> lock(o_cachediconv_mutex);
#else 
    iconv_t ic;
#endif
    bool ret = false;
    const int OBSIZ = 8192;
    char obuf[OBSIZ], *op;
    bool icopen = false;
    int mecnt = 0;
    out.erase();
    size_t isiz = in.length();
    out.reserve(isiz);
    const char *ip = in.c_str();

#ifdef ICONV_CACHE_OPEN
    if (cachedicode.compare(icode) || cachedocode.compare(ocode)) {
        if (ic != (iconv_t)-1) {
            iconv_close(ic);
            ic = (iconv_t)-1;
        }
#endif
        if((ic = iconv_open(ocode.c_str(), icode.c_str())) == (iconv_t)-1) {
            out = string("iconv_open failed for ") + icode
                + " -> " + ocode;
#ifdef ICONV_CACHE_OPEN
            cachedicode.erase();
            cachedocode.erase();
#endif
            goto error;
        }

#ifdef ICONV_CACHE_OPEN
        cachedicode.assign(icode);
        cachedocode.assign(ocode);
    }
#endif

    icopen = true;

    while (isiz > 0) {
        size_t osiz;
        op = obuf;
        osiz = OBSIZ;

        if(iconv(ic, (ICONV_CONST char **)&ip, &isiz, &op, &osiz) == (size_t)-1
           && errno != E2BIG) {
#if 0
            out.erase();
            out = string("iconv failed for ") + icode + " -> " + ocode +
                " : " + strerror(errno);
#endif
            if (errno == EILSEQ) {
                LOGDEB1("transcode:iconv: bad input seq.: shift, retry\n");
                LOGDEB1(" Input consumed " << ip - in.c_str() << " output produced " <<
                        out.length() + OBSIZ - osiz << "\n");
                out.append(obuf, OBSIZ - osiz);
                out += "?";
                mecnt++;
                ip++;isiz--;
                continue;
            }
            // Normally only EINVAL is possible here: incomplete
            // multibyte sequence at the end. This is not fatal. Any
            // other is supposedly impossible, we return an error
            if (errno == EINVAL)
                goto out;
            else
                goto error;
        }

        out.append(obuf, OBSIZ - osiz);
    }

#ifndef ICONV_CACHE_OPEN
    icopen = false;
    if(iconv_close(ic) == -1) {
        out.erase();
        out = string("iconv_close failed for ") + icode + " -> " + ocode;
        goto error;
    }
#endif

out:
    ret = true;

error:

    if (icopen) {
#ifndef ICONV_CACHE_OPEN
        iconv_close(ic);
#else
        // Just reset conversion
        iconv(ic, 0, 0, 0, 0);
#endif
    }

    if (mecnt)
        LOGDEB("transcode: [" << icode << "]->[" << ocode << "] " <<
               mecnt << " errors\n");
    if (ecnt)
        *ecnt = mecnt;
    return ret;
}

bool wchartoutf8(const wchar_t *in, std::string& out)
{
    static iconv_t ic = (iconv_t)-1;
    if (ic == (iconv_t)-1) {
        if((ic = iconv_open("UTF-8", "WCHAR_T")) == (iconv_t)-1) {
            LOGERR("wchartoutf8: iconv_open failed\n");
            return false;
        }
    }
    const int OBSIZ = 8192;
    char obuf[OBSIZ], *op;
    out.erase();
    size_t isiz = 2 * wcslen(in);
    out.reserve(isiz);
    const char *ip = (const char *)in;

    while (isiz > 0) {
        size_t osiz;
        op = obuf;
        osiz = OBSIZ;

        if(iconv(ic, (ICONV_CONST char **)&ip, &isiz, &op, &osiz) == (size_t)-1
           && errno != E2BIG) {
            LOGERR("wchartoutf8: iconv error, errno: " << errno << endl);
            return false;
        }
        out.append(obuf, OBSIZ - osiz);
    }
    return true;
}

bool utf8towchar(const std::string& in, wchar_t *out, size_t obytescap)
{
    static iconv_t ic = (iconv_t)-1;
    if (ic == (iconv_t)-1) {
        if((ic = iconv_open("WCHAR_T", "UTF-8")) == (iconv_t)-1) {
            LOGERR("utf8towchar: iconv_open failed\n");
            return false;
        }
    }
    size_t isiz = in.size();
    const char *ip = in.c_str();
    size_t osiz = (size_t)obytescap-2;
    char *op = (char *)out;
    if (iconv(ic, (ICONV_CONST char **)&ip, &isiz, &op, &osiz) == (size_t)-1) {
        LOGERR("utf8towchar: iconv error, errno: " << errno << endl);
        return false;
    }
    *op++ = 0;
    *op = 0;
    return true;
}
recoll-1.26.3/utils/transcode.h
/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _TRANSCODE_H_INCLUDED_
#define _TRANSCODE_H_INCLUDED_
/** 
 * 
 */
#include <string>
/**
 * c++ized interface to iconv
 *
 * @param in input string
 * @param out output string
 * @param icode input encoding
 * @param ocode output encoding
 * @param ecnt (output) number of transcoding errors
 * @return true if transcoding succeeded, even with errors. False for global
 *     errors like unknown charset names
 */
extern bool transcode(const std::string &in, std::string &out, 
		      const std::string &icode,
		      const std::string &ocode, 
		      int *ecnt = 0);
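
/* Usage sketch (illustrative, not part of the original source): convert a
 * latin-1 string to utf-8. Invalid input sequences are replaced with '?'
 * and counted in ecnt.
 *
 *     std::string out;
 *     int errors = 0;
 *     if (transcode(input, out, "ISO-8859-1", "UTF-8", &errors) && errors == 0) {
 *         // out now holds the utf-8 text
 *     }
 */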

#ifdef _WIN32
extern bool wchartoutf8(const wchar_t *in, std::string& out);
extern bool utf8towchar(const std::string& in, wchar_t *out, size_t obytescap);
#endif

#endif /* _TRANSCODE_H_INCLUDED_ */
recoll-1.26.3/utils/rclutil.cpp
/* Copyright (C) 2016-2019 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "autoconfig.h"

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include "safefcntl.h"
#include "safeunistd.h"
#include "dirent.h"
#include "cstr.h"
#ifdef _WIN32
#include "safewindows.h"
#else
#include <sys/param.h>
#include <pwd.h>
#include <unistd.h>
#endif
#include <ctype.h>
#include <string.h>
#include <sys/types.h>
#include "safesysstat.h"

#include <list>
#include <map>
#include <mutex>
#include <set>
#include <string>
#include <unordered_map>

#include "rclutil.h"
#include "pathut.h"
#include "wipedir.h"
#include "transcode.h"
#include "md5ut.h"
#include "log.h"
#include "smallut.h"

using namespace std;

template <class T> void map_ss_cp_noshr(T s, T *d)
{
    for (const auto& ent : s) {
        d->insert(
            pair<string, string>(string(ent.first.begin(), ent.first.end()),
                                 string(ent.second.begin(), ent.second.end())));
    }
}
template void map_ss_cp_noshr<map<string, string>>(
    map<string, string> s, map<string, string> *d);
template void map_ss_cp_noshr<unordered_map<string, string>>(
    unordered_map<string, string> s, unordered_map<string, string> *d);

#ifdef _WIN32
static bool path_hasdrive(const string& s)
{
    if (s.size() >= 2 && isalpha(s[0]) && s[1] == ':') {
        return true;
    }
    return false;
}
static bool path_isdriveabs(const string& s)
{
    if (s.size() >= 3 && isalpha(s[0]) && s[1] == ':' && s[2] == '/') {
        return true;
    }
    return false;
}

#include 
#pragma comment(lib, "shlwapi.lib")

string path_thisexecpath()
{
    wchar_t text[MAX_PATH];
    GetModuleFileNameW(NULL, text, MAX_PATH);
#ifdef NTDDI_WIN8_future
    PathCchRemoveFileSpec(text, MAX_PATH);
#else
    PathRemoveFileSpecW(text);
#endif
    string path;
    wchartoutf8(text, path);
    if (path.empty()) {
        path = "c:/";
    }

    return path;
}

// On Windows, we use a subdirectory named "rcltmp" inside the Windows
// temp location to create the temporary files in.
static const string& path_wingetrcltmpdir()
{
    // Constant: only need to compute once
    static string tdir;
    if (tdir.empty()) {
        wchar_t dbuf[MAX_PATH + 1];
        GetTempPathW(MAX_PATH + 1, dbuf);
        wchartoutf8(dbuf, tdir);
        tdir = path_cat(tdir, "rcltmp");
        if (!path_exists(tdir)) {
            if (path_makepath(tdir, 0700)) {
                LOGSYSERR("path_wingettempfilename", "path_makepath", tdir);
            }
        }
    }
    return tdir;
}

static bool path_gettempfilename(string& filename, string& reason)
{
    string tdir = path_wingetrcltmpdir();
    wchar_t dbuf[MAX_PATH + 1];
    utf8towchar(tdir, dbuf, MAX_PATH);

    wchar_t buf[MAX_PATH + 1];
    static wchar_t prefix[]{L"rcl"};
    GetTempFileNameW(dbuf, prefix, 0, buf);
    wchartoutf8(buf, filename);

    // Windows will have created a temp file, we delete it.
    if (!DeleteFileW(buf)) {
        LOGSYSERR("path_wingettempfilename", "DeleteFileW", filename);
    } else {
        LOGDEB1("path_wingettempfilename: DeleteFile " << filename << " Ok\n");
    }
    path_slashize(filename);
    return true;
}

#else // _WIN32 above

static bool path_gettempfilename(string& filename, string& reason)
{
    filename = path_cat(tmplocation(), "rcltmpfXXXXXX");
    char *cp = strdup(filename.c_str());
    if (!cp) {
        reason = "Out of memory (for file name !)\n";
        return false;
    }

    // Using mkstemp this way is awful (both the suffix adding and
    // using mkstemp() instead of mktemp() just to avoid the warnings)
    int fd;
    if ((fd = mkstemp(cp)) < 0) {
        free(cp);
        reason = "TempFileInternal: mkstemp failed\n";
        return false;
    }
    close(fd);
    unlink(cp);
    filename = cp;
    free(cp);
    return true;
}
#endif // posix

// Check if path is either non-existing or an empty directory.
bool path_empty(const string& path)
{
    if (path_isdir(path)) {
        string reason;
        std::set<std::string> entries;
        if (!readdir(path, reason, entries) || entries.empty()) {
            return true;
        }
        return false;
    } else {
        return !path_exists(path);
    }
}

string path_defaultrecollconfsubdir()
{
#ifdef _WIN32
    return "Recoll";
#else
    return ".recoll";
#endif
}

// Location for sample config, filters, etc. (e.g. /usr/share/recoll/)
const string& path_pkgdatadir()
{
    static string datadir;
    if (datadir.empty()) {
#ifdef _WIN32
        datadir = path_cat(path_thisexecpath(), "Share");
#else
        const char *cdatadir = getenv("RECOLL_DATADIR");
        if (cdatadir == 0) {
            // If not in environment, use the compiled-in constant.
            datadir = RECOLL_DATADIR;
        } else {
            datadir = cdatadir;
        }
#endif
    }
    return datadir;
}

// Printable url: this is used to transcode from the system charset
// into either utf-8 if transcoding succeeds, or url-encoded
bool printableUrl(const string& fcharset, const string& in, string& out)
{
#ifdef _WIN32
    // On windows our paths are always utf-8
    out = in;
#else
    int ecnt = 0;
    if (!transcode(in, out, fcharset, "UTF-8", &ecnt) || ecnt) {
        out = url_encode(in, 7);
    }
#endif
    return true;
}

string url_gpathS(const string& url)
{
#ifdef _WIN32
    string u = url_gpath(url);
    string nu;
    if (path_hasdrive(u)) {
        nu.append(1, '/');
        nu.append(1, u[0]);
        if (path_isdriveabs(u)) {
            nu.append(u.substr(2));
        } else {
            // This should be an error really
            nu.append(1, '/');
            nu.append(u.substr(2));
        }
    }
    return nu;
#else
    return url_gpath(url);
#endif
}

const string& tmplocation()
{
    static string stmpdir;
    if (stmpdir.empty()) {
        const char *tmpdir = getenv("RECOLL_TMPDIR");
        if (tmpdir == 0) {
            tmpdir = getenv("TMPDIR");
        }
        if (tmpdir == 0) {
            tmpdir = getenv("TMP");
        }
        if (tmpdir == 0) {
            tmpdir = getenv("TEMP");
        }
        if (tmpdir == 0) {
#ifdef _WIN32
            wchar_t bufw[MAX_PATH + 1];
            GetTempPathW(MAX_PATH + 1, bufw);
            wchartoutf8(bufw, stmpdir);
#else
            stmpdir = "/tmp";
#endif
        } else {
            stmpdir = tmpdir;
        }
        stmpdir = path_canon(stmpdir);
    }

    return stmpdir;
}

bool maketmpdir(string& tdir, string& reason)
{
#ifndef _WIN32
    tdir = path_cat(tmplocation(), "rcltmpXXXXXX");

    char *cp = strdup(tdir.c_str());
    if (!cp) {
        reason = "maketmpdir: out of memory (for file name !)\n";
        tdir.erase();
        return false;
    }

    // There is a race condition between name computation and
    // mkdir. try to make sure that we at least don't shoot ourselves
    // in the foot
#if !defined(HAVE_MKDTEMP)
    static std::mutex mmutex;
    std::unique_lock<std::mutex> lock(mmutex);
#endif

    if (!
#ifdef HAVE_MKDTEMP
        mkdtemp(cp)
#else
        mktemp(cp)
#endif // HAVE_MKDTEMP
        ) {
        free(cp);
        reason = "maketmpdir: mktemp failed for [" + tdir + "] : " +
            strerror(errno);
        tdir.erase();
        return false;
    }
    tdir = cp;
    free(cp);
#else // _WIN32
    // There is a race condition between name computation and
    // mkdir. try to make sure that we at least don't shoot ourselves
    // in the foot
    static std::mutex mmutex;
    std::unique_lock<std::mutex> lock(mmutex);
    if (!path_gettempfilename(tdir, reason)) {
        return false;
    }
#endif

    // At this point the directory does not exist yet except if we used
    // mkdtemp

#if !defined(HAVE_MKDTEMP) || defined(_WIN32)
    if (mkdir(tdir.c_str(), 0700) < 0) {
        reason = string("maketmpdir: mkdir ") + tdir + " failed";
        tdir.erase();
        return false;
    }
#endif

    return true;
}


class TempFile::Internal {
public:
    Internal(const std::string& suffix);
    ~Internal();
    friend class TempFile;
private:
    std::string m_filename;
    std::string m_reason;
    bool m_noremove{false};
};

TempFile::TempFile(const string& suffix)
    : m(new Internal(suffix))
{
}

TempFile::TempFile()
{
    m = std::shared_ptr<Internal>();
}

const char *TempFile::filename() const
{
    return m ? m->m_filename.c_str() : "";
}

const std::string& TempFile::getreason() const
{
    static string fatal{"fatal error"};
    return m ? m->m_reason : fatal;
}

void TempFile::setnoremove(bool onoff)
{
    if (m)
        m->m_noremove = onoff;
}

bool TempFile::ok() const
{
    return m ? !m->m_filename.empty() : false;
}

TempFile::Internal::Internal(const string& suffix)
{
    // Because we need a specific suffix, we can't use mkstemp
    // well. There is a race condition between name computation and
    // file creation. Try to make sure that we at least don't shoot
    // ourselves in the foot. Maybe we'll use mkstemps one day.
    static std::mutex mmutex;
    std::unique_lock<std::mutex> lock(mmutex);

    if (!path_gettempfilename(m_filename, m_reason)) {
        return;
    }
    m_filename += suffix;
    LOGDEB1("TempFile: filename: " << m_filename << endl);
    int fd1 = open(m_filename.c_str(), O_CREAT | O_EXCL, 0600);
    if (fd1 < 0) {
        m_reason = string("Open/create error. errno : ") +
            lltodecstr(errno) + " file name: " + m_filename;
        m_filename.erase();
    } else {
        close(fd1);
    }
}
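
// Usage sketch (illustrative, not part of the original source): create a
// temporary file with a specific suffix. The file is removed when the last
// TempFile copy referencing it is destroyed, unless setnoremove(true) was
// called.
//
//     TempFile temp(".html");
//     if (!temp.ok()) {
//         LOGERR("Temp file creation failed: " << temp.getreason() << "\n");
//     } else {
//         writeExtractedData(temp.filename()); // writeExtractedData() is made up
//     }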

const std::string& TempFile::rcltmpdir()
{
#ifdef _WIN32
    return path_wingetrcltmpdir();
#else
    return tmplocation();
#endif
}

#ifdef _WIN32
static list<string> remainingTempFileNames;
static std::mutex remTmpFNMutex;
#endif

TempFile::Internal::~Internal()
{
    if (!m_filename.empty() && !m_noremove) {
        LOGDEB1("TempFile:~: unlinking " << m_filename << endl);
        if (unlink(m_filename.c_str()) != 0) {
            LOGSYSERR("TempFile:~", "unlink", m_filename);
#ifdef _WIN32
            {
                std::unique_lock<std::mutex> lock(remTmpFNMutex);
                remainingTempFileNames.push_back(m_filename);
            }
#endif
        } else {
            LOGDEB1("TempFile:~: unlink " << m_filename << " Ok\n");
        }
    }
}

// On Windows we sometimes fail to remove temporary files because
// they are open. It's difficult to make sure this does not
// happen, so we add a cleaning pass after clearing the input
// handlers cache (which should kill subprocesses etc.)
void TempFile::tryRemoveAgain()
{
#ifdef _WIN32
    LOGDEB1("TempFile::tryRemoveAgain. List size: " <<
            remainingTempFileNames.size() << endl);
    std::unique_lock<std::mutex> lock(remTmpFNMutex);
    std::list<string>::iterator pos = remainingTempFileNames.begin();
    while (pos != remainingTempFileNames.end()) {
        if (unlink(pos->c_str()) != 0) {
            LOGSYSERR("TempFile::tryRemoveAgain", "unlink", *pos);
            pos++;
        } else {
            pos = remainingTempFileNames.erase(pos);
        }
    }
#endif
}

TempDir::TempDir()
{
    if (!maketmpdir(m_dirname, m_reason)) {
        m_dirname.erase();
        return;
    }
    LOGDEB("TempDir::TempDir: -> " << m_dirname << endl);
}

TempDir::~TempDir()
{
    if (!m_dirname.empty()) {
        LOGDEB("TempDir::~TempDir: erasing " << m_dirname << endl);
        (void)wipedir(m_dirname, true, true);
        m_dirname.erase();
    }
}

bool TempDir::wipe()
{
    if (m_dirname.empty()) {
        m_reason = "TempDir::wipe: no directory !\n";
        return false;
    }
    if (wipedir(m_dirname, false, true)) {
        m_reason = "TempDir::wipe: wipedir failed\n";
        return false;
    }
    return true;
}

// Freedesktop standard paths for cache directory (thumbnails are now in there)
static const string& xdgcachedir()
{
    static string xdgcache;
    if (xdgcache.empty()) {
        const char *cp = getenv("XDG_CACHE_HOME");
        if (cp == 0) {
            xdgcache = path_cat(path_home(), ".cache");
        } else {
            xdgcache = string(cp);
        }
    }
    return xdgcache;
}

static const string& thumbnailsdir()
{
    static string thumbnailsd;
    if (thumbnailsd.empty()) {
        thumbnailsd = path_cat(xdgcachedir(), "thumbnails");
        if (access(thumbnailsd.c_str(), 0) != 0) {
            thumbnailsd = path_cat(path_home(), ".thumbnails");
        }
    }
    return thumbnailsd;
}

// Place for 256x256 files
static const string thmbdirlarge = "large";
// 128x128
static const string thmbdirnormal = "normal";

static void thumbname(const string& url, string& name)
{
    string digest;
    string l_url = url_encode(url);
    MD5String(l_url, digest);
    MD5HexPrint(digest, name);
    name += ".png";
}

bool thumbPathForUrl(const string& url, int size, string& path)
{
    string name;
    thumbname(url, name);
    if (size <= 128) {
        path = path_cat(thumbnailsdir(), thmbdirnormal);
        path = path_cat(path, name);
        if (access(path.c_str(), R_OK) == 0) {
            return true;
        }
    }
    path = path_cat(thumbnailsdir(), thmbdirlarge);
    path = path_cat(path, name);
    if (access(path.c_str(), R_OK) == 0) {
        return true;
    }

    // File does not exist. Path corresponds to the large version at this point,
    // fix it if needed.
    if (size <= 128) {
        path = path_cat(thumbnailsdir(), thmbdirnormal);
        path = path_cat(path, name);
    }
    return false;
}
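
// Usage sketch (illustrative, not part of the original source): look up a
// freedesktop thumbnail for a document URL.
//
//     std::string tpath;
//     if (thumbPathForUrl("file:///home/me/doc.pdf", 128, tpath)) {
//         // tpath is an existing thumbnail file
//     } else {
//         // no thumbnail found; tpath is where one could be created
//     }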

void rclutil_init_mt()
{
    path_pkgdatadir();
    tmplocation();
    thumbnailsdir();
}
recoll-1.26.3/utils/utf8iter.h
/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _UTF8ITER_H_INCLUDED_
#define _UTF8ITER_H_INCLUDED_

#ifdef UTF8ITER_CHECK
#include "assert.h"
#endif
#include <string>

/** 
 * A small helper class to iterate over utf-8 strings. This is not an
 * STL iterator and does not do much error checking. It is designed purely
 * for recoll usage, where the utf-8 string comes out of iconv in most cases
 * and is assumed legal. We just try to catch cases where there would be
 * a risk of crash. A short usage sketch follows the class definition.
 */
class Utf8Iter {
public:
    Utf8Iter(const std::string &in) 
	: m_sp(&in) {
	update_cl();
    }

    const std::string& buffer() const {
        return *m_sp;
    }

    void rewind() {
	m_cl = 0; 
	m_pos = 0; 
	m_charpos = 0; 
	update_cl();
    }

    /** "Direct" access. Awfully inefficient as we skip from start or current
     * position at best. This can only be useful for a lookahead from the
     * current position */
    unsigned int operator[](std::string::size_type charpos) const {
	std::string::size_type mypos = 0;
	unsigned int mycp = 0;
	if (charpos >= m_charpos) {
	    mypos = m_pos;
	    mycp = m_charpos;
	}
	int l;
	while (mypos < m_sp->length() && mycp != charpos) {
	    l = get_cl(mypos);
	    if (l <= 0 || !poslok(mypos, l) || !checkvalidat(mypos, l))
		return (unsigned int)-1;
	    mypos += l;
	    ++mycp;
	}
	if (mypos < m_sp->length() && mycp == charpos) {
	    l = get_cl(mypos);
	    if (poslok(mypos, l) && checkvalidat(mypos, l))
		return getvalueat(mypos, l);
	}
	return (unsigned int)-1;
    }

    /** Increment current position to next utf-8 char */
    std::string::size_type operator++(int) {
	// Note: m_cl may be zero at eof if user's test not right
	// this shouldn't crash the program until actual data access
#ifdef UTF8ITER_CHECK
	assert(m_cl != 0);
#endif
	if (m_cl == 0)
	    return std::string::npos;

	m_pos += m_cl;
	m_charpos++;
	update_cl();
	return m_pos;
    }

    /** operator* returns the ucs4 value as a machine integer*/
    unsigned int operator*() {
#ifdef UTF8ITER_CHECK
	assert(m_cl > 0);
#endif
	return m_cl == 0 ? (unsigned int)-1 : getvalueat(m_pos, m_cl);
    }

    /** Append current utf-8 possibly multi-byte character to string param.
	This needs to be fast. No error checking. */
    unsigned int appendchartostring(std::string &out) const {
#ifdef UTF8ITER_CHECK
	assert(m_cl != 0);
#endif
	out.append(&(*m_sp)[m_pos], m_cl);
	return m_cl;
    }

    /** Return current character as string */
    operator std::string() {
#ifdef UTF8ITER_CHECK
	assert(m_cl != 0);
#endif
	return m_cl > 0 ? m_sp->substr(m_pos, m_cl) : std::string();
    }

    bool eof() const {
	return m_pos == m_sp->length();
    }

    bool error() const {
	return m_cl == 0;
    }

    /** Return current byte offset in input string */
    std::string::size_type getBpos() const {
	return m_pos;
    }

    /** Return current character length */
    std::string::size_type getBlen() const {
	return m_cl;
    }

    /** Return current unicode character offset in input string */
    std::string::size_type getCpos() const {
	return m_charpos;
    }

private:
    // String we're working with
    const std::string*     m_sp; 
    // Character length at current position. A value of zero indicates
    // an error.
    unsigned int m_cl{0};
    // Current byte offset in string.
    std::string::size_type m_pos{0}; 
    // Current character position
    unsigned int      m_charpos{0}; 

    // Check position and cl against string length
    bool poslok(std::string::size_type p, int l) const {
#ifdef UTF8ITER_CHECK
	assert(p != std::string::npos && l > 0 && p + l <= m_sp->length());
#endif
	return p != std::string::npos && l > 0 && p + l <= m_sp->length();
    }

    // Update current char length in object state, check
    // for errors
    inline void update_cl() {
	m_cl = 0;
	if (m_pos >= m_sp->length())
	    return;
	m_cl = get_cl(m_pos);
	if (!poslok(m_pos, m_cl)) {
	    // Used to set eof here for safety, but this is bad because it
	    // basically prevents the caller to discriminate error and eof.
	    //	    m_pos = m_sp->length();
	    m_cl = 0;
	    return;
	}
	if (!checkvalidat(m_pos, m_cl)) {
	    m_cl = 0;
	}
    }

    inline bool checkvalidat(std::string::size_type p, int l) const {
	switch (l) {
	case 1: 
	    return (unsigned char)(*m_sp)[p] < 128;
	case 2: 
	    return (((unsigned char)(*m_sp)[p]) & 224) == 192
		&& (((unsigned char)(*m_sp)[p+1]) & 192) == 128;
	case 3: 
	    return (((unsigned char)(*m_sp)[p]) & 240) == 224
		   && (((unsigned char)(*m_sp)[p+1]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+2]) & 192) ==  128
		   ;
	case 4: 
	    return (((unsigned char)(*m_sp)[p]) & 248) == 240
		   && (((unsigned char)(*m_sp)[p+1]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+2]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+3]) & 192) ==  128
		;
	default:
	    return false;
	}
    }

    // Get character byte length at specified position. Returns 0 for error.
    inline int get_cl(std::string::size_type p) const {
	unsigned int z = (unsigned char)(*m_sp)[p];
	if (z <= 127) {
	    return 1;
	} else if ((z & 224) == 192) {
	    return 2;
	} else if ((z & 240) == 224) {
	    return 3;
	} else if ((z & 248) == 240) {
	    return 4;
	}
#ifdef UTF8ITER_CHECK
	assert(z <= 127 || (z & 224) == 192 || (z & 240) == 224 ||
	       (z & 248) == 240);
#endif
	return 0;
    }

    // Compute value at given position. No error checking.
    inline unsigned int getvalueat(std::string::size_type p, int l) const {
	switch (l) {
	case 1: 
#ifdef UTF8ITER_CHECK
	    assert((unsigned char)(*m_sp)[p] < 128);
#endif
	    return (unsigned char)(*m_sp)[p];
	case 2: 
#ifdef UTF8ITER_CHECK
	    assert(
		   ((unsigned char)(*m_sp)[p] & 224) == 192
		   && ((unsigned char)(*m_sp)[p+1] & 192) ==  128
		   );
#endif
	    return ((unsigned char)(*m_sp)[p] - 192) * 64 + 
		(unsigned char)(*m_sp)[p+1] - 128 ;
	case 3: 
#ifdef UTF8ITER_CHECK
	    assert(
		   (((unsigned char)(*m_sp)[p]) & 240) == 224
		   && (((unsigned char)(*m_sp)[p+1]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+2]) & 192) ==  128
		   );
#endif

	    return ((unsigned char)(*m_sp)[p] - 224) * 4096 + 
		((unsigned char)(*m_sp)[p+1] - 128) * 64 + 
		(unsigned char)(*m_sp)[p+2] - 128;
	case 4: 
#ifdef UTF8ITER_CHECK
	    assert(
		   (((unsigned char)(*m_sp)[p]) & 248) == 240
		   && (((unsigned char)(*m_sp)[p+1]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+2]) & 192) ==  128
		   && (((unsigned char)(*m_sp)[p+3]) & 192) ==  128
		   );
#endif

	    return ((unsigned char)(*m_sp)[p]-240)*262144 + 
		((unsigned char)(*m_sp)[p+1]-128)*4096 + 
		((unsigned char)(*m_sp)[p+2]-128)*64 + 
		(unsigned char)(*m_sp)[p+3]-128;

	default:
#ifdef UTF8ITER_CHECK
	    assert(l <= 4);
#endif
	    return (unsigned int)-1;
	}
    }

};
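
/* Usage sketch (illustrative, not part of the original source): walk a
 * utf-8 string one code point at a time, stopping at end of data or on a
 * bad byte sequence.
 *
 *     Utf8Iter it(somestring);
 *     for (; !it.eof() && !it.error(); it++) {
 *         unsigned int codepoint = *it;    // ucs-4 value
 *         // ... use codepoint, or it.appendchartostring(out) ...
 *     }
 */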


extern void utf8truncate(std::string& s, int maxlen);

#endif /* _UTF8ITER_H_INCLUDED_ */
recoll-1.26.3/utils/mimeparse.h
/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _MIME_H_INCLUDED_
#define _MIME_H_INCLUDED_
/*
  Mime definitions RFC to 4-9-2006:

  2045 Multipurpose Internet Mail Extensions (MIME) Part One: Format of
  Internet Message Bodies. N. Freed, N. Borenstein. November 1996.
  (Format: TXT=72932 bytes) (Obsoletes RFC1521, RFC1522, RFC1590)
  (Updated by RFC2184, RFC2231) (Status: DRAFT STANDARD)

  2046 Multipurpose Internet Mail Extensions (MIME) Part Two: Media
  Types. N. Freed, N. Borenstein. November 1996. (Format: TXT=105854
  bytes) (Obsoletes RFC1521, RFC1522, RFC1590) (Updated by RFC2646,
  RFC3798) (Status: DRAFT STANDARD)

  2047 MIME (Multipurpose Internet Mail Extensions) Part Three: Message
  Header Extensions for Non-ASCII Text. K. Moore. November 1996.
  (Format: TXT=33262 bytes) (Obsoletes RFC1521, RFC1522, RFC1590)
  (Updated by RFC2184, RFC2231) (Status: DRAFT STANDARD)

  2183 Communicating Presentation Information in Internet Messages: The
  Content-Disposition Header Field. R. Troost, S. Dorner, K. Moore,
  Ed.. August 1997. (Format: TXT=23150 bytes) (Updates RFC1806)
  (Updated by RFC2184, RFC2231) (Status: PROPOSED STANDARD)

  2231 MIME Parameter Value and Encoded Word Extensions: Character Sets,
  Languages, and Continuations. N. Freed, K. Moore. November 1997.
  (Format: TXT=19280 bytes) (Obsoletes RFC2184) (Updates RFC2045,
  RFC2047, RFC2183) (Status: PROPOSED STANDARD)
*/


#include <time.h>

#include <string>
#include <map>

#include "base64.h"

/** A class to represent a MIME header value with parameters */
class MimeHeaderValue {
public:
    std::string value;
    std::map<std::string, std::string> params;
};

/** 
 * Parse MIME Content-type and Content-disposition value
 *
 * @param in the input string should be like: value; pn1=pv1; pn2=pv2. 
 *   Example: text/plain; charset="iso-8859-1" 
 */
extern bool parseMimeHeaderValue(const std::string& in, MimeHeaderValue& psd);
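
/* Example (illustrative, not part of the original source): parsing a
 * Content-Type value.
 *
 *     MimeHeaderValue psd;
 *     if (parseMimeHeaderValue("text/plain; charset=\"iso-8859-1\"", psd)) {
 *         // psd.value == "text/plain", psd.params["charset"] == "iso-8859-1"
 *     }
 */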

/** 
 * Quoted Printable decoding. 
 *
 * Doubles up as an rfc2231 decoder, with the help of the esc parameter.
 * RFC2045 Quoted-Printable uses '=', RFC2231 uses '%'. The two encodings are
 * otherwise similar.
 */
extern bool qp_decode(const std::string& in, std::string &out, char esc = '=');

/** Decode an Internet mail field value encoded according to rfc2047 
 *
 * Example input:  Some words =?iso-8859-1?Q?RE=A0=3A_Smoke_Tests?= more input
 * 
 * Note that MIME parameter values are explicitly NOT to be encoded with
 * this encoding which is only for headers like Subject:, To:. But it
 * is sometimes used anyway...
 * 
 * @param in input string, ascii with rfc2047 markup
 * @param out output string, encoded in utf-8
 */
extern bool rfc2047_decode(const std::string& in, std::string &out);


/** Decode RFC2822 date to unix time (gmt secs from 1970)
 *
 * @param dt date string (the part after Date: )
 * @return unix time
 */
time_t rfc2822DateToUxTime(const std::string& dt);

#endif /* _MIME_H_INCLUDED_ */
recoll-1.26.3/utils/chrono.h
/* Copyright (C) 2014 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _CHRONO_H_INCLUDED_
#define _CHRONO_H_INCLUDED_

#include <time.h>

/** Easy interface to measuring time intervals */
class Chrono {
public:
    /** Initialize, setting the origin time */
    Chrono();

    /** Re-store current time and return mS since init or last call */
    long restart();
    /** Re-store current time and return uS since init or last call */
    long urestart();

    /** Snapshot current time to static storage */
    static void refnow();

    /** Return interval value in various units.
     *
     * If frozen is set this gives the time since the last refnow()
     * call (this is to allow using one actual system call to get
     * values from many Chrono objects, e.g. when examining timeouts
     * in a queue).
     */
    long long nanos(bool frozen = false);
    long micros(bool frozen = false);
    long millis(bool frozen = false);
    float secs(bool frozen = false);

    /** Return the absolute value of the current origin */
    long long amicros() const;

    struct TimeSpec {
        time_t tv_sec; /* Time in seconds */
        long   tv_nsec; /* And nanoseconds (< 10E9) */
    };

private:
    TimeSpec m_orig;
    static TimeSpec o_now;
};
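
// Usage sketch (illustrative, not part of the original source):
//
//     Chrono chron;
//     doSomeWork();                          // doSomeWork() is made up
//     LOGDEB("Work took " << chron.millis() << " mS\n");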

#endif /* _CHRONO_H_INCLUDED_ */
recoll-1.26.3/utils/smallut.h
/* Copyright (C) 2006-2016 J.F.Dockes
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301 USA
 */
#ifndef _SMALLUT_H_INCLUDED_
#define _SMALLUT_H_INCLUDED_

#include <stdint.h>
#include <time.h>

#include <map>
#include <string>
#include <vector>

// Miscellaneous mostly string-oriented small utilities
// Note that none of the following code knows about utf-8.

// Call this before going multithread.
void smallut_init_mt();

#ifndef SMALLUT_DISABLE_MACROS
#ifndef MIN
#define MIN(A,B) (((A)<(B)) ? (A) : (B))
#endif
#ifndef MAX
#define MAX(A,B) (((A)>(B)) ? (A) : (B))
#endif
#ifndef deleteZ
#define deleteZ(X) {delete X;X = 0;}
#endif
#endif /* SMALLUT_DISABLE_MACROS */

// Case-insensitive compare. ASCII ONLY !
extern int stringicmp(const std::string& s1, const std::string& s2);

// For find_if etc.
struct StringIcmpPred {
    StringIcmpPred(const std::string& s1)
        : m_s1(s1) {
    }
    bool operator()(const std::string& s2) {
        return stringicmp(m_s1, s2) == 0;
    }
    const std::string& m_s1;
};

extern int stringlowercmp(const std::string& alreadylower,
                          const std::string& s2);
extern int stringuppercmp(const std::string& alreadyupper,
                          const std::string& s2);

extern void stringtolower(std::string& io);
extern std::string stringtolower(const std::string& io);
extern void stringtoupper(std::string& io);
extern std::string stringtoupper(const std::string& io);
extern bool beginswith(const std::string& big, const std::string& small);

// Is one string the end part of the other ?
extern int stringisuffcmp(const std::string& s1, const std::string& s2);

// Divine language from locale
extern std::string localelang();
// Divine 8bit charset from language
extern std::string langtocode(const std::string& lang);

// Compare charset names, removing the more common spelling variations
extern bool samecharset(const std::string& cs1, const std::string& cs2);

// Parse date interval specifier into pair of y,m,d dates.  The format
// for the time interval is based on a subset of iso 8601 with
// the addition of open intervals, and removal of all time indications.
// 'P' is the Period indicator, it's followed by a length in
// years/months/days (or any subset thereof)
// Dates: YYYY-MM-DD YYYY-MM YYYY
// Periods: P[nY][nM][nD] where n is an integer value.
// At least one of YMD must be specified
// The separator for the interval is /. Interval examples
// YYYY/ (from YYYY) YYYY-MM-DD/P3Y (3 years after date) etc.
// This returns a pair of y,m,d dates.
struct DateInterval {
    int y1;
    int m1;
    int d1;
    int y2;
    int m2;
    int d2;
};
extern bool parsedateinterval(const std::string& s, DateInterval *di);
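// Example (illustrative sketch): following the format described above,
// "2014-01-01/2016-06-30" should yield y1=2014, m1=1, d1=1 and
// y2=2016, m2=6, d2=30.
//
//     DateInterval di;
//     if (parsedateinterval("2014-01-01/2016-06-30", &di)) {
//         // use di.y1 ... di.d2
//     }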
extern int monthdays(int mon, int year);

/**
 * Parse input string into list of strings.
 *
 * Token delimiter is " \t\n" except inside dquotes. dquote inside
 * dquotes can be escaped with \ etc...
 * Input is handled a byte at a time, things will work as long as
 * space tab etc. have the ascii values and can't appear as part of a
 * multibyte char. utf-8 ok but so are the iso-8859-x and surely
 * others. addseps do have to be single-bytes
 */
template <class T> bool stringToStrings(const std::string& s, T& tokens,
                                        const std::string& addseps = "");
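
// Example (illustrative sketch): splitting a string with a quoted section.
//
//     std::vector<std::string> tokens;
//     stringToStrings("word1 \"a quoted string\" word3", tokens);
//     // tokens should be: ["word1", "a quoted string", "word3"]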

/**
 * Inverse operation:
 */
template <class T> void stringsToString(const T& tokens, std::string& s);
template <class T> std::string stringsToString(const T& tokens);

/**
 * Strings to CSV string. tokens containing the separator are quoted (")
 * " inside tokens is escaped as "" ([word "quote"] =>["word ""quote"""]
 */
template <class T> void stringsToCSV(const T& tokens, std::string& s,
                                     char sep = ',');

/**
 * Split input string. No handling of quoting
 */
extern void stringToTokens(const std::string& s,
                           std::vector<std::string>& tokens,
                           const std::string& delims = " \t",
                           bool skipinit = true);

/** Like toTokens but with multichar separator */
extern void stringSplitString(const std::string& str,
                              std::vector<std::string>& tokens,
                              const std::string& sep);

/** Convert string to boolean */
extern bool stringToBool(const std::string& s);

/** Remove instances of characters belonging to set (default {space,
    tab}) at beginning and end of input string */
extern void trimstring(std::string& s, const char *ws = " \t");
extern void rtrimstring(std::string& s, const char *ws = " \t");
extern void ltrimstring(std::string& s, const char *ws = " \t");

/** Escape things like < or & by turning them into entities */
extern std::string escapeHtml(const std::string& in);

/** Double-quote and escape to produce C source code string (prog generation) */
extern std::string makeCString(const std::string& in);

/** Replace some chars with spaces (ie: newline chars). */
extern std::string neutchars(const std::string& str, const std::string& chars,
                             char rep = ' ');
extern void neutchars(const std::string& str, std::string& out,
                      const std::string& chars, char rep = ' ');

/** Turn string into something that won't be expanded by a shell. In practice
 *  quote with double-quotes and escape $`\ */
extern std::string escapeShell(const std::string& str);

/** Truncate a string to a given maxlength, avoiding cutting off midword
 *  if reasonably possible. */
extern std::string truncate_to_word(const std::string& input,
                                    std::string::size_type maxlen);

void ulltodecstr(uint64_t val, std::string& buf);
void lltodecstr(int64_t val, std::string& buf);
std::string lltodecstr(int64_t val);
std::string ulltodecstr(uint64_t val);

/** Convert byte count into unit (KB/MB...) appropriate for display */
std::string displayableBytes(int64_t size);

/** Break big string into lines */
std::string breakIntoLines(const std::string& in, unsigned int ll = 100,
                           unsigned int maxlines = 50);

/** Small utility to substitute printf-like percents cmds in a string */
bool pcSubst(const std::string& in, std::string& out,
             const std::map<char, std::string>& subs);
/** Substitute printf-like percents and also %(key) */
bool pcSubst(const std::string& in, std::string& out,
             const std::map<std::string, std::string>& subs);
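
// Example (illustrative sketch): expanding printf-like percent commands.
//
//     std::map<char, std::string> subs;
//     subs['f'] = "/tmp/file.txt";
//     std::string out;
//     pcSubst("view %f", out, subs);    // out should become "view /tmp/file.txt"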

/** Append system error message */
void catstrerror(std::string *reason, const char *what, int _errno);

/** Portable timegm. MS C has _mkgmtime, but there is a bug in MinGW which
 * makes it inaccessible */
struct tm;
time_t portable_timegm(struct tm *tm);

inline void leftzeropad(std::string& s, unsigned len)
{
    if (s.length() && s.length() < len) {
        s = s.insert(0, len - s.length(), '0');
    }
}

// A class to solve platform/compiler issues for simple regex
// matches. Uses the appropriate native lib under the hood.
// This always uses extended regexp syntax.
class SimpleRegexp {
public:
    enum Flags {SRE_NONE = 0, SRE_ICASE = 1, SRE_NOSUB = 2};
    /// @param nmatch must be >= the number of parenthesized subexpressions in exp
    SimpleRegexp(const std::string& exp, int flags, int nmatch = 0);
    ~SimpleRegexp();
    /// Match input against exp, return true if matches
    bool simpleMatch(const std::string& val) const;
    /// After simpleMatch success, get nth submatch, 0 is the whole
    /// match, 1 first parentheses, etc.
    std::string getMatch(const std::string& val, int matchidx) const;
    /// Calls simpleMatch()
    bool operator() (const std::string& val) const;
    /// Check after construction
    bool ok() const;
    
    class Internal;
private:
    Internal *m;
};
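
// Usage sketch (illustrative, not part of the original source):
// case-insensitive match with one parenthesized submatch.
//
//     SimpleRegexp re("([0-9]+) items", SimpleRegexp::SRE_ICASE, 1);
//     if (re.ok() && re.simpleMatch("12 Items")) {
//         std::string count = re.getMatch("12 Items", 1);    // "12"
//     }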

/// Utilities for printing names for defined values (Ex: O_RDONLY->"O_RDONLY")

/// Entries for the descriptive table
struct CharFlags {
    CharFlags(int v, const char *y, const char *n=0)
        : value(v), yesname(y), noname(n) {}
    unsigned int value; // Flag or value
    const char *yesname;// String to print if flag set or equal
    const char *noname; // String to print if flag not set (unused for values)
};

/// Helper macro for the common case where we want to print the
/// flag/value defined name
#define CHARFLAGENTRY(NM) {NM, #NM}

/// Translate a bitfield into string description
extern std::string flagsToString(const std::vector<CharFlags>&,
                                 unsigned int flags);

/// Translate a value into a name
extern std::string valToString(const std::vector<CharFlags>&, unsigned int val);

/// Reverse operation: translate string into bitfield
extern unsigned int
stringToFlags(const std::vector<CharFlags>&, const std::string& input,
              const char *sep = "|");
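
// Example (illustrative sketch): printing open() flags by name.
//
//     static const std::vector<CharFlags> openflags{
//         CHARFLAGENTRY(O_CREAT), CHARFLAGENTRY(O_EXCL), CHARFLAGENTRY(O_TRUNC),
//     };
//     std::string desc = flagsToString(openflags, O_CREAT | O_EXCL);
//     // desc should list O_CREAT and O_EXCL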

#endif /* _SMALLUT_H_INCLUDED_ */
recoll-1.26.3/utils/conftree.cpp
/* Copyright (C) 2003-2016 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include "conftree.h"

#include <fcntl.h>
#include <stdlib.h>
#ifdef _WIN32
#include "safesysstat.h"
#else
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <pwd.h>
#endif

#include <algorithm>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#include "pathut.h"
#include "smallut.h"
#ifdef MDU_INCLUDE_LOG
#include MDU_INCLUDE_LOG
#else
#include "log.h"
#endif

using namespace std;

#undef DEBUG_CONFTREE
#ifdef DEBUG_CONFTREE
#define CONFDEB LOGDEB
#else
#define CONFDEB LOGDEB2
#endif

static const SimpleRegexp varcomment_rx("[ \t]*#[ \t]*([a-zA-Z0-9]+)[ \t]*=",
                                        0, 1);

void ConfSimple::parseinput(istream& input)
{
    string submapkey;
    string cline;
    bool appending = false;
    string line;
    bool eof = false;

    for (;;) {
        cline.clear();
        std::getline(input, cline);
        CONFDEB("Parse:line: ["  << cline << "] status "  << status << "\n");
        if (!input.good()) {
            if (input.bad()) {
                CONFDEB("Parse: input.bad()\n");
                status = STATUS_ERROR;
                return;
            }
            CONFDEB("Parse: eof\n");
            // Must be eof ? But maybe we have a partial line which
            // must be processed. This happens if the last line before
            // eof ends with a backslash, or there is no final \n
            eof = true;
        }

        {
            string::size_type pos = cline.find_last_not_of("\n\r");
            if (pos == string::npos) {
                cline.clear();
            } else if (pos != cline.length() - 1) {
                cline.erase(pos + 1);
            }
        }

        if (appending) {
            line += cline;
        } else {
            line = cline;
        }

        // Note that we trim whitespace before checking for backslash-eol
        // This avoids invisible whitespace problems.
        if (trimvalues) {
            trimstring(line);
        } else {
            ltrimstring(line);
        }
        if (line.empty() || line.at(0) == '#') {
            if (eof) {
                break;
            }
            if (varcomment_rx.simpleMatch(line)) {
                m_order.push_back(ConfLine(ConfLine::CFL_VARCOMMENT, line,
                                           varcomment_rx.getMatch(line, 1)));
            } else {
                m_order.push_back(ConfLine(ConfLine::CFL_COMMENT, line));
            }
            continue;
        }
        if (line[line.length() - 1] == '\\') {
            line.erase(line.length() - 1);
            appending = true;
            continue;
        }
        appending = false;

        if (line[0] == '[') {
            trimstring(line, "[] \t");
            if (dotildexpand) {
                submapkey = path_tildexpand(line);
            } else {
                submapkey = line;
            }
            m_subkeys_unsorted.push_back(submapkey);
            m_order.push_back(ConfLine(ConfLine::CFL_SK, submapkey));
            continue;
        }

        // Look for first equal sign
        string::size_type eqpos = line.find("=");
        if (eqpos == string::npos) {
            m_order.push_back(ConfLine(ConfLine::CFL_COMMENT, line));
            continue;
        }

        // Compute name and value, trim white space
        string nm, val;
        nm = line.substr(0, eqpos);
        trimstring(nm);
        val = line.substr(eqpos + 1, string::npos);
        if (trimvalues) {
            trimstring(val);
        }

        if (nm.length() == 0) {
            m_order.push_back(ConfLine(ConfLine::CFL_COMMENT, line));
            continue;
        }
        i_set(nm, val, submapkey, true);
        if (eof) {
            break;
        }
    }
}


ConfSimple::ConfSimple(int readonly, bool tildexp, bool trimv)
    : dotildexpand(tildexp), trimvalues(trimv), m_fmtime(0), m_holdWrites(false)
{
    status = readonly ? STATUS_RO : STATUS_RW;
}

void ConfSimple::reparse(const string& d)
{
    clear();
    stringstream input(d, ios::in);
    parseinput(input);
}

ConfSimple::ConfSimple(const string& d, int readonly, bool tildexp, bool trimv)
    : dotildexpand(tildexp), trimvalues(trimv), m_fmtime(0), m_holdWrites(false)
{
    status = readonly ? STATUS_RO : STATUS_RW;

    stringstream input(d, ios::in);
    parseinput(input);
}

ConfSimple::ConfSimple(const char *fname, int readonly, bool tildexp,
                       bool trimv)
    : dotildexpand(tildexp), trimvalues(trimv), m_filename(fname),
      m_fmtime(0), m_holdWrites(false)
{
    status = readonly ? STATUS_RO : STATUS_RW;

    ifstream input;
    if (readonly) {
        input.open(fname, ios::in);
    } else {
        ios::openmode mode = ios::in | ios::out;
        // It seems that there is no separate 'create if not exists'
        // open flag. Have to truncate to create, but don't want to do
        // this to an existing file !
        if (!path_exists(fname)) {
            mode |= ios::trunc;
        }
        input.open(fname, mode);
        if (input.is_open()) {
            status = STATUS_RW;
        } else {
            input.clear();
            input.open(fname, ios::in);
            if (input.is_open()) {
                status = STATUS_RO;
            }
        }
    }

    if (!input.is_open()) {
        status = STATUS_ERROR;
        return;
    }

    parseinput(input);
    i_changed(true);
}

ConfSimple::StatusCode ConfSimple::getStatus() const
{
    switch (status) {
    case STATUS_RO:
        return STATUS_RO;
    case STATUS_RW:
        return STATUS_RW;
    default:
        return STATUS_ERROR;
    }
}

bool ConfSimple::sourceChanged() const
{
    if (!m_filename.empty()) {
        struct stat st;
        if (stat(m_filename.c_str(), &st) == 0) {
            if (m_fmtime != st.st_mtime) {
                return true;
            }
        }
    }
    return false;
}

bool ConfSimple::i_changed(bool upd)
{
    if (!m_filename.empty()) {
        struct stat st;
        if (stat(m_filename.c_str(), &st) == 0) {
            if (m_fmtime != st.st_mtime) {
                if (upd) {
                    m_fmtime = st.st_mtime;
                }
                return true;
            }
        }
    }
    return false;
}

int ConfSimple::get(const string& nm, string& value, const string& sk) const
{
    if (!ok()) {
        return 0;
    }

    // Find submap
    map<string, map<string, string> >::const_iterator ss;
    if ((ss = m_submaps.find(sk)) == m_submaps.end()) {
        return 0;
    }

    // Find named value
    map<string, string>::const_iterator s;
    if ((s = ss->second.find(nm)) == ss->second.end()) {
        return 0;
    }
    value = s->second;
    return 1;
}

int ConfSimple::get(const string& nm, int *value, const string& sk) const
{
    string sval;
    if (!get(nm, sval, sk)) {
        return 0;
    }
    *value = atoi(sval.c_str());
    return 1;
}

// Appropriately output a subkey (nm=="") or variable line.
// We can't make any assumption about the data except that it does not
// contain line breaks.
// Avoid long lines if possible (for hand-editing)
// We used to break at arbitrary places, but this was annoying for
// files with pure UTF-8 encoding (some files can be binary anyway),
// because it made later editing difficult, as the file would no
// longer have a valid encoding.
// Any ASCII byte would be a safe break point for utf-8, but could
// break some other encoding with, e.g. escape sequences? So break at
// whitespace (is this safe with all encodings?).
// Note that the choice of break point does not affect the validity of
// the file data (when read back by conftree), only its ease of
// editing with a normal editor.
static ConfSimple::WalkerCode varprinter(void *f, const string& nm,
        const string& value)
{
    ostream& output = *((ostream *)f);
    if (nm.empty()) {
        output << "\n[" << value << "]\n";
    } else {
        output << nm << " = ";
        if (nm.length() + value.length() < 75) {
            output << value;
        } else {
            string::size_type ll = 0;
            for (string::size_type pos = 0; pos < value.length(); pos++) {
                string::value_type c = value[pos];
                output << c;
                ll++;
                // Break at whitespace if line too long and "a lot" of
                // remaining data
                if (ll > 50 && (value.length() - pos) > 10 &&
                        (c == ' ' || c == '\t')) {
                    ll = 0;
                    output << "\\\n";
                }
            }
        }
        output << "\n";
    }
    return ConfSimple::WALK_CONTINUE;
}

// Set variable and rewrite data
int ConfSimple::set(const std::string& nm, const std::string& value,
                    const string& sk)
{
    if (status  != STATUS_RW) {
        return 0;
    }
    CONFDEB("ConfSimple::set [" << sk << "]:[" << nm << "] -> [" << value << "]\n");
    if (!i_set(nm, value, sk)) {
        return 0;
    }
    return write();
}
int ConfSimple::set(const string& nm, long long val,
                    const string& sk)
{
    return this->set(nm, lltodecstr(val), sk);
}


// Internal set variable: no rw checking or file rewriting. If init is
// set, we're doing initial parsing, else we are changing a parsed
// tree (changes the way we update the order data)
int ConfSimple::i_set(const std::string& nm, const std::string& value,
                      const string& sk, bool init)
{
    CONFDEB("ConfSimple::i_set: nm[" << nm << "] val[" << value <<
            "] key[" << sk << "], init " << init << "\n");
    // Values must not have embedded newlines
    if (value.find_first_of("\n\r") != string::npos) {
        CONFDEB("ConfSimple::i_set: LF in value\n");
        return 0;
    }
    bool existing = false;
    map<string, map<string, string> >::iterator ss;
    // Test if submap already exists, else create it, and insert variable:
    if ((ss = m_submaps.find(sk)) == m_submaps.end()) {
        CONFDEB("ConfSimple::i_set: new submap\n");
        map<string, string> submap;
        submap[nm] = value;
        m_submaps[sk] = submap;

        // Maybe add sk entry to m_order data, if not already there.
        if (!sk.empty()) {
            ConfLine nl(ConfLine::CFL_SK, sk);
            // Append SK entry only if it's not already there (erase
            // does not remove entries from the order data, and it may
            // be being recreated after deletion)
            if (find(m_order.begin(), m_order.end(), nl) == m_order.end()) {
                m_order.push_back(nl);
            }
        }
    } else {
        // Insert or update variable in existing map.
        map<string, string>::iterator it;
        it = ss->second.find(nm);
        if (it == ss->second.end()) {
            ss->second.insert(pair<string, string>(nm, value));
        } else {
            it->second = value;
            existing = true;
        }
    }

    // If the variable already existed, no need to change the m_order data
    if (existing) {
        CONFDEB("ConfSimple::i_set: existing var: no order update\n");
        return 1;
    }

    // Add the new variable at the end of its submap in the order data.

    if (init) {
        // During the initial construction, just append:
        CONFDEB("ConfSimple::i_set: init true: append\n");
        m_order.push_back(ConfLine(ConfLine::CFL_VAR, nm));
        m_order.back().m_value = value;
        return 1;
    }

    // Look for the start and end of the subkey zone. Start is either
    // at begin() for a null subkey, or just behind the subkey
    // entry. End is either the next subkey entry, or the end of
    // list. We insert the new entry just before end.
    vector<ConfLine>::iterator start, fin;
    if (sk.empty()) {
        start = m_order.begin();
        CONFDEB("ConfSimple::i_set: null sk, start at top of order\n");
    } else {
        start = find(m_order.begin(), m_order.end(),
                     ConfLine(ConfLine::CFL_SK, sk));
        if (start == m_order.end()) {
            // This is not logically possible. The subkey must
            // exist. We're doomed
            std::cerr << "Logical failure during configuration variable "
                      "insertion" << endl;
            abort();
        }
    }

    fin = m_order.end();
    if (start != m_order.end()) {
        // The null subkey has no entry (maybe it should)
        if (!sk.empty()) {
            start++;
        }
        for (vector<ConfLine>::iterator it = start; it != m_order.end(); it++) {
            if (it->m_kind == ConfLine::CFL_SK) {
                fin = it;
                break;
            }
        }
    }

    // It may happen that the order entry already exists because erase()
    // doesn't update m_order
    if (find(start, fin, ConfLine(ConfLine::CFL_VAR, nm)) == fin) {
        // Look for a varcomment line, insert the value right after if
        // it's there.
        bool inserted(false);
        vector<ConfLine>::iterator it;
        for (it = start; it != fin; it++) {
            if (it->m_kind == ConfLine::CFL_VARCOMMENT && it->m_aux == nm) {
                it++;
                m_order.insert(it, ConfLine(ConfLine::CFL_VAR, nm));
                inserted = true;
                break;
            }
        }
        if (!inserted) {
            m_order.insert(fin, ConfLine(ConfLine::CFL_VAR, nm));
        }
    }

    return 1;
}

int ConfSimple::erase(const string& nm, const string& sk)
{
    if (status  != STATUS_RW) {
        return 0;
    }

    map<string, map<string, string> >::iterator ss;
    if ((ss = m_submaps.find(sk)) == m_submaps.end()) {
        return 0;
    }

    ss->second.erase(nm);
    if (ss->second.empty()) {
        m_submaps.erase(ss);
    }
    return write();
}

int ConfSimple::eraseKey(const string& sk)
{
    vector<string> nms = getNames(sk);
    for (vector<string>::iterator it = nms.begin(); it != nms.end(); it++) {
        erase(*it, sk);
    }
    return write();
}

int ConfSimple::clear()
{
    m_submaps.clear();
    m_order.clear();
    return write();
}

// Walk the tree, calling user function at each node
ConfSimple::WalkerCode
ConfSimple::sortwalk(WalkerCode(*walker)(void *, const string&, const string&),
                     void *clidata) const
{
    if (!ok()) {
        return WALK_STOP;
    }
    // For all submaps:
    for (map<string, map<string, string> >::const_iterator sit =
                m_submaps.begin();
            sit != m_submaps.end(); sit++) {

        // Possibly emit submap name:
        if (!sit->first.empty() && walker(clidata, string(), sit->first.c_str())
                == WALK_STOP) {
            return WALK_STOP;
        }

        // Walk submap
        const map<string, string>& sm = sit->second;
        for (map<string, string>::const_iterator it = sm.begin(); it != sm.end();
                it++) {
            if (walker(clidata, it->first, it->second) == WALK_STOP) {
                return WALK_STOP;
            }
        }
    }
    return WALK_CONTINUE;
}
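
// Minimal sketch of a sortwalk() client (not compiled): the walker receives
// an empty name when a subkey header is emitted, and name/value pairs
// otherwise, as implemented above. Assumes <iostream> for std::cout.
#if 0
static ConfSimple::WalkerCode dumpwalker(void *cli, const string& nm,
                                         const string& value)
{
    std::ostream& out = *static_cast<std::ostream *>(cli);
    if (nm.empty()) {
        out << "[" << value << "]\n";       // subkey header
    } else {
        out << nm << " = " << value << "\n";
    }
    return ConfSimple::WALK_CONTINUE;
}

static void example_sortwalk(const ConfSimple& cf)
{
    cf.sortwalk(dumpwalker, &std::cout);
}
#endif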

// Write to default output. This currently only does something if output is
// a file
bool ConfSimple::write()
{
    if (!ok()) {
        return false;
    }
    if (m_holdWrites) {
        return true;
    }
    if (m_filename.length()) {
        ofstream output(m_filename.c_str(), ios::out | ios::trunc);
        if (!output.is_open()) {
            return 0;
        }
        return write(output);
    } else {
        // No backing store, no writing. Maybe one day we'll need it with
        // some kind of output string. This can't be the original string which
        // is currently readonly.
        //ostringstream output(m_ostring, ios::out | ios::trunc);
        return 1;
    }
}

// Write out the tree in configuration file format:
// This does not check holdWrites, which is checked by write(void), so that,
// e.g., showall still works even when holdWrites is set
bool ConfSimple::write(ostream& out) const
{
    if (!ok()) {
        return false;
    }
    string sk;
    for (vector<ConfLine>::const_iterator it = m_order.begin();
            it != m_order.end(); it++) {
        switch (it->m_kind) {
        case ConfLine::CFL_COMMENT:
        case ConfLine::CFL_VARCOMMENT:
            out << it->m_data << endl;
            if (!out.good()) {
                return false;
            }
            break;
        case ConfLine::CFL_SK:
            sk = it->m_data;
            CONFDEB("ConfSimple::write: SK ["  << sk << "]\n");
            // Check that the submap still exists, and only output it if it
            // does
            if (m_submaps.find(sk) != m_submaps.end()) {
                out << "[" << it->m_data << "]" << endl;
                if (!out.good()) {
                    return false;
                }
            }
            break;
        case ConfLine::CFL_VAR:
            string nm = it->m_data;
            CONFDEB("ConfSimple::write: VAR [" << nm << "], sk [" < ConfSimple::getNames(const string& sk, const char *pattern) const
{
    vector<string> mylist;
    if (!ok()) {
        return mylist;
    }
    map<string, map<string, string> >::const_iterator ss;
    if ((ss = m_submaps.find(sk)) == m_submaps.end()) {
        return mylist;
    }
    mylist.reserve(ss->second.size());
    map<string, string>::const_iterator it;
    for (it = ss->second.begin(); it != ss->second.end(); it++) {
        if (pattern && 0 != fnmatch(pattern, it->first.c_str(), 0)) {
            continue;
        }
        mylist.push_back(it->first);
    }
    return mylist;
}

vector<string> ConfSimple::getSubKeys() const
{
    vector<string> mylist;
    if (!ok()) {
        return mylist;
    }
    mylist.reserve(m_submaps.size());
    map<string, map<string, string> >::const_iterator ss;
    for (ss = m_submaps.begin(); ss != m_submaps.end(); ss++) {
        mylist.push_back(ss->first);
    }
    return mylist;
}

bool ConfSimple::hasNameAnywhere(const string& nm) const
{
    vector<string> keys = getSubKeys();
    for (vector<string>::const_iterator it = keys.begin();
            it != keys.end(); it++) {
        string val;
        if (get(nm, val, *it)) {
            return true;
        }
    }
    return false;
}

bool ConfSimple::commentsAsXML(ostream& out)
{
    const vector<ConfLine>& lines = getlines();

    out << "\n";
    
    string sk;
    for (vector<ConfLine>::const_iterator it = lines.begin();
         it != lines.end(); it++) {
        switch (it->m_kind) {
        case ConfLine::CFL_COMMENT:
        case ConfLine::CFL_VARCOMMENT:
        {
            string::size_type pos = it->m_data.find_first_not_of("# ");
            if (pos != string::npos) {
                out << it->m_data.substr(pos) << endl;
            }
            break;
        }
        case ConfLine::CFL_SK:
            out << "" << it->m_data << "" << endl;
            break;
        case ConfLine::CFL_VAR:
            out << "" << it->m_data << " = " <<
                it->m_value << "" << endl;
            break;
        default:
            break;
        }
    }
    out << "\n";
    
    return true;
}


// //////////////////////////////////////////////////////////////////////////
// ConfTree Methods: conftree interpret keys like a hierarchical file tree
// //////////////////////////////////////////////////////////////////////////

int ConfTree::get(const std::string& name, string& value, const string& sk)
const
{
    if (sk.empty() || !path_isabsolute(sk)) {
        LOGDEB2("ConfTree::get: looking in global space for ["  <<
                sk << "]\n");
        return ConfSimple::get(name, value, sk);
    }

    // Get writable copy of subkey path
    string msk = sk;

    // Handle the case where the config file path has an ending / and not
    // the input sk
    path_catslash(msk);

    // Look in subkey and up its parents until root ('')
    for (;;) {
        LOGDEB2("ConfTree::get: looking for ["  << name << "] in ["  <<
                msk << "]\n");
        if (ConfSimple::get(name, value, msk)) {
            return 1;
        }
        string::size_type pos = msk.rfind("/");
        if (pos != string::npos) {
            msk.replace(pos, string::npos, string());
        } else {
#ifdef _WIN32
            if (msk.size() == 2 && isalpha(msk[0]) && msk[1] == ':') {
                msk.clear();
            } else
#endif
                break;
        }
    }
    return 0;
}
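
// Illustrative sketch (not compiled): with ConfTree, an absolute subkey is
// looked up from the deepest path component up to the root, so a value set
// on a parent directory applies to everything below it unless overridden.
// The paths and variable name are hypothetical.
#if 0
static void example_conftree_get(const ConfTree& cf)
{
    // Assume the configuration file contains:
    //   [/home/me]
    //   skippedNames = *.o
    std::string value;
    // Found by walking up from /home/me/src/project to /home/me
    cf.get("skippedNames", value, "/home/me/src/project");
}
#endif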

recoll-1.26.3/utils/log.cpp0000644000175000017500000000334613533651561012447 00000000000000/* Copyright (C) 2006-2016 J.F.Dockes
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301 USA
 */
#include "log.h"

#include <errno.h>
#include <iostream>

using namespace std;

Logger::Logger(const std::string& fn)
    : m_fn(fn)
{
    reopen(fn);
}

bool Logger::reopen(const std::string& fn)
{
#if LOGGER_THREADSAFE
    std::unique_lock lock(m_mutex);
#endif
    if (!fn.empty()) {
        m_fn = fn;
    }
    if (!m_tocerr && m_stream.is_open()) {
        m_stream.close();
    }
    if (!m_fn.empty() && m_fn.compare("stderr")) {
        m_stream.open(m_fn, std::fstream::out | std::ofstream::trunc);
        if (!m_stream.is_open()) {
            cerr << "Logger::Logger: log open failed: for [" <<
                 fn << "] errno " << errno << endl;
            m_tocerr = true;
        } else {
            m_tocerr = false;
        }
    } else {
        m_tocerr = true;
    }
    return true;
}

static Logger *theLog;

Logger *Logger::getTheLog(const string& fn)
{
    if (theLog == 0)
        theLog = new Logger(fn);
    return theLog;
}
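
// Usage sketch (not compiled): the logger is a process-wide singleton
// created on first use. Typical initialization names the output file (or
// "stderr") once, then the LOG* macros from log.h are used everywhere.
// The file path below is hypothetical.
#if 0
static void example_logger_init()
{
    Logger::getTheLog("/tmp/example-log.txt");  // hypothetical path
    LOGDEB("example: logger initialized\n");
    LOGERR("example: something went wrong\n");
}
#endif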

recoll-1.26.3/utils/x11mon.cpp0000644000175000017500000000332713303776060013005 00000000000000#ifndef TEST_X11MON
/* Copyright (C) 2006 J.F.Dockes */
// Poll state of X11 connectibility (to detect end of user session).
#include "autoconfig.h"
#ifndef DISABLE_X11MON
#include <stdio.h>
#include <signal.h>
#include <setjmp.h>
#include <X11/Xlib.h>

#define DODEBUG
#ifdef DODEBUG
#define DEBUG(X) fprintf X
#else
#define DEBUG(X)
#endif

static Display *m_display;
static bool m_ok;
static jmp_buf env;

static int errorHandler(Display *, XErrorEvent*)
{
    DEBUG((stderr, "x11mon: error handler: Got X11 error\n"));
    m_ok = false;
    return 0;
}
static int ioErrorHandler(Display *)
{
    DEBUG((stderr, "x11mon: error handler: Got X11 IO error\n"));
    m_ok = false;
    m_display = 0;
    longjmp(env, 1);
}

bool x11IsAlive()
{
    // Xlib always exits on IO errors. Need a setjmp to avoid this (will jump
    // from IO error handler instead of returning).
    if (setjmp(env)) {
	DEBUG((stderr, "x11IsAlive: Long jump\n"));
	return false;
    }
    if (m_display == 0) {
	signal(SIGPIPE, SIG_IGN);
	XSetErrorHandler(errorHandler);
	XSetIOErrorHandler(ioErrorHandler);
	if ((m_display = XOpenDisplay(0)) == 0) {
	    DEBUG((stderr, "x11IsAlive: cant connect\n"));
	    m_ok = false;
	    return false;
	}
    }
    m_ok = true;
    bool sync= XSynchronize(m_display, true);
    XNoOp(m_display);
    XSynchronize(m_display, sync);
    return m_ok;
}
#else
bool x11IsAlive() 
{
    return true;
}
#endif /* DISABLE_X11MON */

#else

// Test driver

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include "x11mon.h"

int main(int argc, char **argv)
{
    for (;;) {
	if (!x11IsAlive()) {
	    fprintf(stderr, "x11IsAlive failed\n");
	} else {
	    fprintf(stderr, "x11IsAlive Ok\n");
	}
	sleep(1);
    }
}
#endif
recoll-1.26.3/utils/mimeparse.cpp0000644000175000017500000007745413533651561013663 00000000000000/* Copyright (C) 2004 J.F.Dockes 
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef TEST_MIMEPARSE
#include "autoconfig.h"

#include <stdio.h>
#include <time.h>

#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <vector>
#include <map>

#include "mimeparse.h"
#include "base64.h"
#include "transcode.h"
#include "smallut.h"

using namespace std;

//#define DEBUG_MIMEPARSE 
#ifdef DEBUG_MIMEPARSE
#define DPRINT(X) fprintf X
#else
#define DPRINT(X)
#endif

// Parsing a header value. Only content-type and content-disposition
// have parameters, but others are compatible with content-type
// syntax, except that parameters are not used. So we can parse them all as:
//
//    headertype: value [; paramname=paramvalue] ...
//
// Value and paramvalues can be quoted strings, and there can be
// comments too. Note that RFC2047 is explicitly forbidden for
// parameter values (RFC2231 must be used), but I have seen it used
// anyway (ie: thunderbird 1.0)
//
// Ref: RFC2045/6/7 (MIME) RFC2183/2231 (content-disposition and encodings)



/** Decode a MIME parameter value encoded according to rfc2231
 *
 * Example input with input charset == "":
 *     [iso-8859-1'french'RE%A0%3A_Smoke_Tests%20bla]
 * Or (if charset is set) : RE%A0%3A_Smoke_Tests%20bla
 *
 * @param in input string, ascii with rfc2231 markup
 * @param out output string
 * @param charset if empty: decode string like 'charset'lang'more%20stuff,
 *      else just do the %XX part
 * @return out output string encoded in utf-8
 */
bool rfc2231_decode(const string &in, string &out, string &charset)
{
    string::size_type pos1, pos2=0;

    if (charset.empty()) {
        if ((pos1 = in.find("'")) == string::npos)
            return false;
        charset = in.substr(0, pos1);
        // fprintf(stderr, "Charset: [%s]\n", charset.c_str());
        pos1++;

        if ((pos2 = in.find("'", pos1)) == string::npos)
            return false;
        // We have no use for lang for now
        // string lang = in.substr(pos1, pos2-pos1); 
        // fprintf(stderr, "Lang: [%s]\n", lang.c_str());
        pos2++;
    }

    string raw;
    qp_decode(in.substr(pos2), raw, '%');
    // fprintf(stderr, "raw [%s]\n", raw.c_str());
    if (!transcode(raw, out, charset, "UTF-8"))
        return false;
    return true;
}
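
// Usage sketch (not compiled), reusing the example input from the comment
// above: with an empty input charset, the charset is parsed out of the
// value itself.
#if 0
static void example_rfc2231()
{
    string out, charset;    // charset empty: take it from the input
    rfc2231_decode("iso-8859-1'french'RE%A0%3A_Smoke_Tests%20bla",
                   out, charset);
    // charset is now "iso-8859-1" and out holds the decoded text
    // transcoded to UTF-8.
}
#endif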


/////////////////////////////////////////
/// Decoding of MIME fields values and parameters

// The lexical token returned by find_next_token
class Lexical {
public:
    enum kind {none, token, separator};
    kind   what;
    string value;
    string error;
    char quote;
    Lexical() : what(none), quote(0) {}
    void reset() {what = none; value.erase(); error.erase();quote = 0;}
};

// Skip mime comment. This must be called with in[start] == '('
static string::size_type 
skip_comment(const string &in, string::size_type start, Lexical &lex)
{
    int commentlevel = 0;
    for (; start < in.size(); start++) {
        if (in[start] == '\\') {
            // Skip escaped char. 
            if (start+1 < in.size()) {
                start++;
                continue;
            } else {
                lex.error.append("\\ at end of string ");
                return in.size();
            }
        }
        if (in[start] == '(')
            commentlevel++;
        if (in[start] == ')') {
            if (--commentlevel == 0)
                break;
        }
    }
    if (start == in.size() && commentlevel != 0) {
        lex.error.append("Unclosed comment ");
        return in.size();
    }
    return start;
}

// Skip initial whitespace and (possibly nested) comments. 
static string::size_type 
skip_whitespace_and_comment(const string &in, string::size_type start, 
                            Lexical &lex)
{
    while (1) {
        if ((start = in.find_first_not_of(" \t\r\n", start)) == string::npos)
            return in.size();
        if (in[start] == '(') {
            if ((start = skip_comment(in, start, lex)) == string::npos)
                return string::npos;
        } else {
            break;
        }
    }
    return start;
}

/// Find next token in mime header value string. 
/// @return the next starting position in string, string::npos for error 
/// @param in the input string
/// @param start the starting position
/// @param lex  the returned token and its description
/// @param delims separators we should look for
static string::size_type 
find_next_token(const string &in, string::size_type start, 
                Lexical &lex, string delims = ";=")
{
    char oquot, cquot;

    start = skip_whitespace_and_comment(in, start, lex);
    if (start == string::npos || start == in.size())
        return in.size();

    // Begins with separator ? return it.
    string::size_type delimi = delims.find_first_of(in[start]);
    if (delimi != string::npos) {
        lex.what = Lexical::separator;
        lex.value = delims[delimi];
        return start+1;
    }

    // Check for start of quoted string
    oquot = in[start];
    switch (oquot) {
    case '<': cquot = '>';break;
    case '"': cquot = '"';break;
    default: cquot = 0; break;
    }

    if (cquot != 0) {
        // Quoted string parsing
        string::size_type end;
        start++; // Skip quote character
        for (end = start;end < in.size() && in[end] != cquot; end++) {
            if (in[end] == '\\') {
                // Skip escaped char. 
                if (end+1 < in.size()) {
                    end++;
                } else {
                    // backslash at end of string: error
                    lex.error.append("\\ at end of string ");
                    return string::npos;
                }
            }
        }
        if (end == in.size()) {
            // Found end of string before closing quote character: error
            lex.error.append("Unclosed quoted string ");
            return string::npos;
        }
        lex.what = Lexical::token;
        lex.value = in.substr(start, end-start);
        lex.quote = oquot;
        return ++end;
    } else {
        string::size_type end = in.find_first_of(delims + "\r\n \t(", start);
        lex.what = Lexical::token;
        lex.quote = 0;
        if (end == string::npos) {
            end = in.size();
            lex.value = in.substr(start);
        } else {
            lex.value = in.substr(start, end-start);
        }
        return end;
    }
}

// Classes for handling rfc2231 value continuations
class Chunk {
public:
    Chunk() : decode(false) {}
    bool decode;
    string value;
};
class Chunks {
public:
    vector<Chunk> chunks;
};

void stringtolower(string &out, const string& in)
{
    for (string::size_type i = 0; i < in.size(); i++)
        out.append(1, char(tolower(in[i])));
}

// Parse MIME field value. Should look like:
//  somevalue ; param1=val1;param2=val2
bool parseMimeHeaderValue(const string& value, MimeHeaderValue& parsed)
{
    parsed.value.erase();
    parsed.params.clear();

    Lexical lex;
    string::size_type start = 0;

    // Get the field value
    start = find_next_token(value, start, lex);
    if (start == string::npos || lex.what != Lexical::token) 
        return false;
    parsed.value = lex.value;

    map<string, string> rawparams;
    // Look for parameters
    for (;;) {
        string paramname, paramvalue;
        lex.reset();
        start = find_next_token(value, start, lex);
        if (start == value.size())
            break;
        if (start == string::npos) {
            //fprintf(stderr, "Find_next_token error(1)\n");
            return false;
        }
        if (lex.what == Lexical::separator && lex.value[0] == ';')
            continue;
        if (lex.what != Lexical::token) 
            return false;
        stringtolower(paramname, lex.value);

        start = find_next_token(value, start, lex);
        if (start == string::npos || lex.what != Lexical::separator || 
            lex.value[0] != '=') {
            //fprintf(stderr, "Find_next_token error (2)\n");
            return false;
        }

        start = find_next_token(value, start, lex);
        if (start == string::npos || lex.what != Lexical::token) {
            //fprintf(stderr, "Parameter has no value!");
            return false;
        }
        paramvalue = lex.value;
        rawparams[paramname] = paramvalue;
        //fprintf(stderr, "RAW: name [%s], value [%s]\n", paramname.c_str(),
        //              paramvalue.c_str());
    }
    //    fprintf(stderr, "Number of raw params %d\n", rawparams.size());

    // RFC2231 handling: 
    // - if a parameter name ends in * it must be decoded 
    // - If a parameter name looks line name*ii[*] it is a
    //   partial value, and must be concatenated with other such.
    
    map<string, Chunks> chunks;
    for (map<string, string>::const_iterator it = rawparams.begin();
         it != rawparams.end(); it++) {
        string nm = it->first;
        //      fprintf(stderr, "NM: [%s]\n", nm.c_str());
        if (nm.empty()) // ??
            continue;

        Chunk chunk;
        if (nm[nm.length()-1] == '*') {
            nm.erase(nm.length() - 1);
            chunk.decode = true;
        } else
            chunk.decode = false;
        //      fprintf(stderr, "NM1: [%s]\n", nm.c_str());

        chunk.value = it->second;

        // Look for another asterisk in nm. If none, assign index 0
        string::size_type aster;
        int idx = 0;
        if ((aster = nm.rfind("*")) != string::npos) {
            string num = nm.substr(aster+1);
            //fprintf(stderr, "NUM: [%s]\n", num.c_str());
            nm.erase(aster);
            idx = atoi(num.c_str());
        }
        Chunks empty;
        if (chunks.find(nm) == chunks.end())
            chunks[nm] = empty;
        chunks[nm].chunks.resize(idx+1);
        chunks[nm].chunks[idx] = chunk;
        //fprintf(stderr, "CHNKS: nm [%s], idx %d, decode %d, value [%s]\n", 
        // nm.c_str(), idx, int(chunk.decode), chunk.value.c_str());
    }

    // For each parameter name, concatenate its chunks and possibly
    // decode Note that we pass the whole concatenated string to
    // decoding if the first chunk indicates that decoding is needed,
    // which is not right because there might be uncoded chunks
    // according to the rfc.
    for (map<string, Chunks>::const_iterator it = chunks.begin();
         it != chunks.end(); it++) {
        if (it->second.chunks.empty())
            continue;
        string nm = it->first;
        // Create the name entry
        if (parsed.params.find(nm) == parsed.params.end())
            parsed.params[nm].clear();
        // Concatenate all chunks and decode the whole if the first one needs
        // to. Yes, this is not quite right.
        string value;
        for (vector<Chunk>::const_iterator vi = it->second.chunks.begin();
             vi != it->second.chunks.end(); vi++) {
            value += vi->value;
        }
        if (it->second.chunks[0].decode) {
            string charset;
            rfc2231_decode(value, parsed.params[nm], charset);
        } else {
            // rfc2047 MUST NOT but IS used by some agents
            rfc2047_decode(value, parsed.params[nm]);
        }
        //fprintf(stderr, "FINAL: nm [%s], value [%s]\n", 
        //nm.c_str(), parsed.params[nm].c_str());
    }
    
    return true;
}
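
// Usage sketch (not compiled): parsing a typical Content-Type value. The
// MimeHeaderValue structure (a value string plus a parameter map) is
// declared in mimeparse.h, as used above.
#if 0
static void example_parse_header()
{
    MimeHeaderValue parsed;
    if (parseMimeHeaderValue("text/html; charset=UTF-8; title=\"hello\"",
                             parsed)) {
        // parsed.value == "text/html"
        // parsed.params["charset"] == "UTF-8"
        // parsed.params["title"] == "hello"
    }
}
#endif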

// Decode a string encoded with quoted-printable encoding. 
// we reuse the code for rfc2231 % encoding, even if the eol
// processing is not useful in this case
bool qp_decode(const string& in, string &out, char esc) 
{
    out.reserve(in.length());
    string::size_type ii;
    for (ii = 0; ii < in.length(); ii++) {
        if (in[ii] == esc) {
            ii++; // Skip '=' or '%'
            if(ii >= in.length() - 1) { // Need at least 2 more chars
                break;
            } else if (in[ii] == '\r' && in[ii+1] == '\n') { // Soft nl, skip
                ii++;
            } else if (in[ii] != '\n' && in[ii] != '\r') { // decode
                char c = in[ii];
                char co;
                if(c >= 'A' && c <= 'F') {
                    co = char((c - 'A' + 10) * 16);
                } else if (c >= 'a' && c <= 'f') {
                    co = char((c - 'a' + 10) * 16);
                } else if (c >= '0' && c <= '9') {
                    co = char((c - '0') * 16);
                } else {
                    return false;
                }
                if(++ii >= in.length()) 
                    break;
                c = in[ii];
                if (c >= 'A' && c <= 'F') {
                    co += char(c - 'A' + 10);
                } else if (c >= 'a' && c <= 'f') {
                    co += char(c - 'a' + 10);
                } else if (c >= '0' && c <= '9') {
                    co += char(c - '0');
                } else {
                    return false;
                }
                out += co;
            }
        } else {
            out += in[ii];
        }
    }
    return true;
}
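
// Usage sketch (not compiled): the same routine serves both quoted-printable
// ('=' escape) and rfc2231-style percent decoding.
#if 0
static void example_qp_decode()
{
    string out;
    qp_decode("Caf=E9", out, '=');      // quoted-printable: yields a 0xE9 byte
    out.clear();
    qp_decode("RE%20hello", out, '%');  // rfc2231 style: out == "RE hello"
}
#endif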

// Decode an word encoded as quoted printable or base 64
static bool rfc2047_decodeParsed(const std::string& charset, 
                                 const std::string& encoding, 
                                 const std::string& value, 
                                 std::string &utf8)
{
    DPRINT((stderr, "DecodeParsed: charset [%s] enc [%s] val [%s]\n",
            charset.c_str(), encoding.c_str(), value.c_str()));
    utf8.clear();

    string decoded;
    if (!stringlowercmp("b", encoding)) {
        if (!base64_decode(value, decoded))
            return false;
        DPRINT((stderr, "FromB64: [%s]\n", decoded.c_str()));
    } else if (!stringlowercmp("q", encoding)) {
        if (!qp_decode(value, decoded))
            return false;
        // Need to translate _ to ' ' here
        string temp;
        for (string::size_type pos = 0; pos < decoded.length(); pos++)
            if (decoded[pos] == '_')
                temp += ' ';
            else 
                temp += decoded[pos];
        decoded = temp;
        DPRINT((stderr, "FromQP: [%s]\n", decoded.c_str()));
    } else {
        DPRINT((stderr, "Bad encoding [%s]\n", encoding.c_str()));
        return false;
    }

    if (!transcode(decoded, utf8, charset, "UTF-8")) {
        DPRINT((stderr, "Transcode failed\n"));
        return false;
    }
    return true;
}

// Parse a mail header value encoded according to RFC2047. 
// This is not supposed to be used for MIME parameter values, but it
// happens.
// Bugs: 
//    - We should turn off decoding while inside quoted strings
//
typedef enum  {rfc2047ready, rfc2047open_eq, 
               rfc2047charset, rfc2047encoding, 
               rfc2047value, rfc2047close_q} Rfc2047States;

bool rfc2047_decode(const std::string& in, std::string &out) 
{
    DPRINT((stderr, "rfc2047_decode: [%s]\n", in.c_str()));

    Rfc2047States state = rfc2047ready;
    string encoding, charset, value, utf8;

    out.clear();

    for (string::size_type ii = 0; ii < in.length(); ii++) {
        char ch = in[ii];
        switch (state) {
        case rfc2047ready: 
        {
            DPRINT((stderr, "STATE: ready, ch %c\n", ch));
            switch (ch) {
                // Whitespace: stay ready
            case ' ': case '\t': value += ch;break;
                // '=' -> forward to next state
            case '=': state = rfc2047open_eq; break;
                DPRINT((stderr, "STATE: open_eq\n"));
                // Other: go back to sleep
            default: value += ch; state = rfc2047ready;
            }
        }
        break;
        case rfc2047open_eq: 
        {
            DPRINT((stderr, "STATE: open_eq, ch %c\n", ch));
            switch (ch) {
            case '?': 
            {
                // Transcode current (unencoded part) value:
                // we sometimes find 8-bit chars in
                // there. Interpret as Iso8859.
                if (value.length() > 0) {
                    transcode(value, utf8, "ISO-8859-1", "UTF-8");
                    out += utf8;
                    value.clear();
                }
                state = rfc2047charset; 
            }
            break;
            default: state = rfc2047ready; out += '='; out += ch;break;
            }
        } 
        break;
        case rfc2047charset: 
        {
            DPRINT((stderr, "STATE: charset, ch %c\n", ch));
            switch (ch) {
            case '?': state = rfc2047encoding; break;
            default: charset += ch; break;
            }
        } 
        break;
        case rfc2047encoding: 
        {
            DPRINT((stderr, "STATE: encoding, ch %c\n", ch));
            switch (ch) {
            case '?': state = rfc2047value; break;
            default: encoding += ch; break;
            }
        }
        break;
        case rfc2047value: 
        {
            DPRINT((stderr, "STATE: value, ch %c\n", ch));
            switch (ch) {
            case '?': state = rfc2047close_q; break;
            default: value += ch;break;
            }
        }
        break;
        case rfc2047close_q: 
        {
            DPRINT((stderr, "STATE: close_q, ch %c\n", ch));
            switch (ch) {
            case '=': 
            {
                DPRINT((stderr, "End of encoded area. Charset %s, Encoding %s\n", charset.c_str(), encoding.c_str()));
                string utf8;
                state = rfc2047ready; 
                if (!rfc2047_decodeParsed(charset, encoding, value, 
                                          utf8)) {
                    return false;
                }
                out += utf8;
                charset.clear();
                encoding.clear();
                value.clear();
            }
            break;
            default: state = rfc2047value; value += '?';value += ch;break;
            }
        }
        break;
        default: // ??
            DPRINT((stderr, "STATE: default ?? ch %c\n", ch));
            return false;
        }
    }

    if (value.length() > 0) {
        transcode(value, utf8, "CP1252", "UTF-8");
        out += utf8;
        value.clear();
    }
    if (state != rfc2047ready) 
        return false;
    return true;
}
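
// Usage sketch (not compiled): decoding an rfc2047 encoded-word as found in
// mail headers. The sample below is made up, but follows the
// =?charset?Q?...?= syntax handled above.
#if 0
static void example_rfc2047()
{
    string out;
    if (rfc2047_decode("=?iso-8859-1?Q?Caf=E9_au_lait?=", out)) {
        // out == "Café au lait", transcoded to UTF-8
    }
}
#endif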

#define DEBUGDATE 0
#if DEBUGDATE
#define DATEDEB(X) fprintf X
#else
#define DATEDEB(X)
#endif

// Convert rfc822 date to unix time. A date string normally looks like:
//  Mon, 3 Jul 2006 09:51:58 +0200
// But there are many close common variations
// And also hopeless things like: Fri Nov  3 13:13:33 2006
time_t rfc2822DateToUxTime(const string& dt)
{
    // Strip everything up to first comma if any, we don't need weekday,
    // then break into tokens
    vector<string> toks;
    string::size_type idx;
    if ((idx = dt.find_first_of(",")) != string::npos) {
        if (idx == dt.length() - 1) {
            DATEDEB((stderr, "Bad rfc822 date format (short1): [%s]\n", 
                     dt.c_str()));
            return (time_t)-1;
        }
        string date = dt.substr(idx+1);
        stringToTokens(date, toks, " \t:");
    } else {
        // No comma. Enter strangeland
        stringToTokens(dt, toks, " \t:");
        // Test for date like: Sun Nov 19 06:18:41 2006
        //                      0   1  2   3 4  5  6
        // and change to:      19 Nov 2006 06:18:41
        if (toks.size() == 7) {
            if (toks[0].length() == 3 &&
                toks[0].find_first_of("0123456789") == string::npos) {
                swap(toks[0], toks[2]);
                swap(toks[6], toks[2]);
                toks.pop_back();
            }
        }
    }

#if DEBUGDATE
    for (vector<string>::iterator it = toks.begin(); it != toks.end(); it++) {
        DATEDEB((stderr, "[%s] ", it->c_str()));
    }
    DATEDEB((stderr, "\n"));
#endif

    if (toks.size() < 6) {
        DATEDEB((stderr, "Bad rfc822 date format (toks cnt): [%s]\n", 
                 dt.c_str()));
        return (time_t)-1;
    }

    if (toks.size() == 6) {
        // Probably no timezone, sometimes happens
        toks.push_back("+0000");
    }

    struct tm tm;
    memset(&tm, 0, sizeof(tm));

    // Load struct tm with appropriate tokens, possibly converting
    // when needed

    vector<string>::iterator it = toks.begin();

    // Day of month: no conversion needed
    tm.tm_mday = atoi(it->c_str());
    it++;

    // Month. Only Jan-Dec are legal. January, February do happen
    // though. Convert to 0-11
    if (*it == "Jan" || *it == "January") tm.tm_mon = 0; else if
        (*it == "Feb" || *it == "February") tm.tm_mon = 1; else if
        (*it == "Mar" || *it == "March") tm.tm_mon = 2; else if
        (*it == "Apr" || *it == "April") tm.tm_mon = 3; else if
        (*it == "May") tm.tm_mon = 4; else if
        (*it == "Jun" || *it == "June") tm.tm_mon = 5; else if
        (*it == "Jul" || *it == "July") tm.tm_mon = 6; else if
        (*it == "Aug" || *it == "August") tm.tm_mon = 7; else if
        (*it == "Sep" || *it == "September") tm.tm_mon = 8; else if
        (*it == "Oct" || *it == "October") tm.tm_mon = 9; else if
        (*it == "Nov" || *it == "November") tm.tm_mon = 10; else if
        (*it == "Dec" || *it == "December") tm.tm_mon = 11; else {
        DATEDEB((stderr, "Bad rfc822 date format (month): [%s]\n", 
                 dt.c_str()));
        return (time_t)-1;
    }
    it++;

    // Year. Struct tm counts from 1900. 2 char years are quite rare
    // but do happen. I've seen 00 happen so count small values from 2000
    tm.tm_year = atoi(it->c_str());
    if (it->length() == 2) {
        if (tm.tm_year < 10)
            tm.tm_year += 2000;
        else
            tm.tm_year += 1900;
    }
    if (tm.tm_year > 1900)
        tm.tm_year -= 1900;
    it++;

    // Hour minute second need no adjustments
    tm.tm_hour = atoi(it->c_str()); it++;
    tm.tm_min  = atoi(it->c_str()); it++;
    tm.tm_sec  = atoi(it->c_str()); it++;       


    // Timezone is supposed to be either +-XYZT or a zone name
    int zonesecs = 0;
    if (it->length() < 1) {
        DATEDEB((stderr, "Bad rfc822 date format (zlen): [%s]\n", dt.c_str()));
        return (time_t)-1;
    }
    if (it->at(0) == '-' || it->at(0) == '+') {
        // Note that +xy:zt (instead of +xyzt) sometimes happen, we
        // may want to process it one day
        if (it->length() < 5) {
            DATEDEB((stderr, "Bad rfc822 date format (zlen1): [%s]\n", 
                     dt.c_str()));
            goto nozone;
        }
        zonesecs = 3600*((it->at(1)-'0') * 10 + it->at(2)-'0')+ 
            (it->at(3)-'0')*10 + it->at(4)-'0';
        zonesecs = it->at(0) == '+' ? -1 * zonesecs : zonesecs;
    } else {
        int hours;
        if (*it == "A") hours= 1; else if (*it == "B") hours= 2; 
        else if (*it == "C") hours= 3; else if (*it == "D") hours= 4; 
        else if (*it == "E") hours= 5; else if (*it == "F") hours= 6;
        else if (*it == "G") hours= 7; else if (*it == "H") hours= 8; 
        else if (*it == "I") hours= 9; else if (*it == "K") hours= 10;
        else if (*it == "L") hours= 11; else if (*it == "M") hours= 12; 
        else if (*it == "N") hours= -1; else if (*it == "O") hours= -2; 
        else if (*it == "P") hours= -3; else if (*it == "Q") hours= -4; 
        else if (*it == "R") hours= -5; else if (*it == "S") hours= -6; 
        else if (*it == "T") hours= -7; else if (*it == "U") hours= -8; 
        else if (*it == "V") hours= -9; else if (*it == "W") hours= -10;
        else if (*it == "X") hours= -11; else if (*it == "Y") hours= -12;
        else if (*it == "Z") hours=  0; else if  (*it == "UT") hours= 0; 
        else if (*it == "GMT") hours= 0; else if (*it == "EST") hours= 5;
        else if (*it == "EDT") hours= 4; else if (*it == "CST") hours= 6;
        else if (*it == "CDT") hours= 5; else if (*it == "MST") hours= 7;
        else if (*it == "MDT") hours= 6; else if (*it == "PST") hours= 8;
        else if (*it == "PDT") hours= 7; 
        // Non standard names
        // Standard Time (or Irish Summer Time?) is actually +5.5
        else if (*it == "CET") hours= -1; else if (*it == "JST") hours= -9; 
        else if (*it == "IST") hours= -5; else if (*it == "WET") hours= 0; 
        else if (*it == "MET") hours= -1; 
        else {
            DATEDEB((stderr, "Bad rfc822 date format (zname): [%s]\n", 
                     dt.c_str()));
            // Forget tz
            goto nozone;
        }
        zonesecs = 3600 * hours;
    }
    DATEDEB((stderr, "Tz: [%s] -> %d\n", it->c_str(), zonesecs));
nozone:

    // Compute the UTC Unix time value
#ifndef sun
    time_t tim = timegm(&tm);
#else
    // No timegm on Sun. Use mktime, then correct for local timezone
    time_t tim = mktime(&tm);
    // altzone and timezone hold the difference in seconds between UTC
    // and local. They are negative for places east of greenwich
    // 
    // mktime takes our buffer to be local time, so it adds timezone
    // to the conversion result (if timezone is < 0 it's currently
    // earlier in greenwhich). 
    //
    // We have to subtract it back (hey! hopefully! maybe we have to
    // add it). Who can really know?
    tim -= timezone;
#endif

    // And add in the correction from the email's Tz
    tim += zonesecs;

    DATEDEB((stderr, "Date: %s  uxtime %ld \n", ctime(&tim), tim));
    return tim;
}
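
// Usage sketch (not compiled): converting an rfc2822 date header to a Unix
// timestamp, as also exercised by the test driver below.
#if 0
static void example_rfc2822_date()
{
    time_t t = rfc2822DateToUxTime("Mon, 3 Jul 2006 09:51:58 +0200");
    if (t != (time_t)-1) {
        // t is the corresponding UTC Unix time
    }
}
#endif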

#else 

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <string>
#include "mimeparse.h"
#include "readfile.h"


using namespace std;
extern bool rfc2231_decode(const string& in, string& out, string& charset); 
extern time_t rfc2822DateToUxTime(const string& date);
static const char *thisprog;

static char usage [] =
    "-p: header value and parameter test\n"
    "-q: qp decoding\n"
    "-b: base64\n"
    "-7: rfc2047\n"
    "-1: rfc2331\n"
    "-t: date time\n"
    "  \n\n"
    ;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_p     0x2 
#define OPT_q     0x4 
#define OPT_b     0x8
#define OPT_7     0x10
#define OPT_1     0x20
#define OPT_t     0x40
int
main(int argc, const char **argv)
{
    int count = 10;
    
    thisprog = argv[0];
    argc--; argv++;

    while (argc > 0 && **argv == '-') {
        (*argv)++;
        if (!(**argv))
            /* Case of "adb - core" */
            Usage();
        while (**argv)
            switch (*(*argv)++) {
            case 'p':   op_flags |= OPT_p; break;
            case 'q':   op_flags |= OPT_q; break;
            case 'b':   op_flags |= OPT_b; break;
            case '1':   op_flags |= OPT_1; break;
            case '7':   op_flags |= OPT_7; break;
            case 't':   op_flags |= OPT_t; break;
            default: Usage();   break;
            }
    b1: argc--; argv++;
    }

    if (argc != 0)
        Usage();

    if (op_flags & OPT_p) {
        // Mime header value and parameters extraction
        const char *tr[] = {
            "text/html;charset = UTF-8 ; otherparam=garb; \n"
            "QUOTEDPARAM=\"quoted value\"",

            "text/plain; charset=ASCII\r\n name=\"809D3016_5691DPS_5.2.LIC\"",

            "application/x-stuff;"
            "title*0*=us-ascii'en'This%20is%20even%20more%20;"
            "title*1*=%2A%2A%2Afun%2A%2A%2A%20;"
            "title*2=\"isn't it!\"",

            // The following are all invalid, trying to crash the parser...
            "",
            // This does not parse because of whitespace in the value.
            " complete garbage;",
            // This parses, but only the first word gets into the value
            " some value",
            " word ;",  ";",  "=",  "; = ",  "a;=\"toto tutu\"=", ";;;;a=b",
        };
      
        for (unsigned int i = 0; i < sizeof(tr) / sizeof(char *); i++) {
            MimeHeaderValue parsed;
            if (!parseMimeHeaderValue(tr[i], parsed)) {
                fprintf(stderr, "PARSE ERROR for [%s]\n", tr[i]);
                continue;
            }
            printf("Field value: [%s]\n", parsed.value.c_str());
            map<string, string>::iterator it;
            for (it = parsed.params.begin();it != parsed.params.end();it++) {
                if (it == parsed.params.begin())
                    printf("Parameters:\n");
                printf("  [%s] = [%s]\n", it->first.c_str(), it->second.c_str());
            }
        }

    } else if (op_flags & OPT_q) {
        // Quoted printable stuff
        const char *qp = 
            "=41=68 =e0 boire=\r\n continue 1ere\ndeuxieme\n\r3eme "
            "agrave is: '=E0' probable skipped decode error: =\n"
            "Actual decode error =xx this wont show";

        string out;
        if (!qp_decode(string(qp), out)) {
            fprintf(stderr, "qp_decode returned error\n");
        }
        printf("Decoded: '%s'\n", out.c_str());
    } else if (op_flags & OPT_b) {
        // Base64
        //'C'est à boire qu'il nous faut éviter l'excès.'
        //'Deuxième ligne'
        //'Troisième ligne'
        //'Et la fin (pas de nl). '
        const char *b64 = 
            "Qydlc3Qg4CBib2lyZSBxdSdpbCBub3VzIGZhdXQg6XZpdGVyIGwnZXhj6HMuCkRldXhp6G1l\r\n"
            "IGxpZ25lClRyb2lzaehtZSBsaWduZQpFdCBsYSBmaW4gKHBhcyBkZSBubCkuIA==\r\n";

        string out;
        if (!base64_decode(string(b64), out)) {
            fprintf(stderr, "base64_decode returned error\n");
            exit(1);
        }
        printf("Decoded: [%s]\n", out.c_str());
#if 0
        string coded, decoded;
        const char *fname = "/tmp/recoll_decodefail";
        if (!file_to_string(fname, coded)) {
            fprintf(stderr, "Cant read %s\n", fname);
            exit(1);
        }
    
        if (!base64_decode(coded, decoded)) {
            fprintf(stderr, "base64_decode returned error\n");
            exit(1);
        }
        printf("Decoded: [%s]\n", decoded.c_str());
#endif

    } else if (op_flags & (OPT_7|OPT_1)) {
        // rfc2047
        char line [1024];
        string out;
        bool res;
        while (fgets(line, 1023, stdin)) {
            int l = strlen(line);
            if (l == 0)
                continue;
            line[l-1] = 0;
            fprintf(stderr, "Line: [%s]\n", line);
            string charset;
            if (op_flags & OPT_7) {
                res = rfc2047_decode(line, out);
            } else {
                res = rfc2231_decode(line, out, charset);
            }
            if (res)
                fprintf(stderr, "Out:  [%s] cs %s\n", out.c_str(), charset.c_str());
            else
                fprintf(stderr, "Decoding failed\n");
        }
    } else if (op_flags & OPT_t) {
        time_t t;
        
        const char *dates[] = {
            " Wed, 13 Sep 2006 11:40:26 -0700 (PDT)",
            " Mon, 3 Jul 2006 09:51:58 +0200",
            " Wed, 13 Sep 2006 08:19:48 GMT-07:00",
            " Wed, 13 Sep 2006 11:40:26 -0700 (PDT)",
            " Sat, 23 Dec 89 19:27:12 EST",
            "   13 Jan 90 08:23:29 GMT"};

        for (unsigned int i = 0; i < sizeof(dates) / sizeof(char *); i++) {
            t = rfc2822DateToUxTime(dates[i]);
            struct tm *tm = localtime(&t);
            char datebuf[100];
            strftime(datebuf, 99, " %Y-%m-%d %H:%M:%S %z", tm);
            printf("[%s] -> [%s]\n", dates[i], datebuf);
        }
        printf("Enter date:\n");
        char line [1024];
        while (fgets(line, 1023, stdin)) {
            int l = strlen(line);
            if (l == 0) continue;
            line[l-1] = 0;
            t = rfc2822DateToUxTime(line);
            struct tm *tm = localtime(&t);
            char datebuf[100];
            strftime(datebuf, 99, " %Y-%m-%d %H:%M:%S %z", tm);
            printf("[%s] -> [%s]\n", line, datebuf);
        }


    }
    exit(0);
}

#endif // TEST_MIMEPARSE
recoll-1.26.3/utils/appformime.h0000644000175000017500000000554013533651561013470 00000000000000/* Copyright (C) 2014 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _APPFORMIME_H_INCLUDED_
#define _APPFORMIME_H_INCLUDED_

#include <string>
#include <vector>
#include <map>

/**
 * Rather strangely, I could not find a reasonably simple piece of
 * code which would parse /usr/share/applications to return a list of
 * apps for a given mime type. So here goes. Note that the implementation
 * is very primitive for now (no use of cache file, no updating once built).
 * Also, this is not thread-safe, but could be made so quite easily.
 */
class DesktopDb {
public:
    class AppDef {
    public:
        AppDef(const std::string& nm, const std::string& cmd)
            : name(nm), command(cmd)
            {}
        AppDef() {}

        std::string name;
        std::string command;
    };

    /** Build/Get the db for the standard fdo directory */
    static DesktopDb* getDb();

    /** Constructor for a db based on a non-standard location */
    DesktopDb(const string& dir);

    /** In case of error: what happened ? */
    const string& getReason();

    /**
     * Get a list of applications able to process a given MIME type.
     * @param mime MIME type we want the apps for
     * @param[output] apps appropriate applications 
     * @param[output] reason if we fail, an explanation ?
     * @return true for no error (apps may still be empty). false if a serious
     *   problem was detected.
     */
    bool appForMime(const std::string& mime, vector<AppDef> *apps,
                    std::string *reason = 0);

    /**
     * Get all applications defs:
     * @param[output] apps applications 
     * @return true 
     */
    bool allApps(vector<AppDef> *apps);

    /** 
     * Get app with given name 
     */
    bool appByName(const string& nm, AppDef& app);

    typedef std::map<std::string, std::vector<AppDef> > AppMap;

private:
    /** This is used by getDb() and builds a db for the standard location */
    DesktopDb();
    void build(const std::string& dir);
    DesktopDb(const DesktopDb &);
    DesktopDb& operator=(const DesktopDb &);

    AppMap m_appMap;
    std::string m_reason;
    bool m_ok;
};


#endif /* _APPFORMIME_H_INCLUDED_ */
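
// Usage sketch (not part of the header): listing the desktop applications
// registered for a MIME type with the API declared above.
#if 0
static void example_desktopdb()
{
    DesktopDb *db = DesktopDb::getDb();
    std::vector<DesktopDb::AppDef> apps;
    std::string reason;
    if (db && db->appForMime("application/pdf", &apps, &reason)) {
        for (const DesktopDb::AppDef& app : apps) {
            // app.name is the display name, app.command the command line
        }
    }
}
#endif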
recoll-1.26.3/utils/zlibut.cpp0000644000175000017500000001177013533651561013177 00000000000000/* Copyright (C) 2017 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include "zlibut.h"

#include <stdlib.h>
#include <zlib.h>

#include "log.h"

using namespace std;

static void *allocmem(
    void *cp,    /* The array to grow. may be NULL */
    int  sz,     /* Unit size in bytes */
    int  *np,    /* Pointer to current allocation number */
    int  min,    /* Number to allocate the first time */
    int  maxinc) /* Maximum increment */
{
    if (cp == 0) {
        cp = malloc(min * sz);
        *np = cp ? min : 0;
        return cp;
    }

    int inc = (*np > maxinc) ?  maxinc : *np;
    if ((cp = realloc(cp, (*np + inc) * sz)) != 0) {
        *np += inc;
    }
    return cp;
}

class ZLibUtBuf::Internal {
public:
    Internal() {}
    ~Internal() {
        if (buf && dofree) {
            free(buf);
        }
    }
    bool grow(size_t n) {
        if (!initsz)
            initsz = n;
        buf = (char *)allocmem(buf, initsz, &alloc, 1, 20);
        return nullptr != buf;
    }
    int getAlloc() {
        return alloc * initsz;
    }
    char *buf{nullptr};
    int initsz{0}; // Set to first alloc size
    int alloc{0}; // Allocation count (allocmem()). Capa is alloc*inisz
    int datacnt{0}; // Data count
    bool dofree{true}; // Does buffer belong to me ?
    friend bool inflateToBuf(const void* inp, unsigned int inlen, ZLibUtBuf& buf);
};

ZLibUtBuf::ZLibUtBuf()
{
    m = new Internal;
}
ZLibUtBuf::~ZLibUtBuf()
{
    delete m;
}

char *ZLibUtBuf::getBuf() const
{
    return m->buf;
}            
char *ZLibUtBuf::takeBuf()
{
    m->dofree = false;
    return m->buf;
}
size_t ZLibUtBuf::getCnt()
{
    return m->datacnt;
}

bool inflateToBuf(const void* inp, unsigned int inlen, ZLibUtBuf& buf)
{
    LOGDEB0("inflateToBuf: inlen " << inlen << "\n");

    z_stream d_stream; /* decompression stream */

    d_stream.zalloc = (alloc_func)0;
    d_stream.zfree = (free_func)0;
    d_stream.opaque = (voidpf)0;
    d_stream.next_in  = (Bytef*)inp;
    d_stream.avail_in = inlen;
    d_stream.next_out = 0;
    d_stream.avail_out = 0;

    int err;
    if ((err = inflateInit(&d_stream)) != Z_OK) {
        LOGERR("Inflate: inflateInit: err " << err << " msg "  <<
               d_stream.msg << "\n");
        return false;
    }

    for (;;) {
        LOGDEB2("InflateToDynBuf: avail_in " << d_stream.avail_in <<
                " total_in " << d_stream.total_in << " avail_out " <<
                d_stream.avail_out << " total_out " << d_stream.total_out <<
                "\n");
        if (d_stream.avail_out == 0) {
            if (!buf.m->grow(inlen)) {
                LOGERR("Inflate: out of memory, current alloc " <<
                       buf.m->getAlloc() << "\n");
                inflateEnd(&d_stream);
                return false;
            }
            d_stream.avail_out = buf.m->getAlloc() - d_stream.total_out;
            d_stream.next_out = (Bytef*)(buf.getBuf() + d_stream.total_out);
        }
        err = inflate(&d_stream, Z_NO_FLUSH);
        if (err == Z_STREAM_END) {
            break;
        }
        if (err != Z_OK) {
            LOGERR("Inflate: error " << err << " msg " <<
                   (d_stream.msg ? d_stream.msg : "") << endl);
            inflateEnd(&d_stream);
            return false;
        }
    }
    if ((err = inflateEnd(&d_stream)) != Z_OK) {
        LOGERR("Inflate: inflateEnd error " << err << " msg " <<
               (d_stream.msg ? d_stream.msg : "") << endl);
        return false;
    }
    buf.m->datacnt = d_stream.total_out;
    LOGDEB1("inflateToBuf: ok, output size " << buf.getCnt() << endl);
    return true;
}


bool deflateToBuf(const void* inp, unsigned int inlen, ZLibUtBuf& buf)
{
    uLongf len = compressBound(static_cast<uLong>(inlen));
    // This needs cleanup: because the buffer is reused inside
    // e.g. circache, we want a minimum size in case the first doc,
    // whose size sets the grow increment, is small. It would be better
    // to let the user set a minimum size hint.
    if (len < 500 *1024)
        len = 500 * 1024;

    while (buf.m->getAlloc() < int(len)) {
        if (!buf.m->grow(len)) {
            LOGERR("deflateToBuf: can't get buffer for " << len << " bytes\n");
            return false;
        }
    }
    bool ret = compress((Bytef*)buf.getBuf(), &len, (Bytef*)inp,
                        static_cast<uLong>(inlen)) == Z_OK;
    buf.m->datacnt = len;
    return ret;
}
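
// Usage sketch (not compiled): a compress/decompress round trip with the
// ZLibUtBuf helper declared in zlibut.h.
#if 0
static void example_zlib_roundtrip()
{
    std::string data(100000, 'x');
    ZLibUtBuf comp, decomp;
    if (!deflateToBuf(data.data(), (unsigned int)data.size(), comp))
        return;
    if (!inflateToBuf(comp.getBuf(), (unsigned int)comp.getCnt(), decomp))
        return;
    // decomp.getBuf() / decomp.getCnt() now hold the original bytes
}
#endif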
recoll-1.26.3/utils/copyfile.h0000644000175000017500000000317713533651561013147 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _COPYFILE_H_INCLUDED_
#define _COPYFILE_H_INCLUDED_

#include <string>

enum CopyfileFlags {COPYFILE_NONE = 0, 
                    COPYFILE_NOERRUNLINK = 1,
                    COPYFILE_EXCL = 2,
};

/** Copy src to dst. 
 *
 * We destroy an existing dst except if COPYFILE_EXCL is set (or if we
 * have no permission...).
 * A partially copied dst is normally removed, except if COPYFILE_NOERRUNLINK 
 * is set.
 */
extern bool copyfile(const char *src, const char *dst, std::string &reason,
		     int flags = 0);

/** Save c++ string to file */
extern bool stringtofile(const std::string& dt, const char *dst, 
                         std::string& reason, int flags = 0);

/** Try to rename src. If this fails (different devices) copy then unlink src */
extern bool renameormove(const char *src, const char *dst, std::string &reason);

#endif /* _COPYFILE_H_INCLUDED_ */
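
// Usage sketch (not part of the header): copying a file and checking the
// failure reason. The paths are hypothetical.
#if 0
static void example_copyfile()
{
    std::string reason;
    if (!copyfile("/tmp/src.txt", "/tmp/dst.txt", reason,
                  COPYFILE_NOERRUNLINK)) {
        // reason holds an explanation of what went wrong
    }
}
#endif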
recoll-1.26.3/utils/hldata.h0000644000175000017500000001204713566424763012576 00000000000000/* Copyright (C) 2017-2019 J.F.Dockes
 *
 * License: GPL 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _hldata_h_included_
#define _hldata_h_included_

#include <string>
#include <vector>
#include <set>
#include <unordered_map>

/** Store data about user search terms and their expansions. This is used
 * mostly for highlighting result text and walking the matches, generating 
 * spelling suggestions.
 */
struct HighlightData {
    /** The user terms, excluding those with wildcards. This list is
     * intended for orthographic suggestions so the terms are always
     * lowercased, unaccented or not depending on the type of index 
     * (as the spelling dictionary is generated from the index terms).
     */
    std::set<std::string> uterms;

    /** The db query terms linked to the uterms entry they were expanded from. 
     * This is used for aggregating term stats when generating snippets (for 
     * choosing the best terms, allocating slots, etc. )
     */
    std::unordered_map<std::string, std::string> terms;

    /** The original user terms-or-groups. This is for display
     * purposes: ie when creating a menu to look for a specific
     * matched group inside a preview window. We want to show the
     * user-entered data in the menu, not some transformation, so
     * these are always raw, diacritics and case preserved.
     */
    std::vector<std::vector<std::string> > ugroups;

    /** Processed/expanded terms and groups. Used for looking for
     * regions to highlight. A group can be a PHRASE or NEAR entry
     * Terms are just groups with 1 entry. All
     * terms are transformed to be compatible with index content
     * (unaccented and lowercased as needed depending on
     * configuration), and the list may include values
     * expanded from the original terms by stem or wildcard expansion.
     */
    struct TermGroup {
        // We'd use an union but no can do
        std::string term;
        std::vector<std::vector<std::string> > orgroups;
        int slack{0};

        /* Index into ugroups. As a user term or group may generate
         * many processed/expanded terms or groups, this is how we
         * relate an expansion to its source (used, e.g. for
         * generating anchors for walking search matches in the
         * preview window). */
        size_t grpsugidx{0};
        enum TGK {TGK_TERM, TGK_NEAR, TGK_PHRASE};
        TGK kind{TGK_TERM};
    };
    std::vector<TermGroup> index_term_groups;

    void clear() {
	uterms.clear();
	ugroups.clear();
	index_term_groups.clear();
    }
    void append(const HighlightData&);

    // Print (debug)
    std::string toString() const;
};
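
// Illustrative example (not from the original header): for a user query like
//   ["foo bar" baz*]
// the fields above could end up filled more or less as follows:
//   uterms            : { "foo", "bar" }           (wildcard term omitted)
//   terms             : { "foos" -> "foo", ... }   (index term -> user term)
//   ugroups           : { {"foo", "bar"}, {"baz*"} }
//   index_term_groups : one TGK_PHRASE entry whose orgroups hold the
//                       expansions of "foo" and "bar" (grpsugidx == 0), plus
//                       TGK_TERM entries for the expansions of "baz*"
//                       (grpsugidx == 1).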

/* The following is used by plaintorich.cpp for finding zones to
   highlight and by rclabsfromtext.cpp to choose fragments for the
   abstract */

struct GroupMatchEntry {
    // Start/End byte offsets in the document text
    std::pair offs;
    // Index of the search group this comes from: this is to relate a 
    // match to the original user input.
    size_t grpidx;
    GroupMatchEntry(int sta, int sto, size_t idx) 
        : offs(sta, sto), grpidx(idx) {
    }
};

// Find NEAR or PHRASE matches for one group of terms.
//
// @param hldata User query expansion descriptor (see above). We only use
//      the index_term_groups entry
//
// @param grpidx Index in hldata.index_term_groups for the group we
//     process. This is used by us to get the terms, group type
//     (phrase/near) and slacks. We also set it in the output
//     GroupMatchEntry structures to allow the caller to link a match
//     with a specific user input (e.g. for walking the match in the
//     GUI preview)
//
// @param inplists Position lists for the group terms. This is the
//     data used to look for matches.
//
// @param gpostobytes Translation of term position to start/end byte
//     offsets. This is used to translate term positions to byte
//     positions in the output, for ease of use by caller.
//
// @param[out] tboffs Found matches. Each match has a begin and end
//     byte offset and an index linking to the origin data in the
//     HighlightData structure.
extern bool matchGroup(
    const HighlightData& hldata,
    unsigned int grpidx,
    const std::unordered_map<std::string, std::vector<size_t>>& inplists,
    const std::unordered_map<size_t, std::pair<size_t, size_t>>& gpostobytes,
    std::vector<GroupMatchEntry>& tboffs
    );

#endif /* _hldata_h_included_ */
recoll-1.26.3/utils/readfile.h0000644000175000017500000000770713533651561013113 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _READFILE_H_INCLUDED_
#define _READFILE_H_INCLUDED_

#include <stdint.h>

#include <string>

class FileScanUpstream;

/** Data sink for the file reader. */
class FileScanDo {
public:
    virtual ~FileScanDo() {}
    /* Initialize and allocate. 
     * @param size if set, lower bound of data size.
     * @param reason[output] set to error message in case of error.
     * @return false for error (file_scan will return), true if ok.
     */
    virtual bool init(int64_t size, std::string *reason) = 0;
    /* Process chunk of data
     * @param buf  the data buffer.
     * @param cnt byte count.
     * @param reason[output] set to error message in case of error.
     * @return false for error (file_scan will return), true if ok.
     */
    virtual bool data(const char *buf, int cnt, std::string *reason) = 0;
    
    virtual void setUpstream(FileScanUpstream*) {}
};
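
// Minimal example sink (a sketch, not part of the original header): gather
// the whole stream into a string, then pass it to file_scan() declared below.
//
//   class StringSink : public FileScanDo {
//   public:
//       std::string out;
//       bool init(int64_t size, std::string *) override {
//           if (size > 0)
//               out.reserve(static_cast<size_t>(size));
//           return true;
//       }
//       bool data(const char *buf, int cnt, std::string *) override {
//           out.append(buf, cnt);
//           return true;
//       }
//   };
//   // std::string reason;
//   // StringSink sink;
//   // bool ok = file_scan("/some/file", &sink, &reason);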

/** Open and read file, calling the FileScanDo data() method for each chunk.
 *
 * @param filename File name. Use empty value for stdin.
 * @param doer the data processor. The init() method will be called
 * initially with a lower bound of the data size (may be used to
 * reserve a buffer), or with a 0 size if nothing is known about the
 * size. The data() method will be called for every chunk of data
 * read. 
 * @param offs Start offset. If not zero, will disable decompression 
 *             (set to -1 to start at 0 with no decompression).
 * @param cnt Max bytes in output. Set cnt to -1 for no limit.
 * @param[output] md5p If not null, points to a string to store the hex ascii 
 *     md5 of the uncompressed data.
 * @param[output] reason If not null, points to a string for storing an 
 *     error message if the return value is false.
 * @return true if the operation ended normally, else false.
 */
bool file_scan(const std::string& fn, FileScanDo* doer, int64_t startoffs,
               int64_t cnttoread, std::string *reason
#ifdef READFILE_ENABLE_MD5
               , std::string *md5p
#endif
    );

/** Same as above, without offset/cnt/md5 */
bool file_scan(const std::string& filename, FileScanDo* doer,
               std::string *reason);

/** Same as file_scan, from a memory buffer. No libz processing */
bool string_scan(const char *data, size_t cnt, FileScanDo* doer, 
                 std::string *reason
#ifdef READFILE_ENABLE_MD5
                 , std::string *md5p
#endif
    );

#if defined(READFILE_ENABLE_MINIZ)
/* Process a zip archive member */
bool file_scan(const std::string& filename, const std::string& membername,
               FileScanDo* doer, std::string *reason);
bool string_scan(const char* data, size_t cnt, const std::string& membername,
                 FileScanDo* doer, std::string *reason);
#endif

/**
 * Read file into string.
 * @return true for ok, false else
 */
bool file_to_string(const std::string& filename, std::string& data,
                    std::string *reason = 0);

/** Read file chunk into string. Set cnt to -1 for going to
 * eof, offs to -1 for going from the start without decompression */
bool file_to_string(const std::string& filename, std::string& data,
                    int64_t offs, size_t cnt, std::string *reason = 0);


#endif /* _READFILE_H_INCLUDED_ */
recoll-1.26.3/utils/dlib.h0000644000175000017500000000215713566424763012254 00000000000000/* Copyright (C) 2017-2019 J.F.Dockes
 *
 * License: GPL 2.1
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _DLIB_H_INCLUDED_
#define _DLIB_H_INCLUDED_

/** Dynamic library functions */

#include <string>

extern void *dlib_open(const std::string& libname, int flags = 0);
extern void *dlib_sym(void *handle, const char *name);
extern void dlib_close(void *handle);
extern const char *dlib_error();

#endif /* _DLIB_H_INCLUDED_ */
recoll-1.26.3/utils/x11mon.h0000644000175000017500000000165713533651561012461 00000000000000#ifndef _X11MON_H_INCLUDED_
#define _X11MON_H_INCLUDED_
/* Copyright (C) 2006 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/** Poll X11 server status and connectivity */
extern bool x11IsAlive();

#endif /* _X11MON_H_INCLUDED_ */
recoll-1.26.3/utils/workqueue.h0000644000175000017500000002644513533651561013367 00000000000000/* Copyright (C) 2006-2016 J.F.Dockes
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 *   02110-1301 USA
 */
#ifndef _WORKQUEUE_H_INCLUDED_
#define _WORKQUEUE_H_INCLUDED_

#include <thread>
#if HAVE_STD_FUTURE
#include <future>
#endif
#include <string>
#include <queue>
#include <list>
#include <mutex>
#include <condition_variable>

#ifdef MDU_INCLUDE_LOG
#include MDU_INCLUDE_LOG
#else
#include "log.h"
#endif

/**
 * A WorkQueue manages the synchronisation around a queue of work items,
 * where a number of client threads queue tasks and a number of worker
 * threads take and execute them. The goal is to introduce some level
 * of parallelism between the successive steps of a previously single
 * threaded pipeline. For example data extraction / data preparation / index
 * update, but this could have other uses.
 *
 * There is no individual task status return. In case of fatal error,
 * the client or worker sets an end condition on the queue. A second
 * queue could conceivably be used for returning individual task
 * status.
 *
 * The strange thread functions argument and return values
 * comes from compatibility with an earlier pthread-based
 * implementation.
 */
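//
// Usage sketch (illustrative, not part of the original header):
//
//   static void *worker(void *arg) {
//       WorkQueue<std::string> *q = (WorkQueue<std::string> *)arg;
//       std::string task;
//       while (q->take(&task)) {
//           // ... process task ...
//       }
//       q->workerExit();
//       return (void *)1;
//   }
//   ...
//   WorkQueue<std::string> q("mywork", 1000);
//   q.start(4, worker, &q);
//   q.put("some work");
//   q.waitIdle();
//   q.setTerminateAndWait();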
template <class T> class WorkQueue {
public:

    /** Create a WorkQueue
     * @param name for message printing
     * @param hi number of tasks on queue before clients block. Default 0
     *    meaning no limit. hi == -1 means that the queue is disabled.
     * @param lo minimum count of tasks before worker starts. Default 1.
     */
    WorkQueue(const std::string& name, size_t hi = 0, size_t lo = 1)
        : m_name(name), m_high(hi), m_low(lo), m_workers_exited(0),
          m_ok(true), m_clients_waiting(0), m_workers_waiting(0),
          m_tottasks(0), m_nowake(0), m_workersleeps(0), m_clientsleeps(0) {
    }

    ~WorkQueue() {
        if (!m_worker_threads.empty()) {
            setTerminateAndWait();
        }
    }

    /** Start the worker threads.
     *
     * @param nworkers number of thread copies to start.
     * @param start_routine thread function. It should loop
     *      taking (QueueWorker::take()) and executing tasks.
     * @param arg initial parameter to thread function.
     * @return true if ok.
     */
    bool start(int nworkers, void *(workproc)(void *), void *arg) {
        std::unique_lock<std::mutex> lock(m_mutex);
        for (int i = 0; i < nworkers; i++) {
            Worker w;
#if HAVE_STD_FUTURE
            std::packaged_task<void *(void *)> task(workproc);
            w.res = task.get_future();
            w.thr = std::thread(std::move(task), arg);
#else
            w.thr = std::thread(workproc, arg);
#endif
            m_worker_threads.push_back(std::move(w));
        }
        return true;
    }

    /** Add item to work queue, called from client.
     *
     * Sleeps if there are already too many.
     */
    bool put(T t, bool flushprevious = false) {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (!ok()) {
            LOGERR("WorkQueue::put:"  << m_name << ": !ok\n");
            return false;
        }

        while (ok() && m_high > 0 && m_queue.size() >= m_high) {
            m_clientsleeps++;
            // Keep the order: we test ok() AFTER the sleep...
            m_clients_waiting++;
            m_ccond.wait(lock);
            if (!ok()) {
                m_clients_waiting--;
                return false;
            }
            m_clients_waiting--;
        }
        if (flushprevious) {
            while (!m_queue.empty()) {
                m_queue.pop();
            }
        }

        m_queue.push(t);
        if (m_workers_waiting > 0) {
            // Just wake one worker, there is only one new task.
            m_wcond.notify_one();
        } else {
            m_nowake++;
        }

        return true;
    }

    /** Wait until the queue is inactive. Called from client.
     *
     * Waits until the task queue is empty and the workers are all
     * back sleeping. Used by the client to wait for all current work
     * to be completed, when it needs to perform work that couldn't be
     * done in parallel with the worker's tasks, or before shutting
     * down. Work can be resumed after calling this. Note that the
     * only thread which can call it safely is the client just above
     * (which can control the task flow), else there could be
     * tasks in the intermediate queues.
     * To rephrase: there is no guarantee on return that the queue is actually
     * idle EXCEPT if the caller knows that no jobs are still being created.
     * It would be possible to transform this into a safe call if some kind
     * of suspend condition was set on the queue by waitIdle(), to be reset by
     * some kind of "resume" call. Not currently the case.
     */
    bool waitIdle() {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (!ok()) {
            LOGERR("WorkQueue::waitIdle:"  << m_name << ": not ok\n");
            return false;
        }

        // We're done when the queue is empty AND all workers are back
        // waiting for a task.
        while (ok() && (m_queue.size() > 0 ||
                        m_workers_waiting != m_worker_threads.size())) {
            m_clients_waiting++;
            m_ccond.wait(lock);
            m_clients_waiting--;
        }

        return ok();
    }

    /** Tell the workers to exit, and wait for them.
     *
     * Does not bother about tasks possibly remaining on the queue, so
     * should be called after waitIdle() for an orderly shutdown.
     */
    void *setTerminateAndWait() {
        std::unique_lock<std::mutex> lock(m_mutex);
        LOGDEB("setTerminateAndWait:"  << m_name << "\n");

        if (m_worker_threads.empty()) {
            // Already called ?
            return (void*)0;
        }

        // Wait for all worker threads to have called workerExit()
        m_ok = false;
        while (m_workers_exited < m_worker_threads.size()) {
            m_wcond.notify_all();
            m_clients_waiting++;
            m_ccond.wait(lock);
            m_clients_waiting--;
        }

        LOGINFO(""  << m_name << ": tasks "  << m_tottasks << " nowakes "  <<
                m_nowake << " wsleeps "  << m_workersleeps << " csleeps "  <<
                m_clientsleeps << "\n");
        // Perform the thread joins and compute overall status
        // Workers return (void*)1 if ok
        void *statusall = (void*)1;
        while (!m_worker_threads.empty()) {
#if HAVE_STD_FUTURE
            void *status = m_worker_threads.front().res.get();
#else
            void *status = (void*) 1;
#endif
            m_worker_threads.front().thr.join();
            if (status == (void *)0) {
                statusall = status;
            }
            m_worker_threads.pop_front();
        }

        // Reset to start state.
        m_workers_exited = m_clients_waiting = m_workers_waiting =
                m_tottasks = m_nowake = m_workersleeps = m_clientsleeps = 0;
        m_ok = true;

        LOGDEB("setTerminateAndWait:"  << m_name << " done\n");
        return statusall;
    }

    /** Take task from queue. Called from worker.
     *
     * Sleeps if there are not enough. Signal if we go to sleep on empty
     * queue: client may be waiting for our going idle.
     */
    bool take(T* tp, size_t *szp = 0) {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (!ok()) {
            LOGDEB("WorkQueue::take:"  << m_name << ": not ok\n");
            return false;
        }

        while (ok() && m_queue.size() < m_low) {
            m_workersleeps++;
            m_workers_waiting++;
            if (m_queue.empty()) {
                m_ccond.notify_all();
            }
            m_wcond.wait(lock);
            if (!ok()) {
                // !ok is a normal condition when shutting down
                m_workers_waiting--;
                return false;
            }
            m_workers_waiting--;
        }

        m_tottasks++;
        *tp = m_queue.front();
        if (szp) {
            *szp = m_queue.size();
        }
        m_queue.pop();
        if (m_clients_waiting > 0) {
            // No reason to wake up more than one client thread
            m_ccond.notify_one();
        } else {
            m_nowake++;
        }
        return true;
    }

    bool waitminsz(size_t sz) {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (!ok()) {
            return false;
        }

        while (ok() && m_queue.size() < sz) {
            m_workersleeps++;
            m_workers_waiting++;
            if (m_queue.empty()) {
                m_ccond.notify_all();
            }
            m_wcond.wait(lock);
            if (!ok()) {
                m_workers_waiting--;
                return false;
            }
            m_workers_waiting--;
        }
        return true;
    }

    /** Advertise exit and abort queue. Called from worker
     *
     * This would happen after an unrecoverable error, or when
     * the queue is terminated by the client. Workers never exit normally,
     * except when the queue is shut down (at which point m_ok is set to
     * false by the shutdown code anyway). The thread must return/exit
     * immediately after calling this.
     */
    void workerExit() {
        LOGDEB("workerExit:"  << m_name << "\n");
        std::unique_lock<std::mutex> lock(m_mutex);
        m_workers_exited++;
        m_ok = false;
        m_ccond.notify_all();
    }

    size_t qsize() {
        std::unique_lock<std::mutex> lock(m_mutex);
        return m_queue.size();
    }

private:
    bool ok() {
        bool isok = m_ok && m_workers_exited == 0 && !m_worker_threads.empty();
        if (!isok) {
            LOGDEB("WorkQueue:ok:" << m_name << ": not ok m_ok " << m_ok <<
                   " m_workers_exited " << m_workers_exited <<
                   " m_worker_threads size " << m_worker_threads.size() <<
                   "\n");
        }
        return isok;
    }

    struct Worker {
        std::thread         thr;
#if HAVE_STD_FUTURE
        std::future<void *> res;
#endif
    };
    
    // Configuration
    std::string m_name;
    size_t m_high;
    size_t m_low;

    // Worker threads having called exit. Used to decide when we're done
    unsigned int m_workers_exited;
    // Status
    bool m_ok;

    // Our threads. 
    std::list<Worker> m_worker_threads;

    // Jobs input queue
    std::queue<T> m_queue;
    
    // Synchronization
    std::condition_variable m_ccond;
    std::condition_variable m_wcond;
    std::mutex m_mutex;

    // Client/Worker threads currently waiting for a job
    unsigned int m_clients_waiting;
    unsigned int m_workers_waiting;

    // Statistics
    unsigned int m_tottasks;
    unsigned int m_nowake;
    unsigned int m_workersleeps;
    unsigned int m_clientsleeps;
};

#endif /* _WORKQUEUE_H_INCLUDED_ */

recoll-1.26.3/utils/listmem.cpp0000644000175000017500000000656013303776060013336 00000000000000#include "listmem.h"

#include <stdlib.h>
#include <string.h>
#include <iomanip>

using namespace std;

/*
 * Functions to list a memory buffer:
 */

/* Turn byte into Hexadecimal ascii representation */
static char *hexa(unsigned int i)
{
    int j;
    static char asc[3];

    asc[0] = (i >> 4) & 0x0f;
    asc[1] = i & 0x0f;
    asc[2] = 0;
    for (j = 0; j < 2; j++)
        if (asc[j] > 9) {
            asc[j] += 55;
        } else {
            asc[j] += 48;
        }
    return (asc);
}

static void swap16(unsigned char *d, const unsigned char *s, int n)
{
    if (n & 1) {
        n >>= 1;
        n++;
    } else {
        n >>= 1;
    }
    while (n--) {
        int i;
        i = 2 * n;
        d[i] = s[i + 1];
        d[i + 1] = s[i];
    }
}

static void swap32(unsigned char *d, const unsigned char *s, int n)
{
    if (n & 3) {
        n >>= 2;
        n++;
    } else {
        n >>= 2;
    }
    while (n--) {
        int i;
        i = 4 * n;
        d[i] = s[i + 3];
        d[i + 1] = s[i + 2];
        d[i + 2] = s[i + 1];
        d[i + 3] = s[i];
    }
}

/* Turn byte buffer into hexadecimal representation */
void charbuftohex(int len, unsigned char *dt, int maxlen, char *str)
{
    int i;
    char *bf;

    for (i = 0, bf = str; i < len; i++) {
        char *cp;
        if (bf - str >= maxlen - 4) {
            break;
        }
        cp = hexa((unsigned int)dt[i]);
        *bf++ = *cp++;
        *bf++ = *cp++;
        *bf++ = ' ';
    }
    *bf++ = 0;
}

void listmem(ostream& os, const void *_ptr, int siz, int adr, int opts)
{
    const unsigned char *ptr = (const unsigned char *)_ptr;
    int             i, j, c;
    char            lastlisted[16];
    int             alreadysame = 0;
    int         oneout = 0;
    unsigned char  *mpt;

    if (opts & (LISTMEM_SWAP16 | LISTMEM_SWAP32)) {
        if ((mpt = (unsigned char *)malloc(siz + 4)) == NULL) {
            os << "OUT OF MEMORY\n";
            return;
        }
        if (opts & LISTMEM_SWAP16) {
            swap16(mpt, ptr, siz);
        } else if (opts & LISTMEM_SWAP32) {
            swap32(mpt, ptr, siz);
        }
    } else {
        mpt = (unsigned char *)ptr;
    }

    for (i = 0; i < siz; i += 16) {
        /* Check for same data (only print first line in this case) */
        if (oneout != 0  &&
                siz - i >= 16 && memcmp(lastlisted, mpt + i, 16) == 0) {
            if (alreadysame == 0) {
                os << "*\n";
                alreadysame = 1;
            }
            continue;
        }
        alreadysame = 0;
        /* Line header */
        os << std::setw(4) << i + adr << " ";

        /* Hexadecimal representation */
        for (j = 0; j < 16; j++) {
            if ((i + j) < siz) {
                os << hexa(mpt[i + j]) << ((j & 1) ? " " : "");
            } else {
                os << "  " << ((j & 1) ? " " : "");
            }
        }
        os << "  ";

        /* Also print ascii for values that fit */
        for (j = 0; j < 16; j++) {
            if ((i + j) < siz) {
                c = mpt[i + j];
                if (c >= 0x20 && c <= 0x7f) {
                    os << char(c);
                } else {
                    os << ".";
                }
            } else {
                os << " ";
            }
        }
        os << "\n";
        memcpy(lastlisted, mpt + i, 16);
        oneout = 1;
    }
    if (mpt != ptr) {
        free(mpt);
    }
}
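
// Example (illustrative): listmem(std::cout, buf, 32, 0, 0) prints an offset
// column, the bytes in hex grouped by pairs, and an ascii column, roughly:
//
//    0 4865 6c6c 6f2c 2077 6f72 6c64 2e20 2e2e   Hello, world. ..
//   16 ....
//
// A run of lines identical to the previous one is collapsed to a single "*".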
recoll-1.26.3/utils/pathut.cpp0000644000175000017500000007010213566424763013175 00000000000000/* Copyright (C) 2004-2019 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * flock emulation:
 *   Emulate flock on platforms that lack it, primarily Windows and MinGW.
 *
 *   This is derived from sqlite3 sources.
 *   https://www.sqlite.org/src/finfo?name=src/os_win.c
 *   https://www.sqlite.org/copyright.html
 *
 *   Written by Richard W.M. Jones 
 *
 *   Copyright (C) 2008-2019 Free Software Foundation, Inc.
 *
 *   This library is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU Lesser General Public
 *   License as published by the Free Software Foundation; either
 *   version 2.1 of the License, or (at your option) any later version.
 *
 *   This library is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *   Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>
#include <string.h>

#ifdef _WIN32
#include "safefcntl.h"
#include "safeunistd.h"
#include "safewindows.h"
#include "safesysstat.h"
#include "transcode.h"

#define STAT _wstati64
#define LSTAT _wstati64
#define STATBUF _stati64
#define ACCESS _waccess

#else // Not windows ->
#include <unistd.h>
#include <fcntl.h>
#include <pwd.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/param.h>
#include <sys/file.h>
#include <sys/statvfs.h>

#define STAT stat
#define LSTAT lstat
#define STATBUF stat
#define ACCESS access
#endif

#include <string>
#include <vector>
#include <set>
#include <utility>
#include <memory>
#include <sstream>
#include <iostream>
#include <regex>

#include "pathut.h"
#include "smallut.h"

using namespace std;

#ifdef _WIN32
/// Convert \ separators to /
void path_slashize(string& s)
{
    for (string::size_type i = 0; i < s.size(); i++) {
        if (s[i] == '\\') {
            s[i] = '/';
        }
    }
}
void path_backslashize(string& s)
{
    for (string::size_type i = 0; i < s.size(); i++) {
        if (s[i] == '/') {
            s[i] = '\\';
        }
    }
}
static bool path_strlookslikedrive(const string& s)
{
    return s.size() == 2 && isalpha(s[0]) && s[1] == ':';
}

static bool path_hasdrive(const string& s)
{
    if (s.size() >= 2 && isalpha(s[0]) && s[1] == ':') {
        return true;
    }
    return false;
}
static bool path_isdriveabs(const string& s)
{
    if (s.size() >= 3 && isalpha(s[0]) && s[1] == ':' && s[2] == '/') {
        return true;
    }
    return false;
}

/* Operations for the 'flock' call (same as Linux kernel constants).  */
# define LOCK_SH 1       /* Shared lock.  */
# define LOCK_EX 2       /* Exclusive lock.  */
# define LOCK_UN 8       /* Unlock.  */

/* Can be OR'd in to one of the above.  */
# define LOCK_NB 4       /* Don't block when locking.  */

#include <io.h>

/* Determine the current size of a file.  Because the other braindead
 * APIs we'll call need lower/upper 32 bit pairs, keep the file size
 * like that too.
 */
static BOOL
file_size (HANDLE h, DWORD * lower, DWORD * upper)
{
    *lower = GetFileSize (h, upper);
    /* It appears that we can't lock an empty file, a lock is always
       over a data section. But we seem to be able to set a lock
       beyond the current file size, which is enough to get Pidfile
       working */
    if (*lower == 0 && *upper == 0) {
        *lower = 100;
    }
    return 1;
}

/* LOCKFILE_FAIL_IMMEDIATELY is undefined on some Windows systems. */
# ifndef LOCKFILE_FAIL_IMMEDIATELY
#  define LOCKFILE_FAIL_IMMEDIATELY 1
# endif

/* Acquire a lock. */
static BOOL
do_lock (HANDLE h, int non_blocking, int exclusive)
{
    BOOL res;
    DWORD size_lower, size_upper;
    OVERLAPPED ovlp;
    int flags = 0;

    /* We're going to lock the whole file, so get the file size. */
    res = file_size (h, &size_lower, &size_upper);
    if (!res)
        return 0;

    /* Start offset is 0, and also zero the remaining members of this struct. */
    memset (&ovlp, 0, sizeof ovlp);

    if (non_blocking)
        flags |= LOCKFILE_FAIL_IMMEDIATELY;
    if (exclusive)
        flags |= LOCKFILE_EXCLUSIVE_LOCK;

    return LockFileEx (h, flags, 0, size_lower, size_upper, &ovlp);
}

/* Unlock reader or exclusive lock. */
static BOOL
do_unlock (HANDLE h)
{
    int res;
    DWORD size_lower, size_upper;

    res = file_size (h, &size_lower, &size_upper);
    if (!res)
        return 0;

    return UnlockFile (h, 0, 0, size_lower, size_upper);
}

/* Now our BSD-like flock operation. */
int
flock (int fd, int operation)
{
    HANDLE h = (HANDLE) _get_osfhandle (fd);
    DWORD res;
    int non_blocking;

    if (h == INVALID_HANDLE_VALUE) {
        errno = EBADF;
        return -1;
    }

    non_blocking = operation & LOCK_NB;
    operation &= ~LOCK_NB;

    switch (operation) {
    case LOCK_SH:
        res = do_lock (h, non_blocking, 0);
        break;
    case LOCK_EX:
        res = do_lock (h, non_blocking, 1);
        break;
    case LOCK_UN:
        res = do_unlock (h);
        break;
    default:
        errno = EINVAL;
        return -1;
    }

    /* Map Windows errors into Unix errnos.  As usual MSDN fails to
     * document the permissible error codes.
     */
    if (!res) {
        DWORD err = GetLastError ();
        switch (err){
            /* This means someone else is holding a lock. */
        case ERROR_LOCK_VIOLATION:
            errno = EAGAIN;
            break;

            /* Out of memory. */
        case ERROR_NOT_ENOUGH_MEMORY:
            errno = ENOMEM;
            break;

        case ERROR_BAD_COMMAND:
            errno = EINVAL;
            break;

            /* Unlikely to be other errors, but at least don't lose the
             * error code.
             */
        default:
            errno = err;
        }

        return -1;
    }

    return 0;
}

#endif // Win32 only section

bool fsocc(const string& path, int *pc, long long *avmbs)
{
    static const int FSOCC_MB = 1024 * 1024;
#ifdef _WIN32
    ULARGE_INTEGER freebytesavail;
    ULARGE_INTEGER totalbytes;
    if (!GetDiskFreeSpaceExA(path.c_str(), &freebytesavail,
                             &totalbytes, NULL)) {
        return false;
    }
    if (pc) {
        *pc = int((100 * freebytesavail.QuadPart) / totalbytes.QuadPart);
    }
    if (avmbs) {
        *avmbs = int(totalbytes.QuadPart / FSOCC_MB);
    }
    return true;
#else // not windows ->

    struct statvfs buf;
    if (statvfs(path.c_str(), &buf) != 0) {
        return false;
    }

    if (pc) {
        double fsocc_used = double(buf.f_blocks - buf.f_bfree);
        double fsocc_totavail = fsocc_used + double(buf.f_bavail);
        double fpc = 100.0;
        if (fsocc_totavail > 0) {
            fpc = 100.0 * fsocc_used / fsocc_totavail;
        }
        *pc = int(fpc);
    }
    if (avmbs) {
        *avmbs = 0;
        if (buf.f_bsize > 0) {
            int ratio = buf.f_frsize > FSOCC_MB ? buf.f_frsize / FSOCC_MB :
                        FSOCC_MB / buf.f_frsize;

            *avmbs = buf.f_frsize > FSOCC_MB ?
                     ((long long)buf.f_bavail) * ratio :
                     ((long long)buf.f_bavail) / ratio;
        }
    }
    return true;
#endif
}
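
// Example use (sketch): check space on the file system holding a directory.
//
//   int pc = 0;
//   long long avmbs = 0;
//   if (fsocc("/some/indexed/directory", &pc, &avmbs)) {
//       // pc is a percentage, avmbs a megabyte count (see the
//       // platform-dependent details above).
//   }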


string path_PATHsep()
{
    static const string w(";");
    static const string u(":");
#ifdef _WIN32
    return w;
#else
    return u;
#endif
}

void path_catslash(string& s)
{
#ifdef _WIN32
    path_slashize(s);
#endif
    if (s.empty() || s[s.length() - 1] != '/') {
        s += '/';
    }
}

string path_cat(const string& s1, const string& s2)
{
    string res = s1;
    path_catslash(res);
    res +=  s2;
    return res;
}

string path_getfather(const string& s)
{
    string father = s;
#ifdef _WIN32
    path_slashize(father);
#endif

    // ??
    if (father.empty()) {
        return "./";
    }

    if (path_isroot(father)) {
        return father;
    }

    if (father[father.length() - 1] == '/') {
        // Input ends with /. Strip it, root special case was tested above
        father.erase(father.length() - 1);
    }

    string::size_type slp = father.rfind('/');
    if (slp == string::npos) {
        return "./";
    }

    father.erase(slp);
    path_catslash(father);
    return father;
}

string path_getsimple(const string& s)
{
    string simple = s;
#ifdef _WIN32
    path_slashize(simple);
#endif

    if (simple.empty()) {
        return simple;
    }

    string::size_type slp = simple.rfind('/');
    if (slp == string::npos) {
        return simple;
    }

    simple.erase(0, slp + 1);
    return simple;
}

string path_basename(const string& s, const string& suff)
{
    string simple = path_getsimple(s);
    string::size_type pos = string::npos;
    if (suff.length() && simple.length() > suff.length()) {
        pos = simple.rfind(suff);
        if (pos != string::npos && pos + suff.length() == simple.length()) {
            return simple.substr(0, pos);
        }
    }
    return simple;
}

string path_suffix(const string& s)
{
    string::size_type dotp = s.rfind('.');
    if (dotp == string::npos) {
        return string();
    }
    return s.substr(dotp + 1);
}

string path_home()
{
#ifdef _WIN32
    string dir;
    const char *cp = getenv("USERPROFILE");
    if (cp != 0) {
        dir = cp;
    }
    if (dir.empty()) {
        cp = getenv("HOMEDRIVE");
        if (cp != 0) {
            const char *cp1 = getenv("HOMEPATH");
            if (cp1 != 0) {
                dir = string(cp) + string(cp1);
            }
        }
    }
    if (dir.empty()) {
        dir = "C:\\";
    }
    dir = path_canon(dir);
    path_catslash(dir);
    return dir;
#else
    uid_t uid = getuid();

    struct passwd *entry = getpwuid(uid);
    if (entry == 0) {
        const char *cp = getenv("HOME");
        if (cp) {
            return cp;
        } else {
            return "/";
        }
    }

    string homedir = entry->pw_dir;
    path_catslash(homedir);
    return homedir;
#endif
}

// The default place to store the default config and other stuff (e.g webqueue)
string path_homedata()
{
#ifdef _WIN32
    const char *cp = getenv("LOCALAPPDATA");
    string dir;
    if (cp != 0) {
        dir = path_canon(cp);
    }
    if (dir.empty()) {
        dir = path_cat(path_home(), "AppData/Local/");
    }
    return dir;
#else
    // We should use an xdg-conforming location, but, history...
    return path_home();
#endif
}

string path_tildexpand(const string& s)
{
    if (s.empty() || s[0] != '~') {
        return s;
    }
    string o = s;
#ifdef _WIN32
    path_slashize(o);
#endif

    if (s.length() == 1) {
        o.replace(0, 1, path_home());
    } else if (s[1] == '/') {
        o.replace(0, 2, path_home());
    } else {
        string::size_type pos = s.find('/');
        string::size_type l = (pos == string::npos) ? s.length() - 1 : pos - 1;
#ifdef _WIN32
        // Don't know what this means. Just replace with HOME
        o.replace(0, l + 1, path_home());
#else
        struct passwd *entry = getpwnam(s.substr(1, l).c_str());
        if (entry) {
            o.replace(0, l + 1, entry->pw_dir);
        }
#endif
    }
    return o;
}
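
// Examples (illustrative), assuming HOME=/home/me and that user "joe" exists:
//   path_tildexpand("~")         -> "/home/me/"
//   path_tildexpand("~/doc")     -> "/home/me/doc"
//   path_tildexpand("~joe/doc")  -> "/home/joe/doc"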

bool path_isroot(const string& path)
{
    if (path.size() == 1 && path[0] == '/') {
        return true;
    }
#ifdef _WIN32
    if (path.size() == 3 && isalpha(path[0]) && path[1] == ':' &&
            (path[2] == '/' || path[2] == '\\')) {
        return true;
    }
#endif
    return false;
}

bool path_isdesc(const string& _top, const string& _sub)
{
    string top = path_canon(_top);
    string sub = path_canon(_sub);
    path_catslash(top);
    path_catslash(sub);
    for (;;) {
        if (sub == top) {
            return true;
        }
        string::size_type l = sub.size();
        sub = path_getfather(sub);
        if (sub.size() == l || sub.size() < top.size()) {
            // At root or sub shorter than top: done
            if (sub == top) {
                return true;
            } else {
                return false;
            }
        }
    }
}

bool path_isabsolute(const string& path)
{
    if (!path.empty() && (path[0] == '/'
#ifdef _WIN32
                          || path_isdriveabs(path)
#endif
                         )) {
        return true;
    }
    return false;
}

string path_absolute(const string& is)
{
    if (is.length() == 0) {
        return is;
    }
    string s = is;
#ifdef _WIN32
        path_slashize(s);
#endif
    if (!path_isabsolute(s)) {
        char buf[MAXPATHLEN];
        if (!getcwd(buf, MAXPATHLEN)) {
            return string();
        }
        s = path_cat(string(buf), s);
#ifdef _WIN32
        path_slashize(s);
#endif
    }
    return s;
}

string path_canon(const string& is, const string* cwd)
{
    if (is.length() == 0) {
        return is;
    }
    string s = is;
#ifdef _WIN32
    path_slashize(s);
    // fix possible path from file: absolute url
    if (s.size() && s[0] == '/' && path_hasdrive(s.substr(1))) {
        s = s.substr(1);
    }
#endif

    if (!path_isabsolute(s)) {
        char buf[MAXPATHLEN];
        const char *cwdp = buf;
        if (cwd) {
            cwdp = cwd->c_str();
        } else {
            if (!getcwd(buf, MAXPATHLEN)) {
                return string();
            }
        }
        s = path_cat(string(cwdp), s);
    }
    vector<string> elems;
    stringToTokens(s, elems, "/");
    vector<string> cleaned;
    for (vector<string>::const_iterator it = elems.begin();
            it != elems.end(); it++) {
        if (*it == "..") {
            if (!cleaned.empty()) {
                cleaned.pop_back();
            }
        } else if (it->empty() || *it == ".") {
        } else {
            cleaned.push_back(*it);
        }
    }
    string ret;
    if (!cleaned.empty()) {
        for (vector<string>::const_iterator it = cleaned.begin();
                it != cleaned.end(); it++) {
            ret += "/";
#ifdef _WIN32
            if (it == cleaned.begin() && path_strlookslikedrive(*it)) {
                // Get rid of just added initial "/"
                ret.clear();
            }
#endif
            ret += *it;
        }
    } else {
        ret = "/";
    }

#ifdef _WIN32
    // Raw drive needs a final /
    if (path_strlookslikedrive(ret)) {
        path_catslash(ret);
    }
#endif

    return ret;
}
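
// Examples (illustrative):
//   path_canon("/usr//local/./lib/../bin") -> "/usr/local/bin"
//   path_canon("../notes.txt", &cwd)       -> parent of cwd + "/notes.txt"
// On Windows, a path like "/C:/Users/me" (from a file:// URL) is turned
// into "C:/Users/me".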

bool path_makepath(const string& ipath, int mode)
{
    string path = path_canon(ipath);
    vector<string> elems;
    stringToTokens(path, elems, "/");
    path = "/";
    for (const auto& elem : elems) {
#ifdef _WIN32
        if (path == "/" && path_strlookslikedrive(elem)) {
            path = "";
        }
#endif
        path += elem;
        // Not using path_isdir() here, because this can't grok symlinks
        // If we hit an existing file, no worry, mkdir will just fail.
        if (access(path.c_str(), 0) != 0) {
            if (mkdir(path.c_str(), mode) != 0)  {
                //cerr << "mkdir " << path << " failed, errno " << errno << endl;
                return false;
            }
        }
        path += "/";
    }
    return true;
}

bool path_isdir(const string& path)
{
    struct STATBUF st;
    SYSPATH(path, syspath);
    if (LSTAT(syspath, &st) < 0) {
        return false;
    }
    if (S_ISDIR(st.st_mode)) {
        return true;
    }
    return false;
}

long long path_filesize(const string& path)
{
    struct STATBUF st;
    SYSPATH(path, syspath);
    if (STAT(syspath, &st) < 0) {
        return -1;
    }
    return (long long)st.st_size;
}

int path_fileprops(const std::string path, struct stat *stp, bool follow)
{
    if (!stp) {
        return -1;
    }
    memset(stp, 0, sizeof(struct stat));
    struct STATBUF mst;
    SYSPATH(path, syspath);
    int ret = follow ? STAT(syspath, &mst) : LSTAT(syspath, &mst);
    if (ret != 0) {
        return ret;
    }
    stp->st_size = mst.st_size;
    stp->st_mode = mst.st_mode;
    stp->st_mtime = mst.st_mtime;
#ifdef _WIN32
    stp->st_ctime = mst.st_mtime;
#else
    stp->st_ino = mst.st_ino;
    stp->st_dev = mst.st_dev;
    stp->st_ctime = mst.st_ctime;
    stp->st_blocks = mst.st_blocks;
    stp->st_blksize = mst.st_blksize;
#endif
    return 0;
}

bool path_exists(const string& path)
{
    SYSPATH(path, syspath);
    return ACCESS(syspath, 0) == 0;
}
bool path_readable(const string& path)
{
    SYSPATH(path, syspath);
    return ACCESS(syspath, R_OK) == 0;
}

/* There is a lot of vagueness about what should be percent-encoded or
 * not in a file:// url. The constraint that we have is that we may use
 * the encoded URL to compute (MD5) a thumbnail path according to the
 * freedesktop.org thumbnail spec, which itself does not define what
 * should be escaped. We choose to exactly escape what gio does, as
 * implemented in glib/gconvert.c:g_escape_uri_string(uri, UNSAFE_PATH). 
 * Hopefully, the other desktops have the same set of escaped chars. 
 * Note that $ is not encoded, so the value is not shell-safe.
 */
string url_encode(const string& url, string::size_type offs)
{
    string out = url.substr(0, offs);
    const char *cp = url.c_str();
    for (string::size_type i = offs; i < url.size(); i++) {
        unsigned int c;
        const char *h = "0123456789ABCDEF";
        c = cp[i];
        if (c <= 0x20 ||
                c >= 0x7f ||
                c == '"' ||
                c == '#' ||
                c == '%' ||
                c == ';' ||
                c == '<' ||
                c == '>' ||
                c == '?' ||
                c == '[' ||
                c == '\\' ||
                c == ']' ||
                c == '^' ||
                c == '`' ||
                c == '{' ||
                c == '|' ||
                c == '}') {
            out += '%';
            out += h[(c >> 4) & 0xf];
            out += h[c & 0xf];
        } else {
            out += char(c);
        }
    }
    return out;
}

static inline int h2d(int c) {
    if ('0' <= c && c <= '9')
        return c - '0';
    else if ('A' <= c && c <= 'F')
        return 10 + c - 'A';
    else 
        return -1;
}

string url_decode(const string &in)
{
    if (in.size() <= 2)
        return in;
    string out;
    out.reserve(in.size());
    const char *cp = in.c_str();
    string::size_type i = 0;
    for (; i < in.size() - 2; i++) {
	if (cp[i] == '%') {
            int d1 = h2d(cp[i+1]);
            int d2 = h2d(cp[i+2]);
            if (d1 != -1 && d2 != -1) {
                out += (d1 << 4) + d2;
            } else {
                out += '%';
                out += cp[i+1];
                out += cp[i+2];
            }
            i += 2;
	} else {
            out += cp[i];
        }
    }
    while (i < in.size()) {
        out += cp[i++];
    }
    return out;
}
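
// Examples (illustrative):
//   url_encode("/home/me/a b#c.txt", 0)  -> "/home/me/a%20b%23c.txt"
//   url_decode("/home/me/a%20b%23c.txt") -> "/home/me/a b#c.txt"
// The escaped set mirrors what gio escapes (see the comment above
// url_encode()); '$' in particular is left as is, so the result is not
// shell-safe.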

string url_gpath(const string& url)
{
    // Remove the access schema part (or whatever it's called)
    string::size_type colon = url.find_first_of(":");
    if (colon == string::npos || colon == url.size() - 1) {
        return url;
    }
    // If there are non-alphanum chars before the ':', then there
    // probably is no scheme. Whatever...
    for (string::size_type i = 0; i < colon; i++) {
        if (!isalnum(url.at(i))) {
            return url;
        }
    }

    // In addition we canonize the path to remove empty host parts
    // (for compatibility with older versions of recoll where file://
    // was hardcoded, but the local path was used for doc
    // identification.)
    return path_canon(url.substr(colon + 1));
}

string url_parentfolder(const string& url)
{
    // In general, the parent is the directory above the full path
    string parenturl = path_getfather(url_gpath(url));
    // But if this is http, make sure to keep the host part. Recoll
    // only has file or http urls for now.
    bool isfileurl = urlisfileurl(url);
    if (!isfileurl && parenturl == "/") {
        parenturl = url_gpath(url);
    }
    return isfileurl ? string("file://") + parenturl :
           string("http://") + parenturl;
}


// Convert to file path if url is like file:
// Note: this only works with our internal pseudo-urls which are not
// encoded/escaped
string fileurltolocalpath(string url)
{
    if (url.find("file://") == 0) {
        url = url.substr(7, string::npos);
    } else {
        return string();
    }

#ifdef _WIN32
    // Absolute file urls are like: file:///c:/mydir/...
    // Get rid of the initial '/'
    if (url.size() >= 3 && url[0] == '/' && isalpha(url[1]) && url[2] == ':') {
        url = url.substr(1);
    }
#endif

    // Removing the fragment part. This is exclusively used when
    // executing a viewer for the recoll manual, and we only strip the
    // part after # if it is preceded by .html
    string::size_type pos;
    if ((pos = url.rfind(".html#")) != string::npos) {
        url.erase(pos + 5);
    } else if ((pos = url.rfind(".htm#")) != string::npos) {
        url.erase(pos + 4);
    }

    return url;
}

static const string cstr_fileu("file://");

string path_pathtofileurl(const string& path)
{
    // We're supposed to receive a canonic absolute path, but on windows we
    // may need to add a '/' in front of the drive spec
    string url(cstr_fileu);
    if (path.empty() || path[0] != '/') {
        url.push_back('/');
    }
    url += path;
    return url;
}

bool urlisfileurl(const string& url)
{
    return url.find("file://") == 0;
}

static std::regex
re_uriparse("^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\\?([^#]*))?(#(.*))?",
            std::regex::extended);

ParsedUri::ParsedUri(std::string uri)
{
    std::smatch mr;
    parsed = regex_match(uri, mr, re_uriparse);
    if (!parsed)
        return;
    // cf http://www.ietf.org/rfc/rfc2396.txt
    // scheme    = $2
    // authority = $4
    // path      = $5
    // query     = $7
    // fragment  = $9
    if (mr[2].matched) {
        scheme = mr[2].str();
    }
    if (mr[4].matched) {
        string auth = mr[4].str();
        // user:pass@host, user@host
        string::size_type at = auth.find_first_of('@');
        if (at != string::npos) {
            host = auth.substr(at+1);
            string::size_type colon = auth.find_first_of(':');
            if (colon != string::npos && colon < at) {
                user = auth.substr(0, colon);
                pass = auth.substr(colon+1, at-colon-1);
            } else {
                user = auth.substr(0, at);
            }
        } else {
            host.swap(auth);
        }
        string::size_type pc = host.find_first_of(':');
        if (pc != string::npos) {
            port = host.substr(pc+1);
            host = host.substr(0, pc);
        }
    }
    if (mr[5].matched) {
        path = mr[5].str();
    }
    if (mr[7].matched) {
        query = mr[7].str();
        string::size_type pos=0, amp, eq;
        string nm, val;
        for (;;) {
            nm.clear();
            val.clear();
            amp = query.find_first_of('&', pos);
            //cerr << "pos " << pos << " amp " << amp << endl;
            if (amp > pos && amp != string::npos) {
                eq = query.find_first_of('=', pos);
                if (eq > amp || eq == string::npos) {
                    nm = query.substr(pos, amp-pos);
                } else {
                    nm = query.substr(pos, eq-pos);
                    val = query.substr(eq+1, amp-eq-1);
                }
                pos = amp + 1;
            } else if (amp == string::npos) {
                if (pos < query.size()-1) {
                    eq = query.find_first_of('=', pos);
                    if (eq == string::npos) {
                        nm = query.substr(pos);
                    } else {
                        nm = query.substr(pos, eq-pos);
                        val = query.substr(eq+1);
                    }
                }
                pos = query.size()-1;
            } else {
                pos++;
            }
            if (!nm.empty()) {
                parsedquery.push_back(pair<string, string>(nm, val));
            }
            if (pos >= query.size()-1) {
                break;
            }
        }
        
    }
    if (mr[9].matched) {
        fragment = mr[9].str();
    }
}
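
// Example (illustrative): parsing
//   "http://user:pw@www.org:8080/a/doc?x=1&y=2#frag"
// yields scheme="http", user="user", pass="pw", host="www.org", port="8080",
// path="/a/doc", query="x=1&y=2", parsedquery={{"x","1"},{"y","2"}} and
// fragment="frag".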

bool readdir(const string& dir, string& reason, set<string>& entries)
{
    struct stat st;
    int statret;
    ostringstream msg;
    DIR *d = 0;
    statret = lstat(dir.c_str(), &st);
    if (statret == -1) {
        msg << "readdir: cant stat " << dir << " errno " <<  errno;
        goto out;
    }
    if (!S_ISDIR(st.st_mode)) {
        msg << "readdir: " << dir <<  " not a directory";
        goto out;
    }
    if (access(dir.c_str(), R_OK) < 0) {
        msg << "readdir: no read access to " << dir;
        goto out;
    }

    d = opendir(dir.c_str());
    if (d == 0) {
        msg << "readdir: cant opendir " << dir << ", errno " << errno;
        goto out;
    }

    struct dirent *ent;
    while ((ent = readdir(d)) != 0) {
        if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) {
            continue;
        }
        entries.insert(ent->d_name);
    }

out:
    if (d) {
        closedir(d);
    }
    reason = msg.str();
    if (reason.empty()) {
        return true;
    }
    return false;
}

// We do not want to mess with the pidfile content in the destructor:
// the lock might still be in use in a child process. In fact as much
// as we'd like to reset the pid inside the file when we're done, it
// would be very difficult to do it right and it's probably best left
// alone.
Pidfile::~Pidfile()
{
    this->close();
}

pid_t Pidfile::read_pid()
{
    int fd = ::open(m_path.c_str(), O_RDONLY);
    if (fd == -1) {
        return (pid_t) -1;
    }

    char buf[16];
    int i = read(fd, buf, sizeof(buf) - 1);
    ::close(fd);
    if (i <= 0) {
        return (pid_t) -1;
    }
    buf[i] = '\0';
    char *endptr;
    pid_t pid = strtol(buf, &endptr, 10);
    if (endptr != &buf[i]) {
        return (pid_t) - 1;
    }
    return pid;
}

int Pidfile::flopen()
{
    const char *path = m_path.c_str();
    if ((m_fd = ::open(path, O_RDWR | O_CREAT, 0644)) == -1) {
        m_reason = "Open failed: [" + m_path + "]: " + strerror(errno);
        return -1;
    }

#ifdef sun
    struct flock lockdata;
    lockdata.l_start = 0;
    lockdata.l_len = 0;
    lockdata.l_type = F_WRLCK;
    lockdata.l_whence = SEEK_SET;
    if (fcntl(m_fd, F_SETLK,  &lockdata) != 0) {
        int serrno = errno;
        this->close();
        errno = serrno;
        m_reason = "fcntl lock failed";
        return -1;
    }
#else
    int operation = LOCK_EX | LOCK_NB;
    if (flock(m_fd, operation) == -1) {
        int serrno = errno;
        this->close();
        errno = serrno;
        m_reason = "flock failed";
        return -1;
    }
#endif // ! sun

    if (ftruncate(m_fd, 0) != 0) {
        /* can't happen [tm] */
        int serrno = errno;
        this->close();
        errno = serrno;
        m_reason = "ftruncate failed";
        return -1;
    }
    return 0;
}

pid_t Pidfile::open()
{
    if (flopen() < 0) {
        return read_pid();
    }
    return (pid_t)0;
}

int Pidfile::write_pid()
{
    /* truncate to allow multiple calls */
    if (ftruncate(m_fd, 0) == -1) {
        m_reason = "ftruncate failed";
        return -1;
    }
    char pidstr[20];
    sprintf(pidstr, "%u", int(getpid()));
    lseek(m_fd, 0, 0);
    if (::write(m_fd, pidstr, strlen(pidstr)) != (ssize_t)strlen(pidstr)) {
        m_reason = "write failed";
        return -1;
    }
    return 0;
}

int Pidfile::close()
{
    int ret = -1;
    if (m_fd >= 0) {
        ret = ::close(m_fd);
        m_fd = -1;
    }
    return ret;
}

int Pidfile::remove()
{
    return unlink(m_path.c_str());
}
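
// Typical Pidfile usage (sketch): ensure that a single instance runs.
//
//   Pidfile pf(pidfilepath);
//   pid_t other = pf.open();   // 0 if we got the lock, else the holder's pid
//   if (other != 0) {
//       // another instance is running (pid 'other', or -1 if unreadable)
//   } else {
//       pf.write_pid();
//       // ... do the work ...
//   }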

// Call funcs that need static init (not initially reentrant)
void pathut_init_mt()
{
    path_home();
}
recoll-1.26.3/utils/netcon.h0000644000175000017500000003065513533651561012624 00000000000000#ifndef _NETCON_H_
#define _NETCON_H_
/* Copyright (C) 2002 Jean-Francois Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef BUILDING_RECOLL
#include "autoconfig.h"
#else
#include "config.h"
#endif

#include <sys/time.h>

#include <memory>
#include <string>

/// A set of classes to manage client-server communication over a
/// connection-oriented network, or a pipe.
///
/// The listening/connection-accepting code currently only uses
/// TCP. The classes include client-side and server-side (accepting)
/// endpoints. Netcon also has server-side static code to handle a set
/// of client connections in parallel. This should be moved to a
/// friend class.
///
/// The client data transfer class can also be used for
/// timeout-protected/asynchronous io using a given fd (ie a pipe
/// descriptor)

/// Base class for all network endpoints:
class Netcon;
typedef std::shared_ptr<Netcon> NetconP;
class SelectLoop;

class Netcon {
public:
    enum Event {NETCONPOLL_READ = 0x1, NETCONPOLL_WRITE = 0x2};
    Netcon()
        : m_peer(0), m_fd(-1), m_ownfd(true), m_didtimo(0), m_wantedEvents(0),
          m_loop(0) {
    }
    virtual ~Netcon();
    /// Remember whom we're talking to. We let external code do this because
    /// the application may have a non-dns method to find the peer name.
    virtual void setpeer(const char *hostname);
    /// Retrieve the peer's hostname. Only works if it was set before !
    virtual const char *getpeer() {
        return m_peer ? (const char *)m_peer : "none";
    }
    /// Set or reset the TCP_NODELAY option.
    virtual int settcpnodelay(int on = 1);
    /// Did the last receive() call time out ? Resets the flag.
    virtual int timedout() {
        int s = m_didtimo;
        m_didtimo = 0;
        return s;
    }
    /// Return string version of last syscall error
    virtual char *sterror();
    /// Return the socket descriptor
    virtual int getfd() {
        return m_fd;
    }
    /// Close the current connection if it is open
    virtual void closeconn();
    /// Set/reset the non-blocking flag on the underlying fd. Returns
    /// prev state The default is that sockets are blocking except
    /// when added to the selectloop, or, transparently, to handle
    /// connection timeout issues.
    virtual int set_nonblock(int onoff);

    /// Decide what events the connection will be looking for
    /// (NETCONPOLL_READ, NETCONPOLL_WRITE)
    int setselevents(int evs);
    /// Retrieve the connection's currently monitored set of events
    int getselevents() {
        return m_wantedEvents;
    }

    friend class SelectLoop;
    SelectLoop *getloop() {
        return m_loop;
    }

    /// Utility function for a simplified select() interface: check one fd
    /// for reading or writing, for a specified maximum number of seconds.
    static int select1(int fd, int secs, int writing = 0);

protected:
    char *m_peer;   // Name of the connected host
    int   m_fd;
    bool  m_ownfd;
    int   m_didtimo;
    // Used when part of the selectloop.
    short m_wantedEvents;
    SelectLoop *m_loop;
    // Method called by the selectloop when something can be done with a netcon
    virtual int cando(Netcon::Event reason) = 0;
    // Called when added to loop
    virtual void setloop(SelectLoop *loop) {
        m_loop = loop;
    }
};


/// The selectloop interface is used to implement parallel servers.
// The select loop mechanism allows several netcons to be used for io
// in a program without blocking as long as there is data to be read
// or written. In a multithread program, if each thread needs
// non-blocking IO it may make sense to have one SelectLoop active per
// thread.
class SelectLoop {
public:
    SelectLoop();
    SelectLoop(const SelectLoop&) = delete;
    SelectLoop& operator=(const SelectLoop&) = delete;
    ~SelectLoop();
    
    /// Add a connection to be monitored (this will usually be called
    /// from the server's listen connection's accept callback)
    int addselcon(NetconP con, int events);

    /// Remove a connection from the monitored set. This is
    /// automatically called when EOF is detected on a connection.
    int remselcon(NetconP con);

    /// Set a function to be called periodically, or a timeout before return.
    /// @param handler the function to be called.
    ///  - if it is 0, doLoop() will return after ms milliseconds (and can
    ///    be called again)
    ///  - if it is not 0, it will be called at ms millisecond intervals.
    ///    If its return value is <= 0, doLoop() will return.
    /// @param clp client data to be passed to the handler at every call.
    /// @param ms interval in milliseconds between handler calls or
    ///   before return. Set to 0 for no periodic handler.
    void setperiodichandler(int (*handler)(void *), void *clp, int ms);

    /// Loop waiting for events on the connections and call the
    /// cando() method on the object when something happens (this will in
    /// turn typically call the app callback set on the netcon). Possibly
    /// call the periodic handler (if set) at regular intervals.
    /// @return -1 for error. 0 if no descriptors are left for i/o. 1 for a
    ///  periodic timeout (the caller should call doLoop() again after
    ///  processing).
    int doLoop();

    /// Call from data handler: make doLoop() return @param value
    void loopReturn(int value);

    friend class Netcon;
private:
    class Internal;
    Internal *m;
};

///////////////////////
class NetconData;

/// Class for the application callback routine (when in selectloop).
///
/// This is set by the app on the NetconData by calling
/// setcallback(). It is then called from the NetconData's cando()
/// routine, itself called by selectloop.
///
/// It would be nicer to override cando() in a subclass instead of
/// setting a callback, but this can't be done conveniently because
/// accept() always creates a base NetconData (another approach would
/// be to pass a factory function to the listener, to create
/// NetconData derived classes).
class NetconWorker {
public:
    virtual ~NetconWorker() {}
    virtual int data(NetconData *con, Netcon::Event reason) = 0;
};

/// Base class for connections that actually transfer data.
class NetconData : public Netcon {
public:
    NetconData(bool cancellable = false);
    virtual ~NetconData();

    /// Write data to the connection.
    /// @param buf the data buffer
    /// @param cnt the number of bytes we should try to send
    /// @param expedited send the data as 'expedited' data.
    /// @return the count of bytes actually transferred, -1 if an
    ///  error occurred.
    virtual int send(const char *buf, int cnt, int expedited = 0);

    /// Read from the connection
    /// @param buf the data buffer
    /// @param cnt the number of bytes we should try to read (but we return
    ///   as soon as we get data)
    /// @param timeo maximum number of seconds we should be waiting for data.
    /// @return the count of bytes actually read (0 for EOF), or
    ///    TimeoutOrError (-1) for timeout or error (call timedout() to
    ///    discriminate and reset), Cancelled (-2) if cancelled.
    enum RcvReason {Eof = 0, TimeoutOrError = -1, Cancelled = -2};
    virtual int receive(char *buf, int cnt, int timeo = -1);
    virtual void cancelReceive();

    /// Loop on receive until cnt bytes are actually read or a timeout occurs
    virtual int doreceive(char *buf, int cnt, int timeo = -1);

    /// Read a line of text on an ascii connection. Returns -1 on error, or
    /// the byte count including the final 0. The \n is kept.
    virtual int getline(char *buf, int cnt, int timeo = -1);

    /// Set handler to be called when the connection is placed in the
    /// selectloop and an event occurs.
    virtual void setcallback(std::shared_ptr<NetconWorker> user) {
        m_user = user;
    }

private:

    char *m_buf;    // Buffer. Only used when doing getline()s
    char *m_bufbase;    // Pointer to current 1st byte of useful data
    int m_bufbytes; // Bytes of data.
    int m_bufsize;  // Total buffer size

    int m_wkfds[2];
    
    std::shared_ptr<NetconWorker> m_user;
    virtual int cando(Netcon::Event reason); // Selectloop slot
};

/// Network endpoint, client side.
class NetconCli : public NetconData {
public:
    NetconCli(bool cancellable = false)
        : NetconData(cancellable), m_silentconnectfailure(false) {
    }

    /// Open connection to specified host and named service. Set host
    /// to an absolute path name for an AF_UNIX service. serv is
    /// ignored in this case.
    int openconn(const char *host, const char *serv, int timeo = -1);

    /// Open connection to specified host and numeric port. port is in
    /// HOST byte order. Set host to an absolute path name for an
    /// AF_UNIX service. port is ignored in this case.
    int openconn(const char *host, unsigned int port, int timeo = -1);

    /// Reuse an existing fd.
    /// We DO NOT take ownership of the fd, and we never close it, EVEN on
    /// an explicit closeconn() or setconn() (use getfd(), close(), then
    /// setconn(-1) if you really need to close the fd and have no
    /// other copy).
    int setconn(int fd);

    /// Do not log message if openconn() fails.
    void setSilentFail(bool onoff) {
        m_silentconnectfailure = onoff;
    }

private:
    bool m_silentconnectfailure; // No logging of connection failures if set
};
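
// Illustrative usage sketch (not part of the original source): a minimal
// client built on NetconCli. The host name, port, timeout values and the
// assumption that openconn() returns a negative value on failure are
// choices made for this example, not documented contract.
#if 0
#include "netcon.h"
#include <cstdio>

static void clientSketch()
{
    NetconCli conn;
    conn.setSilentFail(true);
    if (conn.openconn("localhost", 1234u, 5) < 0) { // 5 s timeout (failure convention assumed)
        fprintf(stderr, "connect failed\n");
        return;
    }
    const char msg[] = "hello\n";
    int len = int(sizeof(msg)) - 1;
    if (conn.send(msg, len) != len) {
        fprintf(stderr, "send failed\n");
        return;
    }
    char buf[1024];
    int n = conn.receive(buf, int(sizeof(buf)), 5); // 0 -> EOF, -1 -> timeout or error
    if (n > 0) {
        fwrite(buf, 1, n, stdout);
    } else if (n == NetconData::TimeoutOrError && conn.timedout()) {
        fprintf(stderr, "receive timed out\n");
    }
    conn.closeconn();
}
#endif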

class NetconServCon;
#ifdef NETCON_ACCESSCONTROL
struct intarrayparam {
    int len;
    unsigned int *intarray;
};
#endif /* NETCON_ACCESSCONTROL */

/// Server listening end point.
///
/// If NETCON_ACCESSCONTROL is defined during compilation,
/// NetconServLis has primitive access control features: okaddrs holds
/// the host addresses for the hosts which we allow to connect to
/// us. okmasks holds the masks to be used for comparison. okmasks
/// can be shorter than okaddrs, in which case we use the last entry
/// for all addrs beyond the masks array length. Both arrays are
/// retrieved from the configuration file when we create the endpoint.
/// The key is either based on the service name (ex: cdpathdb_okaddrs,
/// cdpathdb_okmasks), or "default" if the service name is not found
/// (ex: default_okaddrs, default_okmasks).
class NetconServLis : public Netcon {
public:
    NetconServLis() {
#ifdef NETCON_ACCESSCONTROL
        permsinit = 0;
        okaddrs.len = okmasks.len = 0;
        okaddrs.intarray = okmasks.intarray = 0;
#endif /* NETCON_ACCESSCONTROL */
    }
    ~NetconServLis();
    /// Open the named service. Use an absolute pathname to create an
    /// AF_UNIX path-based socket instead of an IP one.
    int openservice(const char *serv, int backlog = 10);
    /// Open service by port number.
    int openservice(int port, int backlog = 10);
    /// Wait for an incoming connection. Returns the connected Netcon.
    NetconServCon *accept(int timeo = -1);

protected:
    /// This should be overridden in a derived class to handle incoming
    /// connections. It will usually call NetconServLis::accept(), and
    /// insert the new connection in the selectloop.
    virtual int cando(Netcon::Event reason);

    // Empty if port was numeric, else service name or socket path
    std::string m_serv;

private:
#ifdef NETCON_ACCESSCONTROL
    int permsinit;
    struct intarrayparam okaddrs;
    struct intarrayparam okmasks;
    int initperms(const char *servicename);
    int initperms(int port);
    int checkperms(void *cli, int clilen);
#endif /* NETCON_ACCESSCONTROL */
};

/// Server-side accepted client connection. The only specific code
/// allows closing the listening endpoint in the child process (in the
/// case of a forking server)
class NetconServCon : public NetconData {
public:
    NetconServCon(int newfd, Netcon* lis = 0) {
        m_liscon = lis;
        m_fd = newfd;
    }
    /// This is for forked servers that want to get rid of the main socket
    void closeLisCon() {
        if (m_liscon) {
            m_liscon->closeconn();
        }
    }
private:
    Netcon* m_liscon;
};
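
// Illustrative usage sketch (not part of the original source): one way to
// assemble a small echo server from NetconServLis, NetconData and
// SelectLoop, following the comments above. The port number, the class
// names and the return-value conventions of cando() and of the worker
// callback are assumptions made for this example.
#if 0
#include "netcon.h"
#include <memory>

// Echo back whatever arrives on a data connection.
class EchoWorker : public NetconWorker {
public:
    int data(NetconData *con, Netcon::Event) override {
        char buf[1024];
        int n = con->receive(buf, int(sizeof(buf)));
        if (n <= 0)
            return -1; // EOF or error: assumed to make the loop drop the connection
        return con->send(buf, n);
    }
};

// Listener which accepts connections and inserts them into the select loop.
class EchoListener : public NetconServLis {
protected:
    int cando(Netcon::Event) override {
        NetconServCon *con = accept();
        if (con == 0)
            return -1;
        con->setcallback(std::make_shared<EchoWorker>());
        // getloop() is valid because the listener was added to the loop below.
        getloop()->addselcon(NetconP(con), NETCONPOLL_READ);
        return 1;
    }
};

static int serverSketch()
{
    SelectLoop loop;
    auto lis = std::make_shared<EchoListener>();
    if (lis->openservice(1234) < 0) // failure convention assumed
        return 1;
    loop.addselcon(lis, Netcon::NETCONPOLL_READ);
    return loop.doLoop() == -1 ? 1 : 0;
}
#endif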

#endif /* _NETCON_H_ */
recoll-1.26.3/utils/conftree.h0000644000175000017500000004730513566424763013153 00000000000000/* Copyright (C) 2006 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _CONFTREE_H_
#define  _CONFTREE_H_

/**
 * A simple configuration file implementation.
 *
 * Configuration files have lines like 'name = value', and/or like '[subkey]'
 *
 * Lines like '[subkey]' in the file define subsections, with independent
 * configuration namespaces. Only subsections holding at least one variable are
 * significant (empty subsections may be deleted during an update, or not)
 *
 * Whitespace around name and value is insignificant.
 *
 * The names are case-sensitive, but don't depend on it: this might change.
 *
 * Values can be queried for, or set.
 *
 * Any line without a '=' is a comment (a line like #var = value
 * actually assigns a variable named '#var', which is not a big issue)
 *
 * A configuration object can be created empty or by reading from a file or
 * a string.
 * All 'set' calls cause an immediate rewrite of the backing object if any
 * (file or string)
 *
 * The ConfTree derived class interprets the subkeys as file paths and
 * lets subdir keys hierarchically inherit the properties from
 * parents.
 *
 * The ConfStack class stacks several Con(Simple/Tree) objects so that
 * parameters from the top of the stack override the values from lower
 * (useful to have central/personal config files)
 */
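
/* Illustrative sketch (not part of the original header): building a
   ConfSimple from a string and reading values back, to show the
   'name = value' / '[subkey]' format described above. The variable and
   subsection names are made up for the example. */
#if 0
#include "conftree.h"
#include <iostream>

static void confSimpleSketch()
{
    std::string data =
        "# any line without an '=' is a comment\n"
        "loglevel = 3\n"
        "[/home/me/docs]\n"
        "loglevel = 5\n";
    ConfSimple conf(data, 1 /* readonly */);
    std::string value;
    if (conf.get("loglevel", value))                   // global space
        std::cout << "global: " << value << "\n";      // -> 3
    if (conf.get("loglevel", value, "/home/me/docs"))  // subsection
        std::cout << "subsection: " << value << "\n";  // -> 5
}
#endif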

#include <time.h>
#include <string>
#include <vector>
#include <map>
#include <algorithm>

// rh7.3 likes iostream better...
#if defined(__GNUC__) && __GNUC__ < 3
#include <iostream>
#else
#include <istream>
#include <ostream>
#endif
#include 

#include "pathut.h"

/** Internal class used for storing presentation information */
class ConfLine {
public:
    enum Kind {CFL_COMMENT, CFL_SK, CFL_VAR, CFL_VARCOMMENT};
    Kind m_kind;
    std::string m_data;
    std::string m_value;
    std::string m_aux;
    ConfLine(Kind k, const std::string& d, std::string a = std::string())
        : m_kind(k), m_data(d), m_aux(a) {
    }
    bool operator==(const ConfLine& o) {
        return o.m_kind == m_kind && o.m_data == m_data;
    }
};

/**
 * Virtual base class used to define an interface mostly useful for testing
 */
class ConfNull {
public:
    enum StatusCode {STATUS_ERROR = 0, STATUS_RO = 1, STATUS_RW = 2};
    virtual ~ConfNull() {};
    virtual int get(const std::string& name, std::string& value,
                    const std::string& sk = std::string()) const = 0;
    virtual int set(const std::string& nm, const std::string& val,
                    const std::string& sk = std::string()) = 0;
    virtual bool ok() const = 0;
    virtual std::vector<std::string> getNames(const std::string& sk,
                                              const char* = 0) const = 0;
    virtual bool hasNameAnywhere(const std::string& nm) const = 0;
    virtual int erase(const std::string&, const std::string&) = 0;
    virtual int eraseKey(const std::string&) = 0;
    virtual void showall() const {};
    virtual std::vector<std::string> getSubKeys() const = 0;
    virtual std::vector<std::string> getSubKeys(bool) const = 0;
    virtual bool holdWrites(bool) = 0;
    virtual bool sourceChanged() const = 0;
};

/**
 * Manages a simple configuration file with subsections.
 */
class ConfSimple : public ConfNull {
public:

    /**
     * Build the object by reading content from file.
     * @param filename file to open
     * @param readonly if true open readonly, else rw
     * @param tildexp  try tilde (home dir) expansion for subkey values
     */
    ConfSimple(const char *fname, int readonly = 0, bool tildexp = false,
               bool trimvalues = true);

    /**
     * Build the object by reading content from a string
     * @param data points to the data to parse.
     * @param readonly if true open readonly, else rw
     * @param tildexp  try tilde (home dir) expansion for subsection names
     */
    ConfSimple(const std::string& data, int readonly = 0, bool tildexp = false,
               bool trimvalues = true);

    /**
     * Build an empty object. This will be memory only, with no backing store.
     * @param readonly if true open read only, else rw
     * @param tildexp  try tilde (home dir) expansion for subsection names
     */
    ConfSimple(int readonly = 0, bool tildexp = false,
               bool trimvalues = true);

    virtual ~ConfSimple() {};

    /** Origin file changed. Only makes sense if we read the data from a file */
    virtual bool sourceChanged() const override;

    /**
     * Decide if we actually rewrite the backing-store after modifying the
     * tree.
     */
    virtual bool holdWrites(bool on) override {
        m_holdWrites = on;
        if (on == false) {
            return write();
        } else {
            return true;
        }
    }

    /** Clear, then reparse from string */
    void reparse(const std::string& in);

    /**
     * Get string value for named parameter, from specified subsection (looks 
     * in global space if sk is empty).
     * @return 0 if name not found, 1 else
     */
    virtual int get(const std::string& name, std::string& value,
                    const std::string& sk = std::string()) const override;

    /**
     * Get integer value for named parameter, from specified subsection (looks 
     * in global space if sk is empty).
     * @return 0 if name not found, 1 else
     */
    virtual int get(const std::string& name, int* value,
                    const std::string& sk = std::string()) const;


    /**
     * Set value for named string parameter in specified subsection (or global)
     * @return 0 for error, 1 else
     */
    virtual int set(const std::string& nm, const std::string& val,
                    const std::string& sk = std::string()) override;
    /**
     * Set value for named integer parameter in specified subsection (or global)
     * @return 0 for error, 1 else
     */
    virtual int set(const std::string& nm, long long val,
                    const std::string& sk = std::string());

    /**
     * Remove name and value from config
     */
    virtual int erase(const std::string& name, const std::string& sk) override;

    /**
     * Erase all names under given subkey (and subkey itself)
     */
    virtual int eraseKey(const std::string& sk) override;

    /** Clear all content */
    virtual int clear();

    virtual StatusCode getStatus() const;
    virtual bool ok() const override {
        return getStatus() != STATUS_ERROR;
    }

    /**
     * Walk the configuration values, calling function for each.
     * The function is called with a null nm when changing subsections (the
     * value is then the new subsection name)
     * @return WALK_STOP when/if the callback returns WALK_STOP,
     *         WALK_CONTINUE otherwise (reached the end of the config)
     */
    enum WalkerCode {WALK_STOP, WALK_CONTINUE};
    virtual WalkerCode sortwalk(WalkerCode
                                (*wlkr)(void *cldata, const std::string& nm,
                                        const std::string& val),
                                void *clidata) const;

    /** Print all values to stdout */
    virtual void showall() const override;

    /** Return all names in given submap. */
    virtual std::vector<std::string> getNames(
        const std::string& sk, const char *pattern = 0) const override;

    /** Check if name is present in any submap. This is relatively expensive
     * but useful for saving further processing sometimes */
    virtual bool hasNameAnywhere(const std::string& nm) const override;

    /**
     * Return all subkeys
     */
    virtual std::vector<std::string> getSubKeys(bool) const override {
        return getSubKeys();
    }
    virtual std::vector<std::string> getSubKeys() const override;
    
    /** Return subkeys in file order. BEWARE: only for the original from the 
     * file: the data is not duplicated to further copies */
    virtual std::vector<std::string> getSubKeys_unsorted(bool = false) const {
        return m_subkeys_unsorted;
    }

    /** Test for subkey existence */
    virtual bool hasSubKey(const std::string& sk) const {
        return m_submaps.find(sk) != m_submaps.end();
    }

    virtual std::string getFilename() const {
        return m_filename;
    }

    /** Used with config files with specially formatted, xml-like comments.
     * Extract the comments as text */
    virtual bool commentsAsXML(std::ostream& out);

    /** !! Note that assignment and copy constructor do not copy the
        auxiliary data (m_order and subkeys_unsorted). */
    
    /**
     * Copy constructor. Expensive but less so than a full rebuild
     */
    ConfSimple(const ConfSimple& rhs)
        : ConfNull() {
        if ((status = rhs.status) == STATUS_ERROR) {
            return;
        }
        m_filename = rhs.m_filename;
        m_submaps = rhs.m_submaps;
    }

    /**
     * Assignment. This is expensive.
     */
    ConfSimple& operator=(const ConfSimple& rhs) {
        if (this != &rhs && (status = rhs.status) != STATUS_ERROR) {
            dotildexpand = rhs.dotildexpand;
            trimvalues = rhs.trimvalues;
            m_filename = rhs.m_filename;
            m_submaps = rhs.m_submaps;
        }
        return *this;
    }

    /**
     * Write in file format to out
     */
    bool write(std::ostream& out) const;

    /** Give access to semi-parsed file contents */
    const std::vector<ConfLine>& getlines() const {
        return m_order;
    }
    
protected:
    bool dotildexpand;
    bool trimvalues;
    StatusCode status;
private:
    // Set if we're working with a file
    std::string                            m_filename;
    time_t                            m_fmtime;
    // Configuration data submaps (one per subkey, the main data has a
    // null subkey)
    std::map<std::string, std::map<std::string, std::string> > m_submaps;
    std::vector<std::string> m_subkeys_unsorted;
    // Presentation data. We keep the comments, empty lines and
    // variable and subkey ordering information in there (for
    // rewriting the file while keeping hand-edited information)
    std::vector<ConfLine>             m_order;
    // Control if we're writing to the backing store
    bool                              m_holdWrites;

    void parseinput(std::istream& input);
    bool write();
    // Internal version of set: no RW checking
    virtual int i_set(const std::string& nm, const std::string& val,
                      const std::string& sk, bool init = false);
    bool i_changed(bool upd);
};

/**
 * This is a configuration class which attaches tree-like signification to the
 * submap names.
 *
 * If a given variable is not found in the specified section, it will be
 * looked up the tree of section names, and in the global space.
 *
 * Submap names should be '/'-separated paths (ie: /sub1/sub2). No checking
 * is done; apart from this lookup behaviour, the class adds no functionality
 * to ConfSimple.
 *
 * NOTE: contrary to common behaviour, the global or root space is NOT
 * designated by '/' but by '' (empty subkey). A '/' subkey will not
 * be searched at all.
 *
 * Note: getNames() uses the ConfSimple method: it does *not* inherit
 *     names from enclosing submaps.
 */
class ConfTree : public ConfSimple {

public:
    /* The constructors just call ConfSimple's, asking for key tilde
     * expansion */
    ConfTree(const char *fname, int readonly = 0, bool trimvalues=true)
        : ConfSimple(fname, readonly, true, trimvalues) {}
    ConfTree(const std::string& data, int readonly = 0, bool trimvalues=true)
        : ConfSimple(data, readonly, true, trimvalues) {}
    ConfTree(int readonly = 0, bool trimvalues=true)
        : ConfSimple(readonly, true, trimvalues) {}
    virtual ~ConfTree() {};
    ConfTree(const ConfTree& r) : ConfSimple(r) {};
    ConfTree& operator=(const ConfTree& r) {
        ConfSimple::operator=(r);
        return *this;
    }

    /**
     * Get value for named parameter, from specified subsection, or its
     * parents.
     * @return 0 if name not found, 1 else
     */
    virtual int get(const std::string& name, std::string& value,
                    const std::string& sk) const;
    using ConfSimple::get;
};
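
/* Illustrative sketch (not part of the original header): ConfTree looking a
   value up the subsection tree and in the global space, as described above.
   The parameter and path names are made up for the example. */
#if 0
#include "conftree.h"
#include <iostream>

static void confTreeSketch()
{
    std::string data =
        "indexall = yes\n"
        "[/home/me]\n"
        "skippedNames = *.o\n";
    ConfTree conf(data, 1 /* readonly */);
    std::string value;
    // Not set for /home/me/src: found by walking up to /home/me.
    if (conf.get("skippedNames", value, "/home/me/src"))
        std::cout << value << "\n";   // -> *.o
    // Not set in any subsection: found in the global space.
    if (conf.get("indexall", value, "/home/me/src"))
        std::cout << value << "\n";   // -> yes
}
#endif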

/**
 * Use several config files, trying to get values from each in order. Used to
 * have a central config, with possible overrides from more specific
 * (ie personal) ones.
 *
 * Notes: it's ok for some of the files not to exist, but the last
 * one must exist or we generate an error. We open all trees readonly, except the
 * topmost one if requested. All writes go to the topmost file. Note that
 * erase() won't work except for parameters only defined in the topmost
 * file (it erases only from there).
 */
template <class T> class ConfStack : public ConfNull {
public:
    /// Construct from configuration file names. The earlier
    /// files in the list have priority when fetching values. Only the first
    /// file will be updated if ro is false and set() is used.
    ConfStack(const std::vector<std::string>& fns, bool ro = true) {
        construct(fns, ro);
    }
    /// Construct out of single file name and multiple directories
    ConfStack(const std::string& nm, const std::vector<std::string>& dirs,
              bool ro = true) {
        std::vector<std::string> fns;
        for (const auto& dir : dirs) {
            fns.push_back(path_cat(dir, nm));
        }
        ConfStack::construct(fns, ro);
    }

    ConfStack(const ConfStack& rhs)
        : ConfNull() {
        init_from(rhs);
    }

    virtual ~ConfStack() {
        clear();
        m_ok = false;
    }

    ConfStack& operator=(const ConfStack& rhs) {
        if (this != &rhs) {
            clear();
            m_ok = rhs.m_ok;
            if (m_ok) {
                init_from(rhs);
            }
        }
        return *this;
    }

    virtual bool sourceChanged() const override {
        for (const auto& conf : m_confs) {
            if (conf->sourceChanged()) {
                return true;
            }
        }
        return false;
    }

    virtual int get(const std::string& name, std::string& value,
                    const std::string& sk, bool shallow) const {
        for (const auto& conf : m_confs) {
            if (conf->get(name, value, sk)) {
                return true;
            }
            if (shallow) {
                break;
            }
        }
        return false;
    }

    virtual int get(const std::string& name, std::string& value,
                    const std::string& sk) const override {
        return get(name, value, sk, false);
    }

    virtual bool hasNameAnywhere(const std::string& nm) const override {
        for (const auto& conf : m_confs) {
            if (conf->hasNameAnywhere(nm)) {
                return true;
            }
        }
        return false;
    }

    virtual int set(const std::string& nm, const std::string& val,
                    const std::string& sk = std::string()) override {
        if (!m_ok) {
            return 0;
        }
        //LOGDEB2(("ConfStack::set [%s]:[%s] -> [%s]\n", sk.c_str(),
        //nm.c_str(), val.c_str()));
        // Avoid adding unneeded entries: if the new value matches the
        // one from the deeper configs, erase it from, or don't add it
        // to, the topmost file.
        auto it = m_confs.begin();
        it++;
        while (it != m_confs.end()) {
            std::string value;
            if ((*it)->get(nm, value, sk)) {
                // This file has value for nm/sk. If it is the same as the new
                // one, no need for an entry in the topmost file. Else, stop
                // looking and add the new entry
                if (value == val) {
                    m_confs.front()->erase(nm, sk);
                    return true;
                } else {
                    break;
                }
            }
            it++;
        }

        return m_confs.front()->set(nm, val, sk);
    }

    virtual int erase(const std::string& nm, const std::string& sk) override {
        return m_confs.front()->erase(nm, sk);
    }
    virtual int eraseKey(const std::string& sk) override {
        return m_confs.front()->eraseKey(sk);
    }
    virtual bool holdWrites(bool on) override {
        return m_confs.front()->holdWrites(on);
    }

    virtual std::vector<std::string> getNames(
        const std::string& sk, const char *pattern = 0) const override {
        return getNames1(sk, pattern, false);
    }
    virtual std::vector<std::string> getNamesShallow(
        const std::string& sk, const char *patt = 0) const {
        return getNames1(sk, patt, true);
    }

    virtual std::vector<std::string> getNames1(
        const std::string& sk, const char *pattern, bool shallow) const {
        std::vector<std::string> nms;
        bool skfound = false;
        for (const auto& conf : m_confs) {
            if (conf->hasSubKey(sk)) {
                skfound = true;
                std::vector<std::string> lst = conf->getNames(sk, pattern);
                nms.insert(nms.end(), lst.begin(), lst.end());
            }
            if (shallow && skfound) {
                break;
            }
        }
        sort(nms.begin(), nms.end());
        std::vector<std::string>::iterator uit = unique(nms.begin(), nms.end());
        nms.resize(uit - nms.begin());
        return nms;
    }

    virtual std::vector<std::string> getSubKeys() const override {
        return getSubKeys(false);
    }
    virtual std::vector<std::string> getSubKeys(bool shallow) const override {
        std::vector<std::string> sks;
        for (const auto& conf : m_confs) {
            std::vector<std::string> lst;
            lst = conf->getSubKeys();
            sks.insert(sks.end(), lst.begin(), lst.end());
            if (shallow) {
                break;
            }
        }
        sort(sks.begin(), sks.end());
        std::vector<std::string>::iterator uit = unique(sks.begin(), sks.end());
        sks.resize(uit - sks.begin());
        return sks;
    }

    virtual bool ok() const override {
        return m_ok;
    }

private:
    bool     m_ok;
    std::vector<T*> m_confs;

    /// Reset to pristine
    void clear() {
        for (auto& conf : m_confs) {
            delete(conf);
        }
        m_confs.clear();
    }

    /// Common code to initialize from existing object
    void init_from(const ConfStack& rhs) {
        if ((m_ok = rhs.m_ok)) {
            for (const auto& conf : rhs.m_confs) {
                m_confs.push_back(new T(*conf));
            }
        }
    }

    /// Common construct from file names code. We used to be ok even
    /// if some files were not readable/parsable. Now fail if any
    /// fails.
    void construct(const std::vector& fns, bool ro) {
        bool ok{true};
        bool first{true};
        for (const auto& fn : fns) {
            T* p = new T(fn.c_str(), ro);
            if (p && p->ok()) {
                m_confs.push_back(p);
            } else {
                delete p;
                // In ro mode, we accept a non-existing topmost file
                // and treat it as an empty one.
                if (!(ro && first && !path_exists(fn))) {
                    ok = false;
                }
            }
            // Only the first file is opened rw
            ro = true;
            first = false;
        }
        m_ok = ok;
    }
};
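
/* Illustrative sketch (not part of the original header): stacking a personal
   configuration over a system-wide one, as described in the class comment.
   The file paths and the parameter name are made up for the example. */
#if 0
#include "conftree.h"
#include <string>
#include <vector>

static void confStackSketch()
{
    std::vector<std::string> files{
        "/home/me/.recoll/recoll.conf",           // topmost: overrides, receives writes
        "/usr/share/recoll/examples/recoll.conf"  // system defaults
    };
    ConfStack<ConfTree> conf(files, false /* topmost rw */);
    std::string value;
    if (conf.ok() && conf.get("loglevel", value, "")) {
        // value comes from the personal file if set there, else the system one
    }
    conf.set("loglevel", "4"); // writes to the topmost (personal) file only
}
#endif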

#endif /*_CONFTREE_H_ */
recoll-1.26.3/utils/fstreewalk.h0000644000175000017500000001225213566424763013506 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _FSTREEWALK_H_INCLUDED_
#define _FSTREEWALK_H_INCLUDED_

#include <string>
#include <vector>
#ifndef NO_NAMESPACES
using std::string;
using std::vector;
#endif

class FsTreeWalkerCB;
struct stat;

/**
 * Class implementing a unix directory recursive walk.
 *
 * A user-defined function object is called for every file or
 * directory. Patterns to be ignored can be set before starting the
 * walk. Options control whether we follow symlinks and whether we recurse
 * on subdirectories.
 */
class FsTreeWalker {
 public:
    // Global option to use FNM_PATHNAME when matching paths (for
    // skippedPaths).  
    // We initially used FNM_PATHNAME, and we can't change it now
    // (because of all the config files around). So add global option
    // to not use the flag, which can be set from rclconfig by adding
    // a value to the config file (skippedPathsNoFnmPathname)
    static bool o_useFnmPathname;
    static void setNoFnmPathname()
    {
	o_useFnmPathname = false;
    }

    // Global option to observe a "nowalk" file, which makes us treat
    // directories as if they were in skippedPaths if the file exists
    // inside the directory.
    static std::string o_nowalkfn;
    static void setNoWalkFn(const std::string& nowalkfn)
    {
        o_nowalkfn = nowalkfn;
    }

    // Flags for call to processone(). FtwDirEnter is used when
    // entering a directory. FtwDirReturn is used when returning to it
    // after processing a subdirectory.
    enum CbFlag {FtwRegular, FtwDirEnter, FtwDirReturn};
    enum Status {FtwOk=0, FtwError=1, FtwStop=2, 
		 FtwStatAll = FtwError|FtwStop};
    enum Options {FtwOptNone = 0, FtwNoRecurse = 1, FtwFollow = 2,
                  FtwNoCanon = 4, FtwSkipDotFiles = 8,
    // Tree walking options.  Natural is close to depth first: process
    //   directory entries as we see them, recursing into subdirectories at 
    //   once 
    // Breadth means we process all files and dirs at a given directory level
    // before going deeper.
    //
    // FilesThenDirs is close to Natural, except that we process all files in a 
    //   given directory before going deeper: allows keeping only a single 
    //   directory open
    // We don't do pure depth first (process subdirs before files), this does 
    // not appear to make any sense.
                  FtwTravNatural = 0x10000, FtwTravBreadth = 0x20000, 
                  FtwTravFilesThenDirs = 0x40000, 
                  FtwTravBreadthThenDepth = 0x80000
    };
    static const int FtwTravMask;
    FsTreeWalker(int opts = FtwTravNatural);
    ~FsTreeWalker();

    void setOpts(int opts);
    int getOpts();
    void setDepthSwitch(int);
    void setMaxDepth(int);

    /** 
     * Begin file system walk.
     * @param dir is not checked against the ignored patterns (this is
     *     a feature and must not change).
     * @param cb the function object that will be called back for every 
     *    file-system object (called both at entry and exit for directories).
     */
    Status walk(const string &dir, FsTreeWalkerCB& cb);
    /** Get explanation for error */
    string getReason();
    int getErrCnt();

    /**
     * Add a pattern (file or dir) to be ignored (ie: #* , *~)
     */
    bool addSkippedName(const string &pattern); 
    /** Set the ignored patterns set */
    bool setSkippedNames(const vector<string> &patterns);
    /** Set the exclusive patterns set */
    bool setOnlyNames(const vector<string> &patterns);

    /** Same for skipped paths: these are paths, not names, under which we
	do not descend (ie: /home/me/.recoll) */
    bool addSkippedPath(const string &path); 
    /** Set the ignored paths list */
    bool setSkippedPaths(const vector<string> &patterns);

    /** Test if path/name should be skipped. This can be used independently of
      * an actual tree walk */
    bool inSkippedPaths(const string& path, bool ckparents = false);
    bool inSkippedNames(const string& name);
    bool inOnlyNames(const string& name);

 private:
    Status iwalk(const string &dir, struct stat *stp, FsTreeWalkerCB& cb);
    class Internal;
    Internal *data;
};

class FsTreeWalkerCB {
 public:
    virtual ~FsTreeWalkerCB() {}
    // Only st_mtime, st_ctime, st_size and st_mode (filetype bits:
    // dir/reg/lnk) are set in the struct stat argument.
    virtual FsTreeWalker::Status 
	processone(const string &, const struct stat *, FsTreeWalker::CbFlag) 
	= 0;
};
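
// Illustrative sketch (not part of the original header): counting regular
// files under a directory with a minimal callback. The paths and patterns
// are made up for the example.
#if 0
#include "fstreewalk.h"
#include <iostream>
#include <sys/stat.h>

class CountCB : public FsTreeWalkerCB {
public:
    int nfiles{0};
    FsTreeWalker::Status processone(const string &, const struct stat *,
                                    FsTreeWalker::CbFlag flag) override {
        if (flag == FsTreeWalker::FtwRegular)
            nfiles++;
        return FsTreeWalker::FtwOk;
    }
};

static void walkSketch()
{
    FsTreeWalker walker(FsTreeWalker::FtwTravFilesThenDirs);
    walker.addSkippedName("*~");                // ignore editor backups
    walker.addSkippedPath("/home/me/.recoll");  // never descend in there
    CountCB cb;
    if (walker.walk("/home/me/docs", cb) != FsTreeWalker::FtwOk)
        std::cerr << walker.getReason() << "\n";
    else
        std::cout << cb.nfiles << " regular files\n";
}
#endif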

// Utility function. Somewhat like du.
int64_t fsTreeBytes(const string& topdir);

#endif /* _FSTREEWALK_H_INCLUDED_ */
recoll-1.26.3/utils/pxattr.h0000644000175000017500000001133213303776060012644 00000000000000#ifndef _pxattr_h_included_
#define _pxattr_h_included_

/* @(#$Id: pxattr.h,v 1.5 2009-01-20 13:48:34 dockes Exp $  (C) 2009 J.F.Dockes
Copyright (c) 2009 Jean-Francois Dockes

Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
*/
#include <string>
#include <vector>
using std::string;
using std::vector;

/**
 * Provide a uniform C++ API for extended file attributes on Linux/FreeBSD 
 * and MacOSX.
 *
 * We only deal with user attributes. Other namespaces are very
 * system-specific and would be difficult to use in a portable way.
 *
 * Linux and FreeBSD handle the attribute name space segmentation
 * differently: Linux uses the first name segment ("user.", "system.", ...),
 * FreeBSD uses an enumeration.
 *
 * We handle this by using only domain-internal names in the interface:
 * that is, the caller specifies the names as, ie, 'org.myapp.somename' 
 * not 'user.org.myapp.somename'. pxattr will deal with adding/removing
 * the 'user.' part as needed. 
 *
 * MacOsX does not segment the attribute name space.
 * 
 * In order to avoid conflicts, it is recommended that attributes
 * names be chosen in a "reverse dns" fashion, ie:
 * org.recoll.indexing.status
 *
 * The interface provided should work the same way on all 3 systems;
 * it papers over such differences as the "list" output format,
 * the existence of CREATE/UPDATE distinctions, etc.
 * 
 * Diagnostics: all functions return false on error, and preserve the errno 
 * value or set it as appropriate.
 *
 * For path-based interfaces, the PXATTR_NOFOLLOW flag can be set to decide if
 * symbolic links will be acted on or followed.
 */
namespace pxattr {
    /** nspace might be used in the future if we support multiple namespaces.*/
    enum nspace {
	/** User name space */
	PXATTR_USER
    };

    /** Flags can modify the behaviour of some methods */
    enum flags {PXATTR_NONE=0, 
		/** Act on link instead of following it */
		PXATTR_NOFOLLOW = 1, 
		/** Fail if existing */
		PXATTR_CREATE=2, 
		/** Fail if new */
		PXATTR_REPLACE=4 
    };

    /**
     * Retrieve the named attribute from path. 
     */
    bool get(const string& path, const string& name, string* value, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * Retrieve the named attribute from open file. 
     */
    bool get(int fd, const string& name, string* value, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * Set the named attribute on path. 
     */
    bool set(const string& path, const string& name, const string& value, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * Set the named attribute on open file. 
     */
    bool set(int fd, const string& name, const string& value, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * Delete the named attribute from path.
     */
    bool del(const string& path, const string& name, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * Delete the named attribute from open file.
     */
    bool del(int fd, const string& name, 
	     flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * List attribute names from path.
     */
    bool list(const string& path, vector<string>* names,
	      flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);
    /**
     * List attribute names from open file.
     */
    bool list(int fd, vector<string>* names,
	      flags flags = PXATTR_NONE, nspace dom = PXATTR_USER);

    /**
     * Compute actual/system attribute name from external name 
     * (ie: myattr->user.myattr)
     */
    bool sysname(nspace dom, const string& pname, string* sname);
    /**
     * Compute external name from actual/system name 
     * (ie: user.myattr->myattr)
     */
    bool pxname(nspace dom, const string& sname, string* pname);
}
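
/* Illustrative sketch (not part of the original header): setting, listing
   and reading back a user attribute. The attribute name and value are made
   up; the name follows the reverse-dns recommendation above. */
#if 0
#include "pxattr.h"
#include <cstdio>
#include <iostream>

static void xattrSketch(const string& path)
{
    if (!pxattr::set(path, "org.example.status", "indexed")) {
        perror("pxattr::set"); /* errno is preserved/set by pxattr calls */
        return;
    }
    vector<string> names;
    if (pxattr::list(path, &names)) {
        for (const auto& nm : names)
            std::cout << nm << "\n";  // external names, e.g. org.example.status
    }
    string value;
    if (pxattr::get(path, "org.example.status", &value))
        std::cout << "status: " << value << "\n";
}
#endif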


#endif /* _pxattr_h_included_ */
recoll-1.26.3/utils/closefrom.h0000644000175000017500000000211413533651561013314 00000000000000#ifndef _closefrom_h_included_
#define _closefrom_h_included_
/* Copyright (C) 2004 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* Close all descriptors >=  fd */
extern int libclf_closefrom(int fd);

/* Retrieve max open fd. This might be the actual max open one (not
   thread-safe) or a system max value. */
extern int libclf_maxfd(int flags=0);
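
/* Illustrative sketch (not part of the original header): typical use after
   fork() and before exec, so that no stray descriptors leak into the child.
   The command handling is made up for the example. */
#if 0
#include "closefrom.h"
#include <unistd.h>

static void execChildSketch(const char *cmd)
{
    if (fork() == 0) {
        /* Keep stdin/stdout/stderr (0, 1, 2), close everything else. */
        libclf_closefrom(3);
        execlp(cmd, cmd, (char *)0);
        _exit(127);
    }
}
#endif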

#endif /* _closefrom_h_included_ */
recoll-1.26.3/utils/md5.cpp0000644000175000017500000002171313533651561012351 00000000000000/*	$OpenBSD: md5.c,v 1.7 2004/05/28 15:10:27 millert Exp $	*/

/*
 * This code implements the MD5 message-digest algorithm.
 * The algorithm is due to Ron Rivest.  This code was
 * written by Colin Plumb in 1993, no copyright is claimed.
 * This code is in the public domain; do with it what you wish.
 *
 * Equivalent code is available from RSA Data Security, Inc.
 * This code has been tested against that, and is equivalent,
 * except that you don't need to include two pages of legalese
 * with every copy.
 *
 * To compute the message digest of a chunk of bytes, declare an
 * MD5Context structure, pass it to MD5Init, call MD5Update as
 * needed on buffers full of bytes, and then call MD5Final, which
 * will fill a supplied 16-byte array with the digest.
 */

#include "md5.h"

#include <string.h>
#include <stdio.h>


#define PUT_64BIT_LE(cp, value) do {					\
	(cp)[7] = (value) >> 56;					\
	(cp)[6] = (value) >> 48;					\
	(cp)[5] = (value) >> 40;					\
	(cp)[4] = (value) >> 32;					\
	(cp)[3] = (value) >> 24;					\
	(cp)[2] = (value) >> 16;					\
	(cp)[1] = (value) >> 8;						\
	(cp)[0] = (value); } while (0)

#define PUT_32BIT_LE(cp, value) do {					\
	(cp)[3] = (value) >> 24;					\
	(cp)[2] = (value) >> 16;					\
	(cp)[1] = (value) >> 8;						\
	(cp)[0] = (value); } while (0)

static uint8_t PADDING[MD5_BLOCK_LENGTH] = {
	0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
};

static void MD5Pad(MD5_CTX *);
static void MD5Transform(uint32_t [4], const uint8_t [MD5_BLOCK_LENGTH]);

/*
 * Start MD5 accumulation.  Set bit count to 0 and buffer to mysterious
 * initialization constants.
 */
void
MD5Init(MD5_CTX *ctx)
{
	ctx->count = 0;
	ctx->state[0] = 0x67452301;
	ctx->state[1] = 0xefcdab89;
	ctx->state[2] = 0x98badcfe;
	ctx->state[3] = 0x10325476;
}

/*
 * Update context to reflect the concatenation of another buffer full
 * of bytes.
 */
void
MD5Update(MD5_CTX *ctx, const unsigned char *input, size_t len)
{
	size_t have, need;

	/* Check how many bytes we already have and how many more we need. */
	have = (size_t)((ctx->count >> 3) & (MD5_BLOCK_LENGTH - 1));
	need = MD5_BLOCK_LENGTH - have;

	/* Update bitcount */
	ctx->count += (uint64_t)len << 3;

	if (len >= need) {
		if (have != 0) {
			memcpy(ctx->buffer + have, input, need);
			MD5Transform(ctx->state, ctx->buffer);
			input += need;
			len -= need;
			have = 0;
		}

		/* Process data in MD5_BLOCK_LENGTH-byte chunks. */
		while (len >= MD5_BLOCK_LENGTH) {
			MD5Transform(ctx->state, input);
			input += MD5_BLOCK_LENGTH;
			len -= MD5_BLOCK_LENGTH;
		}
	}

	/* Handle any remaining bytes of data. */
	if (len != 0)
		memcpy(ctx->buffer + have, input, len);
}

/*
 * Pad pad to 64-byte boundary with the bit pattern
 * 1 0* (64-bit count of bits processed, MSB-first)
 */
static void
MD5Pad(MD5_CTX *ctx)
{
	uint8_t count[8];
	size_t padlen;

	/* Convert count to 8 bytes in little endian order. */
	PUT_64BIT_LE(count, ctx->count);

	/* Pad out to 56 mod 64. */
	padlen = MD5_BLOCK_LENGTH -
	    ((ctx->count >> 3) & (MD5_BLOCK_LENGTH - 1));
	if (padlen < 1 + 8)
		padlen += MD5_BLOCK_LENGTH;
	MD5Update(ctx, PADDING, padlen - 8);		/* padlen - 8 <= 64 */
	MD5Update(ctx, count, 8);
}

/*
 * Final wrapup--call MD5Pad, fill in digest and zero out ctx.
 */
void
MD5Final(unsigned char digest[MD5_DIGEST_LENGTH], MD5_CTX *ctx)
{
	int i;

	MD5Pad(ctx);
	if (digest != NULL) {
		for (i = 0; i < 4; i++)
			PUT_32BIT_LE(digest + i * 4, ctx->state[i]);
		memset(ctx, 0, sizeof(*ctx));
	}
}


/* The four core functions - F1 is optimized somewhat */

/* #define F1(x, y, z) (x & y | ~x & z) */
#define F1(x, y, z) (z ^ (x & (y ^ z)))
#define F2(x, y, z) F1(z, x, y)
#define F3(x, y, z) (x ^ y ^ z)
#define F4(x, y, z) (y ^ (x | ~z))

/* This is the central step in the MD5 algorithm. */
#define MD5STEP(f, w, x, y, z, data, s) \
	( w += f(x, y, z) + data,  w = w<<s | w>>(32-s),  w += x )

/*
 * The core of the MD5 algorithm, this alters an existing MD5 hash to
 * reflect the addition of 16 longwords of new data.  MD5Update blocks
 * the data and converts bytes into longwords for this routine.
 */
static void
MD5Transform(uint32_t state[4], const uint8_t block[MD5_BLOCK_LENGTH])
{
	uint32_t a, b, c, d, in[MD5_BLOCK_LENGTH / 4];

#ifndef WORDS_BIGENDIAN
	memcpy(in, block, sizeof(in));
#else
	for (a = 0; a < MD5_BLOCK_LENGTH / 4; a++) {
		in[a] = (uint32_t)(
		    (uint32_t)(block[a * 4 + 0]) |
		    (uint32_t)(block[a * 4 + 1]) <<  8 |
		    (uint32_t)(block[a * 4 + 2]) << 16 |
		    (uint32_t)(block[a * 4 + 3]) << 24);
	}
#endif

	a = state[0];
	b = state[1];
	c = state[2];
	d = state[3];

	MD5STEP(F1, a, b, c, d, in[ 0] + 0xd76aa478,  7);
	MD5STEP(F1, d, a, b, c, in[ 1] + 0xe8c7b756, 12);
	MD5STEP(F1, c, d, a, b, in[ 2] + 0x242070db, 17);
	MD5STEP(F1, b, c, d, a, in[ 3] + 0xc1bdceee, 22);
	MD5STEP(F1, a, b, c, d, in[ 4] + 0xf57c0faf,  7);
	MD5STEP(F1, d, a, b, c, in[ 5] + 0x4787c62a, 12);
	MD5STEP(F1, c, d, a, b, in[ 6] + 0xa8304613, 17);
	MD5STEP(F1, b, c, d, a, in[ 7] + 0xfd469501, 22);
	MD5STEP(F1, a, b, c, d, in[ 8] + 0x698098d8,  7);
	MD5STEP(F1, d, a, b, c, in[ 9] + 0x8b44f7af, 12);
	MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
	MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
	MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122,  7);
	MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
	MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
	MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);

	MD5STEP(F2, a, b, c, d, in[ 1] + 0xf61e2562,  5);
	MD5STEP(F2, d, a, b, c, in[ 6] + 0xc040b340,  9);
	MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
	MD5STEP(F2, b, c, d, a, in[ 0] + 0xe9b6c7aa, 20);
	MD5STEP(F2, a, b, c, d, in[ 5] + 0xd62f105d,  5);
	MD5STEP(F2, d, a, b, c, in[10] + 0x02441453,  9);
	MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
	MD5STEP(F2, b, c, d, a, in[ 4] + 0xe7d3fbc8, 20);
	MD5STEP(F2, a, b, c, d, in[ 9] + 0x21e1cde6,  5);
	MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6,  9);
	MD5STEP(F2, c, d, a, b, in[ 3] + 0xf4d50d87, 14);
	MD5STEP(F2, b, c, d, a, in[ 8] + 0x455a14ed, 20);
	MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905,  5);
	MD5STEP(F2, d, a, b, c, in[ 2] + 0xfcefa3f8,  9);
	MD5STEP(F2, c, d, a, b, in[ 7] + 0x676f02d9, 14);
	MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);

	MD5STEP(F3, a, b, c, d, in[ 5] + 0xfffa3942,  4);
	MD5STEP(F3, d, a, b, c, in[ 8] + 0x8771f681, 11);
	MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
	MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
	MD5STEP(F3, a, b, c, d, in[ 1] + 0xa4beea44,  4);
	MD5STEP(F3, d, a, b, c, in[ 4] + 0x4bdecfa9, 11);
	MD5STEP(F3, c, d, a, b, in[ 7] + 0xf6bb4b60, 16);
	MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
	MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6,  4);
	MD5STEP(F3, d, a, b, c, in[ 0] + 0xeaa127fa, 11);
	MD5STEP(F3, c, d, a, b, in[ 3] + 0xd4ef3085, 16);
	MD5STEP(F3, b, c, d, a, in[ 6] + 0x04881d05, 23);
	MD5STEP(F3, a, b, c, d, in[ 9] + 0xd9d4d039,  4);
	MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
	MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
	MD5STEP(F3, b, c, d, a, in[2 ] + 0xc4ac5665, 23);

	MD5STEP(F4, a, b, c, d, in[ 0] + 0xf4292244,  6);
	MD5STEP(F4, d, a, b, c, in[7 ] + 0x432aff97, 10);
	MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
	MD5STEP(F4, b, c, d, a, in[5 ] + 0xfc93a039, 21);
	MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3,  6);
	MD5STEP(F4, d, a, b, c, in[3 ] + 0x8f0ccc92, 10);
	MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
	MD5STEP(F4, b, c, d, a, in[1 ] + 0x85845dd1, 21);
	MD5STEP(F4, a, b, c, d, in[8 ] + 0x6fa87e4f,  6);
	MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
	MD5STEP(F4, c, d, a, b, in[6 ] + 0xa3014314, 15);
	MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
	MD5STEP(F4, a, b, c, d, in[4 ] + 0xf7537e82,  6);
	MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
	MD5STEP(F4, c, d, a, b, in[2 ] + 0x2ad7d2bb, 15);
	MD5STEP(F4, b, c, d, a, in[9 ] + 0xeb86d391, 21);

	state[0] += a;
	state[1] += b;
	state[2] += c;
	state[3] += d;
}


// C++ utilities
using std::string;

void MD5Final(string &digest, MD5_CTX *context)
{
    unsigned char d[16];
    MD5Final (d, context);
    digest.assign((const char *)d, 16);
}

string& MD5String(const string& data, string& digest)
{
    MD5_CTX ctx;
    MD5Init(&ctx);
    MD5Update(&ctx, (const unsigned char*)data.c_str(), data.length());
    MD5Final(digest, &ctx);
    return digest;
}

string& MD5HexPrint(const string& digest, string &out)
{
    out.erase();
    out.reserve(33);
    static const char hex[]="0123456789abcdef";
    const unsigned char *hash = (const unsigned char *)digest.c_str();
    for (int i = 0; i < 16; i++) {
	out.append(1, hex[hash[i] >> 4]);
	out.append(1, hex[hash[i] & 0x0f]);
    }
    return out;
}
string& MD5HexScan(const string& xdigest, string& digest)
{
    digest.erase();
    if (xdigest.length() != 32) {
	return digest;
    }
    for (unsigned int i = 0; i < 16; i++) {
	unsigned int val;
	if (sscanf(xdigest.c_str() + 2*i, "%2x", &val) != 1) {
	    digest.erase();
	    return digest;
	}
	digest.append(1, (unsigned char)val);
    }
    return digest;
}
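
// Illustrative sketch (not part of the original source): computing the hex
// digest of a string with the C++ helpers above.
#if 0
#include "md5.h"
#include <iostream>

static void md5Sketch()
{
    std::string digest, hex;
    MD5String("hello world", digest);  // 16 raw digest bytes
    MD5HexPrint(digest, hex);          // 32 hex characters
    std::cout << hex << "\n";          // 5eb63bbbe01eeed093cb22bb8f5acdc3
}
#endif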
recoll-1.26.3/utils/miniz.h0000644000175000017500000020176313533651561012464 00000000000000/* miniz.c 2.0.8 - public domain deflate/inflate, zlib-subset, ZIP reading/writing/appending, PNG writing
   See "unlicense" statement at the end of this file.
   Rich Geldreich <richgel99@gmail.com>, last updated Oct. 13, 2013
   Implements RFC 1950: http://www.ietf.org/rfc/rfc1950.txt and RFC 1951: http://www.ietf.org/rfc/rfc1951.txt

   Most API's defined in miniz.c are optional. For example, to disable the archive related functions just define
   MINIZ_NO_ARCHIVE_APIS, or to get rid of all stdio usage define MINIZ_NO_STDIO (see the list below for more macros).

   * Low-level Deflate/Inflate implementation notes:

     Compression: Use the "tdefl" API's. The compressor supports raw, static, and dynamic blocks, lazy or
     greedy parsing, match length filtering, RLE-only, and Huffman-only streams. It performs and compresses
     approximately as well as zlib.

     Decompression: Use the "tinfl" API's. The entire decompressor is implemented as a single function
     coroutine: see tinfl_decompress(). It supports decompression into a 32KB (or larger power of 2) wrapping buffer, or into a memory
     block large enough to hold the entire file.

     The low-level tdefl/tinfl API's do not make any use of dynamic memory allocation.

   * zlib-style API notes:

     miniz.c implements a fairly large subset of zlib. There's enough functionality present for it to be a drop-in
     zlib replacement in many apps:
        The z_stream struct, optional memory allocation callbacks
        deflateInit/deflateInit2/deflate/deflateReset/deflateEnd/deflateBound
        inflateInit/inflateInit2/inflate/inflateEnd
        compress, compress2, compressBound, uncompress
        CRC-32, Adler-32 - Using modern, minimal code size, CPU cache friendly routines.
        Supports raw deflate streams or standard zlib streams with adler-32 checking.

     Limitations:
      The callback API's are not implemented yet. No support for gzip headers or zlib static dictionaries.
      I've tried to closely emulate zlib's various flavors of stream flushing and return status codes, but
      there are no guarantees that miniz.c pulls this off perfectly.

   * PNG writing: See the tdefl_write_image_to_png_file_in_memory() function, originally written by
     Alex Evans. Supports 1-4 bytes/pixel images.

   * ZIP archive API notes:

     The ZIP archive API's were designed with simplicity and efficiency in mind, with just enough abstraction to
     get the job done with minimal fuss. There are simple API's to retrieve file information, read files from
     existing archives, create new archives, append new files to existing archives, or clone archive data from
     one archive to another. It supports archives located in memory or the heap, on disk (using stdio.h),
     or you can specify custom file read/write callbacks.

     - Archive reading: Just call this function to read a single file from a disk archive:

      void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name,
        size_t *pSize, mz_uint zip_flags);

     For more complex cases, use the "mz_zip_reader" functions. Upon opening an archive, the entire central
     directory is located and read as-is into memory, and subsequent file access only occurs when reading individual files.

     - Archives file scanning: The simple way is to use this function to scan a loaded archive for a specific file:

     int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);

     The locate operation can optionally check file comments too, which (as one example) can be used to identify
     multiple versions of the same file in an archive. This function uses a simple linear search through the central
     directory, so it's not very fast.

     Alternately, you can iterate through all the files in an archive (using mz_zip_reader_get_num_files()) and
     retrieve detailed info on each file by calling mz_zip_reader_file_stat().

     - Archive creation: Use the "mz_zip_writer" functions. The ZIP writer immediately writes compressed file data
     to disk and builds an exact image of the central directory in memory. The central directory image is written
     all at once at the end of the archive file when the archive is finalized.

     The archive writer can optionally align each file's local header and file data to any power of 2 alignment,
     which can be useful when the archive will be read from optical media. Also, the writer supports placing
     arbitrary data blobs at the very beginning of ZIP archives. Archives written using either feature are still
     readable by any ZIP tool.

     - Archive appending: The simple way to add a single file to an archive is to call this function:

      mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name,
        const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);

     The archive will be created if it doesn't already exist, otherwise it'll be appended to.
     Note the appending is done in-place and is not an atomic operation, so if something goes wrong
     during the operation it's possible the archive could be left without a central directory (although the local
     file headers and file data will be fine, so the archive will be recoverable).

     For more complex archive modification scenarios:
     1. The safest way is to use a mz_zip_reader to read the existing archive, cloning only those bits you want to
     preserve into a new archive using the mz_zip_writer_add_from_zip_reader() function (which compiles the
     compressed file data as-is). When you're done, delete the old archive and rename the newly written archive, and
     you're done. This is safe but requires a bunch of temporary disk space or heap memory.

     2. Or, you can convert an mz_zip_reader in-place to an mz_zip_writer using mz_zip_writer_init_from_reader(),
     append new files as needed, then finalize the archive which will write an updated central directory to the
     original archive. (This is basically what mz_zip_add_mem_to_archive_file_in_place() does.) There's a
     possibility that the archive's central directory could be lost with this method if anything goes wrong, though.

     - ZIP archive support limitations:
     No zip64 or spanning support. Extraction functions can only handle unencrypted, stored or deflated files.
     Requires streams capable of seeking.

   * This is a header file library, like stb_image.c. To get only a header file, either cut and paste the
     below header, or create miniz.h, #define MINIZ_HEADER_FILE_ONLY, and then include miniz.c from it.

   * Important: For best perf. be sure to customize the below macros for your target platform:
     #define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
     #define MINIZ_LITTLE_ENDIAN 1
     #define MINIZ_HAS_64BIT_REGISTERS 1

   * On platforms using glibc, Be sure to "#define _LARGEFILE64_SOURCE 1" before including miniz.c to ensure miniz
     uses the 64-bit variants: fopen64(), stat64(), etc. Otherwise you won't be able to process large files
     (i.e. 32-bit stat() fails for me on files > 0x7FFFFFFF bytes).
*/
#pragma once
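
/* Illustrative sketch (not part of miniz): reading one file from a zip
   archive with the single-call helper described in the notes above. The
   archive and member names are made up for the example. */
#if 0
#include "miniz.h"
#include <stdio.h>

static void extractSketch(void)
{
    size_t size = 0;
    void *data = mz_zip_extract_archive_file_to_heap(
        "archive.zip", "docs/readme.txt", &size, 0 /* zip_flags */);
    if (data != NULL) {
        fwrite(data, 1, size, stdout);
        mz_free(data); /* the buffer was heap-allocated by miniz */
    }
}
#endif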





/* Defines to completely disable specific portions of miniz.c: 
   If all macros here are defined the only functionality remaining will be CRC-32, adler-32, tinfl, and tdefl. */

/* Define MINIZ_NO_STDIO to disable all usage and any functions which rely on stdio for file I/O. */
/*#define MINIZ_NO_STDIO */

/* If MINIZ_NO_TIME is specified then the ZIP archive functions will not be able to get the current time, or */
/* get/set file times, and the C run-time funcs that get/set times won't be called. */
/* The current downside is the times written to your archives will be from 1979. */
/*#define MINIZ_NO_TIME */

/* Define MINIZ_NO_ARCHIVE_APIS to disable all ZIP archive API's. */
/*#define MINIZ_NO_ARCHIVE_APIS */

/* Define MINIZ_NO_ARCHIVE_WRITING_APIS to disable all writing related ZIP archive API's. */
#define MINIZ_NO_ARCHIVE_WRITING_APIS 

/* Define MINIZ_NO_ZLIB_APIS to remove all ZLIB-style compression/decompression API's. */
#define MINIZ_NO_ZLIB_APIS 

/* Define MINIZ_NO_ZLIB_COMPATIBLE_NAME to disable zlib names, to prevent conflicts against stock zlib. */
#define MINIZ_NO_ZLIB_COMPATIBLE_NAMES 

/* Define MINIZ_NO_MALLOC to disable all calls to malloc, free, and realloc. 
   Note if MINIZ_NO_MALLOC is defined then the user must always provide custom user alloc/free/realloc
   callbacks to the zlib and archive API's, and a few stand-alone helper API's which don't provide custom user
   functions (such as tdefl_compress_mem_to_heap() and tinfl_decompress_mem_to_heap()) won't work. */
/*#define MINIZ_NO_MALLOC */

#if defined(__TINYC__) && (defined(__linux) || defined(__linux__))
/* TODO: Work around "error: include file 'sys\utime.h'" when compiling with tcc on Linux */
#define MINIZ_NO_TIME
#endif

#include <stddef.h>

#if !defined(MINIZ_NO_TIME) && !defined(MINIZ_NO_ARCHIVE_APIS)
#include <time.h>
#endif

#if defined(_M_IX86) || defined(_M_X64) || defined(__i386__) || defined(__i386) || defined(__i486__) || defined(__i486) || defined(i386) || defined(__ia64__) || defined(__x86_64__)
/* MINIZ_X86_OR_X64_CPU is only used to help set the below macros. */
#define MINIZ_X86_OR_X64_CPU 1
#else
#define MINIZ_X86_OR_X64_CPU 0
#endif

#if (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__) || MINIZ_X86_OR_X64_CPU
/* Set MINIZ_LITTLE_ENDIAN to 1 if the processor is little endian. */
#define MINIZ_LITTLE_ENDIAN 1
#else
#define MINIZ_LITTLE_ENDIAN 0
#endif

#if MINIZ_X86_OR_X64_CPU
/* Set MINIZ_USE_UNALIGNED_LOADS_AND_STORES to 1 on CPU's that permit efficient integer loads and stores from unaligned addresses. */
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 1
#else
#define MINIZ_USE_UNALIGNED_LOADS_AND_STORES 0
#endif

#if defined(_M_X64) || defined(_WIN64) || defined(__MINGW64__) || defined(_LP64) || defined(__LP64__) || defined(__ia64__) || defined(__x86_64__)
/* Set MINIZ_HAS_64BIT_REGISTERS to 1 if operations on 64-bit integers are reasonably fast (and don't involve compiler generated calls to helper functions). */
#define MINIZ_HAS_64BIT_REGISTERS 1
#else
#define MINIZ_HAS_64BIT_REGISTERS 0
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* ------------------- zlib-style API Definitions. */

/* For more compatibility with zlib, miniz.c uses unsigned long for some parameters/struct members. Beware: mz_ulong can be either 32 or 64-bits! */
typedef unsigned long mz_ulong;

/* mz_free() internally uses the MZ_FREE() macro (which by default calls free() unless you've modified the MZ_MALLOC macro) to release a block allocated from the heap. */
void mz_free(void *p);

#define MZ_ADLER32_INIT (1)
/* mz_adler32() returns the initial adler-32 value to use when called with ptr==NULL. */
mz_ulong mz_adler32(mz_ulong adler, const unsigned char *ptr, size_t buf_len);

#define MZ_CRC32_INIT (0)
/* mz_crc32() returns the initial CRC-32 value to use when called with ptr==NULL. */
mz_ulong mz_crc32(mz_ulong crc, const unsigned char *ptr, size_t buf_len);

/* Compression strategies. */
enum
{
    MZ_DEFAULT_STRATEGY = 0,
    MZ_FILTERED = 1,
    MZ_HUFFMAN_ONLY = 2,
    MZ_RLE = 3,
    MZ_FIXED = 4
};

/* Method */
#define MZ_DEFLATED 8

/* Heap allocation callbacks.
Note that mz_alloc_func parameter types purposely differ from zlib's: items/size is size_t, not unsigned long. */
typedef void *(*mz_alloc_func)(void *opaque, size_t items, size_t size);
typedef void (*mz_free_func)(void *opaque, void *address);
typedef void *(*mz_realloc_func)(void *opaque, void *address, size_t items, size_t size);

/* Compression levels: 0-9 are the standard zlib-style levels, 10 is best possible compression (not zlib compatible, and may be very slow), MZ_DEFAULT_COMPRESSION=MZ_DEFAULT_LEVEL. */
enum
{
    MZ_NO_COMPRESSION = 0,
    MZ_BEST_SPEED = 1,
    MZ_BEST_COMPRESSION = 9,
    MZ_UBER_COMPRESSION = 10,
    MZ_DEFAULT_LEVEL = 6,
    MZ_DEFAULT_COMPRESSION = -1
};

#define MZ_VERSION "10.0.3"
#define MZ_VERNUM 0xA030
#define MZ_VER_MAJOR 10
#define MZ_VER_MINOR 0
#define MZ_VER_REVISION 3
#define MZ_VER_SUBREVISION 0

#ifndef MINIZ_NO_ZLIB_APIS

/* Flush values. For typical usage you only need MZ_NO_FLUSH and MZ_FINISH. The other values are for advanced use (refer to the zlib docs). */
enum
{
    MZ_NO_FLUSH = 0,
    MZ_PARTIAL_FLUSH = 1,
    MZ_SYNC_FLUSH = 2,
    MZ_FULL_FLUSH = 3,
    MZ_FINISH = 4,
    MZ_BLOCK = 5
};

/* Return status codes. MZ_PARAM_ERROR is non-standard. */
enum
{
    MZ_OK = 0,
    MZ_STREAM_END = 1,
    MZ_NEED_DICT = 2,
    MZ_ERRNO = -1,
    MZ_STREAM_ERROR = -2,
    MZ_DATA_ERROR = -3,
    MZ_MEM_ERROR = -4,
    MZ_BUF_ERROR = -5,
    MZ_VERSION_ERROR = -6,
    MZ_PARAM_ERROR = -10000
};

/* Window bits */
#define MZ_DEFAULT_WINDOW_BITS 15

struct mz_internal_state;

/* Compression/decompression stream struct. */
typedef struct mz_stream_s
{
    const unsigned char *next_in; /* pointer to next byte to read */
    unsigned int avail_in;        /* number of bytes available at next_in */
    mz_ulong total_in;            /* total number of bytes consumed so far */

    unsigned char *next_out; /* pointer to next byte to write */
    unsigned int avail_out;  /* number of bytes that can be written to next_out */
    mz_ulong total_out;      /* total number of bytes produced so far */

    char *msg;                       /* error msg (unused) */
    struct mz_internal_state *state; /* internal state, allocated by zalloc/zfree */

    mz_alloc_func zalloc; /* optional heap allocation function (defaults to malloc) */
    mz_free_func zfree;   /* optional heap free function (defaults to free) */
    void *opaque;         /* heap alloc function user pointer */

    int data_type;     /* data_type (unused) */
    mz_ulong adler;    /* adler32 of the source or uncompressed data */
    mz_ulong reserved; /* not used */
} mz_stream;

typedef mz_stream *mz_streamp;

/* Returns the version string of miniz.c. */
const char *mz_version(void);

/* mz_deflateInit() initializes a compressor with default options: */
/* Parameters: */
/*  pStream must point to an initialized mz_stream struct. */
/*  level must be between [MZ_NO_COMPRESSION, MZ_BEST_COMPRESSION]. */
/*  level 1 enables a specially optimized compression function that's been optimized purely for performance, not ratio. */
/*  (This special func. is currently only enabled when MINIZ_USE_UNALIGNED_LOADS_AND_STORES and MINIZ_LITTLE_ENDIAN are defined.) */
/* Return values: */
/*  MZ_OK on success. */
/*  MZ_STREAM_ERROR if the stream is bogus. */
/*  MZ_PARAM_ERROR if the input parameters are bogus. */
/*  MZ_MEM_ERROR on out of memory. */
int mz_deflateInit(mz_streamp pStream, int level);

/* mz_deflateInit2() is like mz_deflate(), except with more control: */
/* Additional parameters: */
/*   method must be MZ_DEFLATED */
/*   window_bits must be MZ_DEFAULT_WINDOW_BITS (to wrap the deflate stream with zlib header/adler-32 footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate/no header or footer) */
/*   mem_level must be between [1, 9] (it's checked but ignored by miniz.c) */
int mz_deflateInit2(mz_streamp pStream, int level, int method, int window_bits, int mem_level, int strategy);

/* Quickly resets a compressor without having to reallocate anything. Same as calling mz_deflateEnd() followed by mz_deflateInit()/mz_deflateInit2(). */
int mz_deflateReset(mz_streamp pStream);

/* mz_deflate() compresses the input to output, consuming as much of the input and producing as much output as possible. */
/* Parameters: */
/*   pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
/*   flush may be MZ_NO_FLUSH, MZ_PARTIAL_FLUSH/MZ_SYNC_FLUSH, MZ_FULL_FLUSH, or MZ_FINISH. */
/* Return values: */
/*   MZ_OK on success (when flushing, or if more input is needed but not available, and/or there's more output to be written but the output buffer is full). */
/*   MZ_STREAM_END if all input has been consumed and all output bytes have been written. Don't call mz_deflate() on the stream anymore. */
/*   MZ_STREAM_ERROR if the stream is bogus. */
/*   MZ_PARAM_ERROR if one of the parameters is invalid. */
/*   MZ_BUF_ERROR if no forward progress is possible because the input and/or output buffers are empty. (Fill up the input buffer or free up some output space and try again.) */
int mz_deflate(mz_streamp pStream, int flush);

/* mz_deflateEnd() deinitializes a compressor: */
/* Return values: */
/*  MZ_OK on success. */
/*  MZ_STREAM_ERROR if the stream is bogus. */
int mz_deflateEnd(mz_streamp pStream);

/* mz_deflateBound() returns a (very) conservative upper bound on the amount of data that could be generated by deflate(), assuming flush is set to only MZ_NO_FLUSH or MZ_FINISH. */
mz_ulong mz_deflateBound(mz_streamp pStream, mz_ulong source_len);
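
/* Illustrative sketch (not authoritative, and compiled out in this build since MINIZ_NO_ZLIB_APIS is defined
   above): the classic streaming loop, compressing one stdio FILE to another in fixed-size chunks. Requires
   <stdio.h> and <string.h>.

   static int deflate_file(FILE *fin, FILE *fout)
   {
       static unsigned char inbuf[16384], outbuf[16384];
       mz_stream strm;
       int status = MZ_OK, flush = MZ_NO_FLUSH;
       memset(&strm, 0, sizeof(strm));
       if (mz_deflateInit(&strm, MZ_DEFAULT_LEVEL) != MZ_OK)
           return MZ_STREAM_ERROR;
       do {
           strm.avail_in = (unsigned int)fread(inbuf, 1, sizeof(inbuf), fin);
           strm.next_in = inbuf;
           flush = feof(fin) ? MZ_FINISH : MZ_NO_FLUSH;
           do {
               strm.next_out = outbuf;
               strm.avail_out = (unsigned int)sizeof(outbuf);
               status = mz_deflate(&strm, flush);
               if (status == MZ_STREAM_ERROR || status == MZ_PARAM_ERROR)
                   goto done;
               fwrite(outbuf, 1, sizeof(outbuf) - strm.avail_out, fout);
           } while (strm.avail_out == 0);
       } while (flush != MZ_FINISH);
   done:
       mz_deflateEnd(&strm);
       return (status == MZ_STREAM_END) ? MZ_OK : MZ_DATA_ERROR;
   }
*/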

/* Single-call compression functions mz_compress() and mz_compress2(): */
/* Returns MZ_OK on success, or one of the error codes from mz_deflate() on failure. */
int mz_compress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
int mz_compress2(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len, int level);

/* mz_compressBound() returns a (very) conservative upper bound on the amount of data that could be generated by calling mz_compress(). */
mz_ulong mz_compressBound(mz_ulong source_len);
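
/* Illustrative single-call sketch (a hypothetical helper, not part of miniz; the zlib-style API is compiled
   out in this build because MINIZ_NO_ZLIB_APIS is defined above). Requires <stdlib.h>.

   static unsigned char *compress_block(const unsigned char *src, size_t src_len, mz_ulong *pcomp_len)
   {
       mz_ulong bound = mz_compressBound((mz_ulong)src_len);
       unsigned char *comp = (unsigned char *)malloc(bound);
       if (comp == NULL)
           return NULL;
       *pcomp_len = bound;
       if (mz_compress(comp, pcomp_len, src, (mz_ulong)src_len) != MZ_OK) {
           free(comp);
           return NULL;
       }
       return comp;
   }

   On success *pcomp_len holds the actual compressed size. */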

/* Initializes a decompressor. */
int mz_inflateInit(mz_streamp pStream);

/* mz_inflateInit2() is like mz_inflateInit() with an additional option that controls the window size and whether or not the stream has been wrapped with a zlib header/footer: */
/* window_bits must be MZ_DEFAULT_WINDOW_BITS (to parse zlib header/footer) or -MZ_DEFAULT_WINDOW_BITS (raw deflate). */
int mz_inflateInit2(mz_streamp pStream, int window_bits);

/* Decompresses the input stream to the output, consuming only as much of the input as needed, and writing as much to the output as possible. */
/* Parameters: */
/*   pStream is the stream to read from and write to. You must initialize/update the next_in, avail_in, next_out, and avail_out members. */
/*   flush may be MZ_NO_FLUSH, MZ_SYNC_FLUSH, or MZ_FINISH. */
/*   On the first call, if flush is MZ_FINISH it's assumed the input and output buffers are both sized large enough to decompress the entire stream in a single call (this is slightly faster). */
/*   MZ_FINISH implies that there are no more source bytes available beside what's already in the input buffer, and that the output buffer is large enough to hold the rest of the decompressed data. */
/* Return values: */
/*   MZ_OK on success. Either more input is needed but not available, and/or there's more output to be written but the output buffer is full. */
/*   MZ_STREAM_END if all needed input has been consumed and all output bytes have been written. For zlib streams, the adler-32 of the decompressed data has also been verified. */
/*   MZ_STREAM_ERROR if the stream is bogus. */
/*   MZ_DATA_ERROR if the deflate stream is invalid. */
/*   MZ_PARAM_ERROR if one of the parameters is invalid. */
/*   MZ_BUF_ERROR if no forward progress is possible because the input buffer is empty but the inflater needs more input to continue, or if the output buffer is not large enough. Call mz_inflate() again */
/*   with more input data, or with more room in the output buffer (except when using single call decompression, described above). */
int mz_inflate(mz_streamp pStream, int flush);

/* Deinitializes a decompressor. */
int mz_inflateEnd(mz_streamp pStream);

/* Single-call decompression. */
/* Returns MZ_OK on success, or one of the error codes from mz_inflate() on failure. */
int mz_uncompress(unsigned char *pDest, mz_ulong *pDest_len, const unsigned char *pSource, mz_ulong source_len);
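
/* Illustrative sketch (not authoritative; compiled out here, see MINIZ_NO_ZLIB_APIS above). The destination
   buffer must be large enough for the whole result, so the caller needs to know or bound the original size:
   *pDest_len is the capacity on entry and the decompressed size on return. orig_size, comp and comp_len are
   placeholders for values the caller already has; requires <stdlib.h>.

   mz_ulong out_len = (mz_ulong)orig_size;
   unsigned char *out = (unsigned char *)malloc(out_len);
   if (out != NULL && mz_uncompress(out, &out_len, comp, comp_len) != MZ_OK) {
       free(out);
       out = NULL;
   }

   Afterwards out is either NULL or holds out_len decompressed bytes. */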

/* Returns a string description of the specified error code, or NULL if the error code is invalid. */
const char *mz_error(int err);

/* Redefine zlib-compatible names to miniz equivalents, so miniz.c can be used as a drop-in replacement for the subset of zlib that miniz.c supports. */
/* Define MINIZ_NO_ZLIB_COMPATIBLE_NAMES to disable zlib-compatibility if you use zlib in the same project. */
#ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES
typedef unsigned char Byte;
typedef unsigned int uInt;
typedef mz_ulong uLong;
typedef Byte Bytef;
typedef uInt uIntf;
typedef char charf;
typedef int intf;
typedef void *voidpf;
typedef uLong uLongf;
typedef void *voidp;
typedef void *const voidpc;
#define Z_NULL 0
#define Z_NO_FLUSH MZ_NO_FLUSH
#define Z_PARTIAL_FLUSH MZ_PARTIAL_FLUSH
#define Z_SYNC_FLUSH MZ_SYNC_FLUSH
#define Z_FULL_FLUSH MZ_FULL_FLUSH
#define Z_FINISH MZ_FINISH
#define Z_BLOCK MZ_BLOCK
#define Z_OK MZ_OK
#define Z_STREAM_END MZ_STREAM_END
#define Z_NEED_DICT MZ_NEED_DICT
#define Z_ERRNO MZ_ERRNO
#define Z_STREAM_ERROR MZ_STREAM_ERROR
#define Z_DATA_ERROR MZ_DATA_ERROR
#define Z_MEM_ERROR MZ_MEM_ERROR
#define Z_BUF_ERROR MZ_BUF_ERROR
#define Z_VERSION_ERROR MZ_VERSION_ERROR
#define Z_PARAM_ERROR MZ_PARAM_ERROR
#define Z_NO_COMPRESSION MZ_NO_COMPRESSION
#define Z_BEST_SPEED MZ_BEST_SPEED
#define Z_BEST_COMPRESSION MZ_BEST_COMPRESSION
#define Z_DEFAULT_COMPRESSION MZ_DEFAULT_COMPRESSION
#define Z_DEFAULT_STRATEGY MZ_DEFAULT_STRATEGY
#define Z_FILTERED MZ_FILTERED
#define Z_HUFFMAN_ONLY MZ_HUFFMAN_ONLY
#define Z_RLE MZ_RLE
#define Z_FIXED MZ_FIXED
#define Z_DEFLATED MZ_DEFLATED
#define Z_DEFAULT_WINDOW_BITS MZ_DEFAULT_WINDOW_BITS
#define alloc_func mz_alloc_func
#define free_func mz_free_func
#define internal_state mz_internal_state
#define z_stream mz_stream
#define deflateInit mz_deflateInit
#define deflateInit2 mz_deflateInit2
#define deflateReset mz_deflateReset
#define deflate mz_deflate
#define deflateEnd mz_deflateEnd
#define deflateBound mz_deflateBound
#define compress mz_compress
#define compress2 mz_compress2
#define compressBound mz_compressBound
#define inflateInit mz_inflateInit
#define inflateInit2 mz_inflateInit2
#define inflate mz_inflate
#define inflateEnd mz_inflateEnd
#define uncompress mz_uncompress
#define crc32 mz_crc32
#define adler32 mz_adler32
#define MAX_WBITS 15
#define MAX_MEM_LEVEL 9
#define zError mz_error
#define ZLIB_VERSION MZ_VERSION
#define ZLIB_VERNUM MZ_VERNUM
#define ZLIB_VER_MAJOR MZ_VER_MAJOR
#define ZLIB_VER_MINOR MZ_VER_MINOR
#define ZLIB_VER_REVISION MZ_VER_REVISION
#define ZLIB_VER_SUBREVISION MZ_VER_SUBREVISION
#define zlibVersion mz_version
#define zlib_version mz_version()
#endif /* #ifndef MINIZ_NO_ZLIB_COMPATIBLE_NAMES */

#endif /* MINIZ_NO_ZLIB_APIS */

#ifdef __cplusplus
}
#endif
#pragma once
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* ------------------- Types and macros */
typedef unsigned char mz_uint8;
typedef signed short mz_int16;
typedef unsigned short mz_uint16;
typedef unsigned int mz_uint32;
typedef unsigned int mz_uint;
typedef int64_t mz_int64;
typedef uint64_t mz_uint64;
typedef int mz_bool;

#define MZ_FALSE (0)
#define MZ_TRUE (1)

/* Works around MSVC's spammy "warning C4127: conditional expression is constant" message. */
#ifdef _MSC_VER
#define MZ_MACRO_END while (0, 0)
#else
#define MZ_MACRO_END while (0)
#endif

#ifdef MINIZ_NO_STDIO
#define MZ_FILE void *
#else
#include <stdio.h>
#define MZ_FILE FILE
#endif /* #ifdef MINIZ_NO_STDIO */

#ifdef MINIZ_NO_TIME
typedef struct mz_dummy_time_t_tag
{
    int m_dummy;
} mz_dummy_time_t;
#define MZ_TIME_T mz_dummy_time_t
#else
#define MZ_TIME_T time_t
#endif

#define MZ_ASSERT(x) assert(x)

#ifdef MINIZ_NO_MALLOC
#define MZ_MALLOC(x) NULL
#define MZ_FREE(x) (void)x, ((void)0)
#define MZ_REALLOC(p, x) NULL
#else
#define MZ_MALLOC(x) malloc(x)
#define MZ_FREE(x) free(x)
#define MZ_REALLOC(p, x) realloc(p, x)
#endif

#define MZ_MAX(a, b) (((a) > (b)) ? (a) : (b))
#define MZ_MIN(a, b) (((a) < (b)) ? (a) : (b))
#define MZ_CLEAR_OBJ(obj) memset(&(obj), 0, sizeof(obj))

#if MINIZ_USE_UNALIGNED_LOADS_AND_STORES && MINIZ_LITTLE_ENDIAN
#define MZ_READ_LE16(p) *((const mz_uint16 *)(p))
#define MZ_READ_LE32(p) *((const mz_uint32 *)(p))
#else
#define MZ_READ_LE16(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U))
#define MZ_READ_LE32(p) ((mz_uint32)(((const mz_uint8 *)(p))[0]) | ((mz_uint32)(((const mz_uint8 *)(p))[1]) << 8U) | ((mz_uint32)(((const mz_uint8 *)(p))[2]) << 16U) | ((mz_uint32)(((const mz_uint8 *)(p))[3]) << 24U))
#endif

#define MZ_READ_LE64(p) (((mz_uint64)MZ_READ_LE32(p)) | (((mz_uint64)MZ_READ_LE32((const mz_uint8 *)(p) + sizeof(mz_uint32))) << 32U))

#ifdef _MSC_VER
#define MZ_FORCEINLINE __forceinline
#elif defined(__GNUC__)
#define MZ_FORCEINLINE __inline__ __attribute__((__always_inline__))
#else
#define MZ_FORCEINLINE inline
#endif

#ifdef __cplusplus
extern "C" {
#endif

extern void *miniz_def_alloc_func(void *opaque, size_t items, size_t size);
extern void miniz_def_free_func(void *opaque, void *address);
extern void *miniz_def_realloc_func(void *opaque, void *address, size_t items, size_t size);

#define MZ_UINT16_MAX (0xFFFFU)
#define MZ_UINT32_MAX (0xFFFFFFFFU)

#ifdef __cplusplus
}
#endif
#pragma once


#ifdef __cplusplus
extern "C" {
#endif
/* ------------------- Low-level Compression API Definitions */

/* Set TDEFL_LESS_MEMORY to 1 to use less memory (compression will be slightly slower, and raw/dynamic blocks will be output more frequently). */
#define TDEFL_LESS_MEMORY 0

/* tdefl_init() compression flags logically OR'd together (low 12 bits contain the max. number of probes per dictionary search): */
/* TDEFL_DEFAULT_MAX_PROBES: The compressor defaults to 128 dictionary probes per dictionary search. 0=Huffman only, 1=Huffman+LZ (fastest/crap compression), 4095=Huffman+LZ (slowest/best compression). */
enum
{
    TDEFL_HUFFMAN_ONLY = 0,
    TDEFL_DEFAULT_MAX_PROBES = 128,
    TDEFL_MAX_PROBES_MASK = 0xFFF
};

/* TDEFL_WRITE_ZLIB_HEADER: If set, the compressor outputs a zlib header before the deflate data, and the Adler-32 of the source data at the end. Otherwise, you'll get raw deflate data. */
/* TDEFL_COMPUTE_ADLER32: Always compute the adler-32 of the input data (even when not writing zlib headers). */
/* TDEFL_GREEDY_PARSING_FLAG: Set to use faster greedy parsing, instead of more efficient lazy parsing. */
/* TDEFL_NONDETERMINISTIC_PARSING_FLAG: Enable to decrease the compressor's initialization time to the minimum, but the output may vary from run to run given the same input (depending on the contents of memory). */
/* TDEFL_RLE_MATCHES: Only look for RLE matches (matches with a distance of 1) */
/* TDEFL_FILTER_MATCHES: Discards matches <= 5 chars if enabled. */
/* TDEFL_FORCE_ALL_STATIC_BLOCKS: Disable usage of optimized Huffman tables. */
/* TDEFL_FORCE_ALL_RAW_BLOCKS: Only use raw (uncompressed) deflate blocks. */
/* The low 12 bits are reserved to control the max # of hash probes per dictionary lookup (see TDEFL_MAX_PROBES_MASK). */
enum
{
    TDEFL_WRITE_ZLIB_HEADER = 0x01000,
    TDEFL_COMPUTE_ADLER32 = 0x02000,
    TDEFL_GREEDY_PARSING_FLAG = 0x04000,
    TDEFL_NONDETERMINISTIC_PARSING_FLAG = 0x08000,
    TDEFL_RLE_MATCHES = 0x10000,
    TDEFL_FILTER_MATCHES = 0x20000,
    TDEFL_FORCE_ALL_STATIC_BLOCKS = 0x40000,
    TDEFL_FORCE_ALL_RAW_BLOCKS = 0x80000
};

/* High level compression functions: */
/* tdefl_compress_mem_to_heap() compresses a block in memory to a heap block allocated via malloc(). */
/* On entry: */
/*  pSrc_buf, src_buf_len: Pointer and size of source block to compress. */
/*  flags: The max match finder probes (default is 128) logically OR'd against the above flags. Higher probes are slower but improve compression. */
/* On return: */
/*  Function returns a pointer to the compressed data, or NULL on failure. */
/*  *pOut_len will be set to the compressed data's size, which could be larger than src_buf_len on uncompressible data. */
/*  The caller must free() the returned block when it's no longer needed. */
void *tdefl_compress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);

/* tdefl_compress_mem_to_mem() compresses a block in memory to another block in memory. */
/* Returns 0 on failure. */
size_t tdefl_compress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);
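
/* Illustrative sketch (not authoritative): one-shot compression to a heap block with the tdefl helpers.
   src/src_len are placeholders for the caller's data, and use_compressed_data() stands for whatever the
   caller does with the result.

   size_t comp_len = 0;
   void *comp = tdefl_compress_mem_to_heap(src, src_len, &comp_len,
                                           TDEFL_DEFAULT_MAX_PROBES | TDEFL_WRITE_ZLIB_HEADER);
   if (comp != NULL) {
       use_compressed_data(comp, comp_len);
       free(comp);
   }

   Drop TDEFL_WRITE_ZLIB_HEADER to get a raw deflate stream instead of a zlib stream. */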

/* Compresses an image to a compressed PNG file in memory. */
/* On entry: */
/*  pImage, w, h, and num_chans describe the image to compress. num_chans may be 1, 2, 3, or 4. */
/*  The image pitch in bytes per scanline will be w*num_chans. The leftmost pixel on the top scanline is stored first in memory. */
/*  level may range from [0,10]; use MZ_NO_COMPRESSION, MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc., or MZ_DEFAULT_LEVEL as a reasonable default. */
/*  If flip is true, the image will be flipped on the Y axis (useful for OpenGL apps). */
/* On return: */
/*  Function returns a pointer to the compressed data, or NULL on failure. */
/*  *pLen_out will be set to the size of the PNG image file. */
/*  The caller must mz_free() the returned heap block (which will typically be larger than *pLen_out) when it's no longer needed. */
void *tdefl_write_image_to_png_file_in_memory_ex(const void *pImage, int w, int h, int num_chans, size_t *pLen_out, mz_uint level, mz_bool flip);
void *tdefl_write_image_to_png_file_in_memory(const void *pImage, int w, int h, int num_chans, size_t *pLen_out);
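
/* Illustrative sketch (not authoritative): writing an image out as a PNG file. pixels, width and height are
   placeholders for an 8-bit RGB image with a pitch of width*3 bytes, and "out.png" is just an example path;
   requires <stdio.h>.

   size_t png_len = 0;
   void *png = tdefl_write_image_to_png_file_in_memory_ex(pixels, width, height, 3,
                                                          &png_len, MZ_DEFAULT_LEVEL, MZ_FALSE);
   if (png != NULL) {
       FILE *fp = fopen("out.png", "wb");
       if (fp != NULL) {
           fwrite(png, 1, png_len, fp);
           fclose(fp);
       }
       mz_free(png);
   }
*/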

/* Output stream interface. The compressor uses this interface to write compressed data. It'll typically be called with TDEFL_OUT_BUF_SIZE bytes at a time. */
typedef mz_bool (*tdefl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);

/* tdefl_compress_mem_to_output() compresses a block to an output stream. The above helpers use this function internally. */
mz_bool tdefl_compress_mem_to_output(const void *pBuf, size_t buf_len, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);

enum
{
    TDEFL_MAX_HUFF_TABLES = 3,
    TDEFL_MAX_HUFF_SYMBOLS_0 = 288,
    TDEFL_MAX_HUFF_SYMBOLS_1 = 32,
    TDEFL_MAX_HUFF_SYMBOLS_2 = 19,
    TDEFL_LZ_DICT_SIZE = 32768,
    TDEFL_LZ_DICT_SIZE_MASK = TDEFL_LZ_DICT_SIZE - 1,
    TDEFL_MIN_MATCH_LEN = 3,
    TDEFL_MAX_MATCH_LEN = 258
};

/* TDEFL_OUT_BUF_SIZE MUST be large enough to hold a single entire compressed output block (using static/fixed Huffman codes). */
#if TDEFL_LESS_MEMORY
enum
{
    TDEFL_LZ_CODE_BUF_SIZE = 24 * 1024,
    TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
    TDEFL_MAX_HUFF_SYMBOLS = 288,
    TDEFL_LZ_HASH_BITS = 12,
    TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
    TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
    TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#else
enum
{
    TDEFL_LZ_CODE_BUF_SIZE = 64 * 1024,
    TDEFL_OUT_BUF_SIZE = (TDEFL_LZ_CODE_BUF_SIZE * 13) / 10,
    TDEFL_MAX_HUFF_SYMBOLS = 288,
    TDEFL_LZ_HASH_BITS = 15,
    TDEFL_LEVEL1_HASH_SIZE_MASK = 4095,
    TDEFL_LZ_HASH_SHIFT = (TDEFL_LZ_HASH_BITS + 2) / 3,
    TDEFL_LZ_HASH_SIZE = 1 << TDEFL_LZ_HASH_BITS
};
#endif

/* The low-level tdefl functions below may be used directly if the above helper functions aren't flexible enough. The low-level functions don't make any heap allocations, unlike the above helper functions. */
typedef enum {
    TDEFL_STATUS_BAD_PARAM = -2,
    TDEFL_STATUS_PUT_BUF_FAILED = -1,
    TDEFL_STATUS_OKAY = 0,
    TDEFL_STATUS_DONE = 1
} tdefl_status;

/* Must map to MZ_NO_FLUSH, MZ_SYNC_FLUSH, etc. enums */
typedef enum {
    TDEFL_NO_FLUSH = 0,
    TDEFL_SYNC_FLUSH = 2,
    TDEFL_FULL_FLUSH = 3,
    TDEFL_FINISH = 4
} tdefl_flush;

/* tdefl's compression state structure. */
typedef struct
{
    tdefl_put_buf_func_ptr m_pPut_buf_func;
    void *m_pPut_buf_user;
    mz_uint m_flags, m_max_probes[2];
    int m_greedy_parsing;
    mz_uint m_adler32, m_lookahead_pos, m_lookahead_size, m_dict_size;
    mz_uint8 *m_pLZ_code_buf, *m_pLZ_flags, *m_pOutput_buf, *m_pOutput_buf_end;
    mz_uint m_num_flags_left, m_total_lz_bytes, m_lz_code_buf_dict_pos, m_bits_in, m_bit_buffer;
    mz_uint m_saved_match_dist, m_saved_match_len, m_saved_lit, m_output_flush_ofs, m_output_flush_remaining, m_finished, m_block_index, m_wants_to_finish;
    tdefl_status m_prev_return_status;
    const void *m_pIn_buf;
    void *m_pOut_buf;
    size_t *m_pIn_buf_size, *m_pOut_buf_size;
    tdefl_flush m_flush;
    const mz_uint8 *m_pSrc;
    size_t m_src_buf_left, m_out_buf_ofs;
    mz_uint8 m_dict[TDEFL_LZ_DICT_SIZE + TDEFL_MAX_MATCH_LEN - 1];
    mz_uint16 m_huff_count[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
    mz_uint16 m_huff_codes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
    mz_uint8 m_huff_code_sizes[TDEFL_MAX_HUFF_TABLES][TDEFL_MAX_HUFF_SYMBOLS];
    mz_uint8 m_lz_code_buf[TDEFL_LZ_CODE_BUF_SIZE];
    mz_uint16 m_next[TDEFL_LZ_DICT_SIZE];
    mz_uint16 m_hash[TDEFL_LZ_HASH_SIZE];
    mz_uint8 m_output_buf[TDEFL_OUT_BUF_SIZE];
} tdefl_compressor;

/* Initializes the compressor. */
/* There is no corresponding deinit() function because the tdefl API's do not dynamically allocate memory. */
/* pPut_buf_func: If non-NULL, compressed output data will be supplied to the specified callback. In this case, the user should call the tdefl_compress_buffer() API for compression. */
/* If pPut_buf_func is NULL the user should always call the tdefl_compress() API. */
/* flags: See the above enums (TDEFL_HUFFMAN_ONLY, TDEFL_WRITE_ZLIB_HEADER, etc.) */
tdefl_status tdefl_init(tdefl_compressor *d, tdefl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);

/* Compresses a block of data, consuming as much of the specified input buffer as possible, and writing as much compressed data to the specified output buffer as possible. */
tdefl_status tdefl_compress(tdefl_compressor *d, const void *pIn_buf, size_t *pIn_buf_size, void *pOut_buf, size_t *pOut_buf_size, tdefl_flush flush);

/* tdefl_compress_buffer() is only usable when the tdefl_init() is called with a non-NULL tdefl_put_buf_func_ptr. */
/* tdefl_compress_buffer() always consumes the entire input buffer. */
tdefl_status tdefl_compress_buffer(tdefl_compressor *d, const void *pIn_buf, size_t in_buf_size, tdefl_flush flush);

tdefl_status tdefl_get_prev_return_status(tdefl_compressor *d);
mz_uint32 tdefl_get_adler32(tdefl_compressor *d);

/* Create tdefl_compress() flags given zlib-style compression parameters. */
/* level may range from [0,10] (where 10 is absolute max compression, but may be much slower on some files) */
/* window_bits may be -15 (raw deflate) or 15 (zlib) */
/* strategy may be either MZ_DEFAULT_STRATEGY, MZ_FILTERED, MZ_HUFFMAN_ONLY, MZ_RLE, or MZ_FIXED */
mz_uint tdefl_create_comp_flags_from_zip_params(int level, int window_bits, int strategy);

/* Allocate the tdefl_compressor structure in C so that */
/* non-C language bindings to tdefl_ API don't need to worry about */
/* structure size and allocation mechanism. */
tdefl_compressor *tdefl_compressor_alloc();
void tdefl_compressor_free(tdefl_compressor *pComp);
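
/* Illustrative sketch (a hypothetical helper, not part of miniz): one-shot use of the low-level API. The
   caller supplies an output buffer large enough for the whole compressed result.

   static mz_bool deflate_once(const void *src, size_t src_len,
                               void *out, size_t out_cap, size_t *pout_len)
   {
       tdefl_status st;
       size_t in_len = src_len;
       tdefl_compressor *d = tdefl_compressor_alloc();
       if (d == NULL)
           return MZ_FALSE;
       *pout_len = out_cap;
       st = tdefl_init(d, NULL, NULL,
                       (int)tdefl_create_comp_flags_from_zip_params(MZ_DEFAULT_LEVEL, 15, MZ_DEFAULT_STRATEGY));
       if (st == TDEFL_STATUS_OKAY)
           st = tdefl_compress(d, src, &in_len, out, pout_len, TDEFL_FINISH);
       tdefl_compressor_free(d);
       return st == TDEFL_STATUS_DONE ? MZ_TRUE : MZ_FALSE;
   }

   window_bits 15 selects the zlib wrapper; -15 would produce raw deflate. */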

#ifdef __cplusplus
}
#endif
#pragma once

/* ------------------- Low-level Decompression API Definitions */

#ifdef __cplusplus
extern "C" {
#endif
/* Decompression flags used by tinfl_decompress(). */
/* TINFL_FLAG_PARSE_ZLIB_HEADER: If set, the input has a valid zlib header and ends with an adler32 checksum (it's a valid zlib stream). Otherwise, the input is a raw deflate stream. */
/* TINFL_FLAG_HAS_MORE_INPUT: If set, there are more input bytes available beyond the end of the supplied input buffer. If clear, the input buffer contains all remaining input. */
/* TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF: If set, the output buffer is large enough to hold the entire decompressed stream. If clear, the output buffer is at least the size of the dictionary (typically 32KB). */
/* TINFL_FLAG_COMPUTE_ADLER32: Force adler-32 checksum computation of the decompressed bytes. */
enum
{
    TINFL_FLAG_PARSE_ZLIB_HEADER = 1,
    TINFL_FLAG_HAS_MORE_INPUT = 2,
    TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF = 4,
    TINFL_FLAG_COMPUTE_ADLER32 = 8
};

/* High level decompression functions: */
/* tinfl_decompress_mem_to_heap() decompresses a block in memory to a heap block allocated via malloc(). */
/* On entry: */
/*  pSrc_buf, src_buf_len: Pointer and size of the Deflate or zlib source data to decompress. */
/* On return: */
/*  Function returns a pointer to the decompressed data, or NULL on failure. */
/*  *pOut_len will be set to the decompressed data's size, which could be larger than src_buf_len on uncompressible data. */
/*  The caller must call mz_free() on the returned block when it's no longer needed. */
void *tinfl_decompress_mem_to_heap(const void *pSrc_buf, size_t src_buf_len, size_t *pOut_len, int flags);

/* tinfl_decompress_mem_to_mem() decompresses a block in memory to another block in memory. */
/* Returns TINFL_DECOMPRESS_MEM_TO_MEM_FAILED on failure, or the number of bytes written on success. */
#define TINFL_DECOMPRESS_MEM_TO_MEM_FAILED ((size_t)(-1))
size_t tinfl_decompress_mem_to_mem(void *pOut_buf, size_t out_buf_len, const void *pSrc_buf, size_t src_buf_len, int flags);

/* tinfl_decompress_mem_to_callback() decompresses a block in memory to an internal 32KB buffer, and a user provided callback function will be called to flush the buffer. */
/* Returns 1 on success or 0 on failure. */
typedef int (*tinfl_put_buf_func_ptr)(const void *pBuf, int len, void *pUser);
int tinfl_decompress_mem_to_callback(const void *pIn_buf, size_t *pIn_buf_size, tinfl_put_buf_func_ptr pPut_buf_func, void *pPut_buf_user, int flags);
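
/* Illustrative sketch (not authoritative): inflating a zlib stream such as the one produced by
   tdefl_compress_mem_to_heap() above. comp/comp_len are placeholders for the compressed data, and
   use_decompressed_data() stands for whatever the caller does with the result.

   size_t out_len = 0;
   void *out = tinfl_decompress_mem_to_heap(comp, comp_len, &out_len, TINFL_FLAG_PARSE_ZLIB_HEADER);
   if (out != NULL) {
       use_decompressed_data(out, out_len);
       mz_free(out);
   }

   Pass 0 instead of TINFL_FLAG_PARSE_ZLIB_HEADER for a raw deflate stream. */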

struct tinfl_decompressor_tag;
typedef struct tinfl_decompressor_tag tinfl_decompressor;

/* Allocate the tinfl_decompressor structure in C so that */
/* non-C language bindings to tinfl_ API don't need to worry about */
/* structure size and allocation mechanism. */

tinfl_decompressor *tinfl_decompressor_alloc();
void tinfl_decompressor_free(tinfl_decompressor *pDecomp);

/* Max size of LZ dictionary. */
#define TINFL_LZ_DICT_SIZE 32768

/* Return status. */
typedef enum {
    /* This flag indicates the inflator needs 1 or more input bytes to make forward progress, but the caller is indicating that no more are available. The compressed data */
    /* is probably corrupted. If you call the inflator again with more bytes it'll try to continue processing the input but this is a BAD sign (either the data is corrupted or you called it incorrectly). */
    /* If you call it again with no input you'll just get TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS again. */
    TINFL_STATUS_FAILED_CANNOT_MAKE_PROGRESS = -4,

    /* This flag indicates that one or more of the input parameters was obviously bogus. (You can try calling it again, but if you get this error the calling code is wrong.) */
    TINFL_STATUS_BAD_PARAM = -3,

    /* This flag indicates the inflator is finished but the adler32 check of the uncompressed data didn't match. If you call it again it'll return TINFL_STATUS_DONE. */
    TINFL_STATUS_ADLER32_MISMATCH = -2,

    /* This flag indicates the inflator has somehow failed (bad code, corrupted input, etc.). If you call it again without resetting via tinfl_init() it'll just keep on returning the same status failure code. */
    TINFL_STATUS_FAILED = -1,

    /* Any status code less than TINFL_STATUS_DONE must indicate a failure. */

    /* This flag indicates the inflator has returned every byte of uncompressed data that it can, has consumed every byte that it needed, has successfully reached the end of the deflate stream, and */
    /* if zlib headers and adler32 checking enabled that it has successfully checked the uncompressed data's adler32. If you call it again you'll just get TINFL_STATUS_DONE over and over again. */
    TINFL_STATUS_DONE = 0,

    /* This flag indicates the inflator MUST have more input data (even 1 byte) before it can make any more forward progress, or you need to clear the TINFL_FLAG_HAS_MORE_INPUT */
    /* flag on the next call if you don't have any more source data. If the source data was somehow corrupted it's also possible (but unlikely) for the inflator to keep on demanding input to */
    /* proceed, so be sure to properly set the TINFL_FLAG_HAS_MORE_INPUT flag. */
    TINFL_STATUS_NEEDS_MORE_INPUT = 1,

    /* This flag indicates the inflator definitely has 1 or more bytes of uncompressed data available, but it cannot write this data into the output buffer. */
    /* Note if the source compressed data was corrupted it's possible for the inflator to return a lot of uncompressed data to the caller. I've been assuming you know how much uncompressed data to expect */
    /* (either exact or worst case) and will stop calling the inflator and fail after receiving too much. In pure streaming scenarios where you have no idea how many bytes to expect this may not be possible */
    /* so I may need to add some code to address this. */
    TINFL_STATUS_HAS_MORE_OUTPUT = 2
} tinfl_status;

/* Initializes the decompressor to its initial state. */
#define tinfl_init(r)     \
    do                    \
    {                     \
        (r)->m_state = 0; \
    }                     \
    MZ_MACRO_END
#define tinfl_get_adler32(r) (r)->m_check_adler32

/* Main low-level decompressor coroutine function. This is the only function actually needed for decompression. All the other functions are just high-level helpers for improved usability. */
/* This is a universal API, i.e. it can be used as a building block to build any desired higher level decompression API. In the limit case, it can be called once per every byte input or output. */
tinfl_status tinfl_decompress(tinfl_decompressor *r, const mz_uint8 *pIn_buf_next, size_t *pIn_buf_size, mz_uint8 *pOut_buf_start, mz_uint8 *pOut_buf_next, size_t *pOut_buf_size, const mz_uint32 decomp_flags);
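
/* Illustrative sketch (a hypothetical helper, not part of miniz): the simplest way to drive
   tinfl_decompress() directly, with the whole compressed stream in memory and an output buffer known to be
   large enough (the non-wrapping case).

   static mz_bool inflate_once(const void *comp, size_t comp_len,
                               void *out, size_t out_cap, size_t *pout_len)
   {
       tinfl_status st;
       size_t in_len = comp_len;
       tinfl_decompressor *decomp = tinfl_decompressor_alloc();
       if (decomp == NULL)
           return MZ_FALSE;
       *pout_len = out_cap;
       tinfl_init(decomp);
       st = tinfl_decompress(decomp, (const mz_uint8 *)comp, &in_len,
                             (mz_uint8 *)out, (mz_uint8 *)out, pout_len,
                             TINFL_FLAG_PARSE_ZLIB_HEADER | TINFL_FLAG_USING_NON_WRAPPING_OUTPUT_BUF);
       tinfl_decompressor_free(decomp);
       return st == TINFL_STATUS_DONE ? MZ_TRUE : MZ_FALSE;
   }

   In a true streaming setup the output buffer must instead be at least TINFL_LZ_DICT_SIZE bytes (a power-of-2
   size in the real implementation), TINFL_FLAG_HAS_MORE_INPUT must be set while more input remains, and the
   call is repeated while the status is TINFL_STATUS_NEEDS_MORE_INPUT or TINFL_STATUS_HAS_MORE_OUTPUT. */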

/* Internal/private bits follow. */
enum
{
    TINFL_MAX_HUFF_TABLES = 3,
    TINFL_MAX_HUFF_SYMBOLS_0 = 288,
    TINFL_MAX_HUFF_SYMBOLS_1 = 32,
    TINFL_MAX_HUFF_SYMBOLS_2 = 19,
    TINFL_FAST_LOOKUP_BITS = 10,
    TINFL_FAST_LOOKUP_SIZE = 1 << TINFL_FAST_LOOKUP_BITS
};

typedef struct
{
    mz_uint8 m_code_size[TINFL_MAX_HUFF_SYMBOLS_0];
    mz_int16 m_look_up[TINFL_FAST_LOOKUP_SIZE], m_tree[TINFL_MAX_HUFF_SYMBOLS_0 * 2];
} tinfl_huff_table;

#if MINIZ_HAS_64BIT_REGISTERS
#define TINFL_USE_64BIT_BITBUF 1
#else
#define TINFL_USE_64BIT_BITBUF 0
#endif

#if TINFL_USE_64BIT_BITBUF
typedef mz_uint64 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (64)
#else
typedef mz_uint32 tinfl_bit_buf_t;
#define TINFL_BITBUF_SIZE (32)
#endif

struct tinfl_decompressor_tag
{
    mz_uint32 m_state, m_num_bits, m_zhdr0, m_zhdr1, m_z_adler32, m_final, m_type, m_check_adler32, m_dist, m_counter, m_num_extra, m_table_sizes[TINFL_MAX_HUFF_TABLES];
    tinfl_bit_buf_t m_bit_buf;
    size_t m_dist_from_out_buf_start;
    tinfl_huff_table m_tables[TINFL_MAX_HUFF_TABLES];
    mz_uint8 m_raw_header[4], m_len_codes[TINFL_MAX_HUFF_SYMBOLS_0 + TINFL_MAX_HUFF_SYMBOLS_1 + 137];
};

#ifdef __cplusplus
}
#endif

#pragma once


/* ------------------- ZIP archive reading/writing */

#ifndef MINIZ_NO_ARCHIVE_APIS

#ifdef __cplusplus
extern "C" {
#endif

enum
{
    /* Note: These enums can be reduced as needed to save memory or stack space - they are pretty conservative. */
    MZ_ZIP_MAX_IO_BUF_SIZE = 64 * 1024,
    MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE = 512,
    MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE = 512
};

typedef struct
{
    /* Central directory file index. */
    mz_uint32 m_file_index;

    /* Byte offset of this entry in the archive's central directory. Note we currently only support up to UINT_MAX or less bytes in the central dir. */
    mz_uint64 m_central_dir_ofs;

    /* These fields are copied directly from the zip's central dir. */
    mz_uint16 m_version_made_by;
    mz_uint16 m_version_needed;
    mz_uint16 m_bit_flag;
    mz_uint16 m_method;

#ifndef MINIZ_NO_TIME
    MZ_TIME_T m_time;
#endif

    /* CRC-32 of uncompressed data. */
    mz_uint32 m_crc32;

    /* File's compressed size. */
    mz_uint64 m_comp_size;

    /* File's uncompressed size. Note, I've seen some old archives where directory entries had 512 bytes for their uncompressed sizes, but when you try to unpack them you actually get 0 bytes. */
    mz_uint64 m_uncomp_size;

    /* Zip internal and external file attributes. */
    mz_uint16 m_internal_attr;
    mz_uint32 m_external_attr;

    /* Entry's local header file offset in bytes. */
    mz_uint64 m_local_header_ofs;

    /* Size of comment in bytes. */
    mz_uint32 m_comment_size;

    /* MZ_TRUE if the entry appears to be a directory. */
    mz_bool m_is_directory;

    /* MZ_TRUE if the entry uses encryption/strong encryption (which miniz_zip doesn't support) */
    mz_bool m_is_encrypted;

    /* MZ_TRUE if the file is not encrypted, is not a patch file, and uses a compression method we support. */
    mz_bool m_is_supported;

    /* Filename. If string ends in '/' it's a subdirectory entry. */
    /* Guaranteed to be zero terminated, may be truncated to fit. */
    char m_filename[MZ_ZIP_MAX_ARCHIVE_FILENAME_SIZE];

    /* Comment field. */
    /* Guaranteed to be zero terminated, may be truncated to fit. */
    char m_comment[MZ_ZIP_MAX_ARCHIVE_FILE_COMMENT_SIZE];

} mz_zip_archive_file_stat;

typedef size_t (*mz_file_read_func)(void *pOpaque, mz_uint64 file_ofs, void *pBuf, size_t n);
typedef size_t (*mz_file_write_func)(void *pOpaque, mz_uint64 file_ofs, const void *pBuf, size_t n);
typedef mz_bool (*mz_file_needs_keepalive)(void *pOpaque);

struct mz_zip_internal_state_tag;
typedef struct mz_zip_internal_state_tag mz_zip_internal_state;

typedef enum {
    MZ_ZIP_MODE_INVALID = 0,
    MZ_ZIP_MODE_READING = 1,
    MZ_ZIP_MODE_WRITING = 2,
    MZ_ZIP_MODE_WRITING_HAS_BEEN_FINALIZED = 3
} mz_zip_mode;

typedef enum {
    MZ_ZIP_FLAG_CASE_SENSITIVE = 0x0100,
    MZ_ZIP_FLAG_IGNORE_PATH = 0x0200,
    MZ_ZIP_FLAG_COMPRESSED_DATA = 0x0400,
    MZ_ZIP_FLAG_DO_NOT_SORT_CENTRAL_DIRECTORY = 0x0800,
    MZ_ZIP_FLAG_VALIDATE_LOCATE_FILE_FLAG = 0x1000, /* if enabled, mz_zip_reader_locate_file() will be called on each file as it's validated to ensure the func finds the file in the central dir (intended for testing) */
    MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY = 0x2000,     /* validate the local headers, but don't decompress the entire file and check the crc32 */
    MZ_ZIP_FLAG_WRITE_ZIP64 = 0x4000,               /* always use the zip64 file format, instead of the original zip file format with automatic switch to zip64. Use as flags parameter with mz_zip_writer_init*_v2 */
    MZ_ZIP_FLAG_WRITE_ALLOW_READING = 0x8000,
    MZ_ZIP_FLAG_ASCII_FILENAME = 0x10000
} mz_zip_flags;

typedef enum {
    MZ_ZIP_TYPE_INVALID = 0,
    MZ_ZIP_TYPE_USER,
    MZ_ZIP_TYPE_MEMORY,
    MZ_ZIP_TYPE_HEAP,
    MZ_ZIP_TYPE_FILE,
    MZ_ZIP_TYPE_CFILE,
    MZ_ZIP_TOTAL_TYPES
} mz_zip_type;

/* miniz error codes. Be sure to update mz_zip_get_error_string() if you add or modify this enum. */
typedef enum {
    MZ_ZIP_NO_ERROR = 0,
    MZ_ZIP_UNDEFINED_ERROR,
    MZ_ZIP_TOO_MANY_FILES,
    MZ_ZIP_FILE_TOO_LARGE,
    MZ_ZIP_UNSUPPORTED_METHOD,
    MZ_ZIP_UNSUPPORTED_ENCRYPTION,
    MZ_ZIP_UNSUPPORTED_FEATURE,
    MZ_ZIP_FAILED_FINDING_CENTRAL_DIR,
    MZ_ZIP_NOT_AN_ARCHIVE,
    MZ_ZIP_INVALID_HEADER_OR_CORRUPTED,
    MZ_ZIP_UNSUPPORTED_MULTIDISK,
    MZ_ZIP_DECOMPRESSION_FAILED,
    MZ_ZIP_COMPRESSION_FAILED,
    MZ_ZIP_UNEXPECTED_DECOMPRESSED_SIZE,
    MZ_ZIP_CRC_CHECK_FAILED,
    MZ_ZIP_UNSUPPORTED_CDIR_SIZE,
    MZ_ZIP_ALLOC_FAILED,
    MZ_ZIP_FILE_OPEN_FAILED,
    MZ_ZIP_FILE_CREATE_FAILED,
    MZ_ZIP_FILE_WRITE_FAILED,
    MZ_ZIP_FILE_READ_FAILED,
    MZ_ZIP_FILE_CLOSE_FAILED,
    MZ_ZIP_FILE_SEEK_FAILED,
    MZ_ZIP_FILE_STAT_FAILED,
    MZ_ZIP_INVALID_PARAMETER,
    MZ_ZIP_INVALID_FILENAME,
    MZ_ZIP_BUF_TOO_SMALL,
    MZ_ZIP_INTERNAL_ERROR,
    MZ_ZIP_FILE_NOT_FOUND,
    MZ_ZIP_ARCHIVE_TOO_LARGE,
    MZ_ZIP_VALIDATION_FAILED,
    MZ_ZIP_WRITE_CALLBACK_FAILED,
    MZ_ZIP_TOTAL_ERRORS
} mz_zip_error;

typedef struct
{
    mz_uint64 m_archive_size;
    mz_uint64 m_central_directory_file_ofs;

    /* We only support up to UINT32_MAX files in zip64 mode. */
    mz_uint32 m_total_files;
    mz_zip_mode m_zip_mode;
    mz_zip_type m_zip_type;
    mz_zip_error m_last_error;

    mz_uint64 m_file_offset_alignment;

    mz_alloc_func m_pAlloc;
    mz_free_func m_pFree;
    mz_realloc_func m_pRealloc;
    void *m_pAlloc_opaque;

    mz_file_read_func m_pRead;
    mz_file_write_func m_pWrite;
    mz_file_needs_keepalive m_pNeeds_keepalive;
    void *m_pIO_opaque;

    mz_zip_internal_state *m_pState;

} mz_zip_archive;

typedef struct
{
    mz_zip_archive *pZip;
    mz_uint flags;

    int status;
#ifndef MINIZ_DISABLE_ZIP_READER_CRC32_CHECKS
    mz_uint file_crc32;
#endif
    mz_uint64 read_buf_size, read_buf_ofs, read_buf_avail, comp_remaining, out_buf_ofs, cur_file_ofs;
    mz_zip_archive_file_stat file_stat;
    void *pRead_buf;
    void *pWrite_buf;

    size_t out_blk_remain;

    tinfl_decompressor inflator;

} mz_zip_reader_extract_iter_state;

/* -------- ZIP reading */

/* Inits a ZIP archive reader. */
/* These functions read and validate the archive's central directory. */
mz_bool mz_zip_reader_init(mz_zip_archive *pZip, mz_uint64 size, mz_uint flags);

mz_bool mz_zip_reader_init_mem(mz_zip_archive *pZip, const void *pMem, size_t size, mz_uint flags);

#ifndef MINIZ_NO_STDIO
/* Reads an archive from a disk file. */
/* file_start_ofs is the file offset where the archive actually begins, or 0. */
/* actual_archive_size is the true total size of the archive, which may be smaller than the file's actual size on disk. If zero the entire file is treated as the archive. */
#ifdef _WIN32
#define WCHAR_TYPE wchar_t
#else
#define WCHAR_TYPE char
#endif
mz_bool mz_zip_reader_init_file(mz_zip_archive *pZip, const WCHAR_TYPE *pFilename, mz_uint32 flags);
mz_bool mz_zip_reader_init_file_v2(mz_zip_archive *pZip, const WCHAR_TYPE *pFilename, mz_uint flags, mz_uint64 file_start_ofs, mz_uint64 archive_size);
/* Read an archive from an already opened FILE, beginning at the current file position. */
/* The archive is assumed to be archive_size bytes long. If archive_size is < 0, then the entire rest of the file is assumed to contain the archive. */
/* The FILE will NOT be closed when mz_zip_reader_end() is called. */
mz_bool mz_zip_reader_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint64 archive_size, mz_uint flags);
#endif

/* Ends archive reading, freeing all allocations, and closing the input archive file if mz_zip_reader_init_file() was used. */
mz_bool mz_zip_reader_end(mz_zip_archive *pZip);
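
/* Illustrative sketch (a hypothetical helper, not part of miniz): enumerating an archive and pulling every
   regular file into memory. Assumes a non-Windows build where WCHAR_TYPE is char (see the WCHAR_TYPE
   definition above) and the default heap allocators (so mz_free() matches). use_file_data() stands for
   whatever the caller does with each entry.

   static void dump_archive(const char *zip_path)
   {
       mz_zip_archive zip;
       mz_uint i, n;
       mz_zip_zero_struct(&zip);
       if (!mz_zip_reader_init_file(&zip, zip_path, 0))
           return;
       n = mz_zip_reader_get_num_files(&zip);
       for (i = 0; i < n; i++) {
           mz_zip_archive_file_stat st;
           size_t size = 0;
           void *data;
           if (!mz_zip_reader_file_stat(&zip, i, &st))
               continue;
           if (st.m_is_directory || !st.m_is_supported)
               continue;
           data = mz_zip_reader_extract_to_heap(&zip, i, &size, 0);
           if (data != NULL) {
               use_file_data(st.m_filename, data, size);
               mz_free(data);
           }
       }
       mz_zip_reader_end(&zip);
   }
*/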

/* -------- ZIP reading or writing */

/* Clears a mz_zip_archive struct to all zeros. */
/* Important: This must be done before passing the struct to any mz_zip functions. */
void mz_zip_zero_struct(mz_zip_archive *pZip);

mz_zip_mode mz_zip_get_mode(mz_zip_archive *pZip);
mz_zip_type mz_zip_get_type(mz_zip_archive *pZip);

/* Returns the total number of files in the archive. */
mz_uint mz_zip_reader_get_num_files(mz_zip_archive *pZip);

mz_uint64 mz_zip_get_archive_size(mz_zip_archive *pZip);
mz_uint64 mz_zip_get_archive_file_start_offset(mz_zip_archive *pZip);
MZ_FILE *mz_zip_get_cfile(mz_zip_archive *pZip);

/* Reads n bytes of raw archive data, starting at file offset file_ofs, to pBuf. */
size_t mz_zip_read_archive_data(mz_zip_archive *pZip, mz_uint64 file_ofs, void *pBuf, size_t n);

/* All mz_zip funcs set the m_last_error field in the mz_zip_archive struct. These functions retrieve/manipulate this field. */
/* Note that the m_last_error functionality is not thread safe. */
mz_zip_error mz_zip_set_last_error(mz_zip_archive *pZip, mz_zip_error err_num);
mz_zip_error mz_zip_peek_last_error(mz_zip_archive *pZip);
mz_zip_error mz_zip_clear_last_error(mz_zip_archive *pZip);
mz_zip_error mz_zip_get_last_error(mz_zip_archive *pZip);
const char *mz_zip_get_error_string(mz_zip_error mz_err);

/* MZ_TRUE if the archive file entry is a directory entry. */
mz_bool mz_zip_reader_is_file_a_directory(mz_zip_archive *pZip, mz_uint file_index);

/* MZ_TRUE if the file is encrypted/strong encrypted. */
mz_bool mz_zip_reader_is_file_encrypted(mz_zip_archive *pZip, mz_uint file_index);

/* MZ_TRUE if the compression method is supported, and the file is not encrypted, and the file is not a compressed patch file. */
mz_bool mz_zip_reader_is_file_supported(mz_zip_archive *pZip, mz_uint file_index);

/* Retrieves the filename of an archive file entry. */
/* Returns the number of bytes written to pFilename, or if filename_buf_size is 0 this function returns the number of bytes needed to fully store the filename. */
mz_uint mz_zip_reader_get_filename(mz_zip_archive *pZip, mz_uint file_index, char *pFilename, mz_uint filename_buf_size);

/* Attempts to locate a file in the archive's central directory. */
/* Valid flags: MZ_ZIP_FLAG_CASE_SENSITIVE, MZ_ZIP_FLAG_IGNORE_PATH */
/* Returns -1 if the file cannot be found. */
int mz_zip_reader_locate_file(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags);
int mz_zip_reader_locate_file_v2(mz_zip_archive *pZip, const char *pName, const char *pComment, mz_uint flags, mz_uint32 *file_index);

/* Returns detailed information about an archive file entry. */
mz_bool mz_zip_reader_file_stat(mz_zip_archive *pZip, mz_uint file_index, mz_zip_archive_file_stat *pStat);

/* MZ_TRUE if the file is in zip64 format. */
/* A file is considered zip64 if it contained a zip64 end of central directory marker, or if it contained any zip64 extended file information fields in the central directory. */
mz_bool mz_zip_is_zip64(mz_zip_archive *pZip);

/* Returns the total central directory size in bytes. */
/* The current max supported size is <= MZ_UINT32_MAX. */
size_t mz_zip_get_central_dir_size(mz_zip_archive *pZip);

/* Extracts an archive file to a memory buffer using no memory allocation. */
/* There must be at least enough room on the stack to store the inflator's state (~34KB or so). */
mz_bool mz_zip_reader_extract_to_mem_no_alloc(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);
mz_bool mz_zip_reader_extract_file_to_mem_no_alloc(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags, void *pUser_read_buf, size_t user_read_buf_size);

/* Extracts an archive file to a memory buffer. */
mz_bool mz_zip_reader_extract_to_mem(mz_zip_archive *pZip, mz_uint file_index, void *pBuf, size_t buf_size, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_mem(mz_zip_archive *pZip, const char *pFilename, void *pBuf, size_t buf_size, mz_uint flags);

/* Extracts an archive file to a dynamically allocated heap buffer. */
/* The memory will be allocated via the mz_zip_archive's alloc/realloc functions. */
/* Returns NULL and sets the last error on failure. */
void *mz_zip_reader_extract_to_heap(mz_zip_archive *pZip, mz_uint file_index, size_t *pSize, mz_uint flags);
void *mz_zip_reader_extract_file_to_heap(mz_zip_archive *pZip, const char *pFilename, size_t *pSize, mz_uint flags);

/* Extracts an archive file using a callback function to output the file's data. */
mz_bool mz_zip_reader_extract_to_callback(mz_zip_archive *pZip, mz_uint file_index, mz_file_write_func pCallback, void *pOpaque, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_callback(mz_zip_archive *pZip, const char *pFilename, mz_file_write_func pCallback, void *pOpaque, mz_uint flags);

/* Extract a file iteratively */
mz_zip_reader_extract_iter_state* mz_zip_reader_extract_iter_new(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags);
mz_zip_reader_extract_iter_state* mz_zip_reader_extract_file_iter_new(mz_zip_archive *pZip, const char *pFilename, mz_uint flags);
size_t mz_zip_reader_extract_iter_read(mz_zip_reader_extract_iter_state* pState, void* pvBuf, size_t buf_size);
mz_bool mz_zip_reader_extract_iter_free(mz_zip_reader_extract_iter_state* pState);
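
/* Illustrative sketch (not authoritative): streaming one entry out of an already initialized reader without
   holding the whole file in memory. zip, file_index and fout are placeholders supplied by the caller.

   mz_zip_reader_extract_iter_state *it = mz_zip_reader_extract_iter_new(&zip, file_index, 0);
   if (it != NULL) {
       char buf[16384];
       size_t nread;
       while ((nread = mz_zip_reader_extract_iter_read(it, buf, sizeof(buf))) > 0)
           fwrite(buf, 1, nread, fout);
       mz_zip_reader_extract_iter_free(it);
   }
*/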

#ifndef MINIZ_NO_STDIO
/* Extracts an archive file to a disk file and sets its last accessed and modified times. */
/* This function only extracts files, not archive directory records. */
mz_bool mz_zip_reader_extract_to_file(mz_zip_archive *pZip, mz_uint file_index, const char *pDst_filename, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_file(mz_zip_archive *pZip, const char *pArchive_filename, const char *pDst_filename, mz_uint flags);

/* Extracts an archive file starting at the current position in the destination FILE stream. */
mz_bool mz_zip_reader_extract_to_cfile(mz_zip_archive *pZip, mz_uint file_index, MZ_FILE *File, mz_uint flags);
mz_bool mz_zip_reader_extract_file_to_cfile(mz_zip_archive *pZip, const char *pArchive_filename, MZ_FILE *pFile, mz_uint flags);
#endif

#if 0
/* TODO */
	typedef void *mz_zip_streaming_extract_state_ptr;
	mz_zip_streaming_extract_state_ptr mz_zip_streaming_extract_begin(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags);
	uint64_t mz_zip_streaming_extract_get_size(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
	uint64_t mz_zip_streaming_extract_get_cur_ofs(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
	mz_bool mz_zip_streaming_extract_seek(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, uint64_t new_ofs);
	size_t mz_zip_streaming_extract_read(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState, void *pBuf, size_t buf_size);
	mz_bool mz_zip_streaming_extract_end(mz_zip_archive *pZip, mz_zip_streaming_extract_state_ptr pState);
#endif

/* This function compares the archive's local headers, the optional local zip64 extended information block, and the optional descriptor following the compressed data vs. the data in the central directory. */
/* It also validates that each file can be successfully uncompressed unless the MZ_ZIP_FLAG_VALIDATE_HEADERS_ONLY is specified. */
mz_bool mz_zip_validate_file(mz_zip_archive *pZip, mz_uint file_index, mz_uint flags);

/* Validates an entire archive by calling mz_zip_validate_file() on each file. */
mz_bool mz_zip_validate_archive(mz_zip_archive *pZip, mz_uint flags);

/* Misc utils/helpers, valid for ZIP reading or writing */
mz_bool mz_zip_validate_mem_archive(const void *pMem, size_t size, mz_uint flags, mz_zip_error *pErr);
mz_bool mz_zip_validate_file_archive(const WCHAR_TYPE *pFilename, mz_uint flags, mz_zip_error *pErr);

/* Universal end function - calls either mz_zip_reader_end() or mz_zip_writer_end(). */
mz_bool mz_zip_end(mz_zip_archive *pZip);

/* -------- ZIP writing */

#ifndef MINIZ_NO_ARCHIVE_WRITING_APIS

/* Inits a ZIP archive writer. */
/* Set pZip->m_pWrite (and pZip->m_pIO_opaque) before calling mz_zip_writer_init or mz_zip_writer_init_v2. */
/* The output is streamable, i.e. file_ofs in mz_file_write_func always increases only by n. */
mz_bool mz_zip_writer_init(mz_zip_archive *pZip, mz_uint64 existing_size);
mz_bool mz_zip_writer_init_v2(mz_zip_archive *pZip, mz_uint64 existing_size, mz_uint flags);

mz_bool mz_zip_writer_init_heap(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size);
mz_bool mz_zip_writer_init_heap_v2(mz_zip_archive *pZip, size_t size_to_reserve_at_beginning, size_t initial_allocation_size, mz_uint flags);

#ifndef MINIZ_NO_STDIO
mz_bool mz_zip_writer_init_file(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning);
mz_bool mz_zip_writer_init_file_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint64 size_to_reserve_at_beginning, mz_uint flags);
mz_bool mz_zip_writer_init_cfile(mz_zip_archive *pZip, MZ_FILE *pFile, mz_uint flags);
#endif

/* Converts a ZIP archive reader object into a writer object, to allow efficient in-place file appends to occur on an existing archive. */
/* For archives opened using mz_zip_reader_init_file, pFilename must be the archive's filename so it can be reopened for writing. If the file can't be reopened, mz_zip_reader_end() will be called. */
/* For archives opened using mz_zip_reader_init_mem, the memory block must be growable using the realloc callback (which defaults to realloc unless you've overridden it). */
/* Finally, for archives opened using mz_zip_reader_init, the mz_zip_archive's user provided m_pWrite function cannot be NULL. */
/* Note: In-place archive modification is not recommended unless you know what you're doing, because if execution stops or something goes wrong before */
/* the archive is finalized the file's central directory will be hosed. */
mz_bool mz_zip_writer_init_from_reader(mz_zip_archive *pZip, const char *pFilename);
mz_bool mz_zip_writer_init_from_reader_v2(mz_zip_archive *pZip, const char *pFilename, mz_uint flags);

/* Adds the contents of a memory buffer to an archive. These functions record the current local time into the archive. */
/* To add a directory entry, call this method with an archive name ending in a forward slash, with an empty buffer. */
/* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION. */
mz_bool mz_zip_writer_add_mem(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, mz_uint level_and_flags);

/* Like mz_zip_writer_add_mem(), except you can specify a file comment field, and optionally supply the function with already compressed data. */
/* uncomp_size/uncomp_crc32 are only used if the MZ_ZIP_FLAG_COMPRESSED_DATA flag is specified. */
mz_bool mz_zip_writer_add_mem_ex(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags,
                                 mz_uint64 uncomp_size, mz_uint32 uncomp_crc32);

mz_bool mz_zip_writer_add_mem_ex_v2(mz_zip_archive *pZip, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags,
                                    mz_uint64 uncomp_size, mz_uint32 uncomp_crc32, MZ_TIME_T *last_modified, const char *user_extra_data_local, mz_uint user_extra_data_local_len,
                                    const char *user_extra_data_central, mz_uint user_extra_data_central_len);

#ifndef MINIZ_NO_STDIO
/* Adds the contents of a disk file to an archive. This function also records the disk file's modified time into the archive. */
/* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION. */
mz_bool mz_zip_writer_add_file(mz_zip_archive *pZip, const char *pArchive_name, const char *pSrc_filename, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);

/* Like mz_zip_writer_add_file(), except the file data is read from the specified FILE stream. */
mz_bool mz_zip_writer_add_cfile(mz_zip_archive *pZip, const char *pArchive_name, MZ_FILE *pSrc_file, mz_uint64 size_to_add,
                                const MZ_TIME_T *pFile_time, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, const char *user_extra_data_local, mz_uint user_extra_data_local_len,
                                const char *user_extra_data_central, mz_uint user_extra_data_central_len);
#endif

/* Adds a file to an archive by fully cloning the data from another archive. */
/* This function fully clones the source file's compressed data (no recompression), along with its full filename, extra data (it may add or modify the zip64 local header extra data field), and the optional descriptor following the compressed data. */
mz_bool mz_zip_writer_add_from_zip_reader(mz_zip_archive *pZip, mz_zip_archive *pSource_zip, mz_uint src_file_index);

/* Finalizes the archive by writing the central directory records followed by the end of central directory record. */
/* After an archive is finalized, the only valid call on the mz_zip_archive struct is mz_zip_writer_end(). */
/* An archive must be manually finalized by calling this function for it to be valid. */
mz_bool mz_zip_writer_finalize_archive(mz_zip_archive *pZip);

/* Finalizes a heap archive, returning a pointer to the heap block and its size. */
/* The heap block will be allocated using the mz_zip_archive's alloc/realloc callbacks. */
mz_bool mz_zip_writer_finalize_heap_archive(mz_zip_archive *pZip, void **ppBuf, size_t *pSize);

/* Ends archive writing, freeing all allocations, and closing the output file if mz_zip_writer_init_file() was used. */
/* Note for the archive to be valid, it *must* have been finalized before ending (this function will not do it for you). */
mz_bool mz_zip_writer_end(mz_zip_archive *pZip);
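
/* Illustrative sketch (a hypothetical helper, not part of miniz, and compiled out in this build since
   MINIZ_NO_ARCHIVE_WRITING_APIS is defined above): creating a small archive from scratch. Requires
   <string.h> for strlen().

   static mz_bool write_small_zip(const char *zip_path)
   {
       const char *text = "hello from miniz";
       mz_zip_archive zw;
       mz_zip_zero_struct(&zw);
       if (!mz_zip_writer_init_file(&zw, zip_path, 0))
           return MZ_FALSE;
       if (!mz_zip_writer_add_mem(&zw, "docs/hello.txt", text, strlen(text), MZ_DEFAULT_LEVEL) ||
           !mz_zip_writer_finalize_archive(&zw)) {
           mz_zip_writer_end(&zw);
           return MZ_FALSE;
       }
       return mz_zip_writer_end(&zw);
   }
*/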

/* -------- Misc. high-level helper functions: */

/* mz_zip_add_mem_to_archive_file_in_place() efficiently (but not atomically) appends a memory blob to a ZIP archive. */
/* Note this is NOT a fully safe operation. If it crashes or dies in some way your archive can be left in a screwed up state (without a central directory). */
/* level_and_flags - compression level (0-10, see MZ_BEST_SPEED, MZ_BEST_COMPRESSION, etc.) logically OR'd with zero or more mz_zip_flags, or just set to MZ_DEFAULT_COMPRESSION. */
/* TODO: Perhaps add an option to leave the existing central dir in place in case the add dies? We could then truncate the file (so the old central dir would be at the end) if something goes wrong. */
mz_bool mz_zip_add_mem_to_archive_file_in_place(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags);
mz_bool mz_zip_add_mem_to_archive_file_in_place_v2(const char *pZip_filename, const char *pArchive_name, const void *pBuf, size_t buf_size, const void *pComment, mz_uint16 comment_size, mz_uint level_and_flags, mz_zip_error *pErr);

/* Reads a single file from an archive into a heap block. */
/* If pComment is not NULL, only the file with the specified comment will be extracted. */
/* Returns NULL on failure. */
void *mz_zip_extract_archive_file_to_heap(const char *pZip_filename, const char *pArchive_name, size_t *pSize, mz_uint flags);
void *mz_zip_extract_archive_file_to_heap_v2(const char *pZip_filename, const char *pArchive_name, const char *pComment, size_t *pSize, mz_uint flags, mz_zip_error *pErr);
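/* Usage sketch (added for illustration, not part of the original miniz docs):
   round-trip a small memory blob through the two helpers above. The archive
   name "test.zip" and member name "hello.txt" are arbitrary examples, and
   error handling is minimal.

     const char *txt = "hello, zip";
     if (mz_zip_add_mem_to_archive_file_in_place(
             "test.zip", "hello.txt", txt, strlen(txt), NULL, 0,
             MZ_DEFAULT_COMPRESSION)) {
         size_t size = 0;
         void *data = mz_zip_extract_archive_file_to_heap(
             "test.zip", "hello.txt", &size, 0);
         if (data) {
             // 'size' bytes at 'data' hold the uncompressed member
             mz_free(data);
         }
     }
*/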

#endif /* #ifndef MINIZ_NO_ARCHIVE_WRITING_APIS */

#ifdef __cplusplus
}
#endif

#endif /* MINIZ_NO_ARCHIVE_APIS */
recoll-1.26.3/utils/execmd.h0000644000175000017500000002557513566424763012620 00000000000000/* Copyright (C) 2004-2018 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU Lesser General Public License as published by
 *   the Free Software Foundation; either version 2.1 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU Lesser General Public License for more details.
 *
 *   You should have received a copy of the GNU Lesser General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 *   51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _EXECMD_H_INCLUDED_
#define _EXECMD_H_INCLUDED_

#include <sys/types.h>
#include <string>
#include <vector>
#include <stack>

/**
 * Callback function object to advise of new data arrival, or just periodic
 * heartbeat if cnt is 0.
 *
 * To interrupt the command, the code using ExecCmd should either
 * raise an exception inside newData() (and catch it in doexec's caller), or
 * call ExecCmd::setKill()
 *
 */
class ExecCmdAdvise {
public:
    virtual ~ExecCmdAdvise() {}
    virtual void newData(int cnt) = 0;
};
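
/*
 * Minimal sketch of an advise callback (illustrative, not part of the
 * original header): it polls an external flag on each heartbeat and throws
 * to interrupt the command. The flag and exception type are arbitrary
 * choices; any exception caught around doexec() will do, or
 * ExecCmd::setKill() can be called instead.
 *
 *   class MyCancelAdvise : public ExecCmdAdvise {
 *   public:
 *       explicit MyCancelAdvise(const bool *flag) : m_flag(flag) {}
 *       void newData(int cnt) override {
 *           if (*m_flag)
 *               throw std::runtime_error("command cancelled");
 *       }
 *   private:
 *       const bool *m_flag;
 *   };
 */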

/**
 * Callback function object to provide more input data. The data must be
 * stored into the initial input string; set it to empty to signify EOF.
 */
class ExecCmdProvide {
public:
    virtual ~ExecCmdProvide() {}
    virtual void newData() = 0;
};

/**
 * Execute command possibly taking both input and output (will do
 * asynchronous io as appropriate for things to work).
 *
 * Input to the command can be provided either once in a parameter to doexec
 * or provided in chunks by setting a callback which will be called to
 * request new data. In this case, the 'input' parameter to doexec may be
 * empty (but not null)
 *
 * Output from the command is normally returned in a single string, but a
 * callback can be set to be called whenever new data arrives, in which case
 * it is permissible to consume the data and erase the string.
 *
 * Note that SIGPIPE should be ignored and SIGCLD blocked when calling doexec,
 * else things might fail randomly. (This is not done inside the class because
 * of concerns with multithreaded programs).
 *
 */
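/*
 * Usage sketch (illustrative, not from the original documentation): run a
 * command with no input and collect its stdout, as described above. The
 * command and arguments are arbitrary examples; the signal setup mentioned
 * above (SIGPIPE/SIGCLD) remains the caller's responsibility.
 *
 *   ExecCmd cmd;
 *   std::vector<std::string> args{"-l", "/tmp"};
 *   std::string output;
 *   int status = cmd.doexec("ls", args, nullptr, &output);
 *   if (status == 0) {
 *       // 'output' now holds the command's stdout
 *   }
 */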
class ExecCmd {
public:
    // Use vfork instead of fork. Our vfork usage is multithread-compatible as
    // far as I can see, but just in case...
    static void useVfork(bool on);

    /**
     * Add/replace environment variable before executing command. This must
     * be called before doexec() to have an effect (possibly multiple
     * times for several variables).
     * @param envassign an environment assignment string ("name=value")
     */
    void putenv(const std::string& envassign);
    void putenv(const std::string& name, const std::string& value);

    /**
     * Try to set a limit on child process vm size. This will use
     * setrlimit() and RLIMIT_AS/VMEM if available. Parameter is in
     * units of 2**10. Must be called before starting the command, default
     * is inherit from parent.
     */
    void setrlimit_as(int mbytes);

    /**
     * Set function objects to call whenever new data is available or on
     * select timeout. The data itself is stored in the output string.
     * Must be set before calling doexec.
     */
    void setAdvise(ExecCmdAdvise *adv);
    /*
     * Set function object to call whenever new data is needed. The
     * data should be stored in the input string. Must be set before
     * calling doexec()
     */
    void setProvide(ExecCmdProvide *p);

    /**
     * Set the select timeout in milliseconds. The default is 1 second (1000 mS).
     * This is NOT a time after which an error will occur, but the period of
     * the calls to the advise routine (which normally checks for cancellation).
     */
    void setTimeout(int mS);

    /**
     * Set the destination file for stderr data. The default is to leave it
     * alone (it will usually go to the terminal, or to wherever the desktop
     * messages go). There is currently no option to capture stderr data
     * into a program variable.
     * If the parameter can't be opened for writing, the command's
     * stderr will be closed.
     */
    void setStderr(const std::string& stderrFile);

    /**
     * Set kill wait timeout. This is the maximum time we'll wait for
     * the command after sending a SIGTERM, before sending a SIGKILL.
     *
     * @param mS the maximum number of mS to wait. Note that values
     *    below 1000 mS make no sense as the program will sleep for
     *    longer time before retrying the waitpid(). Use -1 for
     *    forever (bad idea), 0 for absolutely no pity.
     */
    void setKillTimeout(int mS);

    /**
     * Execute command.
     *
     * Both input and output can be specified, and asynchronous
     * io (select-based) is used to prevent blocking. This will not
     * work if input and output need to be synchronized (ie: Q/A), but
     * works ok for filtering.
     * The function is exception-safe. In case an exception occurs in the
     * advise callback, fds and pids will be cleaned-up properly.
     *
     * @param cmd the program to execute. This must be an absolute file name
     *   or exist in the PATH.
     * @param args the argument vector (NOT including argv[0]).
     * @param input Input to send TO the command.
     * @param output Output FROM the command.
     * @return the exec output status (0 if ok), or -1
     */
    int doexec(const std::string& cmd, const std::vector<std::string>& args,
               const std::string *input = 0,
               std::string *output = 0);

    /** Same as doexec but cmd and args in one vector */
    int doexec1(const std::vector<std::string>& args,
                const std::string *input = 0,
                std::string *output = 0) {
        if (args.empty()) {
            return -1;
        }
        return doexec(args[0],
                      std::vector<std::string>(args.begin() + 1, args.end()),
                      input, output);
    }

    /*
     * The next four methods can be used when a Q/A dialog needs to be
     * performed with the command
     */
    int startExec(const std::string& cmd, const std::vector<std::string>& args,
                  bool has_input, bool has_output);
    int send(const std::string& data);
    int receive(std::string& data, int cnt = -1);

    /** Read line. Will call back periodically to check for cancellation */
    int getline(std::string& data);

    /** Read line. Timeout after timeosecs seconds */
    int getline(std::string& data, int timeosecs);

    int wait();
    /** Wait with WNOHANG set.
        @return true if the process exited, false otherwise.
        @param status output: the wait(2) call's status value */
    bool maybereap(int *status);

    pid_t getChildPid();

    /**
     * Cancel/kill command. This can be called from another thread or
     * from the advise callback, which could also raise an exception
     * to accomplish the same thing. In the owner thread, any I/O loop
     * will exit at the next iteration, and the process will be waited for.
     */
    void setKill();

    /**
     * Get rid of the current process (become ready for a new start). This
     * will politely signal the process to stop, wait a moment, then
     * terminate it. This is a blocking call.
     */
    void zapChild();

    /**
     * Request process termination (SIGTERM or equivalent). This returns
     * immediately.
     */
    bool requestChildExit();

    enum ExFlags {EXF_NONE,
                  // Only does anything on windows. Used when starting
                  // a viewer. The default is to hide the window,
                  // because it avoids windows appearing and
                  // disappearing when executing stuff for previewing
                  EXF_SHOWWINDOW = 1,
                  // Windows only: show maximized
                  EXF_MAXIMIZED = 2,
                 };
    ExecCmd(int flags = 0);
    ~ExecCmd();

    /**
     * Utility routine: check if/where a command is found according to the
     * current PATH (or the specified one).
     * @param cmd command name
     * @param exe on return, executable path name if found
     * @param path exec search path to use instead of getenv(PATH)
     * @return true if found
     */
    static bool which(const std::string& cmd, std::string& exe, const char* path = 0);

    /**
     * Execute command and return stdout output in a string
     * @param cmd input: command and args
     * @param out output: what the command printed
     * @return true if exec status was 0
     */
    static bool backtick(const std::vector<std::string> cmd, std::string& out);

    class Internal;
private:
    Internal *m;
    /* Copy construction and assignment are private and forbidden */
    ExecCmd(const ExecCmd&) {}
    ExecCmd& operator=(const ExecCmd&) {
        return *this;
    };
};


/**
 * Re-execute the current process with the same arguments.
 *
 * Note that there are some limitations:
 *  - argv[0] has to be valid: an executable name which will be found in
 *    the path when exec is called in the initial working directory. This is
 *    by no means guaranteed. The shells do this, but argv[0] could be an
 *    arbitrary string.
 *  - The initial working directory must be found and remain valid.
 *  - We don't try to do anything with fd 0,1,2. If they were changed by the
 *    program, their initial meaning won't be the same as at the moment of the
 *    initial invocation.
 *  - We don't restore the signals. Signals set to be blocked
 *    or ignored by the program will remain ignored even if this was not their
 *    initial state.
 *  - The environment is also not restored.
 *  - Other system aspects?
 *  - Other program state: application-dependent. Any external cleanup
 *    (temp files etc.) must be performed by the application. ReExec()
 *    duplicates the atexit() function to make this easier, but the
 *    ReExec().atexit() calls must be done explicitly, this is not automatic.
 *
 * In short, this is usable in reasonably controlled situations and if there
 * are no security issues involved, but this does not perform miracles.
 */
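/*
 * Usage sketch (illustrative): record the initial invocation early in main(),
 * then re-execute later, e.g. after a configuration change. The "-noconfirm"
 * option is a hypothetical example of an inserted argument; on failure,
 * reexec() returns and getreason() explains why.
 *
 *   int main(int argc, char *argv[])
 *   {
 *       static ReExec reexec;
 *       reexec.init(argc, argv);
 *       // ... later, when a restart is needed:
 *       reexec.insertArgs(std::vector<std::string>{"-noconfirm"});
 *       reexec.reexec(); // does not return if the exec succeeds
 *       return 0;
 *   }
 */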
class ReExec {
public:
    ReExec() {}
    ReExec(int argc, char *argv[]);
    void init(int argc, char *argv[]);
    int atexit(void (*function)(void)) {
        m_atexitfuncs.push(function);
        return 0;
    }
    void reexec();
    const std::string& getreason() {
        return m_reason;
    }
    // Insert new args into the initial argv. idx designates the place
    // before which the new args are inserted (the default of 1
    // inserts after argv[0] which would probably be an appropriate
    // place for additional options)
    void insertArgs(const std::vector<std::string>& args, int idx = 1);
    void removeArg(const std::string& arg);
private:
    std::vector<std::string> m_argv;
    std::string m_curdir;
    int    m_cfd;
    std::string m_reason;
    std::stack<void (*)(void)> m_atexitfuncs;
};

#endif /* _EXECMD_H_INCLUDED_ */
recoll-1.26.3/utils/md5.h0000644000175000017500000000256013533651561012015 00000000000000/*	$OpenBSD: md5.h,v 1.15 2004/05/03 17:30:14 millert Exp $	*/

/*
 * This code implements the MD5 message-digest algorithm.
 * The algorithm is due to Ron Rivest.  This code was
 * written by Colin Plumb in 1993, no copyright is claimed.
 * This code is in the public domain; do with it what you wish.
 *
 * Equivalent code is available from RSA Data Security, Inc.
 * This code has been tested against that, and is equivalent,
 * except that you don't need to include two pages of legalese
 * with every copy.
 */

#ifndef _MD5_H_
#define _MD5_H_

#include <sys/types.h>
#include <stdint.h>

#define	MD5_BLOCK_LENGTH		64
#define	MD5_DIGEST_LENGTH		16
#define	MD5_DIGEST_STRING_LENGTH	(MD5_DIGEST_LENGTH * 2 + 1)

typedef struct MD5Context {
	uint32_t state[4];			/* state */
	uint64_t count;			/* number of bits, mod 2^64 */
	uint8_t buffer[MD5_BLOCK_LENGTH];	/* input buffer */
} MD5_CTX;

void	 MD5Init(MD5_CTX *);
void	 MD5Update(MD5_CTX *, const uint8_t *, size_t);
void	 MD5Final(uint8_t [MD5_DIGEST_LENGTH], MD5_CTX *);

/** md5 c++ utility wrappers */
#include <string>
extern void MD5Final(std::string& digest, MD5_CTX *);
extern std::string& MD5String(const std::string& data, std::string& digest);
extern std::string& MD5HexPrint(const std::string& digest, std::string& xdigest);
extern std::string& MD5HexScan(const std::string& xdigest, std::string& digest);
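
/* Usage sketch for the wrappers above (illustrative only):
 *
 *   std::string digest, hex;
 *   MD5String("some data", digest);   // 16-byte binary digest
 *   MD5HexPrint(digest, hex);         // 32-character hex form
 */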

#endif /* _MD5_H_ */
recoll-1.26.3/utils/cpuconf.cpp0000644000175000017500000000313013533651561013312 00000000000000/* Copyright (C) 2013 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef TEST_CPUCONF

#include "autoconfig.h"


#include "cpuconf.h"

#include <thread>

// Go c++11 !
bool getCpuConf(CpuConf& cpus)
{
#if defined(_WIN32)
    // On windows, indexing is actually twice slower with threads
    // enabled + there is a bug and the process does not exit at the
    // end of indexing. Until these are solved, pretend there is only
    // 1 cpu
    cpus.ncpus = 1;
#else
    // c++11
    cpus.ncpus = std::thread::hardware_concurrency();
#endif
    
    return true;
}

#else // TEST_CPUCONF

#include <stdlib.h>

#include <iostream>
using namespace std;

#include "cpuconf.h"

// Test driver
int main(int argc, const char **argv)
{
    CpuConf cpus;
    if (!getCpuConf(cpus)) {
	cerr << "getCpuConf failed" << endl;
	exit(1);
    }
    cout << "Cpus: " << cpus.ncpus << endl;
    exit(0);
}
#endif // TEST_CPUCONF
recoll-1.26.3/utils/wipedir.cpp0000644000175000017500000000731413533651561013330 00000000000000/* Copyright (C) 2004 J.F.Dockes
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef TEST_WIPEDIR
#include "autoconfig.h"

#include <stdio.h>
#include "safefcntl.h"
#include <errno.h>
#include "safesysstat.h"
#include "safeunistd.h"
#include <dirent.h>

#include <cstring>
#include <string>

#include "log.h"
#include "pathut.h"
#include "wipedir.h"

using namespace std;

int wipedir(const string& dir, bool selfalso, bool recurse)
{
    struct stat st;
    int statret;
    int ret = -1;

    statret = lstat(dir.c_str(), &st);
    if (statret == -1) {
	LOGERR("wipedir: cant stat "  << (dir) << ", errno "  << (errno) << "\n" );
	return -1;
    }
    if (!S_ISDIR(st.st_mode)) {
	LOGERR("wipedir: "  << (dir) << " not a directory\n" );
	return -1;
    }

    if (access(dir.c_str(), R_OK|W_OK|X_OK) < 0) {
	LOGERR("wipedir: no write access to "  << (dir) << "\n" );
	return -1;
    }

    DIR *d = opendir(dir.c_str());
    if (d == 0) {
	LOGERR("wipedir: cant opendir "  << (dir) << ", errno "  << (errno) << "\n" );
	return -1;
    }
    int remaining = 0;
    struct dirent *ent;
    while ((ent = readdir(d)) != 0) {
	if (!strcmp(ent->d_name, ".") || !strcmp(ent->d_name, "..")) 
	    continue;

	string fn = path_cat(dir, ent->d_name);

	struct stat st;
	int statret = lstat(fn.c_str(), &st);
	if (statret == -1) {
	    LOGERR("wipedir: cant stat "  << (fn) << ", errno "  << (errno) << "\n" );
	    goto out;
	}
	if (S_ISDIR(st.st_mode)) {
	    if (recurse) {
		int rr = wipedir(fn, true, true);
		if (rr == -1) 
		    goto out;
		else 
		    remaining += rr;
	    } else {
		remaining++;
	    }
	} else {
	    if (unlink(fn.c_str()) < 0) {
		LOGERR("wipedir: cant unlink "  << (fn) << ", errno "  << (errno) << "\n" );
		goto out;
	    }
	}
    }

    ret = remaining;
    if (selfalso && ret == 0) {
	if (rmdir(dir.c_str()) < 0) {
	    LOGERR("wipedir: rmdir("  << (dir) << ") failed, errno "  << (errno) << "\n" );
	    ret = -1;
	}
    }

 out:
    if (d)
	closedir(d);
    return ret;
}


#else // TEST_WIPEDIR

#include <stdio.h>
#include <stdlib.h>

#include "wipedir.h"

using namespace std;
static const char *thisprog;

static int     op_flags;
#define OPT_MOINS 0x1
#define OPT_r	  0x2 
#define OPT_s	  0x4 
static char usage [] =
"wipedir [-r -s] topdir\n"
" -r : recurse\n"
" -s : also delete topdir\n"
;
static void
Usage(void)
{
    fprintf(stderr, "%s: usage:\n%s", thisprog, usage);
    exit(1);
}
int main(int argc, const char **argv)
{
    thisprog = argv[0];
    argc--; argv++;

    while (argc > 0 && **argv == '-') {
	(*argv)++;
	if (!(**argv))
	    /* Case of a bare "-" argument (e.g. "adb - core") */
	    Usage();
	while (**argv)
	    switch (*(*argv)++) {
	    case 'r':	op_flags |= OPT_r; break;
	    case 's':	op_flags |= OPT_s; break;
	    default: Usage();	break;
	    }
    b1: argc--; argv++;
    }

    if (argc != 1)
	Usage();

    string dir = *argv++;argc--;

    bool topalso = ((op_flags&OPT_s) != 0);
    bool recurse = ((op_flags&OPT_r) != 0);
    int cnt = wipedir(dir, topalso, recurse);
    printf("wipedir returned %d\n", cnt);
    exit(0);
}

#endif

recoll-1.26.3/utils/circache.h0000644000175000017500000000775013533651561013077 00000000000000/* Copyright (C) 2009 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _circache_h_included_
#define _circache_h_included_

/**
 * A data cache implemented as a circularly managed file
 *
 * A single file is used to store objects. The file grows to a
 * specified maximum size, then is rewritten from the start,
 * overwriting older entries.
 *
 * Data objects inside the cache each have two parts: a data segment and an
 * attribute (metadata) dictionary.
 * They are named using the same identifiers that are used inside the Recoll
 * index (the UDI).
 *
 * Inside the file, the UDIs are stored inside the entry dictionary
 * under the key "udi".
 *
 * It is assumed that the dictionaries are small (they are routinely read/parsed).
 *
 */
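
/*
 * Usage sketch (illustrative, not from the original header): create a cache,
 * store one entry, then read it back. The directory and udi values are
 * arbitrary examples; ConfSimple::set() is assumed to be the usual way to
 * fill the metadata dictionary, and create() is assumed to leave the cache
 * open for writing (otherwise call open(CC_OPWRITE) first).
 *
 *   CirCache cc("/tmp/mycircache");
 *   if (cc.create(10 * 1024 * 1024, CirCache::CC_CRUNIQUE)) {
 *       ConfSimple dic;
 *       dic.set("udi", "/some/doc|subdoc1");
 *       cc.put("/some/doc|subdoc1", &dic, "document data");
 *
 *       std::string gotdic, data;
 *       if (cc.get("/some/doc|subdoc1", gotdic, &data)) {
 *           // 'data' holds "document data"
 *       }
 *   }
 */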

#include <sys/types.h>
#include <stdint.h>

#include <string>

class ConfSimple;
class CirCacheInternal;

class CirCache {
public:
    CirCache(const std::string& dir);
    virtual ~CirCache();

    virtual std::string getReason();

    enum CreateFlags {CC_CRNONE = 0,
                      // Unique entries: erase older instances when same udi
                      // is stored.
                      CC_CRUNIQUE = 1,
                      // Truncate file (restart from scratch).
                      CC_CRTRUNCATE = 2
                     };
    virtual bool create(int64_t maxsize, int flags);

    enum OpMode {CC_OPREAD, CC_OPWRITE};
    virtual bool open(OpMode mode);

    virtual std::string getpath();

    // Set data to 0 if you just want the header
    virtual bool get(const std::string& udi, std::string& dic,
                     std::string *data = 0, int instance = -1);

    // Note: the dicp MUST have an udi entry
    enum PutFlags {NoCompHint = 1};
    virtual bool put(const std::string& udi, const ConfSimple *dicp,
                     const std::string& data, unsigned int flags = 0);

    virtual bool erase(const std::string& udi, bool reallyclear = false);

    /** Walk the archive.
     *
     * Maybe we'll have separate iterators one day, but this is good
     * enough for now. No put() operations should be performed while
     * using these.
     */
    /** Back to oldest */
    virtual bool rewind(bool& eof);
    /** Get entry under cursor */
    virtual bool getCurrent(std::string& udi, std::string& dic,
                            std::string *data = 0);
    /** Get the current entry's udi only. The udi can be empty (erased entry);
     * the caller should then call again */
    virtual bool getCurrentUdi(std::string& udi);
    /** Skip to next. (false && !eof) -> error, (false&&eof)->EOF. */
    virtual bool next(bool& eof);

    /* Debug. This writes the entry headers to stdout */
    virtual bool dump();

    /* Utility: append all entries from sdir to ddir. 
     * 
     * This does not need to be a member at all, just using the namespace here.
     *
     * @param ddir destination circache (must be previously created
     *     with appropriate size)
     * @param sdir source circache
     * @return the number of entries copied, or -1 on error
     */
    static int append(const std::string ddir, const std::string& sdir,
                      std::string *reason = 0);

protected:
    CirCacheInternal *m_d;
    std::string m_dir;
private:
    CirCache(const CirCache&) {}
    CirCache& operator=(const CirCache&) {
        return *this;
    }
};

#endif /* _circache_h_included_ */
recoll-1.26.3/utils/cancelcheck.h0000644000175000017500000000427613533651561013561 00000000000000/* Copyright (C) 2005 J.F.Dockes
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the
 *   Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifndef _CANCELCHECK_H_INCLUDED_
#define _CANCELCHECK_H_INCLUDED_


/**
 * Common cancel checking mechanism
 *
 * The CancelCheck class is used as a singleton object (private constructor).
 * The single instance can be accessed as CancelCheck::instance.
 * It is used as follows, in an asynchronous program where there is an
 *  interactive (or otherwise controlling) task and a long-working one:
 *  - The control task calls setCancel(), usually as a result of user 
 *    interaction, if the worker takes too long.
 *  - The worker task calls checkCancel() at regular intervals, possibly as
 *    a side-effect of some other progress-reporting call. If cancellation has
 *    been requested, this will raise an exception, to be caught and processed
 *    wherever the worker was invoked.
 * The worker side must be exception-clean, but this otherwise avoids
 * having to set-up code to handle a special cancellation error along
 * the whole worker call stack.
 */
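
/*
 * Usage sketch (illustrative): the controlling task requests cancellation and
 * the worker polls for it, as described above. The helper names
 * moreWorkToDo() and doSomeWork() are hypothetical.
 *
 *   // Controlling task (e.g. from a "Cancel" button handler):
 *   CancelCheck::instance().setCancel();
 *
 *   // Worker task:
 *   try {
 *       while (moreWorkToDo()) {
 *           CancelCheck::instance().checkCancel(); // throws CancelExcept
 *           doSomeWork();
 *       }
 *   } catch (CancelExcept&) {
 *       // clean up, then return to the caller
 *   }
 */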
class CancelExcept {};

class CancelCheck {
 public:
    static CancelCheck& instance();
    void setCancel(bool on = true) {
	cancelRequested = on;
    }
    void checkCancel() {
	if (cancelRequested) {
	    throw CancelExcept();
	}
    }
    bool cancelState() {return cancelRequested;}
 private:
    bool cancelRequested;

    CancelCheck() : cancelRequested(false) {}
    CancelCheck& operator=(CancelCheck&);
    CancelCheck(const CancelCheck&);
};

#endif /* _CANCELCHECK_H_INCLUDED_ */
recoll-1.26.3/filters/0000755000175000017500000000000013570165410011536 500000000000000recoll-1.26.3/filters/rclsoff.py0000755000175000017500000001130413533651561013476 00000000000000#!/usr/bin/env python3
# Copyright (C) 2014 J.F.Dockes
#   This program is free software; you can redistribute it and/or modify
#   it under the terms of the GNU General Public License as published by
#   the Free Software Foundation; either version 2 of the License, or
#   (at your option) any later version.
#
#   This program is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU General Public License for more details.
#
#   You should have received a copy of the GNU General Public License
#   along with this program; if not, write to the
#   Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
######################################

from __future__ import print_function

import sys
import rclexecm
import rclxslt
from rclbasehandler import RclBaseHandler
from zipfile import ZipFile

stylesheet_meta = '''





  
  
  
  
  



 <xsl:value-of select="."/> 




  
  abstract
  
     
  
  




  
  keywords
  
     
  
  




  
  author
  
     
  
  




  
  keywords
  
     
  
  




'''

stylesheet_content  = '''





  


'''

class OOExtractor(RclBaseHandler):
    def __init__(self, em):
        super(OOExtractor, self).__init__(em)

    def html_text(self, fn):
        f = open(fn, 'rb')
        zip = ZipFile(f)
        docdata = b'\n\n'
        # Wrap metadata extraction because it can sometimes throw
        # while the main text will be valid
        try:
            metadata = zip.read("meta.xml")
            if metadata:
                res = rclxslt.apply_sheet_data(stylesheet_meta, metadata)
                docdata += res
        except:
            # To be checked. I'm under the impression that I get this when
            # nothing matches?
            #self.em.rclog("No/bad metadata in %s" % fn)
            pass
        docdata += b'\n\n'
        content = zip.read("content.xml")
        if content:
            res = rclxslt.apply_sheet_data(stylesheet_content, content)
            docdata += res
        docdata += b''
        return docdata


if __name__ == '__main__':
    proto = rclexecm.RclExecM()
    extract = OOExtractor(proto)
    rclexecm.main(proto, extract)
recoll-1.26.3/filters/rclxls.py0000755000175000017500000000505113533651561013351 00000000000000#!/usr/bin/env python3
# Extractor for Excel files.
import rclexecm
import rclexec1
import xlsxmltocsv
import re
import sys
import os
import xml.sax


class XLSProcessData:
    def __init__(self, em, ishtml = False):
        self.em = em
        self.out = []
        self.gotdata = 0
        self.xmldata = []
        self.ishtml = ishtml

    def takeLine(self, line):
        if not line:
            return
        if self.ishtml:
            self.out.append(line)
            return
        if not self.gotdata:
            self.out.append(b'''''' + \
                            b'''''' + \
                            b'''
''')
            self.gotdata = True
        self.xmldata.append(line)

    def wrapData(self):
        if not self.gotdata:
            raise Exception("xls-dump returned no data")
            return b''
        if self.ishtml:
            return b'\n'.join(self.out)
        handler =  xlsxmltocsv.XlsXmlHandler()
        xml.sax.parseString(b'\n'.join(self.xmldata), handler)
        self.out.append(self.em.htmlescape(b'\n'.join(handler.output)))
        return b'\n'.join(self.out) + b'
' class XLSFilter: def __init__(self, em): self.em = em self.ntry = 0 def reset(self): self.ntry = 0 pass def getCmd(self, fn): if self.ntry: return ([], None) self.ntry = 1 # Some HTML files masquerade as XLS try: data = open(fn, 'rb').read(512) if data.find(b'html') != -1 or data.find(b'HTML') != -1: return ("cat", XLSProcessData(self.em, True)) except Exception as err: self.em.rclog("Error reading %s:%s" % (fn, str(err))) pass cmd = rclexecm.which("xls-dump.py") if cmd: # xls-dump.py often exits 1 with valid data. Ignore exit value # We later treat an empty output as an error return ([sys.executable, cmd, "--dump-mode=canonical-xml", \ "--utf-8", "--catch"], XLSProcessData(self.em), rclexec1.Executor.opt_ignxval) else: return ([], None) if __name__ == '__main__': if not rclexecm.which("xls-dump.py"): print("RECFILTERROR HELPERNOTFOUND ppt-dump.py") sys.exit(1) proto = rclexecm.RclExecM() filter = XLSFilter(proto) extract = rclexec1.Executor(proto, filter) rclexecm.main(proto, extract) recoll-1.26.3/filters/rclaptosidman0000755000175000017500000000466313303776060014260 00000000000000#!/bin/sh # @(#$Id: rclaptosidman,v 1.1 2010-12-11 12:40:05 dockes Exp $ (C) 2004 J.F.Dockes # Parts taken from Estraier: #================================================================ # Estraier: a personal full-text search system # Copyright (C) 2003-2004 Mikio Hirabayashi #================================================================ #================================================================ # rclaptosidman # Strip the menu part from aptosid manual pages to improve search precision #================================================================ # set variables LANG=C ; export LANG LC_ALL=C ; export LC_ALL progname="rclaptosidman" filetype="aptosid manual htm" #RECFILTCOMMONCODE ############################################################################## # !! Leave the previous line unmodified!! Code imported from the # recfiltcommon file # Utility code common to all shell filters. This could be sourced at run # time, but it's slightly more efficient to include the code in the # filters at build time (with a sed script). # Describe error in a way that can be interpreted by our caller senderror() { echo RECFILTERROR $* # Also alert on stderr just in case echo ":2:$progname::: $*" 1>&2 exit 1 } iscmd() { cmd=$1 case $cmd in */*) if test -x $cmd -a ! -d $cmd ; then return 0; else return 1; fi ;; *) oldifs=$IFS; IFS=":"; set -- $PATH; IFS=$oldifs for d in $*;do test -x $d/$cmd -a ! -d $d/$cmd && return 0;done return 1 ;; esac } checkcmds() { for cmd in $*;do if iscmd $cmd then a=1 else senderror HELPERNOTFOUND $cmd fi done } # show help message if test $# -ne 1 -o "$1" = "--help" then echo "Convert a $filetype file to HTML text for Recoll indexing." echo "Usage: $progname [infile]" exit 1 fi infile="$1" # check the input file existence (may be '-' for stdin) if test "X$infile" != X- -a ! -f "$infile" then senderror INPUTNOSUCHFILE "$infile" fi # protect access to our temp files and directories umask 77 ############################################################################## # !! Leave the following line unmodified ! #ENDRECFILTCOMMONCODE checkcmds sed # Delete everything from